Compare commits

fix/chunki...fix/chunki (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 64b92a04a7 | |
@@ -96,6 +96,7 @@ def get_model_token_limit(model_name: str) -> int:
+    logger.warning(f"Unknown model '{model_name}', using default 512 token limit")
     return 512


 # Set up logger with proper level
 logger = logging.getLogger(__name__)
 LOG_LEVEL = os.getenv("LEANN_LOG_LEVEL", "WARNING").upper()
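For context, a minimal sketch of the fallback path this hunk touches, assuming a module-level lookup table. The `MODEL_TOKEN_LIMITS` name and its entry are hypothetical placeholders; only the warning line and the `return 512` default come from the diff itself.

```python
import logging

logger = logging.getLogger(__name__)

# Hypothetical lookup table; the real registry of per-model limits is not part of this diff.
MODEL_TOKEN_LIMITS: dict[str, int] = {
    "example-embedding-model": 2048,  # placeholder entry
}


def get_model_token_limit(model_name: str) -> int:
    """Return the token limit for a known model, else warn and fall back to 512."""
    if model_name in MODEL_TOKEN_LIMITS:
        return MODEL_TOKEN_LIMITS[model_name]
    # Added by this commit: make the default fallback visible in the logs.
    logger.warning(f"Unknown model '{model_name}', using default 512 token limit")
    return 512
```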
@@ -866,7 +867,9 @@ def compute_embeddings_ollama(
         if retry_count >= max_retries:
             # Enhanced error detection for token limit violations
             error_msg = str(e).lower()
-            if "token" in error_msg and ("limit" in error_msg or "exceed" in error_msg or "length" in error_msg):
+            if "token" in error_msg and (
+                "limit" in error_msg or "exceed" in error_msg or "length" in error_msg
+            ):
                 logger.error(
                     f"Token limit exceeded for batch. Error: {e}. "
                     f"Consider reducing chunk sizes or check token truncation."
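This second hunk only reflows a long condition. As a rough illustration of where it sits, the sketch below wraps the same token-limit detection in a retry loop; the `embed_batch` callable, the `max_retries` parameter, the loop structure, and the final re-raise are assumptions for illustration, while the error detection and log message are taken from the diff.

```python
import logging
from typing import Callable

logger = logging.getLogger(__name__)


def compute_embeddings_ollama(
    texts: list[str],
    embed_batch: Callable[[list[str]], list],
    max_retries: int = 3,
):
    """Sketch of a retry wrapper around a batch embedding call (structure assumed)."""
    retry_count = 0
    while True:
        try:
            return embed_batch(texts)
        except Exception as e:
            retry_count += 1
            if retry_count >= max_retries:
                # Enhanced error detection for token limit violations
                error_msg = str(e).lower()
                if "token" in error_msg and (
                    "limit" in error_msg or "exceed" in error_msg or "length" in error_msg
                ):
                    logger.error(
                        f"Token limit exceeded for batch. Error: {e}. "
                        f"Consider reducing chunk sizes or check token truncation."
                    )
                raise
```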