parser¶
Full name: tenets.core.prompt.parser
Prompt parsing and understanding system with modular components.
This module analyzes user prompts to extract intent, keywords, entities, temporal context, and external references using a comprehensive set of specialized components and NLP techniques.
Classes¶
PromptParser¶
PromptParser(config: TenetsConfig, cache_manager: Optional[Any] = None, use_cache: bool = True, use_ml: bool = None, use_nlp_ner: bool = None, use_fuzzy_matching: bool = True)
Comprehensive prompt parser with modular components and caching.
Source code in tenets/core/prompt/parser.py
def __init__(
    self,
    config: TenetsConfig,
    cache_manager: Optional[Any] = None,
    use_cache: bool = True,
    use_ml: bool = None,
    use_nlp_ner: bool = None,
    use_fuzzy_matching: bool = True,
):
    self.config = config
    self.logger = get_logger(__name__)
    if use_ml is None:
        use_ml = config.nlp.embeddings_enabled
    if use_nlp_ner is None:
        use_nlp_ner = config.nlp.enabled
    self.cache = None
    if use_cache:
        self.cache = PromptCache(
            cache_manager=cache_manager,
            enable_memory_cache=True,
            enable_disk_cache=cache_manager is not None,
            memory_cache_size=100,
        )
    self._init_components(
        cache_manager=cache_manager,
        use_ml=use_ml,
        use_nlp_ner=use_nlp_ner,
        use_fuzzy_matching=use_fuzzy_matching,
    )
    self._init_patterns()
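A minimal construction sketch based on the signature above; the `tenets.config` import path for `TenetsConfig` is an assumption, and the defaults noted in the comments follow the source shown here.

```python
from tenets.config import TenetsConfig  # import path assumed
from tenets.core.prompt.parser import PromptParser

config = TenetsConfig()

# With use_ml and use_nlp_ner left as None, they fall back to
# config.nlp.embeddings_enabled and config.nlp.enabled respectively.
parser = PromptParser(config)

# Without a cache_manager the disk cache stays disabled, but the
# in-memory cache (100 entries) is still created when use_cache=True.
local_parser = PromptParser(config, cache_manager=None, use_cache=True)
```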
Functions¶
get_cache_stats¶
Get cache statistics.
| RETURNS | DESCRIPTION |
| --- | --- |
| Optional[Dict[str, Any]] | Dictionary with cache statistics or None if cache is disabled |
Example
>>> stats = parser.get_cache_stats()
>>> if stats:
...     print(f"Cache hit rate: {stats['hit_rate']:.2%}")
Source code in tenets/core/prompt/parser.py
def get_cache_stats(self) -> Optional[Dict[str, Any]]:
    """Get cache statistics.

    Returns:
        Dictionary with cache statistics or None if cache is disabled

    Example:
        >>> stats = parser.get_cache_stats()
        >>> if stats:
        ...     print(f"Cache hit rate: {stats['hit_rate']:.2%}")
    """
    if self.cache:
        return self.cache.get_stats()
    return None
clear_cache¶
Clear all cached data.
This removes all cached parsing results, external content, entities, and intents from both memory and disk cache.
Example
>>> parser.clear_cache()
>>> print("Cache cleared")
Source code in tenets/core/prompt/parser.py
def clear_cache(self) -> None:
    """Clear all cached data.

    This removes all cached parsing results, external content,
    entities, and intents from both memory and disk cache.

    Example:
        >>> parser.clear_cache()
        >>> print("Cache cleared")
    """
    if self.cache:
        self.cache.clear_all()
        self.logger.info("Cleared prompt parser cache")
warm_cache¶
Pre-warm cache with common prompts.
This method pre-parses a list of common prompts to populate the cache, improving performance for frequently used queries.
| PARAMETER | DESCRIPTION |
| --- | --- |
| common_prompts | List of common prompts to pre-parse |
Example
>>> common = [
...     "implement authentication",
...     "fix bug",
...     "understand architecture"
... ]
>>> parser.warm_cache(common)
Source code in tenets/core/prompt/parser.py
def warm_cache(self, common_prompts: List[str]) -> None:
    """Pre-warm cache with common prompts.

    This method pre-parses a list of common prompts to populate
    the cache, improving performance for frequently used queries.

    Args:
        common_prompts: List of common prompts to pre-parse

    Example:
        >>> common = [
        ...     "implement authentication",
        ...     "fix bug",
        ...     "understand architecture"
        ... ]
        >>> parser.warm_cache(common)
    """
    if not self.cache:
        return

    self.logger.info(f"Pre-warming cache with {len(common_prompts)} prompts")
    for prompt in common_prompts:
        # Parse without using cache to generate fresh results
        # Use positional args to match tests that assert on call args
        _ = self._parse_internal(
            prompt,
            False,  # fetch_external
            0.5,  # min_entity_confidence
            0.3,  # min_intent_confidence
        )
    self.logger.info("Cache pre-warming complete")
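The cache helpers above compose into a simple workflow; a sketch assuming a `parser` instance constructed as in the earlier example:

```python
# Pre-parse frequent prompts so later identical queries hit the cache.
parser.warm_cache([
    "implement authentication",
    "fix bug",
    "understand architecture",
])

# Inspect effectiveness; get_cache_stats() returns None when caching is disabled.
stats = parser.get_cache_stats()
if stats:
    print(f"Cache hit rate: {stats['hit_rate']:.2%}")

# Drop all cached parses, external content, entities, and intents.
parser.clear_cache()
```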