cache
¶
Full name: tenets.core.prompt.cache
cache¶
Caching system for prompt parsing results.
Provides intelligent caching for parsed prompts, external content fetches, and entity recognition results with proper invalidation strategies.
Classes¶
CacheEntry (dataclass)¶
CacheEntry(key: str, value: Any, created_at: datetime, accessed_at: datetime, ttl_seconds: int, hit_count: int = 0, metadata: Dict[str, Any] = None)
PromptCache¶
PromptCache(cache_manager: Optional[Any] = None, enable_memory_cache: bool = True, enable_disk_cache: bool = True, memory_cache_size: int = 100)
Intelligent caching for prompt parsing operations.
Initialize prompt cache.
PARAMETER | DESCRIPTION |
---|---|
cache_manager | External cache manager to use |
enable_memory_cache | Whether to use in-memory caching TYPE: bool |
enable_disk_cache | Whether to use disk caching TYPE: bool |
memory_cache_size | Maximum items in memory cache TYPE: int |
Source code in tenets/core/prompt/cache.py
def __init__(
    self,
    cache_manager: Optional[Any] = None,
    enable_memory_cache: bool = True,
    enable_disk_cache: bool = True,
    memory_cache_size: int = 100,
):
    """Initialize the prompt cache.

    Args:
        cache_manager: External cache manager used for disk persistence.
        enable_memory_cache: Whether to keep an in-memory cache tier.
        enable_disk_cache: Whether to persist entries via the cache manager.
        memory_cache_size: Maximum number of entries held in memory.
    """
    self.logger = get_logger(__name__)
    # Keep the external manager only when one was supplied AND the
    # CacheManager dependency is available.
    self.cache_manager = cache_manager if (cache_manager and CacheManager) else None
    self.enable_memory = enable_memory_cache
    # Disk caching is only possible when a usable manager is present.
    self.enable_disk = enable_disk_cache and self.cache_manager is not None

    # In-memory store keyed by cache key.
    self.memory_cache: Dict[str, CacheEntry] = {}
    self.memory_cache_size = memory_cache_size

    # Hit/miss/eviction/expiration counters surfaced by get_stats().
    self.stats = dict.fromkeys(("hits", "misses", "evictions", "expirations"), 0)
Functions¶
get¶
Get a value from cache.
PARAMETER | DESCRIPTION |
---|---|
key | Cache key TYPE: str |
check_disk | Whether to check disk cache if not in memory TYPE: bool |
RETURNS | DESCRIPTION |
---|---|
Optional[Any] | Cached value or None if not found/expired |
Source code in tenets/core/prompt/cache.py
def get(self, key: str, check_disk: bool = True) -> Optional[Any]:
    """Look up a cached value.

    Args:
        key: Cache key.
        check_disk: Whether to fall back to the disk cache on a memory miss.

    Returns:
        The cached value, or None when absent or expired.
    """
    # Memory tier first: cheapest lookup.
    if self.enable_memory:
        entry = self.memory_cache.get(key)
        if entry is not None:
            if not entry.is_expired():
                entry.touch()  # refresh access time
                self.stats["hits"] += 1
                self.logger.debug(f"Cache hit for key: {key} (memory)")
                return entry.value
            # Expired: drop it and fall through to the disk tier.
            del self.memory_cache[key]
            self.stats["expirations"] += 1
            self.logger.debug(f"Cache expired for key: {key}")

    # Disk tier, when enabled and requested.
    if check_disk and self.enable_disk and self.cache_manager:
        disk_value = self.cache_manager.general.get(key)
        if disk_value is not None:
            self.stats["hits"] += 1
            self.logger.debug(f"Cache hit for key: {key} (disk)")
            if self.enable_memory:
                # Promote to the faster memory tier with the default TTL.
                self._add_to_memory(
                    key, disk_value, self.DEFAULT_TTLS.get("parsed_prompt", 3600)
                )
            return disk_value

    self.stats["misses"] += 1
    self.logger.debug(f"Cache miss for key: {key}")
    return None
put¶
put(key: str, value: Any, ttl_seconds: Optional[int] = None, metadata: Optional[Dict[str, Any]] = None, write_disk: bool = True) -> None
Put a value in cache.
PARAMETER | DESCRIPTION |
---|---|
key | Cache key TYPE: |
value | Value to cache TYPE: |
ttl_seconds | TTL in seconds (uses default if not specified) |
metadata | Additional metadata for TTL calculation |
write_disk | Whether to write to disk cache TYPE: |
Source code in tenets/core/prompt/cache.py
def put(
    self,
    key: str,
    value: Any,
    ttl_seconds: Optional[int] = None,
    metadata: Optional[Dict[str, Any]] = None,
    write_disk: bool = True,
) -> None:
    """Store a value in the cache.

    Args:
        key: Cache key.
        value: Value to cache.
        ttl_seconds: TTL in seconds; falls back to the default when None.
        metadata: Extra metadata stored alongside the value.
        write_disk: Whether to also persist to the disk cache.
    """
    # Fall back to the configured default TTL when none was given.
    effective_ttl = (
        ttl_seconds
        if ttl_seconds is not None
        else self.DEFAULT_TTLS.get("parsed_prompt", 3600)
    )

    if self.enable_memory:
        self._add_to_memory(key, value, effective_ttl, metadata)

    if write_disk and self.enable_disk and self.cache_manager:
        self.cache_manager.general.put(key, value, ttl=effective_ttl, metadata=metadata)
        self.logger.debug(f"Cached to disk: {key} (TTL: {effective_ttl}s)")
cache_parsed_prompt¶
cache_parsed_prompt(prompt: str, result: Any, metadata: Optional[Dict[str, Any]] = None) -> None
Cache a parsed prompt result.
PARAMETER | DESCRIPTION |
---|---|
prompt | Original prompt text TYPE: |
result | Parsing result TYPE: |
metadata | Additional metadata |
Source code in tenets/core/prompt/cache.py
def cache_parsed_prompt(
    self, prompt: str, result: Any, metadata: Optional[Dict[str, Any]] = None
) -> None:
    """Cache a parsed prompt result.

    Args:
        prompt: Original prompt text.
        result: Parsing result to store.
        metadata: Additional metadata used for TTL calculation.
    """
    cache_key = self._generate_key("prompt", prompt)
    base_ttl = self.DEFAULT_TTLS["parsed_prompt"]
    ttl = self._calculate_ttl(base_ttl, "parsed_prompt", metadata)
    self.put(cache_key, result, ttl, metadata)
get_parsed_prompt¶
cache_external_content¶
cache_external_content(url: str, content: Any, metadata: Optional[Dict[str, Any]] = None) -> None
Cache external content fetch result.
PARAMETER | DESCRIPTION |
---|---|
url | URL that was fetched TYPE: |
content | Fetched content TYPE: |
metadata | Additional metadata (source, state, etc.) |
Source code in tenets/core/prompt/cache.py
def cache_external_content(
    self, url: str, content: Any, metadata: Optional[Dict[str, Any]] = None
) -> None:
    """Cache external content fetch result.

    Args:
        url: URL that was fetched.
        content: Fetched content.
        metadata: Additional metadata (source, state, etc.).
    """
    key = self._generate_key("external", url)
    # Copy before annotating so the caller's dict is never mutated
    # (previously "url" was written into the caller-supplied metadata).
    metadata = dict(metadata) if metadata else {}
    metadata["url"] = url
    ttl = self._calculate_ttl(
        self.DEFAULT_TTLS["external_content"], "external_content", metadata
    )
    self.put(key, content, ttl, metadata)
get_external_content¶
cache_entities¶
Cache entity recognition results.
PARAMETER | DESCRIPTION |
---|---|
text | Text that was analyzed TYPE: |
entities | Recognized entities |
confidence | Average confidence score TYPE: |
Source code in tenets/core/prompt/cache.py
def cache_entities(self, text: str, entities: List[Any], confidence: float = 0.0) -> None:
    """Cache entity recognition results.

    Args:
        text: Text that was analyzed.
        entities: Recognized entities to store.
        confidence: Average confidence score across the entities.
    """
    cache_key = self._generate_key("entities", text)
    meta = {"confidence": confidence, "count": len(entities)}
    base_ttl = self.DEFAULT_TTLS["entity_recognition"]
    ttl = self._calculate_ttl(base_ttl, "entity_recognition", meta)
    self.put(cache_key, entities, ttl, meta)
get_entities¶
cache_intent¶
Cache intent detection result.
PARAMETER | DESCRIPTION |
---|---|
text | Text that was analyzed TYPE: |
intent | Detected intent TYPE: |
confidence | Confidence score TYPE: |
Source code in tenets/core/prompt/cache.py
def cache_intent(self, text: str, intent: Any, confidence: float = 0.0) -> None:
    """Cache intent detection result.

    Args:
        text: Text that was analyzed.
        intent: Detected intent to store.
        confidence: Confidence score of the detection.
    """
    cache_key = self._generate_key("intent", text)
    meta = {"confidence": confidence}
    base_ttl = self.DEFAULT_TTLS["intent_detection"]
    ttl = self._calculate_ttl(base_ttl, "intent_detection", meta)
    self.put(cache_key, intent, ttl, meta)
get_intent¶
invalidate¶
Invalidate cache entries matching a pattern.
PARAMETER | DESCRIPTION |
---|---|
pattern | Key pattern to match (prefix) TYPE: |
RETURNS | DESCRIPTION |
---|---|
int | Number of entries invalidated |
Source code in tenets/core/prompt/cache.py
def invalidate(self, pattern: str) -> int:
    """Invalidate cache entries whose key starts with a pattern.

    Args:
        pattern: Key pattern to match (prefix).

    Returns:
        Number of entries invalidated (memory tier only).
    """
    removed = 0

    # Memory tier: collect first, then delete, to avoid mutating while iterating.
    if self.enable_memory:
        matching = [key for key in self.memory_cache if key.startswith(pattern)]
        for key in matching:
            del self.memory_cache[key]
        removed = len(matching)
        if removed > 0:
            self.logger.info(f"Invalidated {removed} memory cache entries matching: {pattern}")

    # Disk tier: not implemented.
    if self.enable_disk and self.cache_manager:
        # Note: This assumes the cache manager supports pattern-based deletion
        # If not, we'd need to iterate through all keys
        pass

    return removed
clear_all¶
Clear all cache entries.
Source code in tenets/core/prompt/cache.py
def clear_all(self) -> None:
    """Clear all cache entries and reset statistics."""
    if self.enable_memory:
        self.memory_cache.clear()
        self.logger.info("Cleared memory cache")

    if self.enable_disk and self.cache_manager:
        self.cache_manager.general.clear()
        self.logger.info("Cleared disk cache")

    # Start the counters from scratch as well.
    self.stats = dict.fromkeys(("hits", "misses", "evictions", "expirations"), 0)
cleanup_expired¶
Remove expired entries from cache.
RETURNS | DESCRIPTION |
---|---|
int | Number of entries removed |
Source code in tenets/core/prompt/cache.py
def cleanup_expired(self) -> int:
    """Remove expired entries from the memory cache.

    Returns:
        Number of entries removed.
    """
    removed = 0
    if self.enable_memory:
        # Collect keys first to avoid mutating the dict while iterating it.
        stale = [key for key, entry in self.memory_cache.items() if entry.is_expired()]
        for key in stale:
            del self.memory_cache[key]
        removed = len(stale)
        if removed > 0:
            self.logger.info(f"Cleaned up {removed} expired cache entries")
    return removed
get_stats¶
Get cache statistics.
RETURNS | DESCRIPTION |
---|---|
Dict[str, Any] | Cache statistics dictionary |
Source code in tenets/core/prompt/cache.py
def get_stats(self) -> Dict[str, Any]:
    """Get cache statistics.

    Returns:
        Dictionary with hit/miss counters, hit rate, and memory usage.
    """
    hits, misses = self.stats["hits"], self.stats["misses"]
    total = hits + misses
    # Avoid division by zero when nothing has been requested yet.
    rate = hits / total if total > 0 else 0

    if self.enable_memory:
        entry_count = len(self.memory_cache)
        # Rough size estimate: length of each value's string representation.
        approx_size = sum(len(str(entry.value)) for entry in self.memory_cache.values())
    else:
        entry_count = 0
        approx_size = 0

    return {
        "hits": hits,
        "misses": misses,
        "hit_rate": rate,
        "evictions": self.stats["evictions"],
        "expirations": self.stats["expirations"],
        "memory_entries": entry_count,
        "memory_size": approx_size,
    }
warm_cache¶
Pre-warm cache with common prompts.
PARAMETER | DESCRIPTION |
---|---|
common_prompts | List of common prompts to pre-cache |