Skip to content

cache

Full name: tenets.core.prompt.cache

cache

Caching system for prompt parsing results.

Provides intelligent caching for parsed prompts, external content fetches, and entity recognition results with proper invalidation strategies.

Classes

CacheEntry (dataclass)

Python
CacheEntry(key: str, value: Any, created_at: datetime, accessed_at: datetime, ttl_seconds: int, hit_count: int = 0, metadata: Dict[str, Any] = None)

A cache entry with metadata.

Functions
is_expired
Python
is_expired() -> bool

Check if this entry has expired.

Source code in tenets/core/prompt/cache.py
Python
def is_expired(self) -> bool:
    """Return True when this entry's age exceeds its TTL.

    A non-positive ``ttl_seconds`` means the entry never expires.
    """
    # Non-positive TTL disables expiration entirely.
    if self.ttl_seconds <= 0:
        return False
    elapsed = (datetime.now() - self.created_at).total_seconds()
    return elapsed > self.ttl_seconds
touch
Python
touch()

Update access time and increment hit count.

Source code in tenets/core/prompt/cache.py
Python
def touch(self):
    """Record an access: bump the hit counter and refresh the access timestamp."""
    self.hit_count += 1
    self.accessed_at = datetime.now()

PromptCache

Python
PromptCache(cache_manager: Optional[Any] = None, enable_memory_cache: bool = True, enable_disk_cache: bool = True, memory_cache_size: int = 100)

Intelligent caching for prompt parsing operations.

Initialize prompt cache.

PARAMETER — DESCRIPTION
cache_manager

External cache manager to use

TYPE: Optional[Any] — DEFAULT: None

enable_memory_cache

Whether to use in-memory caching

TYPE:boolDEFAULT:True

enable_disk_cache

Whether to use disk caching

TYPE:boolDEFAULT:True

memory_cache_size

Maximum items in memory cache

TYPE:intDEFAULT:100

Source code in tenets/core/prompt/cache.py
Python
def __init__(
    self,
    cache_manager: Optional[Any] = None,
    enable_memory_cache: bool = True,
    enable_disk_cache: bool = True,
    memory_cache_size: int = 100,
):
    """Initialize prompt cache.

    Args:
        cache_manager: External cache manager to use
        enable_memory_cache: Whether to use in-memory caching
        enable_disk_cache: Whether to use disk caching
        memory_cache_size: Maximum items in memory cache
    """
    self.logger = get_logger(__name__)
    # Honor the supplied manager only when the CacheManager class is
    # available (presumably an optional import — TODO confirm).
    self.cache_manager = cache_manager if cache_manager and CacheManager else None
    self.enable_memory = enable_memory_cache
    # Disk caching is only usable when a manager is actually present.
    self.enable_disk = enable_disk_cache and self.cache_manager is not None

    # In-memory store of CacheEntry objects, keyed by cache key.
    self.memory_cache: Dict[str, CacheEntry] = {}
    self.memory_cache_size = memory_cache_size

    # Counters reported by get_stats().
    self.stats = dict.fromkeys(("hits", "misses", "evictions", "expirations"), 0)
Functions
get
Python
get(key: str, check_disk: bool = True) -> Optional[Any]

Get a value from cache.

PARAMETERDESCRIPTION
key

Cache key

TYPE:str

check_disk

Whether to check disk cache if not in memory

TYPE:boolDEFAULT:True

RETURNSDESCRIPTION
Optional[Any]

Cached value or None if not found/expired

Source code in tenets/core/prompt/cache.py
Python
def get(self, key: str, check_disk: bool = True) -> Optional[Any]:
    """Get a value from cache.

    Args:
        key: Cache key
        check_disk: Whether to check disk cache if not in memory

    Returns:
        Cached value or None if not found/expired
    """
    # Memory tier first: cheapest lookup.
    if self.enable_memory:
        entry = self.memory_cache.get(key)
        if entry is not None:
            if not entry.is_expired():
                entry.touch()  # refresh access time and hit count
                self.stats["hits"] += 1
                self.logger.debug(f"Cache hit for key: {key} (memory)")
                return entry.value
            # Expired entries are evicted lazily, on access.
            del self.memory_cache[key]
            self.stats["expirations"] += 1
            self.logger.debug(f"Cache expired for key: {key}")

    # Disk tier second, when available and requested.
    if check_disk and self.enable_disk and self.cache_manager:
        disk_value = self.cache_manager.general.get(key)
        if disk_value is not None:
            self.stats["hits"] += 1
            self.logger.debug(f"Cache hit for key: {key} (disk)")
            # Promote disk hits into memory for faster re-access.
            if self.enable_memory:
                self._add_to_memory(
                    key, disk_value, self.DEFAULT_TTLS.get("parsed_prompt", 3600)
                )
            return disk_value

    self.stats["misses"] += 1
    self.logger.debug(f"Cache miss for key: {key}")
    return None
put
Python
put(key: str, value: Any, ttl_seconds: Optional[int] = None, metadata: Optional[Dict[str, Any]] = None, write_disk: bool = True) -> None

Put a value in cache.

PARAMETERDESCRIPTION
key

Cache key

TYPE:str

value

Value to cache

TYPE:Any

ttl_seconds

TTL in seconds (uses default if not specified)

TYPE:Optional[int]DEFAULT:None

metadata

Additional metadata for TTL calculation

TYPE:Optional[Dict[str, Any]]DEFAULT:None

write_disk

Whether to write to disk cache

TYPE:boolDEFAULT:True

Source code in tenets/core/prompt/cache.py
Python
def put(
    self,
    key: str,
    value: Any,
    ttl_seconds: Optional[int] = None,
    metadata: Optional[Dict[str, Any]] = None,
    write_disk: bool = True,
) -> None:
    """Put a value in cache.

    Args:
        key: Cache key
        value: Value to cache
        ttl_seconds: TTL in seconds (uses default if not specified)
        metadata: Additional metadata for TTL calculation
        write_disk: Whether to write to disk cache
    """
    # Fall back to the default prompt TTL when none was supplied.
    ttl = self.DEFAULT_TTLS.get("parsed_prompt", 3600) if ttl_seconds is None else ttl_seconds

    # Memory tier.
    if self.enable_memory:
        self._add_to_memory(key, value, ttl, metadata)

    # Disk tier, when available and requested.
    if write_disk and self.enable_disk and self.cache_manager:
        self.cache_manager.general.put(key, value, ttl=ttl, metadata=metadata)
        self.logger.debug(f"Cached to disk: {key} (TTL: {ttl}s)")
cache_parsed_prompt
Python
cache_parsed_prompt(prompt: str, result: Any, metadata: Optional[Dict[str, Any]] = None) -> None

Cache a parsed prompt result.

PARAMETERDESCRIPTION
prompt

Original prompt text

TYPE:str

result

Parsing result

TYPE:Any

metadata

Additional metadata

TYPE:Optional[Dict[str, Any]]DEFAULT:None

Source code in tenets/core/prompt/cache.py
Python
def cache_parsed_prompt(
    self, prompt: str, result: Any, metadata: Optional[Dict[str, Any]] = None
) -> None:
    """Cache a parsed prompt result.

    Args:
        prompt: Original prompt text
        result: Parsing result
        metadata: Additional metadata
    """
    cache_key = self._generate_key("prompt", prompt)
    effective_ttl = self._calculate_ttl(
        self.DEFAULT_TTLS["parsed_prompt"], "parsed_prompt", metadata
    )
    self.put(cache_key, result, effective_ttl, metadata)
get_parsed_prompt
Python
get_parsed_prompt(prompt: str) -> Optional[Any]

Get cached parsed prompt result.

PARAMETERDESCRIPTION
prompt

Original prompt text

TYPE:str

RETURNSDESCRIPTION
Optional[Any]

Cached result or None

Source code in tenets/core/prompt/cache.py
Python
def get_parsed_prompt(self, prompt: str) -> Optional[Any]:
    """Get cached parsed prompt result.

    Args:
        prompt: Original prompt text

    Returns:
        Cached result or None
    """
    return self.get(self._generate_key("prompt", prompt))
cache_external_content
Python
cache_external_content(url: str, content: Any, metadata: Optional[Dict[str, Any]] = None) -> None

Cache external content fetch result.

PARAMETERDESCRIPTION
url

URL that was fetched

TYPE:str

content

Fetched content

TYPE:Any

metadata

Additional metadata (source, state, etc.)

TYPE:Optional[Dict[str, Any]]DEFAULT:None

Source code in tenets/core/prompt/cache.py
Python
def cache_external_content(
    self, url: str, content: Any, metadata: Optional[Dict[str, Any]] = None
) -> None:
    """Cache external content fetch result.

    Args:
        url: URL that was fetched
        content: Fetched content
        metadata: Additional metadata (source, state, etc.)
    """
    key = self._generate_key("external", url)

    # Copy before annotating with the URL so the caller's dict is never
    # mutated (previously the "url" key leaked back into the argument).
    metadata = dict(metadata) if metadata else {}
    metadata["url"] = url

    ttl = self._calculate_ttl(
        self.DEFAULT_TTLS["external_content"], "external_content", metadata
    )
    self.put(key, content, ttl, metadata)
get_external_content
Python
get_external_content(url: str) -> Optional[Any]

Get cached external content.

PARAMETERDESCRIPTION
url

URL to check

TYPE:str

RETURNSDESCRIPTION
Optional[Any]

Cached content or None

Source code in tenets/core/prompt/cache.py
Python
def get_external_content(self, url: str) -> Optional[Any]:
    """Get cached external content.

    Args:
        url: URL to check

    Returns:
        Cached content or None
    """
    return self.get(self._generate_key("external", url))
cache_entities
Python
cache_entities(text: str, entities: List[Any], confidence: float = 0.0) -> None

Cache entity recognition results.

PARAMETERDESCRIPTION
text

Text that was analyzed

TYPE:str

entities

Recognized entities

TYPE:List[Any]

confidence

Average confidence score

TYPE:floatDEFAULT:0.0

Source code in tenets/core/prompt/cache.py
Python
def cache_entities(self, text: str, entities: List[Any], confidence: float = 0.0) -> None:
    """Cache entity recognition results.

    Args:
        text: Text that was analyzed
        entities: Recognized entities
        confidence: Average confidence score
    """
    # Confidence and count feed into the TTL calculation.
    meta = {"confidence": confidence, "count": len(entities)}
    cache_key = self._generate_key("entities", text)
    ttl = self._calculate_ttl(
        self.DEFAULT_TTLS["entity_recognition"], "entity_recognition", meta
    )
    self.put(cache_key, entities, ttl, meta)
get_entities
Python
get_entities(text: str) -> Optional[List[Any]]

Get cached entity recognition results.

PARAMETERDESCRIPTION
text

Text to check

TYPE:str

RETURNSDESCRIPTION
Optional[List[Any]]

Cached entities or None

Source code in tenets/core/prompt/cache.py
Python
def get_entities(self, text: str) -> Optional[List[Any]]:
    """Get cached entity recognition results.

    Args:
        text: Text to check

    Returns:
        Cached entities or None
    """
    return self.get(self._generate_key("entities", text))
cache_intent
Python
cache_intent(text: str, intent: Any, confidence: float = 0.0) -> None

Cache intent detection result.

PARAMETERDESCRIPTION
text

Text that was analyzed

TYPE:str

intent

Detected intent

TYPE:Any

confidence

Confidence score

TYPE:floatDEFAULT:0.0

Source code in tenets/core/prompt/cache.py
Python
def cache_intent(self, text: str, intent: Any, confidence: float = 0.0) -> None:
    """Cache intent detection result.

    Args:
        text: Text that was analyzed
        intent: Detected intent
        confidence: Confidence score
    """
    # Confidence feeds into the TTL calculation.
    meta = {"confidence": confidence}
    cache_key = self._generate_key("intent", text)
    ttl = self._calculate_ttl(
        self.DEFAULT_TTLS["intent_detection"], "intent_detection", meta
    )
    self.put(cache_key, intent, ttl, meta)
get_intent
Python
get_intent(text: str) -> Optional[Any]

Get cached intent detection result.

PARAMETERDESCRIPTION
text

Text to check

TYPE:str

RETURNSDESCRIPTION
Optional[Any]

Cached intent or None

Source code in tenets/core/prompt/cache.py
Python
def get_intent(self, text: str) -> Optional[Any]:
    """Get cached intent detection result.

    Args:
        text: Text to check

    Returns:
        Cached intent or None
    """
    return self.get(self._generate_key("intent", text))
invalidate
Python
invalidate(pattern: str) -> int

Invalidate cache entries matching a pattern.

PARAMETERDESCRIPTION
pattern

Key pattern to match (prefix)

TYPE:str

RETURNSDESCRIPTION
int

Number of entries invalidated

Source code in tenets/core/prompt/cache.py
Python
def invalidate(self, pattern: str) -> int:
    """Invalidate cache entries matching a pattern.

    Args:
        pattern: Key pattern to match (prefix)

    Returns:
        Number of entries invalidated
    """
    removed = 0

    # Memory tier: snapshot matching keys first, then delete.
    if self.enable_memory:
        for stale_key in [k for k in self.memory_cache if k.startswith(pattern)]:
            del self.memory_cache[stale_key]
            removed += 1

        if removed:
            self.logger.info(f"Invalidated {removed} memory cache entries matching: {pattern}")

    # Disk tier: intentionally a no-op for now.
    if self.enable_disk and self.cache_manager:
        # Note: This assumes the cache manager supports pattern-based deletion
        # If not, we'd need to iterate through all keys
        pass

    return removed
clear_all
Python
clear_all() -> None

Clear all cache entries.

Source code in tenets/core/prompt/cache.py
Python
def clear_all(self) -> None:
    """Clear all cache entries."""
    # Memory tier.
    if self.enable_memory:
        self.memory_cache.clear()
        self.logger.info("Cleared memory cache")

    # Disk tier.
    if self.enable_disk and self.cache_manager:
        self.cache_manager.general.clear()
        self.logger.info("Cleared disk cache")

    # Counters reset alongside the contents.
    self.stats = {name: 0 for name in ("hits", "misses", "evictions", "expirations")}
cleanup_expired
Python
cleanup_expired() -> int

Remove expired entries from cache.

RETURNSDESCRIPTION
int

Number of entries removed

Source code in tenets/core/prompt/cache.py
Python
def cleanup_expired(self) -> int:
    """Remove expired entries from cache.

    Returns:
        Number of entries removed
    """
    removed = 0

    if self.enable_memory:
        # Snapshot first: can't delete while iterating the dict.
        stale = [key for key, entry in self.memory_cache.items() if entry.is_expired()]
        for key in stale:
            del self.memory_cache[key]
        removed = len(stale)

        if removed:
            self.logger.info(f"Cleaned up {removed} expired cache entries")

    return removed
get_stats
Python
get_stats() -> Dict[str, Any]

Get cache statistics.

RETURNSDESCRIPTION
Dict[str, Any]

Cache statistics dictionary

Source code in tenets/core/prompt/cache.py
Python
def get_stats(self) -> Dict[str, Any]:
    """Get cache statistics.

    Returns:
        Cache statistics dictionary
    """
    hits = self.stats["hits"]
    misses = self.stats["misses"]
    requests = hits + misses

    # Memory footprint is approximated by the string length of each value.
    entries = list(self.memory_cache.values()) if self.enable_memory else []

    return {
        "hits": hits,
        "misses": misses,
        "hit_rate": hits / requests if requests > 0 else 0,
        "evictions": self.stats["evictions"],
        "expirations": self.stats["expirations"],
        "memory_entries": len(entries),
        "memory_size": sum(len(str(entry.value)) for entry in entries),
    }
warm_cache
Python
warm_cache(common_prompts: List[str]) -> None

Pre-warm cache with common prompts.

PARAMETERDESCRIPTION
common_prompts

List of common prompts to pre-cache

TYPE:List[str]

Source code in tenets/core/prompt/cache.py
Python
def warm_cache(self, common_prompts: List[str]) -> None:
    """Pre-warm cache with common prompts.

    Currently a no-op placeholder: the intent is for this to run during
    initialization and pre-populate the cache, but no warming logic has
    been implemented yet.

    Args:
        common_prompts: List of common prompts to pre-cache
    """
    # This would be called during initialization to pre-populate
    # the cache with commonly used prompts
    pass

Functions