context

Full name: tenets.models.context

context

Context models for prompt processing and result handling.

This module defines the data structures for managing context throughout the distillation and instillation process.

Classes

TaskType

Bases: Enum

Types of tasks detected in prompts.

Functions
from_string (classmethod)
Python
from_string(value: str) -> TaskType

Create TaskType from string value.

Source code in tenets/models/context.py
Python
@classmethod
def from_string(cls, value: str) -> "TaskType":
    """Create TaskType from string value."""
    try:
        return cls(value.lower())
    except ValueError:
        return cls.GENERAL
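
A minimal usage sketch (not from the source): only the GENERAL member is confirmed by the code above, so the example sticks to the documented fallback behaviour.

Python
from tenets.models.context import TaskType

# Unrecognized values fall back to TaskType.GENERAL instead of raising ValueError.
assert TaskType.from_string("not-a-real-task-type") is TaskType.GENERAL

# Matching is effectively case-insensitive because the value is lowercased first.
assert TaskType.from_string("GENERAL") is TaskType.GENERAL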

PromptContext (dataclass)

Python
PromptContext(text: str, original: Optional[str] = None, keywords: list[str] = list(), task_type: str = 'general', intent: str = 'understand', entities: list[dict[str, Any]] = list(), file_patterns: list[str] = list(), focus_areas: list[str] = list(), temporal_context: Optional[dict[str, Any]] = None, scope: dict[str, Any] = dict(), external_context: Optional[dict[str, Any]] = None, metadata: dict[str, Any] = dict(), confidence_scores: dict[str, float] = dict(), session_id: Optional[str] = None, timestamp: datetime = datetime.now(), include_tests: bool = False)

Context extracted from user prompt.

Contains all information parsed from the prompt to guide file selection and ranking. This is the primary data structure that flows through the system after prompt parsing.

Attributes

text (str): The processed prompt text (cleaned and normalized)
original (Optional[str]): Original input (may be a URL or raw text)
keywords (list[str]): Extracted keywords for searching
task_type (str): Type of task detected
intent (str): User intent classification
entities (list[dict[str, Any]]): Named entities found (classes, functions, modules)
file_patterns (list[str]): File patterns to match (.py, test_, etc.)
focus_areas (list[str]): Areas to focus on (auth, api, database, etc.)
temporal_context (Optional[dict[str, Any]]): Time-related context (recent, yesterday, etc.)
scope (dict[str, Any]): Scope indicators (modules, directories, exclusions)
external_context (Optional[dict[str, Any]]): Context from external sources (GitHub, JIRA)
metadata (dict[str, Any]): Additional metadata for processing
confidence_scores (dict[str, float]): Confidence scores for various extractions
session_id (Optional[str]): Associated session, if any
timestamp (datetime): When the context was created
include_tests (bool): Declared in the constructor with a default of False; not described in the source docstring

Functions
add_keyword
Python
add_keyword(keyword: str, confidence: float = 1.0) -> None

Add a keyword with confidence score.

Source code in tenets/models/context.py
Python
def add_keyword(self, keyword: str, confidence: float = 1.0) -> None:
    """Add a keyword with confidence score."""
    if keyword and keyword.lower() not in [k.lower() for k in self.keywords]:
        self.keywords.append(keyword)
        self.confidence_scores[f"keyword_{keyword}"] = confidence
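
A short illustrative sketch (the prompt text and keywords are made up): add_keyword skips case-insensitive duplicates and records a per-keyword confidence score.

Python
from tenets.models.context import PromptContext

ctx = PromptContext(text="fix the login timeout bug")
ctx.add_keyword("login", confidence=0.9)
ctx.add_keyword("Login")  # ignored: differs from an existing keyword only by case

assert ctx.keywords == ["login"]
assert ctx.confidence_scores["keyword_login"] == 0.9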
add_entity
Python
add_entity(name: str, entity_type: str, confidence: float = 1.0) -> None

Add an entity with type and confidence.

Source code in tenets/models/context.py
Python
def add_entity(self, name: str, entity_type: str, confidence: float = 1.0) -> None:
    """Add an entity with type and confidence."""
    self.entities.append({"name": name, "type": entity_type, "confidence": confidence})
add_focus_area
Python
add_focus_area(area: str) -> None

Add a focus area if not already present.

Source code in tenets/models/context.py
Python
def add_focus_area(self, area: str) -> None:
    """Add a focus area if not already present."""
    if area and area not in self.focus_areas:
        self.focus_areas.append(area)
merge_with
Python
merge_with(other: PromptContext) -> PromptContext

Merge this context with another.

Source code in tenets/models/context.py
Python
def merge_with(self, other: "PromptContext") -> "PromptContext":
    """Merge this context with another."""
    # Merge keywords
    for kw in other.keywords:
        self.add_keyword(kw)

    # Merge entities
    self.entities.extend(other.entities)

    # Merge file patterns
    self.file_patterns.extend(
        [fp for fp in other.file_patterns if fp not in self.file_patterns]
    )

    # Merge focus areas
    for area in other.focus_areas:
        self.add_focus_area(area)

    # Merge metadata
    self.metadata.update(other.metadata)

    return self
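
An illustrative sketch of merging two contexts (the prompt texts and keywords are made up). Note that merge_with mutates the receiver in place and returns it; keywords and focus areas are de-duplicated, while entities and metadata are simply combined.

Python
from tenets.models.context import PromptContext

a = PromptContext(text="debug the auth flow", keywords=["auth"], focus_areas=["auth"])
b = PromptContext(text="check token refresh", keywords=["auth", "token"], focus_areas=["api"])

merged = a.merge_with(b)

assert merged is a                            # self is mutated and returned
assert merged.keywords == ["auth", "token"]   # "auth" is not duplicated
assert merged.focus_areas == ["auth", "api"]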
to_dict
Python
to_dict() -> dict[str, Any]

Convert to dictionary representation.

Source code in tenets/models/context.py
Python
def to_dict(self) -> dict[str, Any]:
    """Convert to dictionary representation."""
    return {
        "text": self.text,
        "original": self.original,
        "keywords": self.keywords,
        "task_type": self.task_type,
        "intent": self.intent,
        "entities": self.entities,
        "file_patterns": self.file_patterns,
        "focus_areas": self.focus_areas,
        "temporal_context": self.temporal_context,
        "scope": self.scope,
        "external_context": self.external_context,
        "metadata": self.metadata,
        "confidence_scores": self.confidence_scores,
        "session_id": self.session_id,
        "timestamp": self.timestamp.isoformat(),
    }
from_dict (classmethod)
Python
from_dict(data: dict[str, Any]) -> PromptContext

Create PromptContext from dictionary.

Source code in tenets/models/context.py
Python
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "PromptContext":
    """Create PromptContext from dictionary."""
    if "timestamp" in data and isinstance(data["timestamp"], str):
        data["timestamp"] = datetime.fromisoformat(data["timestamp"])
    return cls(**data)
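
A minimal round-trip sketch (the example text is made up): to_dict serializes the timestamp as an ISO-8601 string and from_dict parses it back.

Python
from tenets.models.context import PromptContext

ctx = PromptContext(text="add caching to the search endpoint", keywords=["caching", "search"])

data = ctx.to_dict()                      # timestamp becomes an ISO-8601 string
restored = PromptContext.from_dict(data)

assert restored.text == ctx.text
assert restored.keywords == ctx.keywords
assert restored.timestamp == ctx.timestamp   # parsed back from the ISO string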
get_hash
Python
get_hash() -> str

Compute a deterministic cache key for this prompt context.

The hash incorporates the normalized prompt text, task type, and the ordered list of unique keywords. MD5 is chosen (with usedforsecurity=False) for speed; collision risk is acceptable for internal memoization.

Returns

str: Hex digest suitable for use as an internal cache key.

Source code in tenets/models/context.py
Python
def get_hash(self) -> str:
    """Compute a deterministic cache key for this prompt context.

    The hash incorporates the normalized prompt text, task type, and the
    ordered list of unique keywords. MD5 is chosen (with
    ``usedforsecurity=False``) for speed; collision risk is acceptable for
    internal memoization.

    Returns:
        str: Hex digest suitable for use as an internal cache key.
    """
    key_data = f"{self.text}_{self.task_type}_{sorted(self.keywords)}"
    # nosec B324 - MD5 used only for non-security cache key generation
    return hashlib.md5(key_data.encode(), usedforsecurity=False).hexdigest()  # nosec
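
An illustrative sketch (the example text is made up): because keywords are sorted before hashing, their insertion order does not affect the cache key.

Python
from tenets.models.context import PromptContext

a = PromptContext(text="refactor payment retries", keywords=["payment", "retry"])
b = PromptContext(text="refactor payment retries", keywords=["retry", "payment"])

assert a.get_hash() == b.get_hash()   # keyword order is irrelevant
assert len(a.get_hash()) == 32        # MD5 hex digest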

ContextResult (dataclass)

Python
ContextResult(content: Optional[str] = None, context: Optional[str] = None, format: str = 'markdown', token_count: int = 0, files: list[str] = list(), files_included: list[str] = list(), files_summarized: list[str] = list(), metadata: dict[str, Any] = dict(), session_id: Optional[str] = None, timestamp: datetime = datetime.now(), statistics: dict[str, Any] = dict(), prompt_context: Optional[PromptContext] = None, cost_estimate: Optional[dict[str, float]] = None, warnings: list[str] = list(), errors: list[str] = list())

Result of context generation.

Contains the generated context ready for consumption by LLMs or other tools. This is the final output of the distillation process.

Attributes

content (Optional[str]): The generated context content (preferred alias)
context (Optional[str]): Backward-compatible alias for content
format (str): Output format (markdown, xml, json)
token_count (int): Number of tokens in context
files (list[str]): List of included file paths (preferred alias)
files_included (list[str]): Backward-compatible alias for files
files_summarized (list[str]): List of summarized file paths
metadata (dict[str, Any]): Additional metadata about generation, including:
    timing: dict with duration info (if timing is enabled)
        duration: duration in seconds (float)
        formatted_duration: human-readable string (e.g. "2.34s")
        start_datetime: ISO-format start time
        end_datetime: ISO-format end time
session_id (Optional[str]): Session this result belongs to
timestamp (datetime): When the context was generated
statistics (dict[str, Any]): Generation statistics
prompt_context (Optional[PromptContext]): Original prompt context
cost_estimate (Optional[dict[str, float]]): Estimated cost for LLM usage
warnings (list[str]): Any warnings during generation
errors (list[str]): Any errors during generation

Functions
add_warning
Python
add_warning(warning: str) -> None

Add a warning message.

Source code in tenets/models/context.py
Python
def add_warning(self, warning: str) -> None:
    """Add a warning message."""
    self.warnings.append(warning)
    self.statistics["has_warnings"] = True
add_error
Python
add_error(error: str) -> None

Add an error message.

Source code in tenets/models/context.py
Python
def add_error(self, error: str) -> None:
    """Add an error message."""
    self.errors.append(error)
    self.statistics["has_errors"] = True
update_statistics
Python
update_statistics(key: str, value: Any) -> None

Update a statistic value.

Source code in tenets/models/context.py
Python
def update_statistics(self, key: str, value: Any) -> None:
    """Update a statistic value."""
    self.statistics[key] = value
to_dict
Python
to_dict() -> dict[str, Any]

Convert to dictionary representation.

Source code in tenets/models/context.py
Python
def to_dict(self) -> dict[str, Any]:
    """Convert to dictionary representation."""
    data = {
        # Prefer normalized keys expected by tests
        "content": self.content,
        "format": self.format,
        "token_count": self.token_count,
        "files": list(self.files),
        # Include legacy keys for backward compatibility
        "context": self.context,
        "files_included": list(self.files_included),
        "files_summarized": list(self.files_summarized),
        "metadata": self.metadata,
        "session_id": self.session_id,
        "timestamp": self.timestamp.isoformat(),
        "statistics": self.statistics,
        "cost_estimate": self.cost_estimate,
        "warnings": self.warnings,
        "errors": self.errors,
    }

    if self.prompt_context:
        data["prompt_context"] = self.prompt_context.to_dict()

    return data
from_dict (classmethod)
Python
from_dict(data: dict[str, Any]) -> ContextResult

Create from dictionary.

Source code in tenets/models/context.py
Python
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "ContextResult":
    """Create from dictionary."""
    if "timestamp" in data and isinstance(data["timestamp"], str):
        data["timestamp"] = datetime.fromisoformat(data["timestamp"])

    if "prompt_context" in data and isinstance(data["prompt_context"], dict):
        data["prompt_context"] = PromptContext.from_dict(data["prompt_context"])

    # Normalize alias keys on load
    if "context" in data and "content" not in data:
        data["content"] = data["context"]
    if "files_included" in data and "files" not in data:
        data["files"] = data["files_included"]

    return cls(**data)
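
An illustrative sketch of the alias normalization (the payload below is made up): a legacy dictionary that only carries the old "context" and "files_included" keys still populates the preferred content and files fields on load.

Python
from tenets.models.context import ContextResult

legacy = {
    "context": "# Project context\n...",
    "files_included": ["src/app.py", "src/auth.py"],
    "format": "markdown",
}

result = ContextResult.from_dict(legacy)

assert result.content == "# Project context\n..."
assert result.files == ["src/app.py", "src/auth.py"]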
save_to_file
Python
save_to_file(path: Union[str, Path]) -> None

Save context result to file.

Source code in tenets/models/context.py
Python
def save_to_file(self, path: Union[str, Path]) -> None:
    """Save context result to file."""
    path = Path(path)

    if self.format == "json":
        with path.open("w") as f:
            json.dump(self.to_dict(), f, indent=2, default=str)
    else:
        with path.open("w") as f:
            f.write(self.content or "")
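
A minimal usage sketch (file names and content are made up): the output depends on the format field, with "json" writing the full to_dict() payload and every other format writing the raw content string.

Python
from pathlib import Path
from tenets.models.context import ContextResult

result = ContextResult(content="# Context\n\nRelevant files...", format="markdown")

# Markdown (or any non-JSON format) writes the content string as-is.
result.save_to_file(Path("context.md"))

# Switching the format to "json" serializes the full dictionary representation instead.
result.format = "json"
result.save_to_file(Path("context.json"))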
get_summary
Python
get_summary() -> str

Get a summary of the context result.

Source code in tenets/models/context.py
Python
def get_summary(self) -> str:
    """Get a summary of the context result."""
    lines = [
        "Context Result Summary:",
        f"  Format: {self.format}",
        f"  Token Count: {self.token_count:,}",
        f"  Files Included: {len(self.files_included)}",
        f"  Files Summarized: {len(self.files_summarized)}",
    ]

    if self.cost_estimate:
        lines.append(f"  Estimated Cost: ${self.cost_estimate.get('total_cost', 0):.4f}")

    if self.warnings:
        lines.append(f"  Warnings: {len(self.warnings)}")

    if self.errors:
        lines.append(f"  Errors: {len(self.errors)}")

    return "\n".join(lines)

SessionContext (dataclass)

Python
SessionContext(session_id: str, name: str = '', project_root: Optional[Path] = None, shown_files: set[str] = set(), ignored_files: set[str] = set(), context_history: list[ContextResult] = list(), current_focus: list[str] = list(), tenets_applied: list[str] = list(), created_at: datetime = datetime.now(), updated_at: datetime = datetime.now(), metadata: dict[str, Any] = dict(), ai_requests: list[dict[str, Any]] = list(), branch: Optional[str] = None, pinned_files: set[str] = set())

Context for a session.

Maintains state across multiple prompts in a session for incremental context building and state management.

Attributes

session_id (str): Unique session identifier
name (str): Human-readable session name
project_root (Optional[Path]): Root path of the project
shown_files (set[str]): Files explicitly shown
ignored_files (set[str]): Files to ignore
context_history (list[ContextResult]): History of contexts
current_focus (list[str]): Current focus areas
tenets_applied (list[str]): Tenets applied in the session
created_at (datetime): When the session was created
updated_at (datetime): Last update time
metadata (dict[str, Any]): Session metadata
ai_requests (list[dict[str, Any]]): History of AI requests
branch (Optional[str]): Git branch, if applicable
pinned_files (set[str]): Files pinned so they are always considered for future distill operations

Functions
add_shown_file
Python
add_shown_file(file_path: str) -> None

Mark file as shown.

Source code in tenets/models/context.py
Python
def add_shown_file(self, file_path: str) -> None:
    """Mark file as shown."""
    self.shown_files.add(file_path)
    if file_path in self.ignored_files:
        self.ignored_files.remove(file_path)
    self.updated_at = datetime.now()
add_ignored_file
Python
add_ignored_file(file_path: str) -> None

Mark file as ignored.

Source code in tenets/models/context.py
Python
def add_ignored_file(self, file_path: str) -> None:
    """Mark file as ignored."""
    self.ignored_files.add(file_path)
    if file_path in self.shown_files:
        self.shown_files.remove(file_path)
    self.updated_at = datetime.now()
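
An illustrative sketch (file paths are made up): the shown and ignored sets are kept mutually exclusive, so marking a file one way removes it from the other set.

Python
from tenets.models.context import SessionContext

session = SessionContext(session_id="feature-auth")
session.add_shown_file("src/auth.py")
session.add_ignored_file("src/auth.py")   # moves the file from shown to ignored

assert "src/auth.py" not in session.shown_files
assert "src/auth.py" in session.ignored_files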
add_context
Python
add_context(context: ContextResult) -> None

Add context to history.

Source code in tenets/models/context.py
Python
def add_context(self, context: ContextResult) -> None:
    """Add context to history."""
    self.context_history.append(context)
    context.session_id = self.session_id
    self.updated_at = datetime.now()
add_ai_request
Python
add_ai_request(request_type: str, request_data: dict[str, Any]) -> None

Record an AI request.

Source code in tenets/models/context.py
Python
def add_ai_request(self, request_type: str, request_data: dict[str, Any]) -> None:
    """Record an AI request."""
    self.ai_requests.append(
        {"type": request_type, "data": request_data, "timestamp": datetime.now().isoformat()}
    )
    self.updated_at = datetime.now()
add_pinned_file
Python
add_pinned_file(file_path: str) -> None

Pin a file so it is always considered for future distill operations.

Parameters

file_path (str): Absolute or project-relative path to the file.

Source code in tenets/models/context.py
Python
def add_pinned_file(self, file_path: str) -> None:
    """Pin a file so it is always considered for future distill operations.

    Args:
        file_path: Absolute or project-relative path to the file.
    """
    self.pinned_files.add(file_path)
    self.updated_at = datetime.now()
list_pinned_files
Python
list_pinned_files() -> list[str]

Return pinned file paths.

Source code in tenets/models/context.py
Python
def list_pinned_files(self) -> list[str]:
    """Return pinned file paths."""
    return sorted(self.pinned_files)
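
An illustrative sketch (file paths are made up): pinned files are stored as a set and listed in sorted order.

Python
from tenets.models.context import SessionContext

session = SessionContext(session_id="feature-auth")
session.add_pinned_file("src/auth.py")
session.add_pinned_file("docs/auth.md")

assert session.list_pinned_files() == ["docs/auth.md", "src/auth.py"]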
get_latest_context
Python
get_latest_context() -> Optional[ContextResult]

Get the most recent context.

Source code in tenets/models/context.py
Python
def get_latest_context(self) -> Optional[ContextResult]:
    """Get the most recent context."""
    return self.context_history[-1] if self.context_history else None
should_show_file
Python
should_show_file(file_path: str) -> Optional[bool]

Check whether a file should be shown based on session state; returns None when no preference has been recorded.

Source code in tenets/models/context.py
Python
def should_show_file(self, file_path: str) -> Optional[bool]:
    """Check if file should be shown based on session state."""
    if file_path in self.ignored_files:
        return False
    if file_path in self.shown_files:
        return True
    return None  # No preference recorded
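
An illustrative sketch of the three possible outcomes (file paths are made up):

Python
from tenets.models.context import SessionContext

session = SessionContext(session_id="feature-auth")
session.add_shown_file("src/auth.py")
session.add_ignored_file("tests/test_auth.py")

assert session.should_show_file("src/auth.py") is True
assert session.should_show_file("tests/test_auth.py") is False
assert session.should_show_file("src/unrelated.py") is None   # no preference recorded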
to_dict
Python
to_dict() -> dict[str, Any]

Convert to dictionary representation.

Source code in tenets/models/context.py
Python
def to_dict(self) -> dict[str, Any]:
    """Convert to dictionary representation."""
    return {
        "session_id": self.session_id,
        "name": self.name,
        "project_root": str(self.project_root) if self.project_root else None,
        "shown_files": list(self.shown_files),
        "ignored_files": list(self.ignored_files),
        "context_history": [c.to_dict() for c in self.context_history],
        "current_focus": self.current_focus,
        "tenets_applied": self.tenets_applied,
        "created_at": self.created_at.isoformat(),
        "updated_at": self.updated_at.isoformat(),
        "metadata": self.metadata,
        "ai_requests": self.ai_requests,
        "branch": self.branch,
    }
from_dict (classmethod)
Python
from_dict(data: dict[str, Any]) -> SessionContext

Create from dictionary.

Source code in tenets/models/context.py
Python
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "SessionContext":
    """Create from dictionary."""
    if "created_at" in data and isinstance(data["created_at"], str):
        data["created_at"] = datetime.fromisoformat(data["created_at"])
    if "updated_at" in data and isinstance(data["updated_at"], str):
        data["updated_at"] = datetime.fromisoformat(data["updated_at"])

    if "shown_files" in data:
        data["shown_files"] = set(data["shown_files"])
    if "ignored_files" in data:
        data["ignored_files"] = set(data["ignored_files"])

    if "context_history" in data:
        data["context_history"] = [
            ContextResult.from_dict(c) if isinstance(c, dict) else c
            for c in data["context_history"]
        ]

    if data.get("project_root"):
        data["project_root"] = Path(data["project_root"])

    return cls(**data)
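
A minimal round-trip sketch (the session name and paths are made up): to_dict converts sets, paths, and datetimes to JSON-friendly types, and from_dict restores them, including nested ContextResult entries. Note that pinned_files is not included in to_dict, so it is not preserved across a round trip.

Python
from tenets.models.context import ContextResult, SessionContext

session = SessionContext(session_id="feature-auth", name="Auth work")
session.add_shown_file("src/auth.py")
session.add_context(ContextResult(content="# Context", token_count=120))

data = session.to_dict()
restored = SessionContext.from_dict(data)

assert restored.shown_files == {"src/auth.py"}
assert restored.context_history[0].content == "# Context"
assert restored.created_at == session.created_at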