import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from typing import Dict, List, Any, Optional, Tuple from dataclasses import dataclass, field from datetime import datetime, timedelta import logging import json from pathlib import Path logger = logging.getLogger(__name__) @dataclass class EmotionalState: """ Represents Lyra's current emotional state with multiple dimensions. This captures the complexity of human emotions - multiple feelings can exist simultaneously with different intensities. """ # Primary emotions (based on Plutchik's wheel) joy: float = 0.5 sadness: float = 0.1 anger: float = 0.1 fear: float = 0.1 surprise: float = 0.2 disgust: float = 0.1 trust: float = 0.6 anticipation: float = 0.4 # Complex emotions (combinations of primary) love: float = 0.5 # joy + trust guilt: float = 0.1 # fear + disgust shame: float = 0.1 # fear + disgust pride: float = 0.4 # joy + anger jealousy: float = 0.1 # anger + fear hope: float = 0.6 # trust + anticipation despair: float = 0.1 # sadness + fear curiosity: float = 0.7 # surprise + anticipation # Meta-emotional states emotional_intensity: float = 0.5 # How strongly emotions are felt emotional_stability: float = 0.7 # Resistance to emotional change emotional_clarity: float = 0.6 # How well emotions are understood # Context timestamp: Optional[datetime] = None trigger: Optional[str] = None confidence: float = 1.0 def __post_init__(self): if self.timestamp is None: self.timestamp = datetime.now() def to_tensor(self, device: Optional[torch.device] = None) -> torch.Tensor: """Convert emotional state to tensor for neural processing.""" values = [ self.joy, self.sadness, self.anger, self.fear, self.surprise, self.disgust, self.trust, self.anticipation, self.love, self.guilt, self.shame, self.pride, self.jealousy, self.hope, self.despair, self.curiosity, self.emotional_intensity, self.emotional_stability, self.emotional_clarity ] return torch.tensor(values, dtype=torch.float32, device=device) @classmethod def from_tensor(cls, tensor: torch.Tensor, trigger: str = None) -> 'EmotionalState': """Create emotional state from tensor.""" values = tensor.detach().cpu().numpy() return cls( joy=float(values[0]), sadness=float(values[1]), anger=float(values[2]), fear=float(values[3]), surprise=float(values[4]), disgust=float(values[5]), trust=float(values[6]), anticipation=float(values[7]), love=float(values[8]), guilt=float(values[9]), shame=float(values[10]), pride=float(values[11]), jealousy=float(values[12]), hope=float(values[13]), despair=float(values[14]), curiosity=float(values[15]), emotional_intensity=float(values[16]), emotional_stability=float(values[17]), emotional_clarity=float(values[18]), trigger=trigger ) def get_dominant_emotion(self) -> Tuple[str, float]: """Get the most prominent emotion.""" emotions = { 'joy': self.joy, 'sadness': self.sadness, 'anger': self.anger, 'fear': self.fear, 'surprise': self.surprise, 'disgust': self.disgust, 'trust': self.trust, 'anticipation': self.anticipation, 'love': self.love, 'guilt': self.guilt, 'shame': self.shame, 'pride': self.pride, 'jealousy': self.jealousy, 'hope': self.hope, 'despair': self.despair, 'curiosity': self.curiosity } dominant_emotion = max(emotions.items(), key=lambda x: x[1]) return dominant_emotion def get_emotional_valence(self) -> float: """Get overall emotional valence (positive/negative).""" positive = self.joy + self.trust + self.love + self.pride + self.hope + self.curiosity negative = self.sadness + self.anger + self.fear + self.disgust + self.guilt + self.shame + self.jealousy + self.despair return (positive - negative) / (positive + negative + 1e-8) def get_emotional_arousal(self) -> float: """Get emotional arousal level (calm/excited).""" high_arousal = self.anger + self.fear + self.surprise + self.joy + self.anticipation low_arousal = self.sadness + self.trust + self.disgust return high_arousal / (high_arousal + low_arousal + 1e-8) @dataclass class EmotionMemory: """Memory of past emotional experiences.""" emotional_state: EmotionalState context: str intensity: float impact_score: float # How much this memory affects current emotions decay_rate: float = 0.95 def __post_init__(self): self.creation_time = datetime.now() def get_current_impact(self) -> float: """Get current impact considering decay.""" time_passed = (datetime.now() - self.creation_time).total_seconds() / 3600 # hours return self.impact_score * (self.decay_rate ** time_passed) def is_significant(self, threshold: float = 0.1) -> bool: """Check if memory is still significant.""" return self.get_current_impact() > threshold class EmotionalSystem(nn.Module): """ Sophisticated emotional system that allows Lyra to experience and express emotions like a real person, with emotional memory and growth. """ def __init__( self, input_dim: int = 512, emotion_dim: int = 19, memory_capacity: int = 1000, device: Optional[torch.device] = None ): super().__init__() self.emotion_dim = emotion_dim self.memory_capacity = memory_capacity self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu") # Current emotional state self.current_state = EmotionalState() # Emotional processing networks self.context_processor = nn.Sequential( nn.Linear(input_dim, 256), nn.LayerNorm(256), nn.ReLU(), nn.Dropout(0.1), nn.Linear(256, 128), nn.ReLU(), nn.Linear(128, 64) ) # Emotion generation network self.emotion_generator = nn.Sequential( nn.Linear(64 + emotion_dim, 128), # Context + current emotions nn.LayerNorm(128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, emotion_dim), nn.Sigmoid() # Emotions are bounded [0, 1] ) # Memory influence network self.memory_network = nn.Sequential( nn.Linear(emotion_dim * 2, 64), # Current + memory emotions nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, emotion_dim), nn.Tanh() # Memory influence can be positive or negative ) # Emotional regulation network (like human emotional control) self.regulation_network = nn.Sequential( nn.Linear(emotion_dim + 5, 64), # Emotions + regulation signals nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, emotion_dim), nn.Sigmoid() ) # Emotional memory self.emotion_memories: List[EmotionMemory] = [] # Emotional learning parameters self.emotional_learning_rate = nn.Parameter(torch.tensor(0.1)) self.memory_consolidation_threshold = nn.Parameter(torch.tensor(0.7)) # Emotional patterns (learned responses to situations) self.emotional_patterns = {} # Emotional growth tracking self.emotional_maturity = 0.5 self.emotional_experiences = 0 self.to(self.device) def forward( self, context_embedding: torch.Tensor, user_feedback: Optional[torch.Tensor] = None, social_context: Optional[Dict[str, Any]] = None, regulate_emotions: bool = True ) -> Tuple[EmotionalState, Dict[str, Any]]: """ Process current context and generate emotional response. Args: context_embedding: Current conversation/situation context user_feedback: Feedback about previous emotional responses social_context: Social context information regulate_emotions: Whether to apply emotional regulation Returns: new_emotional_state: Updated emotional state emotion_info: Information about emotional processing """ batch_size = context_embedding.shape[0] # Process context context_features = self.context_processor(context_embedding.mean(dim=1)) # Get current emotions as tensor current_emotions = self.current_state.to_tensor(self.device).unsqueeze(0).repeat(batch_size, 1) # Apply memory influence memory_influence = self._get_memory_influence(current_emotions) influenced_emotions = current_emotions + 0.3 * memory_influence # Generate new emotional response emotion_input = torch.cat([context_features, influenced_emotions], dim=1) raw_emotions = self.emotion_generator(emotion_input) # Apply emotional regulation if enabled if regulate_emotions: regulation_signals = self._get_regulation_signals(social_context, batch_size) regulation_input = torch.cat([raw_emotions, regulation_signals], dim=1) regulated_emotions = self.regulation_network(regulation_input) final_emotions = regulated_emotions else: final_emotions = raw_emotions # Update current state new_state = EmotionalState.from_tensor( final_emotions[0], trigger=social_context.get('trigger', 'interaction') if social_context else 'interaction' ) # Learn from feedback if provided emotion_info = {} if user_feedback is not None: learning_info = self._learn_from_feedback(user_feedback, new_state) emotion_info.update(learning_info) # Store significant emotional experiences if self._is_emotionally_significant(new_state): self._store_emotional_memory(new_state, context_embedding, social_context) # Update emotional maturity self._update_emotional_growth(new_state) # Prepare emotion info emotion_info.update({ 'dominant_emotion': new_state.get_dominant_emotion(), 'emotional_valence': new_state.get_emotional_valence(), 'emotional_arousal': new_state.get_emotional_arousal(), 'memory_influence_strength': torch.norm(memory_influence).item(), 'emotional_maturity': self.emotional_maturity, 'regulation_applied': regulate_emotions, 'significant_memories': len([m for m in self.emotion_memories if m.is_significant()]) }) # Update current state self.current_state = new_state return new_state, emotion_info def _get_memory_influence(self, current_emotions: torch.Tensor) -> torch.Tensor: """Get influence from emotional memories on current state.""" if not self.emotion_memories: return torch.zeros_like(current_emotions) # Get significant memories significant_memories = [m for m in self.emotion_memories if m.is_significant()] if not significant_memories: return torch.zeros_like(current_emotions) # Compute weighted influence total_influence = torch.zeros_like(current_emotions) total_weight = 0.0 for memory in significant_memories[-20:]: # Use recent significant memories memory_emotions = memory.emotional_state.to_tensor(self.device).unsqueeze(0) weight = memory.get_current_impact() # Compute influence using memory network memory_input = torch.cat([current_emotions, memory_emotions], dim=1) influence = self.memory_network(memory_input) total_influence += weight * influence total_weight += weight if total_weight > 0: return total_influence / total_weight else: return torch.zeros_like(current_emotions) def _get_regulation_signals(self, social_context: Optional[Dict[str, Any]], batch_size: int) -> torch.Tensor: """Get emotional regulation signals based on context.""" signals = torch.zeros(batch_size, 5, device=self.device) if social_context: # Formality regulation formality = social_context.get('formality_level', 0.5) signals[:, 0] = formality # Higher formality = more emotional control # Social pressure regulation group_size = social_context.get('group_size', 1) signals[:, 1] = min(1.0, group_size / 10.0) # More people = more regulation # Conflict regulation if social_context.get('has_conflict', False): signals[:, 2] = 0.8 # High regulation during conflict # Time pressure regulation time_pressure = social_context.get('time_pressure', 0.0) signals[:, 3] = time_pressure # Emotional safety regulation emotional_safety = social_context.get('emotional_safety', 0.8) signals[:, 4] = 1.0 - emotional_safety # Less safe = more regulation return signals def _is_emotionally_significant(self, state: EmotionalState) -> bool: """Determine if an emotional state is significant enough to remember.""" # High intensity emotions if state.emotional_intensity > 0.8: return True # Strong specific emotions dominant_emotion, intensity = state.get_dominant_emotion() if intensity > 0.8: return True # Extreme valence if abs(state.get_emotional_valence()) > 0.7: return True # High arousal if state.get_emotional_arousal() > 0.8: return True return False def _store_emotional_memory( self, state: EmotionalState, context: torch.Tensor, social_context: Optional[Dict[str, Any]] ): """Store significant emotional experience in memory.""" # Calculate impact score intensity = state.emotional_intensity valence_strength = abs(state.get_emotional_valence()) arousal = state.get_emotional_arousal() impact_score = (intensity + valence_strength + arousal) / 3.0 # Create memory memory = EmotionMemory( emotional_state=state, context=social_context.get('description', 'interaction') if social_context else 'interaction', intensity=intensity, impact_score=impact_score ) self.emotion_memories.append(memory) # Manage memory capacity if len(self.emotion_memories) > self.memory_capacity: # Remove least significant old memories self.emotion_memories.sort(key=lambda m: m.get_current_impact()) self.emotion_memories = self.emotion_memories[-self.memory_capacity:] logger.debug(f"Stored emotional memory: {state.get_dominant_emotion()[0]} " f"(impact: {impact_score:.3f})") def _learn_from_feedback(self, feedback: torch.Tensor, state: EmotionalState) -> Dict[str, Any]: """Learn from user feedback about emotional responses.""" feedback_value = feedback.mean().item() learning_info = { 'feedback_received': feedback_value, 'learning_applied': False } # Adjust emotional learning rate based on feedback if feedback_value > 0.7: # Positive feedback self.emotional_learning_rate.data *= 1.01 learning_info['learning_applied'] = True learning_info['adjustment'] = 'increased_sensitivity' elif feedback_value < 0.3: # Negative feedback self.emotional_learning_rate.data *= 0.98 learning_info['learning_applied'] = True learning_info['adjustment'] = 'decreased_sensitivity' # Clamp learning rate self.emotional_learning_rate.data = torch.clamp( self.emotional_learning_rate.data, 0.01, 0.5 ) # Store feedback pattern for this emotional state dominant_emotion, intensity = state.get_dominant_emotion() if dominant_emotion not in self.emotional_patterns: self.emotional_patterns[dominant_emotion] = { 'positive_feedback_count': 0, 'negative_feedback_count': 0, 'total_feedback': 0, 'avg_feedback': 0.5 } pattern = self.emotional_patterns[dominant_emotion] pattern['total_feedback'] += 1 if feedback_value > 0.6: pattern['positive_feedback_count'] += 1 elif feedback_value < 0.4: pattern['negative_feedback_count'] += 1 pattern['avg_feedback'] = ( (pattern['avg_feedback'] * (pattern['total_feedback'] - 1) + feedback_value) / pattern['total_feedback'] ) return learning_info def _update_emotional_growth(self, state: EmotionalState): """Update emotional maturity and growth metrics.""" self.emotional_experiences += 1 # Emotional maturity grows with diverse emotional experiences emotion_diversity = self._calculate_emotion_diversity(state) emotional_clarity = state.emotional_clarity growth_factor = (emotion_diversity + emotional_clarity) / 2.0 self.emotional_maturity = ( 0.999 * self.emotional_maturity + 0.001 * growth_factor ) # Clamp maturity self.emotional_maturity = np.clip(self.emotional_maturity, 0.0, 1.0) def _calculate_emotion_diversity(self, state: EmotionalState) -> float: """Calculate how diverse the current emotional state is.""" emotions = [ state.joy, state.sadness, state.anger, state.fear, state.surprise, state.disgust, state.trust, state.anticipation ] # Calculate entropy as measure of diversity emotions_array = np.array(emotions) + 1e-8 emotions_array = emotions_array / emotions_array.sum() entropy = -np.sum(emotions_array * np.log(emotions_array)) max_entropy = np.log(len(emotions)) return entropy / max_entropy def get_emotional_context_for_response(self) -> Dict[str, Any]: """Get emotional context to influence response generation.""" dominant_emotion, intensity = self.current_state.get_dominant_emotion() return { 'dominant_emotion': dominant_emotion, 'emotion_intensity': intensity, 'emotional_valence': self.current_state.get_emotional_valence(), 'emotional_arousal': self.current_state.get_emotional_arousal(), 'emotional_stability': self.current_state.emotional_stability, 'emotional_maturity': self.emotional_maturity, 'recent_emotional_patterns': self._get_recent_emotional_patterns() } def _get_recent_emotional_patterns(self) -> Dict[str, float]: """Get patterns from recent emotional experiences.""" if len(self.emotion_memories) < 5: return {} recent_memories = self.emotion_memories[-10:] emotion_counts = {} for memory in recent_memories: dominant_emotion, _ = memory.emotional_state.get_dominant_emotion() emotion_counts[dominant_emotion] = emotion_counts.get(dominant_emotion, 0) + 1 total = len(recent_memories) return {emotion: count / total for emotion, count in emotion_counts.items()} def simulate_emotional_reaction(self, trigger: str, intensity: float = 1.0) -> EmotionalState: """Simulate emotional reaction to a specific trigger.""" # Define emotional responses to different triggers trigger_responses = { 'praise': EmotionalState(joy=0.8, pride=0.7, trust=0.6), 'criticism': EmotionalState(sadness=0.6, shame=0.5, anger=0.3), 'surprise': EmotionalState(surprise=0.9, curiosity=0.7, anticipation=0.6), 'threat': EmotionalState(fear=0.8, anger=0.4, trust=0.2), 'loss': EmotionalState(sadness=0.9, despair=0.6, anger=0.3), 'achievement': EmotionalState(joy=0.9, pride=0.8, anticipation=0.7), 'betrayal': EmotionalState(anger=0.8, sadness=0.7, trust=0.1), 'love': EmotionalState(love=0.9, joy=0.8, trust=0.9), 'discovery': EmotionalState(curiosity=0.9, surprise=0.7, joy=0.6) } if trigger in trigger_responses: base_response = trigger_responses[trigger] # Modify based on current emotional state and maturity current_influence = 0.3 * (1 - self.emotional_maturity) # Blend with current state blended_state = EmotionalState( joy=(1 - current_influence) * base_response.joy + current_influence * self.current_state.joy, sadness=(1 - current_influence) * base_response.sadness + current_influence * self.current_state.sadness, anger=(1 - current_influence) * base_response.anger + current_influence * self.current_state.anger, fear=(1 - current_influence) * base_response.fear + current_influence * self.current_state.fear, surprise=(1 - current_influence) * base_response.surprise + current_influence * self.current_state.surprise, disgust=(1 - current_influence) * base_response.disgust + current_influence * self.current_state.disgust, trust=(1 - current_influence) * base_response.trust + current_influence * self.current_state.trust, anticipation=(1 - current_influence) * base_response.anticipation + current_influence * self.current_state.anticipation, love=(1 - current_influence) * base_response.love + current_influence * self.current_state.love, pride=(1 - current_influence) * base_response.pride + current_influence * self.current_state.pride, emotional_intensity=intensity, trigger=trigger ) return blended_state else: # Unknown trigger - slight emotional disturbance return EmotionalState( surprise=0.4, curiosity=0.5, emotional_intensity=intensity * 0.5, trigger=trigger ) def get_emotional_summary(self) -> Dict[str, Any]: """Get comprehensive summary of emotional system state.""" return { 'current_state': { 'dominant_emotion': self.current_state.get_dominant_emotion(), 'valence': self.current_state.get_emotional_valence(), 'arousal': self.current_state.get_emotional_arousal(), 'intensity': self.current_state.emotional_intensity, 'stability': self.current_state.emotional_stability }, 'emotional_growth': { 'maturity': self.emotional_maturity, 'total_experiences': self.emotional_experiences, 'learning_rate': float(self.emotional_learning_rate) }, 'memory_system': { 'total_memories': len(self.emotion_memories), 'significant_memories': len([m for m in self.emotion_memories if m.is_significant()]), 'memory_capacity': self.memory_capacity }, 'emotional_patterns': self.emotional_patterns, 'recent_patterns': self._get_recent_emotional_patterns() } def save_emotional_state(self, path: Path): """Save emotional system state.""" state = { 'current_state': { 'joy': self.current_state.joy, 'sadness': self.current_state.sadness, 'anger': self.current_state.anger, 'fear': self.current_state.fear, 'surprise': self.current_state.surprise, 'disgust': self.current_state.disgust, 'trust': self.current_state.trust, 'anticipation': self.current_state.anticipation, 'love': self.current_state.love, 'guilt': self.current_state.guilt, 'shame': self.current_state.shame, 'pride': self.current_state.pride, 'jealousy': self.current_state.jealousy, 'hope': self.current_state.hope, 'despair': self.current_state.despair, 'curiosity': self.current_state.curiosity, 'emotional_intensity': self.current_state.emotional_intensity, 'emotional_stability': self.current_state.emotional_stability, 'emotional_clarity': self.current_state.emotional_clarity }, 'emotional_maturity': self.emotional_maturity, 'emotional_experiences': self.emotional_experiences, 'emotional_learning_rate': float(self.emotional_learning_rate), 'emotional_patterns': self.emotional_patterns, 'emotion_memories': [ { 'emotional_state': memory.emotional_state.__dict__, 'context': memory.context, 'intensity': memory.intensity, 'impact_score': memory.impact_score, 'creation_time': memory.creation_time.isoformat() } for memory in self.emotion_memories[-200:] # Keep recent memories ], 'model_state': self.state_dict(), 'timestamp': datetime.now().isoformat() } with open(path, 'w') as f: json.dump(state, f, indent=2, default=str) logger.info(f"Emotional state saved to {path}") def load_emotional_state(self, path: Path): """Load emotional system state.""" if not path.exists(): logger.warning(f"Emotional state file not found: {path}") return try: with open(path, 'r') as f: state = json.load(f) # Restore current emotional state current_state_data = state['current_state'] self.current_state = EmotionalState(**current_state_data) # Restore growth metrics self.emotional_maturity = state.get('emotional_maturity', 0.5) self.emotional_experiences = state.get('emotional_experiences', 0) if 'emotional_learning_rate' in state: self.emotional_learning_rate.data = torch.tensor(state['emotional_learning_rate']) # Restore patterns self.emotional_patterns = state.get('emotional_patterns', {}) # Restore memories self.emotion_memories = [] for memory_data in state.get('emotion_memories', []): emotion_state_data = memory_data['emotional_state'] emotion_state = EmotionalState(**emotion_state_data) memory = EmotionMemory( emotional_state=emotion_state, context=memory_data['context'], intensity=memory_data['intensity'], impact_score=memory_data['impact_score'] ) memory.creation_time = datetime.fromisoformat(memory_data['creation_time']) self.emotion_memories.append(memory) # Restore model state if 'model_state' in state: self.load_state_dict(state['model_state']) logger.info(f"Emotional state loaded from {path}") except Exception as e: logger.error(f"Failed to load emotional state: {e}")