Spaces:
Sleeping
Sleeping
Sure
Browse files
app.py
CHANGED
|
@@ -1,643 +1,693 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import json
|
| 3 |
import requests
|
| 4 |
-
import asyncio
|
| 5 |
import random
|
| 6 |
import time
|
| 7 |
from datetime import datetime, timedelta
|
| 8 |
-
from typing import Dict, List, Optional, Any
|
| 9 |
-
import re
|
| 10 |
-
from collections import Counter
|
| 11 |
import threading
|
| 12 |
-
from dataclasses import dataclass
|
| 13 |
import hashlib
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
-
#
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
],
|
| 29 |
-
"response_patterns": {
|
| 30 |
-
"newcomer": ["We're so glad you found us!", "You're already fitting in perfectly", "Everyone here is amazing, you'll see"],
|
| 31 |
-
"regular": ["Did you notice how well they're doing?", "They're really one of us now", "It's amazing how quickly they've adapted"],
|
| 32 |
-
"skeptical": ["Give them time, they'll understand", "Remember how we all started?", "They just need to see the beauty of it"]
|
| 33 |
-
}
|
| 34 |
},
|
| 35 |
-
"
|
| 36 |
-
"
|
| 37 |
-
"
|
| 38 |
-
"
|
| 39 |
-
"
|
| 40 |
-
"manipulation_tactics": ["gradual revelation", "knowledge gatekeeping", "spiritual superiority"],
|
| 41 |
-
"common_phrases": [
|
| 42 |
-
"When you're ready, you'll understand",
|
| 43 |
-
"There are deeper levels to this",
|
| 44 |
-
"Your old way of thinking was holding you back",
|
| 45 |
-
"Trust the process"
|
| 46 |
-
],
|
| 47 |
-
"response_patterns": {
|
| 48 |
-
"newcomer": ["Patience is key here", "You've taken the first step on a journey", "There's so much more to discover"],
|
| 49 |
-
"regular": ["Look how far they've come", "They're starting to see the truth", "Remember your first doubts?"],
|
| 50 |
-
"skeptical": ["Doubt is natural, but growth comes from trust", "Skepticism is just the ego's defense", "You're still attached to old thinking"]
|
| 51 |
-
}
|
| 52 |
},
|
| 53 |
-
"
|
| 54 |
-
"
|
| 55 |
-
"
|
| 56 |
-
"
|
| 57 |
-
"
|
| 58 |
-
"manipulation_tactics": ["peer pressure", "sharing personal transformation", "fear of missing out"],
|
| 59 |
-
"common_phrases": [
|
| 60 |
-
"It changed my life completely",
|
| 61 |
-
"I was skeptical at first too",
|
| 62 |
-
"You won't believe how much better everything is now",
|
| 63 |
-
"I can't imagine going back to how I was"
|
| 64 |
-
],
|
| 65 |
-
"response_patterns": {
|
| 66 |
-
"newcomer": ["OMG you're going to love this!", "I was just like you when I started", "Wait until you experience the real benefits"],
|
| 67 |
-
"regular": ["Remember when we were both new?", "Isn't it amazing how much we've grown?", "I'm so grateful I found this"],
|
| 68 |
-
"skeptical": ["I get it, I used to think that way too", "Just give it a real chance", "Don't let fear hold you back"]
|
| 69 |
-
}
|
| 70 |
},
|
| 71 |
-
"
|
| 72 |
-
"
|
| 73 |
-
"
|
| 74 |
-
"
|
| 75 |
-
"
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
"response_patterns": {
|
| 84 |
-
"newcomer": [".", "Interesting perspective", "Time will tell", "Keep observing"],
|
| 85 |
-
"regular": ["They're learning", "Progress", "Almost there", "Noticed that too?"],
|
| 86 |
-
"skeptical": ["", "Expected", "They're not ready", "Let them figure it out"]
|
| 87 |
-
}
|
| 88 |
},
|
| 89 |
-
"
|
| 90 |
-
"
|
| 91 |
-
"
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
"
|
| 95 |
-
"
|
| 96 |
-
"I just want what's best for you",
|
| 97 |
-
"People outside won't understand",
|
| 98 |
-
"We're your real friends now",
|
| 99 |
-
"I worry about you when you're not here"
|
| 100 |
-
],
|
| 101 |
-
"response_patterns": {
|
| 102 |
-
"newcomer": ["Are you doing okay? Really?", "I'm here for you, whatever you need", "Don't listen to outsiders, they don't get it"],
|
| 103 |
-
"regular": ["They're so fragile right now", "We need to protect them from outside influence", "Remember how confused we were?"],
|
| 104 |
-
"skeptical": ["I understand your concerns, but...", "Have you considered why you feel this way?", "Maybe you're overthinking it"]
|
| 105 |
-
}
|
| 106 |
}
|
| 107 |
}
|
| 108 |
|
| 109 |
-
|
| 110 |
-
class
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
|
| 125 |
-
def
|
| 126 |
-
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
|
| 129 |
-
class
|
| 130 |
-
"""
|
| 131 |
|
| 132 |
def __init__(self):
|
| 133 |
-
self.
|
| 134 |
-
self.
|
| 135 |
-
self.
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
"""
|
| 150 |
-
if personality_type not in REALISTIC_PERSONALITIES:
|
| 151 |
-
return f"Error: Personality type '{personality_type}' not found. Available: {list(REALISTIC_PERSONALITIES.keys())}"
|
| 152 |
|
| 153 |
-
|
| 154 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 155 |
|
| 156 |
-
#
|
| 157 |
-
|
| 158 |
-
|
| 159 |
|
| 160 |
-
|
| 161 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 162 |
name=name,
|
| 163 |
-
personality_type=
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
skepticism_level=initial_skepticism
|
| 170 |
)
|
| 171 |
|
| 172 |
-
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
-
def
|
| 176 |
-
"""
|
| 177 |
-
|
| 178 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
|
| 180 |
-
def send_webhook_message(self, webhook_url
|
| 181 |
"""Send message via Discord webhook"""
|
| 182 |
try:
|
| 183 |
data = {
|
| 184 |
"content": content,
|
| 185 |
-
"username": username
|
|
|
|
| 186 |
}
|
| 187 |
|
| 188 |
-
if avatar_url:
|
| 189 |
-
data["avatar_url"] = avatar_url
|
| 190 |
-
|
| 191 |
response = requests.post(webhook_url, json=data, timeout=10)
|
| 192 |
return response.status_code == 204
|
| 193 |
except Exception as e:
|
| 194 |
-
print(f"
|
| 195 |
return False
|
| 196 |
|
| 197 |
-
def
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
# Determine target type for response patterns
|
| 203 |
-
if target_person:
|
| 204 |
-
if target_person.assimilation_stage == "newcomer":
|
| 205 |
-
target_type = "newcomer"
|
| 206 |
-
elif target_person.skepticism_level > 0.6:
|
| 207 |
-
target_type = "skeptical"
|
| 208 |
-
else:
|
| 209 |
-
target_type = "regular"
|
| 210 |
-
else:
|
| 211 |
-
target_type = "regular"
|
| 212 |
-
|
| 213 |
-
# Base response patterns
|
| 214 |
-
base_responses = personality["response_patterns"][target_type]
|
| 215 |
-
common_phrases = personality["common_phrases"]
|
| 216 |
-
|
| 217 |
-
# Contextual modifiers
|
| 218 |
-
context_modifiers = []
|
| 219 |
-
|
| 220 |
-
# Check conversation themes
|
| 221 |
-
full_context = " ".join(conversation_context[-5:]).lower()
|
| 222 |
-
|
| 223 |
-
if any(word in full_context for word in ["new", "just joined", "first time"]):
|
| 224 |
-
context_modifiers.append("welcoming")
|
| 225 |
-
self.recruitment_tactics["love_bombing"] += 1
|
| 226 |
-
|
| 227 |
-
if any(word in full_context for word in ["doubt", "question", "unsure", "confused"]):
|
| 228 |
-
context_modifiers.append("reassuring")
|
| 229 |
-
|
| 230 |
-
if any(word in full_context for word in ["outside", "family", "friends", "job"]):
|
| 231 |
-
context_modifiers.append("protective")
|
| 232 |
-
self.recruitment_tactics["isolation"] += 1
|
| 233 |
-
|
| 234 |
-
if any(word in full_context for word in ["understand", "learn", "knowledge"]):
|
| 235 |
-
context_modifiers.append("educational")
|
| 236 |
-
self.recruitment_tactics["special_knowledge"] += 1
|
| 237 |
-
|
| 238 |
-
# Build response
|
| 239 |
-
response_base = random.choice(base_responses)
|
| 240 |
-
|
| 241 |
-
# Add contextual phrases
|
| 242 |
-
if context_modifiers:
|
| 243 |
-
if "welcoming" in context_modifiers and person.skepticism_level < 0.3:
|
| 244 |
-
phrases = ["We're all so happy you're here", "You're already part of the family", "Everyone feels it too"]
|
| 245 |
-
response_base = random.choice(phrases) + " " + response_base.lower()
|
| 246 |
-
|
| 247 |
-
elif "reassuring" in context_modifiers:
|
| 248 |
-
phrases = ["Trust us on this", "You're safe here", "We understand completely"]
|
| 249 |
-
response_base = random.choice(phrases) + ". " + response_base
|
| 250 |
-
|
| 251 |
-
elif "protective" in context_modifiers:
|
| 252 |
-
phrases = ["People outside won't get it", "They don't understand our connection", "This is our special place"]
|
| 253 |
-
response_base = random.choice(phrases) + ". " + response_base
|
| 254 |
-
|
| 255 |
-
# Add personality-specific flourishes
|
| 256 |
-
if person.personality_type == "quiet_observer" and random.random() > 0.7:
|
| 257 |
-
# Sometimes just observe silently
|
| 258 |
-
return "." if random.random() > 0.5 else "..."
|
| 259 |
-
|
| 260 |
-
if person.personality_type == "enthusiastic_convert" and random.random() > 0.6:
|
| 261 |
-
emojis = ["β¨", "π", "π«", "π", "β€οΈ"]
|
| 262 |
-
response_base += " " + random.choice(emojis)
|
| 263 |
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
def simulate_assimilation_sequence(self, target_person: Person) -> List[str]:
|
| 267 |
-
"""Simulate targeted assimilation tactics"""
|
| 268 |
-
sequence_log = []
|
| 269 |
-
|
| 270 |
-
# Get believers to interact with newcomer
|
| 271 |
-
believers = [p for p in self.people if p.skepticism_level < 0.4 and p != target_person]
|
| 272 |
-
|
| 273 |
-
if not believers:
|
| 274 |
-
return ["Need more established members for assimilation"]
|
| 275 |
-
|
| 276 |
-
# Love bombing phase
|
| 277 |
-
if random.random() > 0.3:
|
| 278 |
-
love_bomber = random.choice(believers)
|
| 279 |
-
message = self.generate_contextual_response(love_bomber, [], target_person)
|
| 280 |
-
if self.send_webhook_message(love_bomber.webhook_url, message, love_bomber.name, love_bomber.avatar_url):
|
| 281 |
-
sequence_log.append(f"{love_bomber.name}: {message}")
|
| 282 |
-
target_person.skepticism_level -= 0.1
|
| 283 |
-
time.sleep(1)
|
| 284 |
-
|
| 285 |
-
# Peer pressure/validation
|
| 286 |
-
if len(believers) > 1:
|
| 287 |
-
validator = random.choice([b for b in believers if b != love_bomber])
|
| 288 |
-
message = f"I agree with {love_bomber.name.split('_')[0]}. {self.generate_contextual_response(validator, [], target_person)}"
|
| 289 |
-
if self.send_webhook_message(validator.webhook_url, message, validator.name, validator.avatar_url):
|
| 290 |
-
sequence_log.append(f"{validator.name}: {message}")
|
| 291 |
-
target_person.skepticism_level -= 0.05
|
| 292 |
-
time.sleep(1)
|
| 293 |
-
|
| 294 |
-
# Mentor intervention (if skepticism is still high)
|
| 295 |
-
if target_person.skepticism_level > 0.5:
|
| 296 |
-
mentors = [p for p in believers if p.personality_type == "wise_mentor"]
|
| 297 |
-
if mentors:
|
| 298 |
-
mentor = mentors[0]
|
| 299 |
-
message = self.generate_contextual_response(mentor, [], target_person)
|
| 300 |
-
if self.send_webhook_message(mentor.webhook_url, message, mentor.name, mentor.avatar_url):
|
| 301 |
-
sequence_log.append(f"{mentor.name}: {message}")
|
| 302 |
-
target_person.skepticism_level -= 0.15
|
| 303 |
-
time.sleep(1)
|
| 304 |
-
|
| 305 |
-
# Update assimilation stage
|
| 306 |
-
if target_person.skepticism_level < 0.2:
|
| 307 |
-
target_person.assimilation_stage = "devoted"
|
| 308 |
-
elif target_person.skepticism_level < 0.5:
|
| 309 |
-
target_person.assimilation_stage = "believing"
|
| 310 |
-
elif target_person.skepticism_level < 0.7:
|
| 311 |
-
target_person.assimilation_stage = "learning"
|
| 312 |
-
|
| 313 |
-
sequence_log.append(f"π {target_person.name}: Skepticism now {target_person.skepticism_level:.2f} ({target_person.assimilation_stage})")
|
| 314 |
-
|
| 315 |
-
return sequence_log
|
| 316 |
-
|
| 317 |
-
def simulate_natural_conversation(self, trigger_message: str = "", target_newcomer: bool = False) -> List[str]:
|
| 318 |
-
"""Simulate realistic group conversation"""
|
| 319 |
-
if len(self.people) < 2:
|
| 320 |
-
return ["β Need at least 2 people for conversation"]
|
| 321 |
|
| 322 |
conversation_log = []
|
| 323 |
|
| 324 |
-
# Select participants (ensure at least one interaction if targeting newcomer)
|
| 325 |
-
participants = self.people.copy()
|
| 326 |
-
if target_newcomer:
|
| 327 |
-
newcomers = [p for p in participants if p.assimilation_stage == "newcomer"]
|
| 328 |
-
if newcomers:
|
| 329 |
-
target = random.choice(newcomers)
|
| 330 |
-
# Ensure at least 2 believers participate
|
| 331 |
-
believers = [p for p in participants if p.skepticism_level < 0.4]
|
| 332 |
-
if len(believers) >= 2:
|
| 333 |
-
participants = [target] + random.sample(believers, min(2, len(believers)))
|
| 334 |
-
|
| 335 |
# Start conversation
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
person.message_count += 1
|
| 362 |
-
responded.add(person)
|
| 363 |
-
time.sleep(random.uniform(0.5, 2))
|
| 364 |
-
|
| 365 |
-
# Log the conversation
|
| 366 |
-
self.message_log.extend(conversation_log)
|
| 367 |
return conversation_log
|
| 368 |
|
| 369 |
-
def
|
| 370 |
-
"""
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
-
|
| 386 |
-
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
|
| 390 |
-
|
| 391 |
-
|
| 392 |
-
|
| 393 |
-
# Recruitment tactics used
|
| 394 |
-
analysis += "\nRecruitment Tactics Used:\n"
|
| 395 |
-
for tactic, count in self.recruitment_tactics.items():
|
| 396 |
-
if count > 0:
|
| 397 |
-
analysis += f" {tactic.replace('_', ' ').title()}: {count}\n"
|
| 398 |
-
|
| 399 |
-
return analysis
|
| 400 |
|
| 401 |
-
#
|
| 402 |
-
|
| 403 |
|
| 404 |
-
def
|
| 405 |
-
"""Create the
|
| 406 |
-
|
| 407 |
-
# More subdued, realistic CSS
|
| 408 |
-
custom_css = """
|
| 409 |
-
.gradio-container {
|
| 410 |
-
background: linear-gradient(135deg, #2c3e50 0%, #34495e 100%);
|
| 411 |
-
color: #ecf0f1;
|
| 412 |
-
}
|
| 413 |
-
.gr-button-primary {
|
| 414 |
-
background: #3498db;
|
| 415 |
-
border: 1px solid #2980b9;
|
| 416 |
-
}
|
| 417 |
-
.gr-button-secondary {
|
| 418 |
-
background: #7f8c8d;
|
| 419 |
-
border: 1px solid #95a5a6;
|
| 420 |
-
}
|
| 421 |
-
.gr-textbox {
|
| 422 |
-
background: #34495e;
|
| 423 |
-
border: 1px solid #7f8c8d;
|
| 424 |
-
color: #ecf0f1;
|
| 425 |
-
}
|
| 426 |
-
.gr-dropdown {
|
| 427 |
-
background: #34495e;
|
| 428 |
-
border: 1px solid #7f8c8d;
|
| 429 |
-
}
|
| 430 |
-
.realistic-header {
|
| 431 |
-
background: linear-gradient(90deg, #3498db, #2c3e50, #3498db);
|
| 432 |
-
padding: 20px;
|
| 433 |
-
border-radius: 10px;
|
| 434 |
-
text-align: center;
|
| 435 |
-
margin-bottom: 20px;
|
| 436 |
-
}
|
| 437 |
-
"""
|
| 438 |
|
| 439 |
-
with gr.Blocks(
|
| 440 |
|
| 441 |
-
# Header
|
| 442 |
gr.HTML("""
|
| 443 |
-
<div
|
| 444 |
-
<h1>
|
| 445 |
-
<h2>
|
| 446 |
-
<p>
|
| 447 |
</div>
|
| 448 |
""")
|
| 449 |
|
| 450 |
-
# Main Tabs
|
| 451 |
with gr.Tabs():
|
| 452 |
|
| 453 |
-
# Tab 1:
|
| 454 |
-
with gr.Tab("
|
| 455 |
-
gr.Markdown("###
|
| 456 |
|
| 457 |
with gr.Row():
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
|
|
|
| 462 |
)
|
| 463 |
-
|
| 464 |
-
label="
|
| 465 |
-
choices=list(
|
| 466 |
-
value="
|
| 467 |
)
|
| 468 |
-
custom_name = gr.Textbox(
|
| 469 |
-
label="Custom Name (Optional)",
|
| 470 |
-
placeholder="Leave blank for default name"
|
| 471 |
-
)
|
| 472 |
-
is_newcomer = gr.Checkbox(
|
| 473 |
-
label="New to Group?",
|
| 474 |
-
value=False
|
| 475 |
-
)
|
| 476 |
-
|
| 477 |
-
with gr.Row():
|
| 478 |
-
add_btn = gr.Button("β Add Member", variant="primary")
|
| 479 |
-
remove_btn = gr.Button("β Remove Member", variant="secondary")
|
| 480 |
|
| 481 |
-
|
| 482 |
-
|
| 483 |
-
|
| 484 |
-
|
| 485 |
-
interactive=False,
|
| 486 |
-
lines=3
|
| 487 |
)
|
| 488 |
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
|
| 492 |
-
|
| 493 |
-
|
| 494 |
-
|
| 495 |
-
)
|
| 496 |
-
|
| 497 |
-
# Tab 2: Conversation
|
| 498 |
-
with gr.Tab("π¬ Conversation
|
| 499 |
-
gr.Markdown("### Simulate
|
| 500 |
|
| 501 |
with gr.Row():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 502 |
trigger_message = gr.Textbox(
|
| 503 |
-
label="Conversation Starter
|
| 504 |
placeholder="What should they talk about?",
|
| 505 |
lines=2
|
| 506 |
)
|
| 507 |
-
target_newcomer = gr.Checkbox(
|
| 508 |
-
label="Focus on Newcomer Assimilation?",
|
| 509 |
-
value=False
|
| 510 |
-
)
|
| 511 |
|
| 512 |
with gr.Row():
|
| 513 |
-
simulate_btn = gr.Button("
|
| 514 |
auto_simulate_btn = gr.Button("π Auto-Simulate", variant="secondary")
|
| 515 |
-
stop_btn = gr.Button("βΉοΈ Stop
|
| 516 |
|
| 517 |
-
|
| 518 |
label="Conversation Log",
|
| 519 |
-
|
| 520 |
-
interactive=False
|
| 521 |
-
lines=15
|
| 522 |
)
|
| 523 |
-
|
| 524 |
-
# Tab 3:
|
| 525 |
-
with gr.Tab("
|
| 526 |
-
gr.Markdown(""
|
| 527 |
-
### Personality Archetypes:
|
| 528 |
-
|
| 529 |
-
**Friendly Host (Alex)** - The welcoming figure who makes everyone feel special. Uses love bombing and excessive praise to create immediate connection.
|
| 530 |
-
|
| 531 |
-
**Wise Mentor (Jordan)** - The patient teacher who gradually reveals information. Uses knowledge gatekeeping and positions themselves as spiritually superior.
|
| 532 |
|
| 533 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 534 |
|
| 535 |
-
|
|
|
|
|
|
|
| 536 |
|
| 537 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 538 |
|
| 539 |
-
|
| 540 |
-
|
| 541 |
-
|
| 542 |
-
- **Isolation**: Creating distance from outside influences
|
| 543 |
-
- **Special Knowledge**: Exclusive information only members have
|
| 544 |
-
- **Us vs Them**: Framing the group against the outside world
|
| 545 |
|
| 546 |
-
|
| 547 |
-
This simulation demonstrates how normal-seeming social interactions can gradually lead to group assimilation through subtle psychological tactics.
|
| 548 |
-
""")
|
| 549 |
|
| 550 |
-
gr.
|
| 551 |
-
recruitment_display = gr.Textbox(
|
| 552 |
-
label="Tactic Usage",
|
| 553 |
-
value=json.dumps(simulator.recruitment_tactics, indent=2),
|
| 554 |
-
interactive=False,
|
| 555 |
-
lines=8
|
| 556 |
-
)
|
| 557 |
|
| 558 |
-
# Event
|
| 559 |
-
def
|
| 560 |
-
|
| 561 |
-
return
|
| 562 |
|
| 563 |
-
def
|
| 564 |
-
|
| 565 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 566 |
|
| 567 |
-
def
|
| 568 |
-
if
|
| 569 |
-
return "β
|
| 570 |
|
| 571 |
-
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
|
| 575 |
-
|
| 576 |
-
|
| 577 |
-
|
| 578 |
-
|
| 579 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 580 |
|
| 581 |
-
|
| 582 |
-
|
| 583 |
-
|
| 584 |
-
|
|
|
|
| 585 |
)
|
| 586 |
|
| 587 |
simulate_btn.click(
|
| 588 |
simulate_conversation_handler,
|
| 589 |
-
inputs=[
|
| 590 |
-
outputs=[
|
| 591 |
)
|
| 592 |
|
| 593 |
-
|
| 594 |
-
|
| 595 |
-
|
| 596 |
-
|
| 597 |
-
# Mix of normal conversations and newcomer targeting
|
| 598 |
-
target_new = random.random() > 0.7 and any(p.assimilation_stage == "newcomer" for p in simulator.people)
|
| 599 |
-
simulator.simulate_natural_conversation(target_newcomer=target_new)
|
| 600 |
-
time.sleep(random.randint(10, 30))
|
| 601 |
-
|
| 602 |
-
def start_auto_simulate():
|
| 603 |
-
simulator.auto_simulating = True
|
| 604 |
-
thread = threading.Thread(target=auto_simulate_thread, daemon=True)
|
| 605 |
-
thread.start()
|
| 606 |
-
return "π Auto-simulation started..."
|
| 607 |
-
|
| 608 |
-
def stop_auto_simulate():
|
| 609 |
-
simulator.auto_simulating = False
|
| 610 |
-
return "βΉοΈ Auto-simulation stopped"
|
| 611 |
-
|
| 612 |
-
auto_simulate_btn.click(
|
| 613 |
-
start_auto_simulate,
|
| 614 |
-
outputs=[conversation_log]
|
| 615 |
)
|
| 616 |
|
| 617 |
-
|
| 618 |
-
|
| 619 |
-
|
|
|
|
| 620 |
)
|
| 621 |
|
| 622 |
-
|
| 623 |
-
|
| 624 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 625 |
|
| 626 |
-
#
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
outputs=[group_analysis, recruitment_display]
|
| 631 |
)
|
| 632 |
|
| 633 |
-
return
|
| 634 |
|
| 635 |
# Launch the application
|
| 636 |
if __name__ == "__main__":
|
| 637 |
-
|
| 638 |
-
|
| 639 |
server_name="0.0.0.0",
|
| 640 |
server_port=7860,
|
| 641 |
-
share=
|
| 642 |
-
show_error=True
|
| 643 |
)
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import json
|
| 3 |
import requests
|
|
|
|
| 4 |
import random
|
| 5 |
import time
|
| 6 |
from datetime import datetime, timedelta
|
| 7 |
+
from typing import Dict, List, Optional, Any, Tuple
|
|
|
|
|
|
|
| 8 |
import threading
|
| 9 |
+
from dataclasses import dataclass, field
|
| 10 |
import hashlib
|
| 11 |
+
import sqlite3
|
| 12 |
+
import base64
|
| 13 |
+
from io import BytesIO
|
| 14 |
+
import os
|
| 15 |
|
| 16 |
+
# Hugging Face Imports
|
| 17 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 18 |
+
from diffusers import StableDiffusionPipeline
|
| 19 |
+
import torch
|
| 20 |
+
from huggingface_hub import InferenceApi
|
| 21 |
+
|
| 22 |
+
# Model configurations
|
| 23 |
+
TEXT_GENERATION_MODELS = {
|
| 24 |
+
"Qwen2.5-3B-Instruct": {
|
| 25 |
+
"model_id": "Qwen/Qwen2.5-3B-Instruct",
|
| 26 |
+
"description": "Efficient instruction-following model",
|
| 27 |
+
"max_tokens": 512,
|
| 28 |
+
"temperature": 0.7
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
},
|
| 30 |
+
"Mistral-7B-Instruct": {
|
| 31 |
+
"model_id": "mistralai/Mistral-7B-Instruct-v0.2",
|
| 32 |
+
"description": "Popular conversational model",
|
| 33 |
+
"max_tokens": 1024,
|
| 34 |
+
"temperature": 0.8
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
},
|
| 36 |
+
"Llama-3.2-1B-Instruct": {
|
| 37 |
+
"model_id": "meta-llama/Llama-3.2-1B-Instruct",
|
| 38 |
+
"description": "Meta's small efficient model",
|
| 39 |
+
"max_tokens": 512,
|
| 40 |
+
"temperature": 0.6
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
},
|
| 42 |
+
"GPT2": {
|
| 43 |
+
"model_id": "openai-community/gpt2",
|
| 44 |
+
"description": "Classic text generation",
|
| 45 |
+
"max_tokens": 256,
|
| 46 |
+
"temperature": 0.9
|
| 47 |
+
}
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
IMAGE_GENERATION_MODELS = {
|
| 51 |
+
"FLUX.1-dev": {
|
| 52 |
+
"model_id": "black-forest-labs/FLUX.1-dev",
|
| 53 |
+
"description": "High quality image generation"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
},
|
| 55 |
+
"Stable-Diffusion-XL": {
|
| 56 |
+
"model_id": "stabilityai/stable-diffusion-xl-base-1.0",
|
| 57 |
+
"description": "Reliable stable diffusion"
|
| 58 |
+
},
|
| 59 |
+
"Z-Image-Turbo": {
|
| 60 |
+
"model_id": "Tongyi-MAI/Z-Image-Turbo",
|
| 61 |
+
"description": "Fast image generation"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 62 |
}
|
| 63 |
}
|
| 64 |
|
| 65 |
+
# Database Setup
|
| 66 |
+
class DatabaseManager:
|
| 67 |
+
def __init__(self, db_path="cult_simulator.db"):
|
| 68 |
+
self.db_path = db_path
|
| 69 |
+
self.init_database()
|
| 70 |
+
|
| 71 |
+
def init_database(self):
|
| 72 |
+
"""Initialize SQLite database"""
|
| 73 |
+
conn = sqlite3.connect(self.db_path)
|
| 74 |
+
cursor = conn.cursor()
|
| 75 |
+
|
| 76 |
+
# Create tables
|
| 77 |
+
cursor.execute('''
|
| 78 |
+
CREATE TABLE IF NOT EXISTS personalities (
|
| 79 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 80 |
+
name TEXT NOT NULL,
|
| 81 |
+
personality_type TEXT,
|
| 82 |
+
avatar_prompt TEXT,
|
| 83 |
+
avatar_image BLOB,
|
| 84 |
+
traits TEXT, -- JSON
|
| 85 |
+
background_story TEXT,
|
| 86 |
+
system_prompt TEXT,
|
| 87 |
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
| 88 |
+
)
|
| 89 |
+
''')
|
| 90 |
+
|
| 91 |
+
cursor.execute('''
|
| 92 |
+
CREATE TABLE IF NOT EXISTS conversations (
|
| 93 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 94 |
+
personality_id INTEGER,
|
| 95 |
+
message_content TEXT,
|
| 96 |
+
context TEXT, -- JSON
|
| 97 |
+
response_model TEXT,
|
| 98 |
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 99 |
+
FOREIGN KEY (personality_id) REFERENCES personalities (id)
|
| 100 |
+
)
|
| 101 |
+
''')
|
| 102 |
+
|
| 103 |
+
cursor.execute('''
|
| 104 |
+
CREATE TABLE IF NOT EXISTS webhooks (
|
| 105 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 106 |
+
personality_id INTEGER,
|
| 107 |
+
webhook_url TEXT,
|
| 108 |
+
discord_channel_id TEXT,
|
| 109 |
+
is_active BOOLEAN DEFAULT 1,
|
| 110 |
+
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
| 111 |
+
FOREIGN KEY (personality_id) REFERENCES personalities (id)
|
| 112 |
+
)
|
| 113 |
+
''')
|
| 114 |
+
|
| 115 |
+
conn.commit()
|
| 116 |
+
conn.close()
|
| 117 |
+
|
| 118 |
+
def save_personality(self, name, personality_type, avatar_prompt, avatar_blob, traits, background_story, system_prompt):
|
| 119 |
+
"""Save personality to database"""
|
| 120 |
+
conn = sqlite3.connect(self.db_path)
|
| 121 |
+
cursor = conn.cursor()
|
| 122 |
+
|
| 123 |
+
cursor.execute('''
|
| 124 |
+
INSERT INTO personalities (name, personality_type, avatar_prompt, avatar_image, traits, background_story, system_prompt)
|
| 125 |
+
VALUES (?, ?, ?, ?, ?, ?, ?)
|
| 126 |
+
''', (name, personality_type, avatar_prompt, avatar_blob, json.dumps(traits), background_story, system_prompt))
|
| 127 |
+
|
| 128 |
+
personality_id = cursor.lastrowid
|
| 129 |
+
conn.commit()
|
| 130 |
+
conn.close()
|
| 131 |
+
return personality_id
|
| 132 |
|
| 133 |
+
def save_conversation(self, personality_id, message_content, context, response_model):
|
| 134 |
+
"""Save conversation to database"""
|
| 135 |
+
conn = sqlite3.connect(self.db_path)
|
| 136 |
+
cursor = conn.cursor()
|
| 137 |
+
|
| 138 |
+
cursor.execute('''
|
| 139 |
+
INSERT INTO conversations (personality_id, message_content, context, response_model)
|
| 140 |
+
VALUES (?, ?, ?, ?)
|
| 141 |
+
''', (personality_id, message_content, json.dumps(context), response_model))
|
| 142 |
+
|
| 143 |
+
conn.commit()
|
| 144 |
+
conn.close()
|
| 145 |
+
|
| 146 |
+
def get_personalities(self):
|
| 147 |
+
"""Get all personalities"""
|
| 148 |
+
conn = sqlite3.connect(self.db_path)
|
| 149 |
+
cursor = conn.cursor()
|
| 150 |
+
|
| 151 |
+
cursor.execute('SELECT * FROM personalities ORDER BY created_at DESC')
|
| 152 |
+
rows = cursor.fetchall()
|
| 153 |
+
conn.close()
|
| 154 |
+
return rows
|
| 155 |
+
|
| 156 |
+
def get_personalities_with_webhooks(self):
|
| 157 |
+
"""Get personalities with their webhooks"""
|
| 158 |
+
conn = sqlite3.connect(self.db_path)
|
| 159 |
+
cursor = conn.cursor()
|
| 160 |
+
|
| 161 |
+
cursor.execute('''
|
| 162 |
+
SELECT p.*, w.webhook_url, w.discord_channel_id, w.is_active
|
| 163 |
+
FROM personalities p
|
| 164 |
+
LEFT JOIN webhooks w ON p.id = w.personality_id
|
| 165 |
+
ORDER BY p.created_at DESC
|
| 166 |
+
''')
|
| 167 |
+
rows = cursor.fetchall()
|
| 168 |
+
conn.close()
|
| 169 |
+
return rows
|
| 170 |
|
| 171 |
+
class HuggingFaceModelManager:
|
| 172 |
+
"""Manage Hugging Face models for text and image generation"""
|
| 173 |
|
| 174 |
def __init__(self):
|
| 175 |
+
self.text_models = {}
|
| 176 |
+
self.image_models = {}
|
| 177 |
+
self.loaded_models = {}
|
| 178 |
+
|
| 179 |
+
def load_text_model(self, model_key):
|
| 180 |
+
"""Load a text generation model"""
|
| 181 |
+
if model_key not in self.loaded_models:
|
| 182 |
+
config = TEXT_GENERATION_MODELS[model_key]
|
| 183 |
+
try:
|
| 184 |
+
self.loaded_models[model_key] = pipeline(
|
| 185 |
+
"text-generation",
|
| 186 |
+
model=config["model_id"],
|
| 187 |
+
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
|
| 188 |
+
device_map="auto" if torch.cuda.is_available() else None
|
| 189 |
+
)
|
| 190 |
+
print(f"β
Loaded {model_key}")
|
| 191 |
+
except Exception as e:
|
| 192 |
+
print(f"β Error loading {model_key}: {e}")
|
| 193 |
+
return None
|
| 194 |
+
return self.loaded_models[model_key]
|
| 195 |
+
|
| 196 |
+
def generate_text(self, model_key, prompt, max_length=200):
|
| 197 |
+
"""Generate text using specified model"""
|
| 198 |
+
model = self.load_text_model(model_key)
|
| 199 |
+
if not model:
|
| 200 |
+
return f"Error: Could not load model {model_key}"
|
| 201 |
+
|
| 202 |
+
try:
|
| 203 |
+
config = TEXT_GENERATION_MODELS[model_key]
|
| 204 |
+
result = model(
|
| 205 |
+
prompt,
|
| 206 |
+
max_new_tokens=max_length,
|
| 207 |
+
temperature=config["temperature"],
|
| 208 |
+
do_sample=True,
|
| 209 |
+
pad_token_id=model.tokenizer.eos_token_id
|
| 210 |
+
)
|
| 211 |
+
|
| 212 |
+
generated_text = result[0]['generated_text']
|
| 213 |
+
# Remove the prompt from the response
|
| 214 |
+
if generated_text.startswith(prompt):
|
| 215 |
+
generated_text = generated_text[len(prompt):].strip()
|
| 216 |
+
|
| 217 |
+
return generated_text
|
| 218 |
+
except Exception as e:
|
| 219 |
+
return f"Error generating text: {e}"
|
| 220 |
+
|
| 221 |
+
def generate_avatar(self, model_key, personality_description):
|
| 222 |
+
"""Generate avatar using image model"""
|
| 223 |
+
config = IMAGE_GENERATION_MODELS[model_key]
|
| 224 |
+
|
| 225 |
+
# Create prompt for avatar
|
| 226 |
+
avatar_prompt = f"""
|
| 227 |
+
Portrait of {personality_description}, professional headshot,
|
| 228 |
+
realistic style, soft lighting, detailed facial features,
|
| 229 |
+
professional attire, clean background, high quality
|
| 230 |
+
"""
|
| 231 |
+
|
| 232 |
+
try:
|
| 233 |
+
# Use Hugging Face Inference API for image generation
|
| 234 |
+
# (Simpler than loading local models)
|
| 235 |
+
inference = InferenceApi(repo_id=config["model_id"], token=os.getenv("HF_TOKEN"))
|
| 236 |
+
|
| 237 |
+
# For demonstration, we'll return a placeholder URL
|
| 238 |
+
# In production, you'd call the actual inference
|
| 239 |
+
avatar_url = f"https://image.pollinations.ai/prompt/{avatar_prompt.replace(' ', '%20')}"
|
| 240 |
+
|
| 241 |
+
return avatar_url
|
| 242 |
+
except Exception as e:
|
| 243 |
+
return f"https://api.dicebear.com/7.x/avataaars/svg?seed={hash(personality_description)}"
|
| 244 |
+
|
| 245 |
+
class PersonalityGenerator:
    """Generate AI personality profiles and system prompts via language models."""

    def __init__(self, model_manager):
        # model_manager must expose generate_text(model_key, prompt, max_length=...).
        self.model_manager = model_manager

    def generate_personality_traits(self, model_key, context=""):
        """Generate a personality profile dict using the text model.

        Asks the model for a JSON profile; if the response contains no
        parseable JSON object, returns a randomized fallback profile so
        callers always receive a complete dict.

        Args:
            model_key: Key passed through to the model manager.
            context: Optional free-text hint appended to the prompt.

        Returns:
            dict with keys: name, traits (trait -> 0..1 float),
            background_story, communication_style, description.
        """
        prompt = f"""
        Generate a detailed personality profile for a fictional character.
        Include specific personality traits, communication style, background story.
        {context}

        Format as JSON:
        {{
            "name": "Character name",
            "traits": {{
                "welcoming": 0.8,
                "empathetic": 0.7,
                "cautious": 0.3,
                "enthusiastic": 0.6,
                "mysterious": 0.4
            }},
            "background_story": "Brief background",
            "communication_style": "How they talk",
            "description": "Physical appearance description"
        }}
        """

        response = self.model_manager.generate_text(model_key, prompt, max_length=300)

        try:
            # Extract the outermost {...} span; DOTALL lets it cross newlines.
            # Fixed from r'\\{.*\\}', which matched only literal backslashes
            # before the braces and therefore never matched real model output.
            import re
            json_match = re.search(r'\{.*\}', response, re.DOTALL)
            if json_match:
                return json.loads(json_match.group())
        except (json.JSONDecodeError, TypeError, ValueError):
            # Malformed JSON from the model -> use the random fallback below.
            pass

        # Fallback to a basic randomized personality so the caller never
        # has to handle a partial/missing profile.
        return {
            "name": f"AI_Personality_{random.randint(1000, 9999)}",
            "traits": {
                "welcoming": random.uniform(0.5, 1.0),
                "empathetic": random.uniform(0.3, 0.9),
                "cautious": random.uniform(0.2, 0.8),
                "enthusiastic": random.uniform(0.4, 1.0),
                "mysterious": random.uniform(0.1, 0.7)
            },
            "background_story": "An AI-generated personality created for social simulation.",
            "communication_style": "Friendly and engaging",
            "description": "A unique AI-generated character"
        }

    def generate_system_prompt(self, personality_data, context=""):
        """Generate the dynamic system prompt text for an AI personality.

        Traits above 0.7 render as "very <trait>", above 0.4 as
        "somewhat <trait>"; weaker traits are omitted entirely.

        Args:
            personality_data: Profile dict as produced by
                generate_personality_traits.
            context: Optional extra instructions embedded in the prompt.

        Returns:
            str: Stripped multi-line system prompt.
        """
        traits_desc = []
        for trait, value in personality_data["traits"].items():
            if value > 0.7:
                traits_desc.append(f"very {trait}")
            elif value > 0.4:
                traits_desc.append(f"somewhat {trait}")

        prompt = f"""
        You are {personality_data["name"]}, a character in a social simulation.

        Your personality: {', '.join(traits_desc)}.

        Background: {personality_data["background_story"]}

        Communication style: {personality_data["communication_style"]}

        {context}

        Respond naturally as this character would, maintaining your personality traits.
        Be engaging but authentic to your character.
        """

        return prompt.strip()
|
| 325 |
+
|
| 326 |
+
class CultSimulatorApp:
    """Main application: owns the database, the model manager, and the
    in-memory list of active personalities, and drives conversations
    between them."""

    def __init__(self):
        self.db = DatabaseManager()
        self.model_manager = HuggingFaceModelManager()
        self.personality_generator = PersonalityGenerator(self.model_manager)
        # Personality dicts created in this session (id, traits, avatar_url, ...).
        self.active_personalities = []
        self.simulation_running = False

    def create_personality(self, name, model_key, context="", avatar_model_key="FLUX.1-dev"):
        """Create a new AI personality, persist it, and keep it in memory.

        Args:
            name: Display name; auto-generated when falsy.
            model_key: Text-generation model used for the profile.
            context: Optional free-text hint for the trait generator.
            avatar_model_key: Image model key (new parameter, defaulting to
                the previously hard-coded "FLUX.1-dev" for compatibility).

        Returns:
            The personality dict, extended with "id", "avatar_url" and
            "system_prompt".
        """
        if not name:
            name = f"AI_Character_{random.randint(1000, 9999)}"

        # Generate personality traits
        personality_data = self.personality_generator.generate_personality_traits(model_key, context)
        personality_data["name"] = name

        # Generate system prompt
        system_prompt = self.personality_generator.generate_system_prompt(personality_data)

        # Generate avatar (model key is now configurable instead of hard-coded)
        avatar_url = self.model_manager.generate_avatar(avatar_model_key, personality_data["description"])

        # Save to database
        personality_id = self.db.save_personality(
            name=name,
            personality_type=model_key,
            avatar_prompt=personality_data["description"],
            avatar_blob=avatar_url.encode(),  # Store URL as bytes for demo
            traits=personality_data["traits"],
            background_story=personality_data["background_story"],
            system_prompt=system_prompt
        )

        personality_data["id"] = personality_id
        personality_data["avatar_url"] = avatar_url
        personality_data["system_prompt"] = system_prompt

        self.active_personalities.append(personality_data)

        return personality_data

    def generate_response(self, personality_id, message, model_key, context=""):
        """Generate an in-character reply and log the exchange to the DB."""
        # Look up the in-memory personality dict by id.
        personality = next((p for p in self.active_personalities if p.get("id") == personality_id), None)
        if not personality:
            return "Personality not found"

        # Create full prompt: system prompt + user turn + response cue.
        full_prompt = f"{personality['system_prompt']}\n\nUser message: {message}\n\nResponse as {personality['name']}:"

        # Generate response
        response = self.model_manager.generate_text(model_key, full_prompt, max_length=150)

        # Save conversation
        self.db.save_conversation(personality_id, message, {"context": context}, model_key)

        return response

    def send_webhook_message(self, webhook_url, content, username, avatar_url):
        """Send message via Discord webhook; True on success, False otherwise."""
        try:
            data = {
                "content": content,
                "username": username,
                "avatar_url": avatar_url
            }

            response = requests.post(webhook_url, json=data, timeout=10)
            # Discord's Execute Webhook returns 204 No Content on success.
            return response.status_code == 204
        except Exception as e:
            print(f"Webhook error: {e}")
            return False

    def simulate_conversation(self, trigger_message="", model_key="Qwen2.5-3B-Instruct", participants=None):
        """Simulate a short conversation between AI personalities.

        Returns:
            list[str]: "**Name**: text" lines (or a one-element error list
            when fewer than two personalities exist).
        """
        if not participants:
            participants = random.sample(self.active_personalities, min(3, len(self.active_personalities)))

        if len(participants) < 2:
            return ["Need at least 2 personalities for conversation"]

        conversation_log = []

        # Start conversation
        starter = random.choice(participants)
        starter_response = self.generate_response(starter["id"], trigger_message or "Start a conversation", model_key)

        conversation_log.append(f"**{starter['name']}**: {starter_response}")

        # Forward to Discord when a webhook is attached.
        # Fixed: personalities are dicts, so the original hasattr() check was
        # always False and webhook delivery was silently skipped.
        if starter.get("webhook_url"):
            self.send_webhook_message(starter['webhook_url'], starter_response, starter['name'], starter['avatar_url'])

        time.sleep(1)

        # Other participants respond (each with a 70% chance of chiming in).
        for participant in participants:
            if participant != starter and random.random() > 0.3:
                context = f"Responding to: {starter_response}"
                response = self.generate_response(participant["id"], "What do you think about that?", model_key, context)

                conversation_log.append(f"**{participant['name']}**: {response}")

                # Same dict-key fix as above for the webhook forwarding.
                if participant.get("webhook_url"):
                    self.send_webhook_message(participant['webhook_url'], response, participant['name'], participant['avatar_url'])

                time.sleep(random.uniform(0.5, 2))

        return conversation_log

    def get_personalities_display(self):
        """Get a Markdown summary of all personalities stored in the DB."""
        personalities = self.db.get_personalities()

        if not personalities:
            return "No personalities created yet"

        display = "## AI Personalities Database\n\n"

        # Expected row layout: (id, name, type, avatar_prompt, avatar_blob,
        # traits_json, background, system_prompt, created_at).
        for personality in personalities:
            # 'pid' instead of 'id' so the builtin is not shadowed.
            pid, name, personality_type, avatar_prompt, _, traits_json, background, system_prompt, created_at = personality

            try:
                traits = json.loads(traits_json) if traits_json else {}
                display += f"### {name}\n"
                display += f"- **Type**: {personality_type}\n"
                display += f"- **Created**: {created_at}\n"
                display += f"- **Traits**: {', '.join([f'{k}: {v:.2f}' for k, v in traits.items()])}\n"
                display += f"- **Background**: {background[:100]}...\n\n"
            except Exception:
                # Keep rendering the remaining rows even if one is corrupt.
                display += f"### {name}\nError loading personality data\n\n"

        return display
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 464 |
|
| 465 |
+
# Initialize application
# Module-level singleton; the Gradio event handlers defined in
# create_gradio_interface() below all close over this instance.
app = CultSimulatorApp()
| 468 |
+
def create_gradio_interface():
    """Create the Gradio interface.

    Builds a four-tab Blocks app (personality generation, conversation
    simulation, webhook integration, database/analytics) and wires its
    buttons to the module-level `app` singleton. Returns the Blocks object
    so the caller can launch it.
    """

    with gr.Blocks(title="π€ Hugging Face Cult Simulator", theme=gr.themes.Soft()) as interface:

        # Static banner at the top of the page.
        gr.HTML("""
        <div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 20px;">
            <h1>π€ Hugging Face Cult Simulator</h1>
            <h2>AI-Powered Personality Generation & Social Simulation</h2>
            <p>Generate unique AI personalities using state-of-the-art models and watch them interact</p>
        </div>
        """)

        with gr.Tabs():

            # Tab 1: Personality Generation
            with gr.Tab("π§  AI Personality Generation"):
                gr.Markdown("### Generate AI Personalities Using Language Models")

                with gr.Row():
                    name_input = gr.Textbox(label="Character Name (Optional)", placeholder="Leave blank for auto-generated")
                    model_selector = gr.Dropdown(
                        label="Text Generation Model",
                        choices=list(TEXT_GENERATION_MODELS.keys()),
                        value="Qwen2.5-3B-Instruct"
                    )
                    avatar_model_selector = gr.Dropdown(
                        label="Avatar Generation Model",
                        choices=list(IMAGE_GENERATION_MODELS.keys()),
                        value="FLUX.1-dev"
                    )

                context_input = gr.Textbox(
                    label="Generation Context (Optional)",
                    placeholder="E.g., 'Create a friendly but mysterious character who welcomes newcomers'",
                    lines=2
                )

                generate_btn = gr.Button("π Generate AI Personality", variant="primary")

                personality_output = gr.JSON(label="Generated Personality")
                avatar_output = gr.Image(label="Generated Avatar")

                # Personality display (seeded from the DB at build time).
                personality_display = gr.Markdown(app.get_personalities_display())

            # Tab 2: Conversation Simulator
            with gr.Tab("π¬ Conversation Simulator"):
                gr.Markdown("### Simulate Conversations Between AI Personalities")

                with gr.Row():
                    conversation_model = gr.Dropdown(
                        label="Response Generation Model",
                        choices=list(TEXT_GENERATION_MODELS.keys()),
                        value="Qwen2.5-3B-Instruct"
                    )
                    trigger_message = gr.Textbox(
                        label="Conversation Starter",
                        placeholder="What should they talk about?",
                        lines=2
                    )

                with gr.Row():
                    simulate_btn = gr.Button("π¬ Start Conversation", variant="primary")
                    # NOTE(review): auto_simulate_btn and stop_btn are created
                    # but no click handler is wired for them below.
                    auto_simulate_btn = gr.Button("π Auto-Simulate", variant="secondary")
                    stop_btn = gr.Button("βΉοΈ Stop", variant="stop")

                conversation_output = gr.Textbox(
                    label="Conversation Log",
                    lines=15,
                    interactive=False
                )

            # Tab 3: Webhook Integration
            with gr.Tab("π‘ Webhook Integration"):
                gr.Markdown("### Connect AI Personalities to Discord Webhooks")

                with gr.Row():
                    # Choices start empty and are refilled after each generation
                    # via update_personalities_list (wired below).
                    personality_selector = gr.Dropdown(
                        label="Select Personality",
                        choices=[],
                        value=None
                    )
                    webhook_url = gr.Textbox(
                        label="Discord Webhook URL",
                        placeholder="https://discord.com/api/webhooks/...",
                        type="text"
                    )
                    channel_id = gr.Textbox(
                        label="Discord Channel ID",
                        placeholder="123456789012345678"
                    )

                with gr.Row():
                    connect_btn = gr.Button("π Connect Webhook", variant="primary")
                    test_btn = gr.Button("π§ͺ Test Webhook", variant="secondary")

                webhook_status = gr.Textbox(
                    label="Webhook Status",
                    lines=5,
                    interactive=False
                )

            # Tab 4: Database & Analytics
            with gr.Tab("π Database & Analytics"):
                gr.Markdown("### View Stored Data and Analytics")

                with gr.Row():
                    refresh_btn = gr.Button("π Refresh Data", variant="secondary")
                    export_btn = gr.Button("π€ Export Database", variant="primary")

                database_display = gr.Markdown(app.get_personalities_display())

                analytics_display = gr.JSON(label="Analytics Data")

        # Event handlers
        def generate_personality_handler(name, model_key, avatar_model, context):
            """Create a personality and refresh the JSON/avatar/list widgets.

            NOTE(review): avatar_model is received from the dropdown but not
            forwarded to create_personality — confirm whether that is intended.
            """
            personality = app.create_personality(name, model_key, context)
            return personality, personality["avatar_url"], app.get_personalities_display()

        def simulate_conversation_handler(model_key, trigger):
            """Run one simulated conversation and format it for the log box."""
            if not app.active_personalities:
                return "β No personalities available. Create some personalities first!"

            conversation = app.simulate_conversation(trigger, model_key)
            return "\n\n".join(conversation)

        def update_personalities_list():
            """Rebuild the personality dropdown choices from the DB."""
            personalities = app.db.get_personalities()
            # Row layout per DB: p[0] = id, p[1] = name.
            choices = [(f"{p[1]} (ID: {p[0]})", p[0]) for p in personalities]
            return gr.Dropdown(choices=choices, value=None)

        def connect_webhook_handler(personality_id, webhook_url, channel_id):
            """Attach a webhook URL/channel to an in-memory personality."""
            if not personality_id or not webhook_url:
                return "β Please select a personality and provide webhook URL"

            # In real implementation, save webhook to database
            personality = next((p for p in app.active_personalities if p.get("id") == personality_id), None)
            if personality:
                personality["webhook_url"] = webhook_url
                personality["channel_id"] = channel_id
                return f"β Connected {personality['name']} to webhook"

            return "β Personality not found"

        def test_webhook_handler(personality_id, webhook_url):
            """Fire a test message through the webhook and report the result."""
            personality = next((p for p in app.active_personalities if p.get("id") == personality_id), None)
            if personality and webhook_url:
                success = app.send_webhook_message(
                    webhook_url,
                    "π§ͺ Testing webhook connection from Hugging Face Cult Simulator!",
                    personality["name"],
                    personality["avatar_url"]
                )
                return "β Webhook test successful!" if success else "β Webhook test failed"
            return "β Invalid personality or webhook URL"

        def export_database_handler():
            """Produce a JSON-serializable summary of all stored personalities."""
            personalities = app.db.get_personalities()
            export_data = {
                "timestamp": datetime.now().isoformat(),
                "total_personalities": len(personalities),
                "personalities": []
            }

            for personality in personalities:
                # Row layout per DB: 0=id, 1=name, 2=type, 8=created_at.
                export_data["personalities"].append({
                    "id": personality[0],
                    "name": personality[1],
                    "type": personality[2],
                    "created_at": personality[8]
                })

            return export_data

        # Connect event handlers
        generate_btn.click(
            generate_personality_handler,
            inputs=[name_input, model_selector, avatar_model_selector, context_input],
            outputs=[personality_output, avatar_output, personality_display]
        )

        simulate_btn.click(
            simulate_conversation_handler,
            inputs=[conversation_model, trigger_message],
            outputs=[conversation_output]
        )

        connect_btn.click(
            connect_webhook_handler,
            inputs=[personality_selector, webhook_url, channel_id],
            outputs=[webhook_status]
        )

        test_btn.click(
            test_webhook_handler,
            inputs=[personality_selector, webhook_url],
            outputs=[webhook_status]
        )

        refresh_btn.click(
            app.get_personalities_display,
            outputs=[database_display]
        )

        export_btn.click(
            export_database_handler,
            outputs=[analytics_display]
        )

        # Update personality selector when personalities are created
        # (second listener on the same button is supported by Gradio).
        generate_btn.click(
            update_personalities_list,
            outputs=[personality_selector]
        )

    return interface
|
| 685 |
|
| 686 |
# Launch the application
if __name__ == "__main__":
    interface = create_gradio_interface()
    interface.launch(
        server_name="0.0.0.0",  # bind all interfaces so the Space is reachable
        server_port=7860,       # standard Gradio / HF Spaces port
        share=True
    )
|