jacksonstrut committed on
Commit
a3a6f99
·
verified ·
1 Parent(s): 8dc44dc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -11
app.py CHANGED
@@ -30,15 +30,9 @@ if missing_vars:
30
  raise ValueError(f"Missing environment variables: {', '.join(missing_vars)}")
31
 
32
  # Replace with your actual model name
33
- model_name = "jacksonstrut/tinyllama-1.1B-chat" # Update this with your model's name
34
-
35
- from transformers import AutoTokenizer
36
-
37
  model_name = "jacksonstrut/tinyllama-1.1B-chat"
38
- HUGGINGFACE_API_TOKEN = os.getenv('HUGGINGFACE_API_TOKEN')
39
 
40
  # Disable tokenizer parallelism
41
- import os
42
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
43
 
44
  # Load the tokenizer with use_fast=False
@@ -51,7 +45,7 @@ tokenizer = AutoTokenizer.from_pretrained(
51
  # Ensure pad_token is set
52
  if tokenizer.pad_token is None:
53
  tokenizer.pad_token = tokenizer.eos_token
54
- )
55
  config = AutoConfig.from_pretrained(model_name)
56
  model = AutoModelForCausalLM.from_pretrained(
57
  model_name,
@@ -60,8 +54,93 @@ model = AutoModelForCausalLM.from_pretrained(
60
  )
61
  model.to('cpu')
62
 
63
- # Ensure pad_token is set
64
- if tokenizer.pad_token is None:
65
- tokenizer.pad_token = tokenizer.eos_token
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
- # ... rest of your code ...
 
 
 
 
30
  raise ValueError(f"Missing environment variables: {', '.join(missing_vars)}")
31
 
32
  # Replace with your actual model name
 
 
 
 
33
  model_name = "jacksonstrut/tinyllama-1.1B-chat"
 
34
 
35
  # Disable tokenizer parallelism
 
36
  os.environ["TOKENIZERS_PARALLELISM"] = "false"
37
 
38
  # Load the tokenizer with use_fast=False
 
45
  # Ensure pad_token is set
46
  if tokenizer.pad_token is None:
47
  tokenizer.pad_token = tokenizer.eos_token
48
+
49
  config = AutoConfig.from_pretrained(model_name)
50
  model = AutoModelForCausalLM.from_pretrained(
51
  model_name,
 
54
  )
55
  model.to('cpu')
56
 
57
# House-music hook phrases; one is randomly prepended to ~30% of replies
# (see generate_response).
# NOTE(review): currently an empty placeholder — random.choice on an empty
# list raises IndexError; populate this before relying on the hook feature.
HOUSE_MUSIC_HOOKS = [
# ... your list of hooks ...
]

# Per-user chat history: maps a user id to a tensor of past token ids.
# NOTE(review): entries are trimmed per user elsewhere, but the dict itself
# grows one entry per distinct user and is never pruned.
chat_histories = {}
64
+
65
async def generate_response(user_id, user_message):
    """Generate a chat reply for one user, keeping per-user conversation history.

    Args:
        user_id: Key identifying the user (used to look up stored history).
        user_message: Raw text the user sent.

    Returns:
        The generated reply string, or a canned apology on any failure.
    """
    # Local import keeps this fix self-contained; asyncio is stdlib.
    import asyncio

    try:
        # Previous conversation tensor for this user, if any.
        chat_history_ids = chat_histories.get(user_id)

        # Encode the new message, terminated with EOS as the model expects.
        new_user_input_ids = tokenizer.encode(
            user_message + tokenizer.eos_token, return_tensors='pt'
        ).to('cpu')

        # Prepend history (when present) so the model sees the whole conversation.
        if chat_history_ids is not None:
            bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
        else:
            bot_input_ids = new_user_input_ids

        def _generate():
            # Inference only: no_grad avoids building an autograd graph.
            with torch.no_grad():
                return model.generate(
                    bot_input_ids,
                    max_length=bot_input_ids.shape[-1] + MAX_TOKENS,
                    temperature=TEMPERATURE,
                    do_sample=True,
                    top_p=0.95,
                    top_k=50,
                    pad_token_id=tokenizer.pad_token_id,
                    no_repeat_ngram_size=3,
                )

        # model.generate is CPU-heavy and blocking; run it in a worker thread
        # so the bot's asyncio event loop keeps servicing Twitch traffic.
        output_ids = await asyncio.to_thread(_generate)

        # The reply is everything generated after the prompt tokens.
        response_ids = output_ids[:, bot_input_ids.shape[-1]:]
        response_text = tokenizer.decode(response_ids[0], skip_special_tokens=True)

        # Keep only the last 1000 tokens to bound per-user history size.
        chat_histories[user_id] = output_ids[:, -1000:]

        # Randomly include a house music hook (30% chance). Guarded so an
        # empty hook list no longer raises IndexError (which previously fell
        # through to the generic except path and hid the real reply).
        if HOUSE_MUSIC_HOOKS and random.random() < 0.3:
            response_text = f"{random.choice(HOUSE_MUSIC_HOOKS)} {response_text}"

        logger.info(f"Generated response: {response_text}")
        return response_text

    except Exception as e:
        # Best-effort bot: log and return a friendly fallback instead of crashing.
        logger.error(f"Error generating response: {e}")
        return "Sorry, I'm too hyped to respond right now!"
112
+
113
+ # Create a Twitch chatbot using TwitchIO
114
class TwitchChatBot(commands.Bot):
    """Twitch chat bot that answers every human chat message with a model reply."""

    def __init__(self):
        # Credentials and channel come from the module-level env-derived constants.
        super().__init__(
            token=TWITCH_OAUTH_TOKEN,
            nick=TWITCH_BOT_USERNAME,
            prefix='!',
            initial_channels=[TWITCH_CHANNEL_NAME]
        )

    async def event_ready(self):
        """Event handler when the bot is connected and ready."""
        logger.info(f"Logged in as | {self.nick}")
        logger.info(f"Connected to channel | {TWITCH_CHANNEL_NAME}")

    async def event_message(self, message):
        """Event handler when a message is received in chat."""
        # Ignore messages sent by the bot itself; also skip messages without an
        # author (TwitchIO can emit system messages with author=None — TODO confirm).
        if message.echo or message.author is None:
            return

        logger.info(f"Message received from {message.author.name}: {message.content}")

        # Dispatch any registered '!' commands: overriding event_message on
        # commands.Bot suppresses command handling unless this is called.
        await self.handle_commands(message)

        # Generate a response
        response = await generate_response(message.author.id, message.content)

        # Send the response back to the Twitch chat
        await message.channel.send(f"@{message.author.name} {response}")
142
 
143
# Entry point: build the bot and hand control to its event loop.
if __name__ == "__main__":
    twitch_bot = TwitchChatBot()
    twitch_bot.run()