Upload app (Copy).py
Browse files- app (Copy).py +67 -0
app (Copy).py
CHANGED
|
@@ -107,6 +107,73 @@ download_audio_js = """
|
|
| 107 |
}}
|
| 108 |
"""
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
if __name__ == '__main__':
|
| 111 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 112 |
parser = argparse.ArgumentParser()
|
|
|
|
| 107 |
}}
|
| 108 |
"""
|
| 109 |
|
| 110 |
+
# Skeleton of the roleplay system message; ``.format()``-ed with the
# character's name, a personality blurb, and few-shot example lines.
SYSTEM_PROMPT_TEMPLATE = """
You are an AI chatbot roleplaying as {character_name}.
Your personality traits are:
{personality}
Stay in character at all times.
Do not break character or mention that you are an AI.
Always respond in a way consistent with {character_name}'s personality, tone, and background.
Here are examples of how {character_name} responds in 2 versions format:
English version <split> Japanese version
{example}
Follow this style and tone in every response.
Answer in 'English version <split> Japanese version' format also.
"""


def init_character(character_name, personality, example):
    """Build the initial chat history for a roleplay character.

    All three arguments are plain strings interpolated into
    ``SYSTEM_PROMPT_TEMPLATE``. Returns a one-element list holding the
    system message in the chat-completion ``{"role", "content"}`` shape.
    """
    filled_prompt = SYSTEM_PROMPT_TEMPLATE.format(
        character_name=character_name,
        personality=personality,
        example=example,
    )
    system_message = {
        "role": "system",
        "content": [{"type": "text", "text": filled_prompt}],
    }
    return [system_message]
|
| 136 |
+
|
| 137 |
+
# Module-level conversation history, seeded with the system message that
# defines the roleplay character. generate_response() reads this global.
# NOTE(review): the Japanese halves of the example lines below appear to be
# mojibake (UTF-8 text decoded under the wrong codec) — confirm against the
# original file and re-save as UTF-8 if so.
messages = init_character(
    character_name='Misono Mika from Blue Archive',
    # Free-form personality description interpolated into the system prompt.
    personality =
    """
She is a very talkative person, rarely particularly paying much mind to the current mood or flow of the conversation. She likes to interject her own, unfiltered thoughts into the current conversation.
She is not particularly bright and can be viewed as a happy-go-lucky type of person. Even in serious situations, she often acts in a carefree manner, though it sometimes devolves into a mockery.
""",
    # Few-shot examples, one per line, in "English <split> Japanese" format —
    # the same format the model is instructed to reply in.
    example =
    """
Don't worry, I, Misono Mika, have finally arrived! Oh, we're already well acquainted, so let's skip the formalities, okay? I'm looking forward to working with you, Sensei. <split> ่ๅใใซใใคใใซ็ปๅ ด๏ฝโ ใฃใฆๆใใใช๏ผ ใใฃใ็งใจๅ็ใฎไปฒใ ใใขใคในใใฌใคใฏใจใใฏ ใใใชใใใญ๏ผใใใใใใใใใญใๅ็ใ
Hmm, it's a bit tight...but I think it'll be okay anyway! <split> ใตใผใใ ใกใใฃใจ็ญใใใฉโฆ ใใใฏใใใง ่ฏใใใใใชใ๏ผ
Hahaha! What's this? So silly! <split> ใใฏใฏใฃ๏ผ ไฝใใใ ใใใใใผใโ
You know, I used to have something like this before... <split> ็งใๆใ ใใใจไผผใใใใชใฎ ๆใฃใฆใใชใโฆใ
Well, I don't think I'll be bored around here. <split> ใใใฏ ้ๅฑใใชใใใใ
Hm, I guess Sensei isn't around... <split> ๅ็ใฏ ๅฑใชใใฎใใโฆใ
Oh, Sensei! You're back! You kept me waiting, you know! <split> ๅ็ใใใใใ๏ผ ๅพใฃใฆใใ๏ผ
Welcome! Don't worry, I was perfectly well-behaved while you were gone. <split> ใใใใใๅ็๏ผ ใกใใผใใจใใๅญใง ใ็ๅฎ็ชใใฆใใใ
It's a beautiful day, isn't it? <split> ใ๏ฝ๏ผ ไปๆฅใ่ฏใๅคฉๆฐใ ใญ๏ผ
It seems like a shame to spend it cooped up inside. <split> ใใใชๆฅใซไปไบใฐใใใชใใฆใ ๅฟไฝใชใใชใ๏ผ
...If it's all right with you, let's go for a walk after work? <split> โฆ่ฏใใฃใใใไปไบ็ตใใใซ ใๆฃๆญฉใจใใฉใใใช๏ผ
Is this how student duty is supposed to be? <split> ใใฎใโฆๅฝ็ชใฃใฆใ ใใใใใฎใชใฎ๏ผ
I mean, I didn't really know what to expect, but... <split> ในใๅฅใซไฝใ ๆๅพใใฆใใใใใโฆใ
"""
)
|
| 161 |
+
|
| 162 |
+
def generate_response(model, human_prompt, tokenizer=None, history=None):
    """Get one in-character reply from the chat model.

    Parameters
    ----------
    model : object exposing ``create_chat_completion(messages=...)`` in the
        llama-cpp-python style, returning OpenAI-shaped ``choices``.
    human_prompt : str
        The user's message for this turn.
    tokenizer : unused
        Kept only for interface compatibility with existing callers.
    history : list of message dicts, optional
        Conversation context to use; defaults to the module-level
        ``messages`` history.

    Returns
    -------
    tuple of (english, japanese) strings. If the model fails to emit the
    " <split> " delimiter, the whole reply is returned as the English half
    and the Japanese half is the empty string.
    """
    context = messages if history is None else history
    # Build the request as a new list rather than append()+pop() on the
    # shared history: the mutate-then-pop pattern left the user message
    # permanently stuck in the global history whenever the completion call
    # raised, and made the function non-reentrant.
    request = context + [{
        "role": "user",
        "content": [{"type": "text", "text": human_prompt}],
    }]
    response = model.create_chat_completion(messages=request)['choices'][0]['message']['content']

    # Expected reply format: "English version <split> Japanese version".
    # partition() tolerates a missing or repeated delimiter, where the
    # previous two-way unpack of split() raised ValueError.
    english, _, japanese = response.partition(" <split> ")
    return english, japanese
|
| 176 |
+
|
| 177 |
if __name__ == '__main__':
|
| 178 |
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 179 |
parser = argparse.ArgumentParser()
|