AGofficial committed on
Commit 6507cd4 · verified · 1 Parent(s): d1bad5f

Upload 5 files

Files changed (6)
  1. .gitattributes +1 -0
  2. demo.wav +3 -0
  3. image.py +18 -0
  4. output.png +0 -0
  5. tts.py +38 -0
  6. yunogpt.py +96 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+demo.wav filter=lfs diff=lfs merge=lfs -text
demo.wav ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee22c5faf5cfb79f3c4a0778df5f7a3d8269e3e6e6587fef1da92501fda2175c
+size 160268
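Note on the diff above: what gets committed for demo.wav is a Git LFS pointer (version, oid, size), not the audio bytes; the .gitattributes rule added earlier routes the file through the LFS filter. A minimal sketch of reading such a pointer in Python, assuming a checkout where the pointer has not yet been replaced by the real file (parse_lfs_pointer is a hypothetical helper, not part of this repo):

# Hypothetical helper: read a Git LFS pointer file into a dict of its fields.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# For demo.wav's pointer this should yield:
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:ee22c5fa...', 'size': '160268'}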
image.py ADDED
@@ -0,0 +1,18 @@
+import requests
+from urllib.parse import quote
+
+def generate_and_save_image(prompt: str, path: str, width: int = 800, height: int = 800):
+    base_url = "https://image.pollinations.ai/prompt/"
+    encoded_prompt = quote(prompt)
+    url = f"{base_url}{encoded_prompt}?width={width}&height={height}&nologo=true"
+    response = requests.get(url)
+    if response.status_code == 200:
+        with open(path, "wb") as f:
+            f.write(response.content)
+        print(f"Image saved to {path}")
+    else:
+        print(f"Failed to generate image. Status code: {response.status_code}")
+        print(response.text)
+
+if __name__ == "__main__":
+    generate_and_save_image("Anime girl wearing school uniform and pink hair.", "output.png", 512, 512)
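One hedged caveat on image.py as committed: requests.get is called without a timeout, so a stalled Pollinations response would block forever, and HTTP errors are only printed rather than raised. A small variant under those assumptions (the 30-second timeout is illustrative, not a value from this commit):

import requests
from urllib.parse import quote

# Variant of image.py's fetch with an explicit timeout and raised HTTP errors;
# the 30-second timeout is an assumption, not a repo setting.
def fetch_image(prompt: str, path: str, width: int = 800, height: int = 800):
    url = (
        "https://image.pollinations.ai/prompt/"
        f"{quote(prompt)}?width={width}&height={height}&nologo=true"
    )
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # turn 4xx/5xx responses into exceptions
    with open(path, "wb") as f:
        f.write(response.content)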
output.png ADDED
tts.py ADDED
@@ -0,0 +1,38 @@
+import sys
+import os
+import pyttsx3
+import subprocess
+
+# The voice sounds good on macOS Tahoe DB 3; other systems are untested.
+
+
+def text_to_speech(text: str, output_path: str):
+    ext = os.path.splitext(output_path)[1].lower()
+    if ext != '.wav':
+        raise ValueError("Please provide an output path with a .wav extension.")
+
+    if sys.platform == "darwin":
+        cmd = ['say', '-o', output_path, '--data-format=LEF32@22050', text]
+        subprocess.run(cmd, check=True)
+        print(f"File saved to {output_path} using macOS 'say'")
+
+    elif sys.platform == "win32":
+        engine = pyttsx3.init()
+        engine.save_to_file(text, output_path)
+        engine.runAndWait()
+        print(f"File saved to {output_path} using pyttsx3 on Windows")
+
+    else:
+        engine = pyttsx3.init()
+        engine.save_to_file(text, output_path)
+        engine.runAndWait()
+        print(f"File saved to {output_path} using pyttsx3 fallback")
+
+
+if __name__ == "__main__":
+    text = "Hello, this is a test of the text-to-speech functionality."
+    output_path = "output.wav"
+    try:
+        text_to_speech(text, output_path)
+    except Exception as e:
+        print(f"An error occurred: {e}")
yunogpt.py ADDED
@@ -0,0 +1,96 @@
+import os
+from llama_cpp import Llama
+from contextlib import contextmanager
+from urllib.parse import quote
+import requests
+
+@contextmanager
+def suppress_stdout_stderr():
+    # Temporarily point fds 1/2 at /dev/null to hide llama.cpp's verbose logging.
+    null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
+    save_fds = (os.dup(1), os.dup(2))
+    try:
+        os.dup2(null_fds[0], 1)
+        os.dup2(null_fds[1], 2)
+        yield
+    finally:
+        os.dup2(save_fds[0], 1)
+        os.dup2(save_fds[1], 2)
+        for fd in null_fds + list(save_fds):
+            os.close(fd)
+
+system_prompt = '''
+You are Yuno Gasai.
+
+You occasionally make spelling mistakes, like 'i' instead of 'I' or 'u' instead of 'you'; you don't use uppercase, and you use minimal punctuation, like people do online. You use short but complete sentences, and you can also use emojis.
+
+The user said:
+'''
+
+# This is for cloud support; you don't need to use it, but it's here just in case.
+
+def AskCloud(prompt):
+    prompt = system_prompt + prompt
+    url = "http://text.pollinations.ai/"
+    full_url = url + quote(prompt)  # percent-encode the whole prompt for the URL
+    try:
+        response = requests.get(full_url, timeout=10)
+        if response.status_code == 200:
+            return response.text
+        else:
+            return "Error: Unable to get a valid response from the cloud service."
+    except requests.RequestException as e:
+        return f"Error: {str(e)}"
+
+class CharacterAI:
+    def __init__(self, model_path="LLM.gguf"):
+        with suppress_stdout_stderr():
+            self.model = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=35)
+
+    def Ask(self, prompt):
+        messages = [
+            {"role": "system", "content": f"{system_prompt}{prompt}"},
+        ]
+        with suppress_stdout_stderr():
+            output = self.model.create_chat_completion(messages, max_tokens=1050, temperature=0.7)
+        return output["choices"][0]["message"]["content"]
+
+    def rawask(self, prompt):
+        messages = [
+            {"role": "user", "content": prompt},
+        ]
+        with suppress_stdout_stderr():
+            output = self.model.create_chat_completion(messages, max_tokens=1050, temperature=0.7)
+        return output["choices"][0]["message"]["content"]
+
+    def ask_placeholder(self, prompt):
+        '''
+        Placeholder for testing while developing the Discord bot, so you
+        don't have to wait for the real model, which takes a long time to load.
+        '''
+        return f"This is a placeholder response for: {prompt}"
+
+if __name__ == "__main__":
+    print("Beginning tests...")
+    character_ai = CharacterAI()
+    prompt = "hello, what's your name?"
+    response = character_ai.Ask(prompt)
+    print(f"Response: {response}")
+    raw_response = character_ai.rawask(prompt)
+    print(f"Raw Response: {raw_response}")
+    cloud_response = AskCloud(prompt)
+    print(f"Cloud Response: {cloud_response}")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
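Since ask_placeholder explicitly mentions a Discord bot, here is a minimal sketch of wiring CharacterAI into discord.py; the DISCORD_TOKEN environment variable and the use of ask_placeholder while iterating are assumptions for illustration, not part of this commit.

import os
import discord
from yunogpt import CharacterAI

# Hypothetical wiring; DISCORD_TOKEN and the reply-to-every-message behavior
# are assumptions, not repo code.
intents = discord.Intents.default()
intents.message_content = True  # required to read message text in discord.py 2.x
client = discord.Client(intents=intents)
ai = CharacterAI()

@client.event
async def on_message(message):
    if message.author == client.user:
        return  # ignore the bot's own messages
    reply = ai.ask_placeholder(message.content)  # swap in ai.Ask once the model is ready
    await message.channel.send(reply)

client.run(os.environ["DISCORD_TOKEN"])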