Bofandra committed
Commit 83ff0bf · verified · 1 Parent(s): a258a34

Upload app (3).py

Files changed (1)
app (3).py +139 -0
app (3).py ADDED
@@ -0,0 +1,139 @@
+
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ # from ctransformers import AutoModelForCausalLM, AutoTokenizer  # only needed for the GGUF path below
+ import torch
+ import time
+ from datetime import datetime
+ import shutil
+
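+ # Set up a local ./models folder to stage the GGUF assets (used only by the ctransformers path).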
+ import os
+ folder_path = "./models"  # Change the folder name if needed
+
+ if not os.path.exists(folder_path):
+     os.makedirs(folder_path)
+     print(f"✅ Folder created: {folder_path}")
+ else:
+     print(f"📂 Folder already exists: {folder_path}")
+
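+ # Copy the quantized weights and config into ./models; they are only consumed by the
+ # commented-out ctransformers/GGUF loader further down.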
+ # Define source file and destination folder
+ source_file = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+ destination_folder = "./models"
+
+ # Copy the file (guarded so a missing file does not crash startup)
+ if os.path.exists(source_file):
+     shutil.copy(source_file, destination_folder)
+
+ source_file = "config.json"
+ # Copy the file
+ if os.path.exists(source_file):
+     shutil.copy(source_file, destination_folder)
+
+ model_path = "./models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"  # Adjust if necessary
+
+ if os.path.exists(model_path):
+     print(f"✅ Model found at: {model_path}")
+ else:
+     print(f"❌ Model not found at: {model_path}")
+     print("🔍 Available files:", os.listdir("./models"))
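+ # The check above is informational only; the app continues even if the GGUF file is missing.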
+
+ torch.random.manual_seed(0)
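+ # Fixed seed so sampled generations are reproducible across runs.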
+ """
+ model = AutoModelForCausalLM.from_pretrained(
+     "microsoft/Phi-3-mini-4k-instruct",
+     device_map="cpu",
+     torch_dtype="auto",
+     trust_remote_code=True,
+ )
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
+ """
+
+ """
+ model_name = "microsoft/phi-2"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="cpu")
+ """
+
+ torch.set_default_device("cpu")
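+ # Everything runs on CPU; no GPU is assumed.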
+
+ print("loading model")
+ print(datetime.fromtimestamp(time.time()))
+
+ #model_name = "TheBloke/TinyLlama-1.1B-Chat-GGUF"  # Change if using a different version
+ #model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # Change if using a different version
+ model_name = "microsoft/phi-1_5"  # Change if using a different version
+ #model = AutoModelForCausalLM.from_pretrained(destination_folder, model_type="llama")
+ model = AutoModelForCausalLM.from_pretrained(model_name)
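+ # Note: phi-1_5 is a base model (not instruction-tuned), so it continues the prompt as plain text.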
+
+ print("loading tokenizer")
+ print(datetime.fromtimestamp(time.time()))
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ print("done setup")
+ print(datetime.fromtimestamp(time.time()))
+
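+ # Build an Indonesian prompt from the form fields and generate the letter text.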
+ def generate_letter(date, letter_time, purpose, place, sender, receiver):
+     prompt = (f"Tuliskan surat resmi dengan detail sebagai berikut:\n"
+               f"Tanggal: {date}\nWaktu: {letter_time}\nTujuan: {purpose}\nTempat: {place}\n"
+               f"Pengirim: {sender}\nPenerima: {receiver}\n\nSurat:")
+     """prompt = (f"Write a formal letter with the following details:\n"
+     f"Date: {date}\nTime: {letter_time}\nPurpose: {purpose}\nPlace: {place}\n"
+     f"Sender: {sender}\nReceiver: {receiver}\n\nLetter:")"""
+     """
+     messages = [
+         {"role": "system", "content": "You are a helpful AI assistant."},
+         {"role": "user", "content": f"{prompt}"},
+     ]
+
+     pipe = pipeline(
+         "text-generation",
+         model=model,
+         tokenizer=tokenizer,
+     )
+
+     generation_args = {
+         "max_new_tokens": 500,
+         "return_full_text": False,
+         "temperature": 0.0,
+         "do_sample": False,
+     }
+
+     output = pipe(messages, **generation_args)
+     return output[0]['generated_text']
+     """
+
+     print("tokenizing prompt")
+     print(datetime.fromtimestamp(time.time()))
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+     print("model.generate")
+     print(datetime.fromtimestamp(time.time()))
+     # max_new_tokens counts only generated tokens (max_length would include the prompt),
+     # and do_sample=True is required for temperature/top_p to take effect.
+     output = model.generate(**inputs, max_new_tokens=100, do_sample=True, temperature=0.7, top_p=0.9)
+
+     print("decoding output")
+     print(datetime.fromtimestamp(time.time()))
+     # The decoded string includes the prompt followed by the generated continuation.
+     return tokenizer.decode(output[0], skip_special_tokens=True)
+     """response = model(prompt)
+     return response"""
+
+ # Create the Gradio interface
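+ # Labels are Indonesian: Tanggal=Date, Waktu=Time, Tujuan=Purpose, Tempat=Place,
+ # Pengirim=Sender, Penerima=Receiver.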
+ iface = gr.Interface(
+     fn=generate_letter,
+     inputs=[
+         gr.Textbox(label="Tanggal"),
+         gr.Textbox(label="Waktu"),
+         gr.Textbox(label="Tujuan"),
+         gr.Textbox(label="Tempat"),
+         gr.Textbox(label="Pengirim"),
+         gr.Textbox(label="Penerima"),
+     ],
+     #outputs=gr.Textbox(label="Generated Letter"),
+     outputs=gr.Textbox(label="Surat yang dibuat secara otomatis"),
+     #title="Letter Generator",
+     title="Generator Surat",
+     #description="Enter the details and generate a formal letter automatically."
+     description="Tuliskan detail informasi surat yang ingin dibuat."
+ )
+
+ # Launch the app
+ if __name__ == "__main__":
+     iface.launch()
+
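For reference, a minimal sketch of the commented-out GGUF path, assuming the bundled tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf file has been staged into ./models (with ctransformers the model is called directly on the prompt string; there is no separate tokenize/generate step):

```python
from ctransformers import AutoModelForCausalLM

# Load the quantized TinyLlama weights from the staged folder.
llm = AutoModelForCausalLM.from_pretrained(
    "./models",
    model_file="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
    model_type="llama",
)

# Generate a continuation of the prompt directly.
text = llm("Tuliskan surat resmi dengan detail sebagai berikut:\n...",
           max_new_tokens=100, temperature=0.7, top_p=0.9)
print(text)
```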