Update app.py
app.py
CHANGED
@@ -13,8 +13,11 @@ class Z(object):
 prompt0 = txt0
 
 # for Wizard-Vicuna-13B
-prompt00 = f'''USER: {prompt0}
-ASSISTANT:'''
+#prompt00 = f'''USER: {prompt0}
+#ASSISTANT:'''
+
+# for starcoder
+prompt00 = f'''{prompt0}'''
 
 prompt00 = f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
 
@@ -31,15 +34,25 @@ from ctransformers import AutoModelForCausalLM
 
 # wizzard vicuna
 # see https://github.com/melodysdreamj/WizardVicunaLM
-llm = AutoModelForCausalLM.from_pretrained('TheBloke/Wizard-Vicuna-13B-Uncensored-GGML', model_file='Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin', model_type='llama')
+#llm = AutoModelForCausalLM.from_pretrained('TheBloke/Wizard-Vicuna-13B-Uncensored-GGML', model_file='Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin', model_type='llama')
 
 #llm = AutoModelForCausalLM.from_pretrained('mverrilli/dolly-v2-12b-ggml', model_file='ggml-model-q5_0.bin', model_type='dolly-v2')
 
 #llm = AutoModelForCausalLM.from_pretrained('mverrilli/dolly-v2-7b-ggml', model_file='ggml-model-q5_0.bin', model_type='dolly-v2')
 
 
+
+# non-RLHF model
+# 4 may 2023
+# site https://huggingface.co/bigcode/starcoder
+modelInfo = {'path':'NeoDim/starcoder-GGML', 'subPath':'starcoder-ggml-q8_0.bin', 'promptType':'raw', 'modelType':'starcoder'}
+llm = AutoModelForCausalLM.from_pretrained(modelInfo['path'], model_file=modelInfo['subPath'], model_type=modelInfo['modelType'])
+
+
+
 z = Z()
 z.llm = llm
+z.modelInfo = modelInfo
 z.init()
 
 def greet(prompt, temperature):
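For orientation, here is a minimal sketch of how the pieces visible in this diff could fit together at runtime: the modelInfo dict drives both model loading and the choice between the raw StarCoder prompt and the USER:/ASSISTANT: template. The Z class, z.init(), and the real body of greet are not part of this diff, so the wiring below is an assumption for illustration, not the Space's actual code.

# Sketch only; assumes the objects shown in the diff above. The Z class and
# z.init() are not visible here, so this bypasses them and calls the
# ctransformers model directly.
from ctransformers import AutoModelForCausalLM

modelInfo = {'path': 'NeoDim/starcoder-GGML',
             'subPath': 'starcoder-ggml-q8_0.bin',
             'promptType': 'raw',
             'modelType': 'starcoder'}

llm = AutoModelForCausalLM.from_pretrained(modelInfo['path'],
                                           model_file=modelInfo['subPath'],
                                           model_type=modelInfo['modelType'])

def greet(prompt, temperature):
    # StarCoder is a base (non-RLHF) model, so promptType 'raw' passes the
    # user text through unchanged; an instruction-tuned model would get the
    # USER:/ASSISTANT: template instead.
    if modelInfo['promptType'] == 'raw':
        prompt00 = f'''{prompt}'''
    else:
        prompt00 = f'''USER: {prompt}
ASSISTANT:'''
    # ctransformers model objects are callable and accept sampling kwargs.
    return llm(prompt00, temperature=float(temperature), max_new_tokens=256)

In a Gradio Space this handler would typically be exposed with something like gr.Interface(fn=greet, inputs=['text', 'number'], outputs='text'); that interface code is outside this diff, so it is likewise only an assumption here.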