Upload app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,45 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
|
| 3 |
-
def greet(name):
|
| 4 |
-
|
| 5 |
|
| 6 |
-
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
|
| 7 |
-
iface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Legacy hello-world Gradio demo, kept commented out for reference.
# Bug fix: the `return` line below had been left uncommented while its
# enclosing `def` was commented out, which made the whole file a
# SyntaxError (`return` outside function). It is now commented out too.
#import gradio as gr
#def greet(name):
#    return "Hello " + name + "!!"
#iface = gr.Interface(fn=greet, inputs="text", outputs="text")
#iface.launch()
|
| 8 |
+
|
| 9 |
+
import os
# Pin all CUDA work to the first GPU; must be set before torch initializes CUDA.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import torch
import torch.nn as nn  # NOTE(review): appears unused in this file — confirm before removing
import bitsandbytes as bnb  # required at runtime for load_in_8bit quantization
from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
|
| 15 |
+
|
| 16 |
+
# Load BLOOMZ-3B for causal generation: weights quantized to 8-bit via
# bitsandbytes, float16 for the non-quantized modules, and layers dispatched
# automatically across the available devices.
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloomz-3b",
    device_map="auto",
    load_in_8bit=True,
    torch_dtype=torch.float16,
)
|
| 23 |
+
|
| 24 |
+
# Tokenizer paired with the quantized model above.
# Bug fix: `load_in_8bit_fp32_cpu_offload=True` was passed here, but that is a
# model-quantization option (today spelled `llm_int8_enable_fp32_cpu_offload`
# on BitsAndBytesConfig), not a tokenizer parameter — the tokenizer silently
# ignored it, so removing it is behavior-neutral and stops masking the intent.
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-3b")
|
| 25 |
+
|
| 26 |
+
def make_inference(sentence):
    """Ask the model whether a student's circuit answer shows a sequential misconception.

    Args:
        sentence: Free-text student response to the written circuit question.

    Returns:
        The decoded model output (instruction prompt plus the generated
        response) with special tokens stripped.
    """
    batch = tokenizer(f"### INSTRUCTION\nBelow is a student response to a writen question about an electrical circuit. Please identify whether there is a sequential misconception. A sequential misconception in terms of electric circuits is one in which it is believed that elements that are further “downstream” from a source (such as R2 and R3 in the example circuit of Figure 1) “receive” current after elements closer to the source (R1 in the example circuit). With such a misconception, it is likely that a student will think that changes in R2 have no effect on the potential difference and current associated with R1 or Vs..\n\n### Sentence:\n{sentence}\n### Response:\n", return_tensors='pt')

    # Bug fix: the tokenizer returns CPU tensors, but the model is dispatched
    # with device_map='auto' (GPU); generate() would fail with a device
    # mismatch unless the batch is moved to the model's device first.
    batch = {key: tensor.to(model.device) for key, tensor in batch.items()}

    with torch.cuda.amp.autocast():
        output_tokens = model.generate(**batch, max_new_tokens=200)

    return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
|
| 33 |
+
|
| 34 |
+
if __name__ == "__main__":
    # Expose the classifier through a simple Gradio web UI.
    import gradio as gr

    # NOTE(review): gr.inputs / gr.outputs is the legacy (pre-3.0) Gradio
    # component API; migrate to top-level gr.Textbox if the Space's Gradio
    # version is upgraded.
    gr.Interface(
        make_inference,
        [
            gr.inputs.Textbox(lines=2, label="Sentence"),
        ],
        gr.outputs.Textbox(label="Response"),
        title="MisconAI",
        # Bug fix: corrected typos in the user-facing description
        # ("tool the allows", "writen", "misconcepion", missing period).
        description="MisconAI is a tool that allows you to input a student response to a written question about an electrical circuit. It will identify whether there is a sequential misconception.",
    ).launch()
|