ejschwartz committed on
Commit
fbe1b1f
·
1 Parent(s): 1a488de

replace app

Browse files
Files changed (1) hide show
  1. main.py +17 -21
main.py CHANGED
@@ -1,30 +1,26 @@
 
1
  import gradio as gr
2
  import torch
3
- import requests
4
- from torchvision import transforms
5
 
6
- model = torch.hub.load("pytorch/vision:v0.6.0", "resnet18", pretrained=True).eval()
7
- response = requests.get("https://git.io/JJkYN")
8
- labels = response.text.split("\n")
9
 
 
 
 
 
10
 
11
- def predict(inp):
12
- inp = transforms.ToTensor()(inp).unsqueeze(0)
13
- with torch.no_grad():
14
- prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
15
- confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
16
- return confidences
17
 
18
 
19
- def run():
20
- demo = gr.Interface(
21
- fn=predict,
22
- inputs=gr.Image(type="pil"),
23
- outputs=gr.Label(num_top_classes=3),
24
- )
25
 
26
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
27
 
28
-
29
- if __name__ == "__main__":
30
- run()
 
1
# Stdlib imports first, then third-party, per PEP 8 grouping.
import logging

import gradio as gr
import torch
from transformers import pipeline

logging.basicConfig(level=logging.INFO)
# Use lazy %-style args so the values are only formatted when INFO is enabled.
logging.info(
    "CUDA available: %s, CUDA version: %s",
    torch.cuda.is_available(),
    torch.version.cuda,
)
 
8
 
9
def decomp_create_prompt(input_data: str) -> str:
    """Wrap decompiled code in the question/answer prompt the model expects.

    The decompiled code is stripped of surrounding whitespace and placed
    between a fixed header and trailer comment line.
    """
    return (
        "# This is the decompiled code:\n"
        f"{input_data.strip()}\n"
        "# What is the source code?\n"
    )
 
 
 
 
 
# Load the decompilation model once at startup. return_full_text=False makes
# the pipeline return only the newly generated tokens, not the echoed prompt.
pipe = pipeline(model="ejschwartz/decaf-v1-22b-4bit", return_full_text=False)
# Only move the model to the GPU when one is actually present, so the app can
# still start on a CPU-only host instead of crashing at import time.
if torch.cuda.is_available():
    pipe.model.to("cuda")
 
 
 
 
19
 
20
def generate(text):
    """Run the decompilation model on *text* and return the predicted source."""
    print(f"Generating text... {text}")
    outputs = pipe(decomp_create_prompt(text), max_new_tokens=2000)
    return outputs[0]["generated_text"]
24
 
25
# Simple one-box-in, one-box-out UI around the generator.
demo = gr.Interface(
    fn=generate,
    inputs="text",
    outputs="text",
)
demo.launch()