ejschwartz committed on
Commit
5a8b10a
·
1 Parent(s): 0e77531
Files changed (1) hide show
  1. app.py +12 -13
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import spaces
2
- from transformers import pipeline, BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer
3
  import gradio as gr
4
  import torch
5
  import logging
@@ -12,24 +12,23 @@ try:
12
  except Exception as e:
13
  logging.warning(f"Could not import bitsandbytes: {e}")
14
 
15
- bnb_config = BitsAndBytesConfig(
16
- load_in_4bit=True,
17
- bnb_4bit_compute_dtype=torch.float16, # key
18
- )
19
 
20
- tokenizer = AutoTokenizer.from_pretrained("ejschwartz/decaf-v1-22b-4bit")
21
- model = AutoModelForCausalLM.from_pretrained(
22
- "ejschwartz/decaf-v1-22b-4bit",
23
- device_map="auto",
24
- quantization_config=bnb_config,
25
- )
26
 
27
- pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 
 
 
28
 
29
  @spaces.GPU(size="xlarge")
30
  def generate(text):
31
  print(f"Generating text... {text}")
32
- return pipe(text)[0]
 
33
 
34
  demo = gr.Interface(fn=generate, inputs="text", outputs="text")
35
  demo.launch()
 
1
  import spaces
2
+ from transformers import pipeline
3
  import gradio as gr
4
  import torch
5
  import logging
 
12
  except Exception as e:
13
  logging.warning(f"Could not import bitsandbytes: {e}")
14
 
 
 
 
 
15
 
16
+ def decomp_create_prompt(input_data: str) -> str:
17
+ before = "# This is the decompiled code:\n"
18
+ after = "\n# What is the source code?\n"
19
+ prompt = before + input_data.strip() + after
 
 
20
 
21
+ return prompt
22
+
23
+
24
+ pipe = pipeline(model="ejschwartz/decaf-v1-22b-4bit")
25
+ pipe.model.to("cuda")
26
 
27
  @spaces.GPU(size="xlarge")
28
  def generate(text):
29
  print(f"Generating text... {text}")
30
+ prompt = decomp_create_prompt(text)
31
+ return pipe(prompt)[0]['generated_text']
32
 
33
  demo = gr.Interface(fn=generate, inputs="text", outputs="text")
34
  demo.launch()