File size: 2,000 Bytes
ab7670e
 
a4a291f
 
ab7670e
 
 
 
 
 
 
 
 
 
 
 
 
6496004
 
 
 
 
 
 
 
 
 
 
 
 
 
ab7670e
2dc5a8b
6496004
772cf77
 
 
 
ab7670e
 
 
772cf77
6496004
2dc5a8b
10beea5
772cf77
2dc5a8b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import gradio as gr

## Download the GGUF model
# Runs at import time: fetches the quantized model file from the HuggingFace
# Hub (hf_hub_download caches locally, so repeat runs skip the download).
model_name = "cris177/Qwen2-Simple-Arguments"
model_file = "Qwen2_arguments.Q4_K_M.gguf" # this is the specific model file we'll use in this example. It's a 4-bit quant, but other levels of quantization are available in the model repo if preferred
model_path = hf_hub_download(model_name, filename=model_file)

## Instantiate model from downloaded file
# Module-level singleton shared by every request the Gradio app serves.
# CPU-only configuration (n_gpu_layers=0), sized for a small 2-vCPU host.
llm = Llama(
    model_path=model_path,
    n_ctx=2000,  # Context length to use
    n_threads=2,            # Number of CPU threads to use
    n_gpu_layers=0        # Number of model layers to offload to GPU
)


def build_prompt(argument):
    """Return the Alpaca-style prompt asking the model to analyze *argument*.

    The template is kept flush-left: the Alpaca format's section headers
    ("### Instruction:", "### Input:", "### Response:") carry no leading
    whitespace. The original template was indented inside the function
    body, which embedded four accidental spaces before each header and
    deviated from the format the model was fine-tuned on.

    Parameters
    ----------
    argument : str
        The natural-language argument to be analyzed.

    Returns
    -------
    str
        The fully formatted prompt, ending with "### Response:".
    """
    instruction = 'Based on the following argument, identify the following elements: premises, conclusion, propositions, type of argument, negation of propositions and validity.'
    alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:"""
    return alpaca_prompt.format(instruction, argument)


def analyze_argument(argument):
    """Run the fine-tuned model on *argument* and return its analysis.

    Parameters
    ----------
    argument : str
        A simple argument (at most two propositions) in natural language.

    Returns
    -------
    str
        The model's completion text, stripped of surrounding whitespace.
    """
    prompt = build_prompt(argument)
    # llama-cpp-python returns an OpenAI-style completion dict, so the
    # generated text lives at choices[0]['text']. max_tokens bounds the
    # completion length, not the prompt.
    output = llm(prompt, max_tokens=1000)['choices'][0]['text'].strip()
    return output

# User-facing description rendered on the Gradio page (runtime text —
# shown verbatim in the UI).
description = """This tool analyzes simple arguments, that is, arguments composed of at most two propositions.

It applies the fine-tuned LLM from https://huggingface.co/cris177/Qwen2-Simple-Arguments

For faster inference we use the 4-bit quantization model https://huggingface.co/cris177/Qwen2-Simple-Arguments/resolve/main/Qwen2_arguments.Q4_K_M.gguf.

It requires only 3 GB of RAM, and runs on just 2 vCPUs (which causes it to run somewhat slowly in this demo).
"""

# Build and start the web UI. launch() blocks, serving requests until the
# process is stopped. The example is an invalid argument (affirming the
# consequent) — presumably chosen to exercise the validity check.
gr.Interface(analyze_argument, inputs="text", outputs="text",
    title="Simple Arguments Analyzer",
    description=description,
    examples=[["If it's wednesday it's cold, and it's cold, therefore it's wednesday."]]).launch()