Loguie committed on
Commit
8722a33
·
verified ·
1 Parent(s): e9aaaaa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -44
app.py CHANGED
@@ -4,55 +4,36 @@ import torch
4
  import requests
5
  from PIL import Image
6
  from io import BytesIO
 
 
 
 
 
 
7
 
8
# Candidate labels for zero-shot fashion classification.
fashion_items = ['top', 'trousers', 'jumper']

# Fetch the Marqo fashion SigLIP checkpoint together with its matching
# processor; trust_remote_code is required because the repo ships custom code.
_MODEL_ID = 'Marqo/marqo-fashionSigLIP'
model = AutoModel.from_pretrained(_MODEL_ID, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(_MODEL_ID, trust_remote_code=True)
 
14
 
15
# Tokenize the candidate labels once at start-up and cache their
# L2-normalized text embeddings; inference never needs gradients.
with torch.no_grad():
    # Truncation and padding keep every label within the model's input
    # size and give the batch a uniform sequence length.
    token_batch = processor(
        text=fashion_items,
        return_tensors="pt",
        truncation=True,
        padding=True,
    )['input_ids']

    text_features = model.get_text_features(token_batch)
    text_features = text_features / text_features.norm(dim=-1, keepdim=True)
27
 
28
# Prediction function
def predict_from_url(url):
    """Classify the image at *url* against the candidate fashion labels.

    Args:
        url: HTTP(S) URL of the image to classify.

    Returns:
        A dict mapping each label in ``fashion_items`` to its softmax
        probability, or ``{"Error": ...}`` describing what went wrong.
    """
    # Guard clause: Gradio passes an empty string when the box is blank.
    if not url:
        return {"Error": "Please input a URL"}

    try:
        # timeout prevents a slow/unreachable host from hanging the app;
        # raise_for_status surfaces HTTP errors (404, 500, ...) instead of
        # letting PIL fail on an HTML error page.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content))
    except Exception as e:
        return {"Error": f"Failed to load image: {str(e)}"}

    processed_image = processor(images=image, return_tensors="pt")['pixel_values']

    with torch.no_grad():
        image_features = model.get_image_features(processed_image)
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        # Scaled cosine similarities -> softmax over the label set.
        text_probs = (100 * image_features @ text_features.T).softmax(dim=-1)

    return {fashion_items[i]: float(text_probs[0, i]) for i in range(len(fashion_items))}
47
 
48
# Gradio interface: a URL textbox in, a probability label view out.
url_box = gr.Textbox(label="Enter Image URL")
result_view = gr.Label(label="Classification Results")

demo = gr.Interface(
    fn=predict_from_url,
    inputs=url_box,
    outputs=result_view,
    title="Fashion Item Classifier",
    allow_flagging="never",
)

# Launch the interface
demo.launch()
 
4
  import requests
5
  from PIL import Image
6
  from io import BytesIO
7
# Import necessary libraries
import torch
from transformers import pipeline

# Prefer the first CUDA GPU when one is available; -1 tells the
# transformers pipeline to stay on CPU.
if torch.cuda.is_available():
    device = 0
else:
    device = -1

# BART fine-tuned on CNN/DailyMail, loaded via the high-level
# summarization pipeline.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=device)
16
 
17
# Function to summarize a given text
def summarize_text(input_text, max_length=150, min_length=50):
    """Summarize *input_text* with the module-level BART pipeline.

    Args:
        input_text: Non-empty text to summarize.
        max_length: Upper bound (in tokens) for the generated summary.
        min_length: Lower bound (in tokens) for the generated summary.

    Returns:
        The generated summary string.

    Raises:
        ValueError: If *input_text* is empty or whitespace-only.
    """
    # Fail fast with a clear message instead of an opaque model error.
    if not input_text or not input_text.strip():
        raise ValueError("input_text must be a non-empty string")

    # do_sample=False keeps decoding deterministic (beam/greedy search).
    summary = summarizer(
        input_text,
        max_length=max_length,
        min_length=min_length,
        do_sample=False,
    )
    return summary[0]['summary_text']
22
 
23
# Example text to summarize
text_to_summarize = """
The Hugging Face team has developed several transformers architectures that excel in a variety of NLP tasks.
These architectures are pre-trained on large text corpora and can be fine-tuned for specific tasks like text classification, named entity recognition, and question answering.
The transformers library provides easy-to-use tools for downloading pre-trained models and fine-tuning them, making it simple for developers and researchers to apply state-of-the-art NLP methods to their own tasks.
Hugging Face has made it easier for people to contribute to the world of NLP research by providing open-source libraries that are well-documented and easy to use.
"""

if __name__ == "__main__":
    # Calling the function to summarize the text
    summary = summarize_text(text_to_summarize)

    # Print the summarized text
    print("Original Text: \n", text_to_summarize)
    print("\nSummary: \n", summary)

    # NOTE(review): the trailing `demo.launch()` left over from the previous
    # Gradio version was removed — `demo` is no longer defined anywhere in
    # this file, so calling it raised a NameError at import time.