| id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable ⌀) |
|---|---|---|
168,277 | import json
import os
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Type, TypeVar, Union
from huggingface_hub import ModelHubMixin, hf_hub_download
The provided code snippet includes necessary dependencies for implementing the `prepare_dialogue` function. Write a Python function `def prepare_dialogue(example, dialogue_template, is_train=True)` to solve the following problem:
Format example to single- or multi-turn dialogue.
Here is the function:
def prepare_dialogue(example, dialogue_template, is_train=True):
    """Format ``example`` as a single- or multi-turn dialogue.

    Populates ``dialogue_template.messages`` from the example and writes the
    rendered prompt into ``example["text"]``.

    Args:
        example: Mapping containing either a ``messages`` list, a
            ``prompt``/``completion`` pair, or a lone ``prompt``.
        dialogue_template: Template object exposing a ``messages`` attribute
            plus ``get_training_prompt()`` / ``get_inference_prompt()``.
        is_train: Render the training prompt when True, otherwise the
            inference prompt.

    Returns:
        The mutated ``example`` with a populated ``"text"`` field.

    Raises:
        ValueError: If none of the supported key combinations is present.
    """
    # TODO: make this simpler by just ensuring every dataset has a messages column
    if "messages" in example and example["messages"] is not None:
        dialogue_template.messages = example["messages"]
    elif all(k in example for k in ("prompt", "completion")):
        # Construct single-turn dialogue from prompt and completion
        dialogue_template.messages = [
            {"role": "user", "content": example["prompt"]},
            {"role": "assistant", "content": example["completion"]},
        ]
    elif "prompt" in example:
        # Construct single-turn dialogue from prompt (inference only)
        dialogue_template.messages = [
            {"role": "user", "content": example["prompt"]},
        ]
    else:
        raise ValueError(
            f"Could not format example as dialogue! Require either `messages` or `[prompt, completion]` or `[prompt]` keys but found {list(example.keys())}"
        )
    if is_train:
        example["text"] = dialogue_template.get_training_prompt()
    else:
        example["text"] = dialogue_template.get_inference_prompt()
    return example
168,278 | import json
import os
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Type, TypeVar, Union
from huggingface_hub import ModelHubMixin, hf_hub_download
IGNORE_INDEX = -100  # label value ignored by PyTorch's cross-entropy loss


def mask_user_labels(tokenizer, dialogue_template, labels):
    """Mask the user turns of a dialogue from the loss.

    Every label from a user-role token up to (but not including) the next
    assistant-role token is replaced in place with ``IGNORE_INDEX``.

    Args:
        tokenizer: Tokenizer used to resolve the special role tokens to ids.
        dialogue_template: Object exposing ``user_token`` and
            ``assistant_token`` attributes.
        labels: Mutable sequence of label ids; modified in place.
    """
    user_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.user_token)
    assistant_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.assistant_token)
    for idx, label_id in enumerate(labels):
        if label_id == user_token_id:
            current_idx = idx
            # Bounds check FIRST: a trailing user turn with no assistant reply
            # would otherwise index one past the end of `labels` (IndexError
            # in the original, which tested the element before the bound).
            while current_idx < len(labels) and labels[current_idx] != assistant_token_id:
                labels[current_idx] = IGNORE_INDEX
                current_idx += 1
168,279 | import os
import re, json
from typing import Dict, List, Any
import requests
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import BaseLLM
from pydantic import BaseModel, Field
from langchain.chains.base import Chain
from langchain.chat_models import ChatOpenAI
from langchain.agents import Tool, LLMSingleActionAgent, AgentExecutor
from langchain.llms import OpenAI
import openai
from langchain.prompts.base import StringPromptTemplate
from typing import Callable
from langchain.agents.agent import AgentOutputParser
from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish
from typing import Union
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.agents import load_tools
from langchain.tools import BaseTool
from langchain.globals import set_llm_cache, get_llm_cache
from langchain.cache import InMemoryCache
from langchain.vectorstores import Pinecone
import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.tools import BaseTool
from langchain.tools import BaseTool
class knowledge_base(BaseTool):
    """LangChain tool that answers product questions from the knowledge base.

    NOTE(review): a single instance is created at module level below and shared
    across requests, so `sources_list` accumulates globally until a caller
    resets it (see handle_conversation).
    """
    # Name/description are read by the agent when choosing which tool to call.
    name = "ProductInfoSearch"
    description = "when user want to buy any product or need to search product information"
    # Class-level (shared) accumulator of source strings from past queries.
    sources_list = []
    def _run(self, query: str) -> str:
        """Look up `query` in the product knowledge base and return answer text."""
        product_string = query
        print("Product Information: \n", product_string)
        # `search_product_info` is defined elsewhere; it returns a mapping with
        # 'answer' and 'sources' keys — presumably a RetrievalQAWithSourcesChain,
        # TODO confirm.
        response = search_product_info({"question": product_string}, return_only_outputs=True)
        answer = response['answer'].strip()
        # print("Answer: ", answer)
        sources = response['sources'].strip()
        # print("Sources: ", sources)
        if sources:
            # print("Sources: ", sources)
            self.sources_list.append(sources)
        # NOTE(review): "product_string" below is a string LITERAL, not the
        # variable — looks unintentional; verify the intended output format.
        return query+"Search"+"product_string"+"Answer from Database:"+answer
    def _arun(self, query: str):
        """Async execution is not supported for this tool."""
        print("Could not get information for your query using the knowledge base tool.")
        raise NotImplementedError("Could not get information for your query.")
from langchain.agents import load_tools
# Shadow the class name with a shared singleton instance; every agent reuses
# this one tool (and therefore its accumulated `sources_list`).
knowledge_base = knowledge_base()


def get_tools(product_catalog):
    """Return the tool list for the sales agent.

    `product_catalog` is currently unused; the parameter is kept so existing
    callers keep working (per-catalog tool retrieval is a future feature).
    """
    # query to get_tools can be used to be embedded and relevant tools found
    # see here: https://langchain-langchain.vercel.app/docs/use_cases/agents/custom_agent_with_plugin_retrieval#tool-retriever
    tools = load_tools([])  # fixed: was a redundant double assignment `tools = tools = ...`
    tools.append(knowledge_base)
    return tools
168,280 | import streamlit as st
import requests
def initialize_session_state():
    """Ensure the dark-theme flag exists in Streamlit session state."""
    if "is_dark_theme" in st.session_state:
        return
    st.session_state.is_dark_theme = False
168,281 | import streamlit as st
import requests
def get_walmart_bot_response(user_input):
    """POST the user's message to the local walmartbot service and return its JSON reply."""
    payload = {"messages": [user_input]}
    reply = requests.post("http://localhost:5000/walmartbot", json=payload)
    return reply.json()
168,282 | import streamlit as st
import requests
def get_searchgpt_response(user_input):
    """Query the local searchgpt service and wrap its JSON reply in a messages dict."""
    raw = requests.post("http://localhost:5000/searchgpt", json={"text": user_input})
    return {"messages": [raw.json()]}
168,283 | import streamlit as st
import requests
def display_response(response, user_input, prev_messages):
    """Render the bot reply and its sources, then update the chat history.

    Keeps only the five most recent exchanges in
    ``st.session_state.prev_messages``.
    """
    bot_messages = response.get("messages", [])
    for message in bot_messages:
        st.text(f"Bot: 💬 {message}")
    for source in response.get("sources", []):
        st.text(f"Source: 📚 {source}")
    # Update previous messages.
    # Guard: the original indexed [0] unconditionally and crashed with
    # IndexError when the service returned an empty `messages` list.
    first_reply = bot_messages[0] if bot_messages else ""
    prev_messages.append({"user": user_input, "bot": first_reply})
    st.session_state.prev_messages = prev_messages[-5:]  # Keep only the last 5 messages
168,284 | import streamlit as st
import requests
def apply_theme():
    """Inject page-wide CSS matching the session's light/dark theme choice."""
    # Check if dark theme is enabled
    is_dark_theme = st.session_state.get("is_dark_theme", False)
    # Apply the theme based on the user's choice
    if is_dark_theme:
        st.markdown(
            """
            <style>
            body {
                background-color: #00172B;
                color: #C6CDD4;
            }
            </style>
            """,
            unsafe_allow_html=True,
        )
    else:
        # Default light theme
        st.markdown(
            """
            <style>
            body {
                background-color: #FFFFFF;
                color: #000000;
            }
            </style>
            """,
            unsafe_allow_html=True,
        )
168,285 | import streamlit as st
import requests
def toggle_theme():
    """Flip the dark/light theme flag and immediately re-render the app."""
    current = st.session_state.is_dark_theme
    st.session_state.is_dark_theme = not current
    # Use this to rerun the script when the button is clicked
    st.rerun()
168,286 | from search_capabilities import *
from walmart_functions import *
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List
from fastapi.middleware.cors import CORSMiddleware
class ConversationRequest(BaseModel):
    """Request body for the conversation endpoint."""
    # Pending user messages; an empty list signals the start of a new conversation.
    messages: List[str]
# Module-level agent shared by all requests: conversation state persists across
# calls to handle_conversation. `llm` and `config` come from the star-imported
# modules above — TODO confirm.
sales_agent = SalesGPT.from_llm(llm, verbose=False, **config)
def handle_conversation(request: ConversationRequest):
    """Advance the shared sales-agent conversation by one turn.

    An empty ``request.messages`` starts a new conversation; otherwise each
    message is fed to the agent as a user step.

    Returns:
        dict with ``messages`` (single agent reply) and ``sources`` (knowledge
        base sources accumulated during this turn).

    Raises:
        HTTPException: 500 on any internal failure.
    """
    try:
        # Determine if it's a new conversation or continuation
        if not request.messages:
            # New conversation: reset the agent's internal state.
            sales_agent.seed_agent()
        else:
            # Continuation: feed each pending user message to the agent.
            for message in request.messages:
                print("User: ", message)
                sales_agent.human_step(message)
        sales_agent.determine_conversation_stage()
        ai_message = sales_agent.step()
        # The agent marks the end of the conversation with an "<END..." tag;
        # strip it (and anything after) from the reply.
        if "<END" in str(ai_message):
            ai_message = str(ai_message).split('<END')[0]
        sources = knowledge_base.sources_list
        print('Response Received')
        response = {
            "messages": [str(ai_message)],
            "sources": sources,
        }
        if sources:
            print('Sources: ', sources)
        # Reset the shared tool's accumulator so this turn's sources don't
        # leak into later responses. (The original only reset it on one of
        # its two duplicated return paths.)
        knowledge_base.sources_list = []
        return response
    except Exception as e:
        print("Error: ", e)
        raise HTTPException(status_code=500, detail="Internal Server Error")
168,287 | from search_capabilities import *
from walmart_functions import *
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List
from fastapi.middleware.cors import CORSMiddleware
class ChatResponse(BaseModel):
    """Body of a searchgpt request (a single free-text query).

    NOTE(review): despite the name, this is used as the *request* model.
    """
    # The user's raw query text; empty string means "no input".
    text: str
def handle_chat(request: ChatResponse):
    """Answer a single searchgpt query.

    Returns a prompt asking for input when ``request.text`` is empty,
    otherwise the model's response for the query.

    Raises:
        HTTPException: 500 on any internal failure.
    """
    try:
        # Determine if it's a new conversation or continuation
        if not request.text:
            # Nothing to answer; ask the user for input. (Typo "Pleaes" fixed.)
            return "Please enter input in the text field"
        # The original re-tested `request.text` in a redundant nested `if`.
        input_text = request.text
        print(input_text)
        return get_response(input_text)
    except Exception as e:
        print("Error: ", e)
        raise HTTPException(status_code=500, detail="Internal Server Error")
168,288 | import requests
import os
from dotenv import load_dotenv
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import MessagesPlaceholder
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chains.summarize import load_summarize_chain
from langchain.tools import BaseTool
from langchain.agents import load_tools
from pydantic import BaseModel, Field
from typing import Type
from bs4 import BeautifulSoup
import requests
import json
from langchain.schema import SystemMessage
from langchain.utilities import OpenWeatherMapAPIWrapper
from duckduckgo_search import DDGS
from langchain.tools import PubmedQueryRun
class BeautifulSoup(Tag):
"""A data structure representing a parsed HTML or XML document.
Most of the methods you'll call on a BeautifulSoup object are inherited from
PageElement or Tag.
Internally, this class defines the basic interface called by the
tree builders when converting an HTML/XML document into a data
structure. The interface abstracts away the differences between
parsers. To write a new tree builder, you'll need to understand
these methods as a whole.
These methods will be called by the BeautifulSoup constructor:
* reset()
* feed(markup)
The tree builder may call these methods from its feed() implementation:
* handle_starttag(name, attrs) # See note about return value
* handle_endtag(name)
* handle_data(data) # Appends to the current data node
* endData(containerClass) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
# Since BeautifulSoup subclasses Tag, it's possible to treat it as
# a Tag with a .name. This name makes it clear the BeautifulSoup
# object isn't a real markup tag.
ROOT_TAG_NAME = '[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
# A string containing all ASCII whitespace characters, used in
# endData() to detect data chunks that seem 'empty'.
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
element_classes=None, **kwargs):
"""Constructor.
:param markup: A string or a file-like object representing
markup to be parsed.
:param features: Desirable features of the parser to be
used. This may be the name of a specific parser ("lxml",
"lxml-xml", "html.parser", or "html5lib") or it may be the
type of markup to be used ("html", "html5", "xml"). It's
recommended that you name a specific parser, so that
Beautiful Soup gives you the same results across platforms
and virtual environments.
:param builder: A TreeBuilder subclass to instantiate (or
instance to use) instead of looking one up based on
`features`. You only need to use this if you've implemented a
custom TreeBuilder.
:param parse_only: A SoupStrainer. Only parts of the document
matching the SoupStrainer will be considered. This is useful
when parsing part of a document that would otherwise be too
large to fit into memory.
:param from_encoding: A string indicating the encoding of the
document to be parsed. Pass this in if Beautiful Soup is
guessing wrongly about the document's encoding.
:param exclude_encodings: A list of strings indicating
encodings known to be wrong. Pass this in if you don't know
the document's encoding but you know Beautiful Soup's guess is
wrong.
:param element_classes: A dictionary mapping BeautifulSoup
classes like Tag and NavigableString, to other classes you'd
like to be instantiated instead as the parse tree is
built. This is useful for subclassing Tag or NavigableString
to modify default behavior.
:param kwargs: For backwards compatibility purposes, the
constructor accepts certain keyword arguments used in
Beautiful Soup 3. None of these arguments do anything in
Beautiful Soup 4; they will result in a warning and then be
ignored.
Apart from this, any keyword arguments passed into the
BeautifulSoup constructor are propagated to the TreeBuilder
constructor. This makes it possible to configure a
TreeBuilder by passing in arguments, not just by saying which
one to use.
"""
if 'convertEntities' in kwargs:
del kwargs['convertEntities']
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. Suggest you use "
"features='lxml' for HTML and features='lxml-xml' for "
"XML.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name),
DeprecationWarning, stacklevel=3
)
return kwargs.pop(old_name)
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if from_encoding and isinstance(markup, str):
warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
from_encoding = None
self.element_classes = element_classes or dict()
# We need this information to track whether or not the builder
# was specified well enough that we can omit the 'you need to
# specify a parser' warning.
original_builder = builder
original_features = features
if isinstance(builder, type):
# A builder class was passed in; it needs to be instantiated.
builder_class = builder
builder = None
elif builder is None:
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
# At this point either we have a TreeBuilder instance in
# builder, or we have a builder_class that we can instantiate
# with the remaining **kwargs.
if builder is None:
builder = builder_class(**kwargs)
if not original_builder and not (
original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES
) and markup:
# The user did not tell us which TreeBuilder to use,
# and we had to guess. Issue a warning.
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
# This code adapted from warnings.py so that we get the same line
# of code as our warnings.warn() call gets, even if the answer is wrong
# (as it may be in a multithreading situation).
caller = None
try:
caller = sys._getframe(1)
except ValueError:
pass
if caller:
globals = caller.f_globals
line_number = caller.f_lineno
else:
globals = sys.__dict__
line_number= 1
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if filename:
# If there is no filename at all, the user is most likely in a REPL,
# and the warning is not necessary.
values = dict(
filename=filename,
line_number=line_number,
parser=builder.NAME,
markup_type=markup_type
)
warnings.warn(
self.NO_PARSER_SPECIFIED_WARNING % values,
GuessedAtParserWarning, stacklevel=2
)
else:
if kwargs:
warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")
self.builder = builder
self.is_xml = builder.is_xml
self.known_xml = self.is_xml
self._namespaces = dict()
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256 and (
(isinstance(markup, bytes) and not b'<' in markup)
or (isinstance(markup, str) and not '<' in markup)
):
# Issue warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# since that is sometimes the intended behavior.
if not self._markup_is_url(markup):
self._markup_resembles_filename(markup)
rejections = []
success = False
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
self.builder.initialize_soup(self)
try:
self._feed()
success = True
break
except ParserRejectedMarkup as e:
rejections.append(e)
pass
if not success:
other_exceptions = [str(e) for e in rejections]
raise ParserRejectedMarkup(
"The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
)
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _clone(self):
"""Create a new BeautifulSoup object with the same TreeBuilder,
but not associated with any markup.
This is the first step of the deepcopy process.
"""
clone = type(self)("", None, self.builder)
# Keep track of the encoding of the original document,
# since we won't be parsing it again.
clone.original_encoding = self.original_encoding
return clone
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
d['builder'] = type(self.builder)
# Store the contents as a Unicode string.
d['contents'] = []
d['markup'] = self.decode()
# If _most_recent_element is present, it's a Tag object left
# over from initial parse. It might not be picklable and we
# don't need it.
if '_most_recent_element' in d:
del d['_most_recent_element']
return d
def __setstate__(self, state):
# If necessary, restore the TreeBuilder by looking it up.
self.__dict__ = state
if isinstance(self.builder, type):
self.builder = self.builder()
elif not self.builder:
# We don't know which builder was used to build this
# parse tree, so use a default we know is always available.
self.builder = HTMLParserTreeBuilder()
self.builder.soup = self
self.reset()
self._feed()
return state
def _decode_markup(cls, markup):
"""Ensure `markup` is bytes so it's safe to send into warnings.warn.
TODO: warnings.warn had this problem back in 2010 but it might not
anymore.
"""
if isinstance(markup, bytes):
decoded = markup.decode('utf-8', 'replace')
else:
decoded = markup
return decoded
def _markup_is_url(cls, markup):
"""Error-handling method to raise a warning if incoming markup looks
like a URL.
:param markup: A string.
:return: Whether or not the markup resembles a URL
closely enough to justify a warning.
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, str):
space = ' '
cant_start_with = ("http:", "https:")
else:
return False
if any(markup.startswith(prefix) for prefix in cant_start_with):
if not space in markup:
warnings.warn(
'The input looks more like a URL than markup. You may want to use'
' an HTTP client like requests to get the document behind'
' the URL, and feed that document to Beautiful Soup.',
MarkupResemblesLocatorWarning,
stacklevel=3
)
return True
return False
def _markup_resembles_filename(cls, markup):
"""Error-handling method to raise a warning if incoming markup
resembles a filename.
:param markup: A bytestring or string.
:return: Whether or not the markup resembles a filename
closely enough to justify a warning.
"""
path_characters = '/\\'
extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
if isinstance(markup, bytes):
path_characters = path_characters.encode("utf8")
extensions = [x.encode('utf8') for x in extensions]
filelike = False
if any(x in markup for x in path_characters):
filelike = True
else:
lower = markup.lower()
if any(lower.endswith(ext) for ext in extensions):
filelike = True
if filelike:
warnings.warn(
'The input looks more like a filename than markup. You may'
' want to open this file and pass the filehandle into'
' Beautiful Soup.',
MarkupResemblesLocatorWarning, stacklevel=3
)
return True
return False
def _feed(self):
"""Internal method that parses previously set markup, creating a large
number of Tag and NavigableString objects.
"""
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
"""Reset this object to a state as though it had never parsed any
markup.
"""
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.open_tag_counter = Counter()
self.preserve_whitespace_tag_stack = []
self.string_container_stack = []
self._most_recent_element = None
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
sourceline=None, sourcepos=None, **kwattrs):
"""Create a new Tag associated with this BeautifulSoup object.
:param name: The name of the new Tag.
:param namespace: The URI of the new Tag's XML namespace, if any.
:param prefix: The prefix for the new Tag's XML namespace, if any.
:param attrs: A dictionary of this Tag's attribute values; can
be used instead of `kwattrs` for attributes like 'class'
that are reserved words in Python.
:param sourceline: The line number where this tag was
(purportedly) found in its source document.
:param sourcepos: The character position within `sourceline` where this
tag was (purportedly) found.
:param kwattrs: Keyword arguments for the new Tag's attribute values.
"""
kwattrs.update(attrs)
return self.element_classes.get(Tag, Tag)(
None, self.builder, name, namespace, nsprefix, kwattrs,
sourceline=sourceline, sourcepos=sourcepos
)
def string_container(self, base_class=None):
container = base_class or NavigableString
# There may be a general override of NavigableString.
container = self.element_classes.get(
container, container
)
# On top of that, we may be inside a tag that needs a special
# container class.
if self.string_container_stack and container is NavigableString:
container = self.builder.string_containers.get(
self.string_container_stack[-1].name, container
)
return container
def new_string(self, s, subclass=None):
"""Create a new NavigableString associated with this BeautifulSoup
object.
"""
container = self.string_container(subclass)
return container(s)
def insert_before(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
"""Internal method called by _popToTag when a tag is closed."""
tag = self.tagStack.pop()
if tag.name in self.open_tag_counter:
self.open_tag_counter[tag.name] -= 1
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
if self.string_container_stack and tag == self.string_container_stack[-1]:
self.string_container_stack.pop()
#print("Pop", tag.name)
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
"""Internal method called by handle_starttag when a tag is opened."""
#print("Push", tag.name)
if self.currentTag is not None:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name != self.ROOT_TAG_NAME:
self.open_tag_counter[tag.name] += 1
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
if tag.name in self.builder.string_containers:
self.string_container_stack.append(tag)
def endData(self, containerClass=None):
"""Method called by the TreeBuilder when the end of a data segment
occurs.
"""
if self.current_data:
current_data = ''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
containerClass = self.string_container(containerClass)
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Method called by the TreeBuilder to integrate an object into the parse tree."""
if parent is None:
parent = self.currentTag
if most_recent_element is not None:
previous_element = most_recent_element
else:
previous_element = self._most_recent_element
next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if previous_element is None:
previous_element = o.previous_element
fix = parent.next_element is not None
o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
self._most_recent_element = o
parent.contents.append(o)
# Check if we are inserting into an already parsed node.
if fix:
self._linkage_fixer(parent)
def _linkage_fixer(self, el):
"""Make sure linkage of this fragment is sound."""
first = el.contents[0]
child = el.contents[-1]
descendant = child
if child is first and el.parent is not None:
# Parent should be linked to first child
el.next_element = child
# We are no longer linked to whatever this element is
prev_el = child.previous_element
if prev_el is not None and prev_el is not el:
prev_el.next_element = None
# First child should be linked to the parent, and no previous siblings.
child.previous_element = el
child.previous_sibling = None
# We have no sibling as we've been appended as the last.
child.next_sibling = None
# This index is a tag, dig deeper for a "last descendant"
if isinstance(child, Tag) and child.contents:
descendant = child._last_descendant(False)
# As the final step, link last descendant. It should be linked
# to the parent's next sibling (if found), else walk up the chain
# and find a parent with a sibling. It should have no next sibling.
descendant.next_element = None
descendant.next_sibling = None
target = el
while True:
if target is None:
break
elif target.next_sibling is not None:
descendant.next_element = target.next_sibling
target.next_sibling.previous_element = child
break
target = target.parent
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag.
If there are no open tags with the given name, nothing will be
popped.
:param name: Pop up to the most recent tag with this name.
:param nsprefix: The namespace prefix that goes with `name`.
:param inclusivePop: It this is false, pops the tag stack up
to but *not* including the most recent instqance of the
given tag.
"""
#print("Popping to %s" % name)
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
if not self.open_tag_counter.get(name):
break
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
sourcepos=None, namespaces=None):
"""Called by the tree builder when a new tag is encountered.
:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
:param attrs: A dictionary of attribute values.
:param sourceline: The line number where this tag was found in its
source document.
:param sourcepos: The character position within `sourceline` where this
tag was found.
:param namespaces: A dictionary of all namespace prefix mappings
currently in scope in the document.
If this method returns None, the tag was rejected by an active
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print("Start tag %s: %s" % (name, attrs))
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = self.element_classes.get(Tag, Tag)(
self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element,
sourceline=sourceline, sourcepos=sourcepos,
namespaces=namespaces
)
if tag is None:
return tag
if self._most_recent_element is not None:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
    """Called by the tree builder when an ending tag is encountered.

    :param name: Name of the tag.
    :param nsprefix: Namespace prefix for the tag.
    """
    #print("End tag: " + name)
    self.endData()  # flush buffered text before closing the tag
    self._popToTag(name, nsprefix)  # pop the stack up to and including this tag
def handle_data(self, data):
    """Called by the tree builder when a chunk of textual data is encountered."""
    # Text can arrive in multiple chunks; buffer them until endData() joins them.
    self.current_data.append(data)
def decode(self, pretty_print=False,
           eventual_encoding=DEFAULT_OUTPUT_ENCODING,
           formatter="minimal", iterator=None):
    """Returns a string or Unicode representation of the parse tree
    as an HTML or XML document.

    :param pretty_print: If this is True, indentation will be used to
        make the document more readable.
    :param eventual_encoding: The encoding of the final document.
        If this is None, the document will be a Unicode string.
    :param formatter: Output formatter name or object (passed through to
        the superclass).
    :param iterator: Optional element iterator (passed through to the
        superclass).
    """
    if self.is_xml:
        # Print the XML declaration
        encoding_part = ''
        if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
            # This is a special Python encoding; it can't actually
            # go into an XML document because it means nothing
            # outside of Python.
            eventual_encoding = None
        if eventual_encoding != None:
            encoding_part = ' encoding="%s"' % eventual_encoding
        prefix = '<?xml version="1.0"%s?>\n' % encoding_part
    else:
        prefix = ''
    # None disables indentation entirely in the superclass.
    if not pretty_print:
        indent_level = None
    else:
        indent_level = 0
    return prefix + super(BeautifulSoup, self).decode(
        indent_level, eventual_encoding, formatter, iterator)
def get_website_info(url):
    """Fetch a web page and extract its title and all paragraph texts.

    Args:
        url: Address of the page to scrape.

    Returns:
        Dict with keys 'url', 'title' and 'paragraphs' (list of strings).
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    paragraph_texts = [paragraph.text for paragraph in soup.find_all('p')]
    return {
        'url': url,
        'title': soup.find('title').text,
        'paragraphs': paragraph_texts,
    }
168,289 | import requests
import os
from dotenv import load_dotenv
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import MessagesPlaceholder
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chains.summarize import load_summarize_chain
from langchain.tools import BaseTool
from langchain.agents import load_tools
from pydantic import BaseModel, Field
from typing import Type
from bs4 import BeautifulSoup
import requests
import json
from langchain.schema import SystemMessage
from langchain.utilities import OpenWeatherMapAPIWrapper
from duckduckgo_search import DDGS
from langchain.tools import PubmedQueryRun
# Conversational ReAct agent wired with the tools, windowed memory and
# custom system message defined elsewhere in this module.
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
    verbose=True,
    agent_kwargs={
        "extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
        "system_message": system_message,
        # NOTE(review): this key contains a space; langchain's agent kwargs use
        # "format_instructions" — as written this entry is likely ignored.
        # TODO confirm against the langchain version in use and fix upstream.
        "format instructions": FORMAT_INSTRUCTIONS,
    },
    handle_parsing_errors=True,
    memory=memory,
    max_token_length=220,
    max_iterations=3,
    early_stopping_method="generate",
    max_execution_time=14
)
def get_response(message):
    """Run the agent on a user message and return a displayable reply.

    If the agent raises, the error text is inspected: an LLM-parsing
    failure still contains a usable answer after the known prefix;
    anything else yields a generic clarification request.
    """
    parse_error_prefix = "Could not parse LLM output: "
    try:
        print("Human: " + str(message) + " Search AI:")
        response = agent.run("Human: " + str(message) + "? Search AI:")
        print(response)
    except Exception as error:
        response = str(error)
        print(response)
        if response.startswith(parse_error_prefix):
            # The model answered, just not in the agent's expected format;
            # the text after the prefix is the usable answer.
            response = response.removeprefix(parse_error_prefix)
        else:
            response = "Sorry, I did not understand that. Would you please clarify?"
        print(response)
    return response
168,290 | from fastapi import FastAPI, Depends, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from fastapi import Request
from config import settings
import typing as t
import uvicorn
import os
from qdrant_engine import QdrantIndex
async def root(request: Request):
    """Health-check endpoint confirming the API server is reachable."""
    status_message = "Server is up and running!"
    return {"message": status_message}
168,291 | from fastapi import FastAPI, Depends, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from fastapi import Request
from config import settings
import typing as t
import uvicorn
import os
from qdrant_engine import QdrantIndex
qdrant_index = QdrantIndex(settings.qdrant_host, settings.qdrant_api_key, False)
async def upload_file(request: Request, file: UploadFile):
    """Persist an uploaded file, index it into Qdrant, then delete the temp copy.

    Args:
        request: The incoming HTTP request (unused, kept for FastAPI signature).
        file: The uploaded file to index.

    Returns:
        Dict with the original filename and a "success"/"error" status.
    """
    filename = file.filename
    status = "success"
    # fix: define filepath before the try block; previously, if os.path.join or
    # file.read() raised, the cleanup code referenced an unbound name (NameError).
    filepath = None
    print(file.size)
    try:
        filepath = os.path.join('app', 'documents', os.path.basename(filename))
        contents = await file.read()
        with open(filepath, 'wb') as f:
            f.write(contents)
        qdrant_index.insert_into_index(filepath, filename)
    except Exception as ex:
        print(str(ex))
        status = "error"
        # raise HTTPException(status_code=500, detail="Your file received but couldn't be stored!")
    finally:
        # Always remove the temporary on-disk copy, on success or failure
        # (replaces the original's duplicated removal blocks).
        if filepath is not None and os.path.exists(filepath):
            os.remove(filepath)
    return {"filename": filename, "status": status}
168,292 | from fastapi import FastAPI, Depends, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from fastapi import Request
from config import settings
import typing as t
import uvicorn
import os
from qdrant_engine import QdrantIndex
qdrant_index = QdrantIndex(settings.qdrant_host, settings.qdrant_api_key, False)
class UserQuery(BaseModel):
    """Request body for the query endpoint: a single free-text question."""
    query: str  # the user's natural-language question
async def query_index(request: Request, input_query: UserQuery):
    """Answer a user question against the shared Qdrant index.

    :param request: The incoming HTTP request (unused, kept for FastAPI signature).
    :param input_query: Body containing the question string.
    :return: Dict with the generated answer and the supporting documents.
    """
    print(input_query)
    generated_response, relevant_docs = qdrant_index.generate_response(question=input_query.query)
    print(generated_response)
    return {"response": generated_response, "relevant_docs": relevant_docs}
168,293 | import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
def build_sentence_window_index(
    documents,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    sentence_window_size=3,
    save_dir="sentence_index",
):
    """Build a sentence-window VectorStoreIndex, or load it if already persisted.

    Args:
        documents: Documents to index.
        llm: LLM used by the service context.
        embed_model: Embedding model spec (llama_index string form).
        sentence_window_size: Number of sentences captured around each node.
        save_dir: Directory used to persist/reload the index.

    Returns:
        The sentence-window index (freshly built or loaded from disk).
    """
    window_parser = SentenceWindowNodeParser.from_defaults(
        window_size=sentence_window_size,
        window_metadata_key="window",
        original_text_metadata_key="original_text",
    )
    window_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=window_parser,
    )
    if os.path.exists(save_dir):
        # A persisted copy exists on disk — reload it instead of re-embedding.
        return load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=window_context,
        )
    fresh_index = VectorStoreIndex.from_documents(
        documents, service_context=window_context
    )
    fresh_index.storage_context.persist(persist_dir=save_dir)
    return fresh_index
168,294 | import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
def get_sentence_window_query_engine(
    sentence_index, similarity_top_k=6, rerank_top_n=2
):
    """Create a query engine over a sentence-window index with reranking.

    Args:
        sentence_index: Index built by build_sentence_window_index.
        similarity_top_k: Number of candidates retrieved by similarity.
        rerank_top_n: Number of candidates kept after reranking.

    Returns:
        A configured query engine.
    """
    node_postprocessors = [
        # Replace each node's text with its surrounding sentence window.
        MetadataReplacementPostProcessor(target_metadata_key="window"),
        # Rerank the retrieved candidates with a cross-encoder.
        SentenceTransformerRerank(top_n=rerank_top_n, model="BAAI/bge-reranker-base"),
    ]
    return sentence_index.as_query_engine(
        similarity_top_k=similarity_top_k, node_postprocessors=node_postprocessors
    )
168,295 | import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
def build_automerging_index(
    documents,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    save_dir="merging_index",
    chunk_sizes=None
):
    """Build a hierarchical auto-merging index, or load it if already persisted.

    Args:
        documents: Documents to index.
        llm: LLM used by the service context.
        embed_model: Embedding model spec (llama_index string form).
        save_dir: Directory used to persist/reload the index.
        chunk_sizes: Chunk sizes per hierarchy layer; defaults to [2048, 512, 128].

    Returns:
        The auto-merging index (freshly built or loaded from disk).
    """
    hierarchy_sizes = chunk_sizes or [2048, 512, 128]
    hierarchy_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=hierarchy_sizes)
    all_nodes = hierarchy_parser.get_nodes_from_documents(documents)
    leaves = get_leaf_nodes(all_nodes)
    merging_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model
    )
    # Parent/intermediate nodes live in the docstore; the vector index itself
    # only covers the leaf nodes.
    storage = StorageContext.from_defaults()
    storage.docstore.add_documents(all_nodes)
    if os.path.exists(save_dir):
        return load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=merging_context
        )
    fresh_index = VectorStoreIndex(
        leaves, storage_context=storage, service_context=merging_context
    )
    fresh_index.storage_context.persist(persist_dir=save_dir)
    return fresh_index
168,296 | import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
def get_automerging_query_engine(
    automerging_index,
    similarity_top_k=12,
    rerank_top_n=6,
):
    """Create a query engine that merges child hits into parents and reranks them.

    Args:
        automerging_index: Index built by build_automerging_index.
        similarity_top_k: Number of leaf candidates retrieved by similarity.
        rerank_top_n: Number of candidates kept after reranking.

    Returns:
        A configured RetrieverQueryEngine.
    """
    # The merging retriever promotes groups of child hits to their parent node.
    merging_retriever = AutoMergingRetriever(
        automerging_index.as_retriever(similarity_top_k=similarity_top_k),
        automerging_index.storage_context,
        verbose=True,
    )
    reranker = SentenceTransformerRerank(
        top_n=rerank_top_n, model='BAAI/bge-reranker-base'
    )
    return RetrieverQueryEngine.from_args(
        merging_retriever, node_postprocessors=[reranker]
    )
168,297 | import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
def get_prebuilt_trulens_recorder(query_engine, app_id):
    """Wrap a query engine in a TruLlama recorder with the RAG-triad feedbacks.

    Feedbacks: answer relevance, context relevance and groundedness.

    :param query_engine: The llama_index query engine to instrument.
    :param app_id: Identifier under which TruLens records the app's runs.
    :return: A TruLlama recorder configured with the three feedback functions.
    """
    # Feedback functions
    # Answer Relevance: is the final answer on-topic for the question?
    provider = fOpenAI()
    f_qa_relevance = Feedback(
        provider.relevance_with_cot_reasons,
        name="Answer Relevance"
    ).on_input_output()
    # Context Relevance: are the retrieved chunks relevant to the question?
    context_selection = TruLlama.select_source_nodes().node.text
    f_qs_relevance = (
        Feedback(provider.qs_relevance,
                name="Context Relevance")
        .on_input()
        .on(context_selection)
        .aggregate(np.mean)  # average relevance over all retrieved chunks
    )
    # Groundedness: is the answer supported by the retrieved context?
    grounded = Groundedness(groundedness_provider=provider)
    f_groundedness = (
        Feedback(grounded.groundedness_measure_with_cot_reasons,
                name="Groundedness"
        )
        .on(context_selection)
        .on_output()
        .aggregate(grounded.grounded_statements_aggregator)
    )
    tru_recorder = TruLlama(
        query_engine,
        app_id=app_id,
        feedbacks = [
            f_qa_relevance,
            f_qs_relevance,
            f_groundedness
        ]
    )
    return tru_recorder
168,298 | from setuptools import setup, find_packages
from typing import List
REQUIREMENTS_FILE_NAME = "requirements.txt"
HYPHEN_E_DOT = "-e ."

# fix: removed the line `List = _Alias()` — `_Alias` is undefined (NameError at
# import time) and the assignment shadowed typing.List.


def get_requirements_list() -> List[str]:
    """Read requirements.txt and return the list of requirement strings.

    The editable-install marker ("-e .") is dropped because it refers to the
    project itself rather than an installable dependency.

    Returns:
        Requirement strings with newlines removed.
    """
    with open(REQUIREMENTS_FILE_NAME) as requirement_file:
        requirement_list = [requirement_name.replace("\n", "") for requirement_name in requirement_file.readlines()]
    if HYPHEN_E_DOT in requirement_list:
        requirement_list.remove(HYPHEN_E_DOT)
    return requirement_list
168,299 | from flask import Flask
from video_summarizer.logger import logging
from video_summarizer.exception import CustomException
import os, sys
import logging
logging.basicConfig(filename=log_file_path,
filemode='w',
format='[%(asctime)s] %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
class CustomException(Exception):
    """Project exception that records the file name and line numbers of a failure."""

    def __init__(self, error_message: Exception, error_details: sys):
        self.error_message = CustomException.get_detailed_error_message(
            error_message=error_message, error_details=error_details
        )
        # Populate Exception.args so the instance behaves like a normal exception.
        super().__init__(self.error_message)

    @staticmethod
    def get_detailed_error_message(error_message: Exception, error_details: sys) -> str:
        """Format the active traceback's location info into a readable message.

        :param error_message: The original exception that was caught.
        :param error_details: The `sys` module, used to access exc_info().
        :return: A multi-line message with file name, line numbers and error text.
        """
        _, _, exec_tb = error_details.exc_info()
        exception_block_line_number = exec_tb.tb_frame.f_lineno
        try_block_line_number = exec_tb.tb_lineno
        file_name = exec_tb.tb_frame.f_code.co_filename
        error_message = f"""
        Error occurred in execution of :
        [{file_name}] at
        try block line number : [{try_block_line_number}]
        and exception block line number : [{exception_block_line_number}]
        error message : [{error_message}]
        """
        return error_message

    def __str__(self):
        """Show the detailed message when the exception is printed."""
        return self.error_message

    def __repr__(self):
        # fix: the original returned CustomException.__name__.str(), which raises
        # AttributeError because str objects have no .str() method.
        return CustomException.__name__
def index():
    """Smoke-test route: deliberately raises to exercise CustomException and logging.

    :return: The literal greeting string once the exception has been logged.
    """
    try:
        raise Exception("We are testing our custom exception file")  # deliberate error
    except Exception as e:
        summary = CustomException(e, sys)
        logging.info(summary.error_message)
        logging.info("We are testing logging module")
    return "hello World"
168,300 | import os
from flask import Flask, render_template, request, send_from_directory
from video_summarizer.components.video_to_subtitle import SubtitleGenerator, Video2SubConfig
from video_summarizer.components.summarize import Summarizer
from video_summarizer.pipeline import run_training_pipeline
import threading
from gtts import gTTS
from video_summarizer.components.video_downloader import VideoDownloader
The provided code snippet includes necessary dependencies for implementing the `index` function. Write a Python function `def index()` to solve the following problem:
It renders the index.html file in the templates folder and returns the rendered page.
Here is the function:
def index():
    """Render the landing page.

    Returns:
        The rendered index.html template.
    """
    home_template = 'index.html'
    return render_template(home_template)
168,301 | import os
from flask import Flask, render_template, request, send_from_directory
from video_summarizer.components.video_to_subtitle import SubtitleGenerator, Video2SubConfig
from video_summarizer.components.summarize import Summarizer
from video_summarizer.pipeline import run_training_pipeline
import threading
from gtts import gTTS
from video_summarizer.components.video_downloader import VideoDownloader
app = Flask(__name__)
app.config['upload_dir'] = os.path.join('uploads/')
os.makedirs(app.config['upload_dir'], exist_ok=True)
print("Loading Summarizer model")
summarizer = Summarizer()
summarizer.load_model()
class Video2SubConfig:
    """Defaults for audio extraction prior to transcription."""
    output_dir: str = 'srt'          # directory generated .srt files are written to
    audio_codec: str = 'pcm_s16le'   # 16-bit little-endian PCM (plain WAV)
    audio_sample_rate: str = '16k'   # ffmpeg-style sample-rate string
    audio_channels: int = 1          # mono
class SubtitleGenerator:
    """Extracts audio from videos with ffmpeg and transcribes it to .srt via Whisper.

    Pipeline: extract_audio_from_video -> generate_subtitles -> get_subtitles (entry point).
    """

    def __init__(self, config: Video2SubConfig):
        self.output_dir = config.output_dir
        self.audio_sample_rate = config.audio_sample_rate
        self.audio_codec = config.audio_codec
        self.audio_channels = config.audio_channels

    def extract_audio_from_video(self, video_paths: List[str]) -> Dict[str, str]:
        """Extract a WAV track from each video into the system temp directory.

        :param video_paths: Paths of the videos to process.
        :return: Mapping of video path -> extracted audio path.
        """
        temp_dir = tempfile.gettempdir()
        audio_paths = {}
        for video_path in video_paths:
            filename = os.path.basename(video_path).split(".")[0]
            # fix: the output name previously used the literal "(unknown).wav"
            # (a mangled f-string) for every video, so multiple videos collided
            # and the computed `filename` was never used.
            output_path = os.path.join(temp_dir, f"{filename}.wav")
            try:
                ffmpeg.input(video_path).output(
                    output_path,
                    # fix: honor the configured codec instead of the hard-coded
                    # "pcm_s16le" (same value under the default config).
                    acodec=self.audio_codec,
                    ac=self.audio_channels,
                    ar=self.audio_sample_rate,
                ).run(quiet=False, overwrite_output=True, capture_stdout=True, capture_stderr=True)
            except ffmpeg.Error as e:
                print('stdout:', e.stdout.decode('utf8'))
                print('stderr:', e.stderr.decode('utf8'))
                raise e
            audio_paths[video_path] = output_path
        return audio_paths

    def generate_subtitles(
        self, audio_paths: Dict[str, str], transcribe_fn: callable
    ) -> Dict[str, str]:
        """Transcribe each audio file and write an .srt into output_dir.

        NOTE: despite the annotation, this returns a tuple
        (video path -> subtitle path mapping, last transcription result);
        callers rely on the tuple (e.g. `subtitle[1]['text']`).
        """
        subtitles_paths = {}
        result = None  # fix: previously unbound if every iteration raised before assigning
        for video_path, audio_path in audio_paths.items():
            try:
                filename = os.path.basename(video_path).split(".")[0]
                # fix: subtitle name previously hard-coded "(unknown).srt" (mangled f-string).
                subtitle_path = os.path.join(self.output_dir, f"{filename}.srt")
                logger.info(
                    f"Generating subtitles for {os.path.basename(audio_path)}... This might take a while."
                )
                warnings.filterwarnings("ignore")
                result = transcribe_fn(audio_path)
                warnings.filterwarnings("default")
                with open(subtitle_path, "w", encoding="utf-8") as srt_file:
                    write_srt(result["segments"], file=srt_file)
                subtitles_paths[video_path] = subtitle_path
            except ffmpeg._run.Error as e:
                logger.error(f"Error generating subtitles for {os.path.basename(audio_path)}: {str(e)}")
        return subtitles_paths, result

    def get_subtitles(
        self, video_paths: List[str], model_path: str, task: str, verbose=False
    ) -> Dict[str, str]:
        """End-to-end entry point: extract audio, load Whisper, transcribe to .srt.

        :param video_paths: Videos to subtitle.
        :param model_path: Whisper model name (e.g. 'tiny'); ".en" suffix means English-only.
        :param task: 'transcribe' or 'translate'.
        :param verbose: Passed through to Whisper.
        :return: Whatever generate_subtitles returns: (paths mapping, last result).
        """
        os.makedirs(self.output_dir, exist_ok=True)
        if model_path.endswith(".en"):
            print(f"{model_path} is an English model")
        model = whisper.load_model(model_path)
        audio_paths = self.extract_audio_from_video(video_paths)
        subtitles_paths = self.generate_subtitles(
            audio_paths,
            lambda audio_path: model.transcribe(audio_path, verbose=verbose, task=task),
        )
        return subtitles_paths
class VideoDownloader:
    """Downloads a video from YouTube (via pytube) or any direct URL (via requests)."""

    def __init__(self, url: str, save_path: str):
        self.url = url              # source URL of the video
        self.save_path = save_path  # directory the video is saved into

    def download(self) -> str | None:
        """
        It downloads a video from a given url, and returns the path to the downloaded video

        Returns:
            The return value is a string or None.
        """
        try:
            # Dispatch on the URL: anything containing 'youtu' goes to pytube.
            if 'youtu' in self.url:
                return self._download_youtube()
            else:
                return self._download_other()
        except Exception as e:
            raise CustomException(e, sys)

    def _download_youtube(self) -> str:
        """
        It downloads a youtube video from a given url and saves it to a given path

        Returns:
            The path to the downloaded video
        """
        try:
            yt = YouTube(self.url)
            video = yt.streams.first()  # first available stream (default quality)
            video.download(self.save_path)
            logger.info(f"Youtube Video downloaded to {os.path.join(self.save_path, video.default_filename)}")
            return os.path.join(self.save_path, video.default_filename)
        except Exception as e:
            raise CustomException(e, sys)

    def _download_other(self) -> str:
        """
        It downloads the video from the url and saves it to the save_path

        Returns:
            The path to the downloaded file.
        """
        try:
            response = requests.get(self.url, stream=True)
            filename = self.url.split("/")[-1]  # last URL segment becomes the file name
            # fix: the destination previously used the literal "(unknown)" (a mangled
            # f-string), so every download overwrote the same file.
            with open(os.path.join(self.save_path, f"{filename}"), "wb") as f:
                for chunk in response.iter_content(chunk_size=4096):
                    f.write(chunk)
            logger.info(f"Video downloaded to {os.path.join(self.save_path, filename)}")
            return os.path.join(self.save_path, filename)
        except Exception as e:
            raise CustomException(e, sys)
The provided code snippet includes necessary dependencies for implementing the `upload_file` function. Write a Python function `def upload_file()` to solve the following problem:
The function upload_file() takes a video file or a video link, transcribes the video, summarizes the transcript, and returns the transcript and summary text.
Here is the function:
def upload_file():
    """
    The function upload_file() takes in a video file or a video link, transcribes the video, and
    summarizes the transcript.

    Returns:
        Rendered index.html containing the transcript, summary and summary-audio path.
    """
    if request.method == 'POST':
        video_file = request.files["video_file"]
        # NOTE(review): the Video2SubConfig *class* (not an instance) is passed;
        # this works because the config only uses class attributes — confirm intended.
        subtitle_generator = SubtitleGenerator(config=Video2SubConfig)
        if video_file:
            filename = video_file.filename
            video_path = os.path.join(os.path.join(
                app.config['upload_dir'], filename))
            print(video_path)
            video_file.save(video_path)
            task = 'transcribe'
            if len(request.form.getlist('translate-btn')) > 0:
                task = 'translate'
                print("translate task is selected:", task)
            # fix: get_subtitles expects a *list* of paths (the link branch below
            # already passes one); passing the bare string made the audio
            # extractor iterate over the path's characters.
            subtitle = subtitle_generator.get_subtitles(video_paths=[video_path],
                                                        model_path='tiny', task=task, verbose=False)
        else:
            video_link = request.form["link-input"]
            downloader = VideoDownloader(
                url=video_link, save_path=app.config['upload_dir'])
            video_path = downloader.download()
            task = 'transcribe'
            option = request.form.get('options')
            if option == 'translate':
                task = 'translate'
                print("translate task is selected:", task)
            print("task selected:", task)
            subtitle = subtitle_generator.get_subtitles(video_paths=[video_path],
                                                        model_path='tiny', task=task, verbose=False)
        # get_subtitles returns (paths mapping, whisper result); index 1 is the result dict.
        transcript_text = subtitle[1]['text']
        summary_text = summarizer.summarize_text(transcript_text)
        # Synthesize the summary to speech and store it next to the uploads.
        tts = gTTS(summary_text, tld="co.uk")
        audio_path = os.path.join(app.config['upload_dir'], 'summary.mp3')
        tts.save(audio_path)
        return render_template('index.html', transcript=transcript_text, summary=summary_text, audio_path=audio_path)
168,302 | import os
from flask import Flask, render_template, request, send_from_directory
from video_summarizer.components.video_to_subtitle import SubtitleGenerator, Video2SubConfig
from video_summarizer.components.summarize import Summarizer
from video_summarizer.pipeline import run_training_pipeline
import threading
from gtts import gTTS
from video_summarizer.components.video_downloader import VideoDownloader
app = Flask(__name__)
app.config['upload_dir'] = os.path.join('uploads/')
def serve_audio(filename):
    """Serve a generated audio file from the upload directory as MP3."""
    upload_dir = app.config['upload_dir']
    return send_from_directory(upload_dir, filename, mimetype='audio/mpeg')
168,303 | import os
from flask import Flask, render_template, request, send_from_directory
from video_summarizer.components.video_to_subtitle import SubtitleGenerator, Video2SubConfig
from video_summarizer.components.summarize import Summarizer
from video_summarizer.pipeline import run_training_pipeline
import threading
from gtts import gTTS
from video_summarizer.components.video_downloader import VideoDownloader
def run_training_pipeline(config=TrainingPipelineConfig):
    """Run the end-to-end training pipeline: ingest, validate, process, train.

    :param config: Pipeline configuration providing the artifact directories.
        NOTE(review): the default is the TrainingPipelineConfig *class*, not an
        instance — presumably its attributes are class-level; confirm intended.
    """
    # Create Directories
    create_directories()
    # create an instance of the `DataIngestion` class and assign variable as `downloader`.
    downloader = DataIngestion(dataset_name="lighteval/summarization", subset="xsum", save_folder=config.data_ingestion_artifacts)
    downloader.download_dataset()
    # create an instance of the `DataValidation` class and assign variable as `validator`.
    validator = DataValidation(dataset_folder=config.data_ingestion_artifacts)
    validator.check_dataset()
    # create an instance of the config dataclass for data processing component
    data_processing_config = DataProcessingConfig(max_input_length=128, max_target_length=64, dataset_folder=config.data_ingestion_artifacts, artifacts_folder=config.data_processing_artifacts)
    # create an instance of the 'DataProcessing' class and assigns variable as 'processor'.
    processor = DataProcessing(data_processing_config)
    processor.process_data()
    # create an instance of the `ModelTrainer` class with the specified parameters.
    trainer = ModelTrainer(model_checkpoint='facebook/bart-base',
                           processed_dataset_folder=config.data_processing_artifacts,
                           trainer_artifact_dir=config.model_trainer_artifacts)
    trainer.train()
def start_train():
    """Kick off the training pipeline on a background thread.

    Returns:
        A confirmation message; training continues asynchronously.
    """
    def run_training():
        run_training_pipeline()

    background_worker = threading.Thread(target=run_training)
    background_worker.start()
    return "Training has started"
168,304 | import sys
from datetime import timedelta
from typing import Iterator, TextIO
from video_summarizer.exception import CustomException
def format_timestamp(seconds: float, always_include_hours: bool = False):
    """
    It takes a float representing a number of seconds, and returns a string representing the same
    number of seconds in the format HH:MM:SS.mmm.

    Args:
        seconds (float): The number of seconds to format.
        always_include_hours (bool): If True, the hours will always be included in the output.
            If False, the hours are included only when non-zero. Defaults to False.

    Returns:
        The formatted timestamp string.
    """
    try:
        assert seconds >= 0, "non-negative timestamp expected"
        delta = timedelta(seconds=seconds)
        whole_seconds = int(delta.total_seconds())
        hours, remainder = divmod(whole_seconds, 3600)
        minutes, secs = divmod(remainder, 60)
        millis = delta.microseconds // 1000
        hours_prefix = f"{hours}:" if always_include_hours or hours > 0 else ""
        return f"{hours_prefix}{minutes:02d}:{secs:02d}.{millis:03d}"
    except Exception as e:
        raise CustomException(e, sys)
import sys
import os  # fix: os.getpid() is evaluated in the Linux default argument below,
           # but `os` was never imported, causing a NameError at definition time.

if sys.platform[:5] == 'linux':
    def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
        """
        Return number of jiffies elapsed.

        Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc.
        """
        import time
        if not _load_time:
            # Mutable default intentionally caches the first-call timestamp.
            _load_time.append(time.time())
        try:
            with open(_proc_pid_stat, 'r') as f:
                stat_fields = f.readline().split(' ')
            # Field 14 (index 13) of /proc/<pid>/stat is utime in clock ticks.
            return int(stat_fields[13])
        except Exception:
            # /proc unavailable: fall back to a wall-clock estimate.
            return int(100*(time.time()-_load_time[0]))
else:
    # os.getpid is not in all platforms available.
    # Using time is safe but inaccurate, especially when process
    # was suspended or sleeping.
    def jiffies(_load_time=[]):
        """
        Return number of jiffies elapsed.

        Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc.
        """
        import time
        if not _load_time:
            _load_time.append(time.time())
        return int(100*(time.time()-_load_time[0]))
class Iterator(Iterable[_T_co], Protocol[_T_co]):
def __next__(self) -> _T_co: ...
def __iter__(self) -> Iterator[_T_co]: ...
class TextIO(IO[str]):
# TODO use abstractproperty
def buffer(self) -> BinaryIO: ...
def encoding(self) -> str: ...
def errors(self) -> Optional[str]: ...
def line_buffering(self) -> int: ... # int on PyPy, bool on CPython
def newlines(self) -> Any: ... # None, str or tuple
def __enter__(self) -> TextIO: ...
class CustomException(Exception):
    """Project exception that records the file name and line numbers of a failure."""

    def __init__(self, error_message: Exception, error_details: sys):
        self.error_message = CustomException.get_detailed_error_message(
            error_message=error_message, error_details=error_details
        )
        # Populate Exception.args so the instance behaves like a normal exception.
        super().__init__(self.error_message)

    @staticmethod
    def get_detailed_error_message(error_message: Exception, error_details: sys) -> str:
        """Format the active traceback's location info into a readable message.

        :param error_message: The original exception that was caught.
        :param error_details: The `sys` module, used to access exc_info().
        :return: A multi-line message with file name, line numbers and error text.
        """
        _, _, exec_tb = error_details.exc_info()
        exception_block_line_number = exec_tb.tb_frame.f_lineno
        try_block_line_number = exec_tb.tb_lineno
        file_name = exec_tb.tb_frame.f_code.co_filename
        error_message = f"""
        Error occurred in execution of :
        [{file_name}] at
        try block line number : [{try_block_line_number}]
        and exception block line number : [{exception_block_line_number}]
        error message : [{error_message}]
        """
        return error_message

    def __str__(self):
        """Show the detailed message when the exception is printed."""
        return self.error_message

    def __repr__(self):
        # fix: the original returned CustomException.__name__.str(), which raises
        # AttributeError because str objects have no .str() method.
        return CustomException.__name__
The provided code snippet includes necessary dependencies for implementing the `write_srt` function. Write a Python function `def write_srt(transcript: Iterator[dict], file: TextIO)` to solve the following problem:
It takes a transcript and a file object, and writes the transcript to the file in SRT (SubRip Subtitle) format. Args: transcript (Iterator[dict]): the transcript segments to write; file (TextIO): the file to write the transcript to.
Here is the function:
def write_srt(transcript: Iterator[dict], file: TextIO):
    """
    It takes a transcript and a file object, and writes the transcript to the file in SRT
    (SubRip Subtitle) format.

    Args:
        transcript (Iterator[dict]): Segments with 'start', 'end' and 'text' keys.
        file (TextIO): The file to write the transcript to.
    """
    try:
        for entry_number, segment in enumerate(transcript, start=1):
            start_stamp = format_timestamp(segment['start'], always_include_hours=True)
            end_stamp = format_timestamp(segment['end'], always_include_hours=True)
            # '-->' inside subtitle text would corrupt the SRT timing line marker.
            caption = segment['text'].strip().replace('-->', '->')
            print(
                f"{entry_number}\n{start_stamp} --> {end_stamp}\n{caption}\n",
                file=file,
                flush=True,
            )
    except Exception as e:
        raise CustomException(e, sys)
168,305 | from flask import Flask
from video_summarizer.logger import logging
import logging
logging.basicConfig(filename=log_file_path,
filemode='w',
format='[%(asctime)s] %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def index():
    """Emit a smoke-test log record and return a greeting string."""
    logging.info("We are testing our logging file")
    greeting = "Hello worlds"
    return greeting
168,306 | from fastapi import FastAPI, APIRouter
async def root():
    """Return a greeting confirming the service is alive."""
    greeting = {"message": "Hello World"}
    return greeting
168,307 | import secrets
import pandas as pd
from typing import Callable, Annotated, Union
from fastapi import FastAPI, APIRouter, Depends, HTTPException, status, Request, Body, Response
from fastapi.responses import JSONResponse
from fastapi.security import HTTPBasic, HTTPBasicCredentials, OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.routing import APIRoute
from fastapi.exceptions import RequestValidationError
from fastapi.encoders import jsonable_encoder
from datetime import datetime, timedelta
from app.schemas.token_schema import AccessTokenSchema, TokenRequestBodyPayload, Token, User
from app.api.auth import auth_jwt
from app.api.auth.auth_bearer import JWTBearer
from app.core.config import settings
from app.dbcontext.db_token import token_dbcontext
from app.handlers import exception_handler, log_database_handler
from app.models.constants import constants
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logdb)
# NOTE(review): This is a typeshed-style stub of datetime.timedelta (all bodies
# are `...`). It SHADOWS the `timedelta` imported from `datetime` above, so a
# later call like `timedelta(minutes=...)` resolves to this stub — presumably
# an accidental paste; verify intent. `SupportsAbs`, `ClassVar`, `Tuple`, and
# `sys` are not imported in this file.
class timedelta(SupportsAbs[timedelta]):
    # Class-level bound/precision constants, as on the real datetime.timedelta.
    min: ClassVar[timedelta]
    max: ClassVar[timedelta]
    resolution: ClassVar[timedelta]
    if sys.version_info >= (3, 6):
        # 3.6+ signature adds the keyword-only `fold` parameter.
        def __init__(
            self,
            days: float = ...,
            seconds: float = ...,
            microseconds: float = ...,
            milliseconds: float = ...,
            minutes: float = ...,
            hours: float = ...,
            weeks: float = ...,
            *,
            fold: int = ...,
        ) -> None: ...
    else:
        def __init__(
            self,
            days: float = ...,
            seconds: float = ...,
            microseconds: float = ...,
            milliseconds: float = ...,
            minutes: float = ...,
            hours: float = ...,
            weeks: float = ...,
        ) -> None: ...
    # Component accessors (properties on the real class).
    def days(self) -> int: ...
    def seconds(self) -> int: ...
    def microseconds(self) -> int: ...
    def total_seconds(self) -> float: ...
    # Arithmetic protocol.
    def __add__(self, other: timedelta) -> timedelta: ...
    def __radd__(self, other: timedelta) -> timedelta: ...
    def __sub__(self, other: timedelta) -> timedelta: ...
    def __rsub__(self, other: timedelta) -> timedelta: ...
    def __neg__(self) -> timedelta: ...
    def __pos__(self) -> timedelta: ...
    def __abs__(self) -> timedelta: ...
    def __mul__(self, other: float) -> timedelta: ...
    def __rmul__(self, other: float) -> timedelta: ...
    # NOTE(review): the duplicate defs below lack @overload decorators, so at
    # runtime each later definition simply replaces the earlier one.
    def __floordiv__(self, other: timedelta) -> int: ...
    def __floordiv__(self, other: int) -> timedelta: ...
    if sys.version_info >= (3,):
        def __truediv__(self, other: timedelta) -> float: ...
        def __truediv__(self, other: float) -> timedelta: ...
        def __mod__(self, other: timedelta) -> timedelta: ...
        def __divmod__(self, other: timedelta) -> Tuple[int, timedelta]: ...
    else:
        def __div__(self, other: timedelta) -> float: ...
        def __div__(self, other: float) -> timedelta: ...
    # Total ordering against other timedeltas.
    def __le__(self, other: timedelta) -> bool: ...
    def __lt__(self, other: timedelta) -> bool: ...
    def __ge__(self, other: timedelta) -> bool: ...
    def __gt__(self, other: timedelta) -> bool: ...
    def __hash__(self) -> int: ...
settings = Settings()
class token_dbcontext:
    """Data-access helper for token/authentication queries."""

    def get_api_consumer_details(self, user_name):
        """Fetch API-consumer records for *user_name* via the
        `getAPIConsumerDetails` stored procedure.

        Returns the list-of-dict result from `convert_result2dict`,
        or None when no rows match.

        Fix: the original leaked the cursor and connection if execute()
        raised; both are now released in finally blocks.
        """
        cnxn = pyodbc.connect(settings.CONNECTION_STRINGS)
        try:
            cursor = cnxn.cursor()
            try:
                sp_name = "{CALL getAPIConsumerDetails (?)}"
                params = (user_name,)
                # Execute Stored Procedure With Parameters (parameterized
                # call — no string interpolation of user input).
                cursor.execute(sp_name, params)
                result = convert_result2dict(cursor)
            finally:
                cursor.close()
        finally:
            # Release the connection even if the query raises.
            cnxn.close()
        return result
async def login_for_access_token(
    form_data: Annotated[OAuth2PasswordRequestForm, Depends()]
):
    """Issue a JWT bearer token for valid username/password credentials.

    Raises HTTPException(401) when the user is unknown or the password
    does not verify; otherwise returns {"access_token", "token_type"}.

    Fix: removed the no-op ``except Exception as ex: raise ex`` wrapper
    (it added nothing and re-raising the bound name clutters tracebacks)
    and replaced the ``== False`` comparison with ``not``.
    """
    logger.info("token API call start...")
    _obj_token_dbcontext = token_dbcontext()
    ds = _obj_token_dbcontext.get_api_consumer_details(form_data.username)
    if ds is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    # Verify the supplied password against the stored password hash.
    if not auth_jwt.verify_password(form_data.password, ds[0]['hashed_password']):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )
    access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
    access_token = auth_jwt.create_access_token(
        data={"sub": ds[0]['user_name']}, expires_delta=access_token_expires
    )
    logger.info("token API call end...")
    return {"access_token": access_token, "token_type": "bearer"}
168,308 | import secrets
import pandas as pd
from typing import Callable, Annotated, Union
from fastapi import FastAPI, APIRouter, Depends, HTTPException, status, Request, Body, Response
from fastapi.responses import JSONResponse
from fastapi.security import HTTPBasic, HTTPBasicCredentials, OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.routing import APIRoute
from fastapi.exceptions import RequestValidationError
from fastapi.encoders import jsonable_encoder
from datetime import datetime, timedelta
from app.schemas.token_schema import AccessTokenSchema, TokenRequestBodyPayload, Token, User
from app.api.auth import auth_jwt
from app.api.auth.auth_bearer import JWTBearer
from app.core.config import settings
from app.dbcontext.db_token import token_dbcontext
from app.handlers import exception_handler, log_database_handler
from app.models.constants import constants
import logging
class User(BaseModel):
    """Schema for an authenticated API user.

    NOTE(review): ``BaseModel`` is not imported in this snippet —
    presumably ``from pydantic import BaseModel``; verify against the
    real module.
    """
    # Login name; the only required field.
    username: str
    # Optional profile fields; default to None when absent.
    email: Union[str, None] = None
    full_name: Union[str, None] = None
    # Presumably True when the account is deactivated — confirm with callers.
    disabled: Union[bool, None] = None
async def read_users_me(
    current_user: Annotated[User, Depends(auth_jwt.get_current_active_user)]
):
    """Echo back the user resolved from the request's bearer token."""
    resolved_user = current_user
    return resolved_user
168,309 | import secrets
import pandas as pd
from typing import Callable, Annotated, Union
from fastapi import FastAPI, APIRouter, Depends, HTTPException, status, Request, Body, Response
from fastapi.responses import JSONResponse
from fastapi.security import HTTPBasic, HTTPBasicCredentials, OAuth2PasswordBearer, OAuth2PasswordRequestForm
from fastapi.routing import APIRoute
from fastapi.exceptions import RequestValidationError
from fastapi.encoders import jsonable_encoder
from datetime import datetime, timedelta
from app.schemas.token_schema import AccessTokenSchema, TokenRequestBodyPayload, Token, User
from app.api.auth import auth_jwt
from app.api.auth.auth_bearer import JWTBearer
from app.core.config import settings
from app.dbcontext.db_token import token_dbcontext
from app.handlers import exception_handler, log_database_handler
from app.models.constants import constants
import logging
async def read_own_items():
    """Return a static, single-item list of the current user's items."""
    owned = [{"item_id": "Foo", "owner": "current_user"}]
    return owned
168,310 | from datetime import datetime, timedelta
from typing import Any, Union, Annotated
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import jwt, ExpiredSignatureError, JWTError
from passlib.context import CryptContext
from app.schemas.token_schema import User, TokenData
from app.dbcontext.db_token import token_dbcontext
from app.core.config import settings
from app.models.constants import constants
import time
settings = Settings()
constants = AppConstants()
def decode_access_token(token: str) -> dict:
    """Decode and validate a JWT, returning its claims dict.

    Maps jose failures onto HTTP 401 responses: expired signature,
    otherwise-invalid token, or a token whose "sub" claim is missing.
    """
    try:
        claims = jwt.decode(token, settings.SECRET_KEY, algorithms=settings.ALGORITHM)
    except ExpiredSignatureError:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=constants.TOKEN_EXPIRED)
    except JWTError:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=constants.TOKEN_INVALID)
    # A structurally valid token must still carry a subject claim.
    if claims.get("sub") is None:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=constants.TOKEN_INVALID_CREDENTIALS)
    return claims
168,311 | from datetime import datetime, timedelta
from typing import Any, Union, Annotated
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import jwt, ExpiredSignatureError, JWTError
from passlib.context import CryptContext
from app.schemas.token_schema import User, TokenData
from app.dbcontext.db_token import token_dbcontext
from app.core.config import settings
from app.models.constants import constants
import time
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
def get_password_hash(password: str) -> str:
    """Hash *password* with the module-level CryptContext (bcrypt)."""
    hashed = pwd_context.hash(password)
    return hashed
168,312 | from typing import Callable, Any, List, Optional
import json
from fastapi import FastAPI, APIRouter, Depends, HTTPException, status, Request, Body, Response
from fastapi.routing import APIRoute
from fastapi.exceptions import RequestValidationError
from app.handlers import log_database_handler
import logging
def parse_error(
    err: Any, field_names: List, raw: bool = True
) -> Optional[dict]:
    """Convert one pydantic/fastapi error object into a field-error dict.

    :param err: Error object exposing ``.exc`` and ``.loc_tuple()``
    :param field_names: Field names already processed; duplicates are dropped
    :param raw: Whether this is a raw error or a wrapped pydantic error
    :return: ``{"name", "message", "error_code"}`` dict, or None for a
        field that was already reported
    """
    message = str(err.exc) or ""
    # Custom errors encode their HTTP code as "error_code.<n>"; everything
    # else falls back to 400.
    exc = err.exc
    if hasattr(exc, "code") and exc.code.startswith("error_code"):
        error_code = int(exc.code.split(".")[-1])
    else:
        error_code = 400
    loc = err.loc_tuple()
    if not raw:
        if len(loc) == 2:
            name = loc[1] if str(loc[0]) in ["body", "query"] else loc[0]
        elif len(loc) == 1:
            name = "__all__" if str(loc[0]) == "body" else str(loc[0])
        else:
            name = "__all__"
    else:
        name = str(loc[0]) if len(loc) in (1, 2) else "__all__"
    if name in field_names:
        # Already emitted an error for this field — skip.
        return None
    # Normalize the message: terminal punctuation, leading capital.
    if message and not any(
        [message.endswith("."), message.endswith("?"), message.endswith("!")]
    ):
        message = message + "."
    message = message.capitalize()
    return {"name": name, "message": message, "error_code": error_code}
List = _Alias()
The provided code snippet includes necessary dependencies for implementing the `raw_errors_to_fields` function. Write a Python function `def raw_errors_to_fields(raw_errors: List) -> List[dict]` to solve the following problem:
Translates list of raw errors (instances) into list of dicts with name/msg :param raw_errors: List with instances of raw error :return: List of dicts (1 dict for every raw error)
Here is the function:
def raw_errors_to_fields(raw_errors: List) -> List[dict]:
    """
    Translate raw error instances into a list of name/message dicts.

    :param raw_errors: List with instances of raw error
    :return: One dict per distinct field error
    """
    fields = []
    for top_err in raw_errors:
        if hasattr(top_err.exc, "raw_errors"):
            for err in top_err.exc.raw_errors:
                # Errors raised during both request handling and internal
                # validation arrive wrapped in a one-element list.
                if isinstance(err, list):
                    err = err[0]
                parsed = parse_error(
                    err,
                    field_names=[f["name"] for f in fields],
                    raw=True,
                )
                if parsed is not None:
                    fields.append(parsed)
        else:
            parsed = parse_error(
                top_err,
                field_names=[f["name"] for f in fields],
                raw=False,
            )
            if parsed is not None:
                fields.append(parsed)
    return fields
return fields | Translates list of raw errors (instances) into list of dicts with name/msg :param raw_errors: List with instances of raw error :return: List of dicts (1 dict for every raw error) |
168,313 | import pyodbc
import pandas as pd
from app.core.config import settings
def convert_result2dict(cursor):
    """Materialize a database cursor's rows as a list of column->value dicts.

    Returns None when the result set is empty or a pyodbc.Error occurs
    (the error is printed, not propagated).
    """
    try:
        column_names = [col[0] for col in cursor.description]
        rows = [dict(zip(column_names, row)) for row in cursor.fetchall()]
        # Empty result sets are reported as None, not [].
        ret_result = rows if rows else None
    except pyodbc.Error as e:
        print(e)
        ret_result = None
    return ret_result
168,314 | import streamlit as st
from stmol import showmol
import py3Dmol
import requests
import biotite.structure.io as bsio
st.sidebar.title('🎈 ESMFold')
st.sidebar.write('[*ESMFold*](https://esmatlas.com/about) is an end-to-end single sequence protein structure predictor based on the ESM-2 language model. For more information, read the [research article](https://www.biorxiv.org/content/10.1101/2022.07.20.500902v2) and the [news article](https://www.nature.com/articles/d41586-022-03539-1) published in *Nature*.')
def render_mol(pdb):
    """Render a PDB string as an interactive, spinning 3-D cartoon view.

    :param pdb: Protein structure in PDB text format.
    """
    pdbview = py3Dmol.view()
    # Load the structure and colour the cartoon by residue spectrum.
    pdbview.addModel(pdb,'pdb')
    pdbview.setStyle({'cartoon':{'color':'spectrum'}})
    pdbview.setBackgroundColor('white')#('0xeeeeee')
    # Frame the molecule, zoom in, and start a continuous spin.
    pdbview.zoomTo()
    pdbview.zoom(2, 800)
    pdbview.spin(True)
    # Embed the viewer in the Streamlit page via stmol.
    showmol(pdbview, height = 500,width=800)
txt = st.sidebar.text_area('Input sequence', DEFAULT_SEQ, height=275)
def update(sequence=txt):
    """Fold *sequence* with the ESMFold API and render the result.

    Posts the raw sequence to the ESM Atlas folding endpoint, saves the
    returned PDB to ``predicted.pdb``, renders it, and reports the mean
    plDDT (read back from the B-factor column).

    Note: ``sequence`` defaults to the sidebar text captured at import
    time (deliberate Streamlit pattern).

    Fix: removed the unused local ``name`` computed from the sequence.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    # SECURITY: verify=False disables TLS certificate validation, which
    # permits man-in-the-middle interception — review before shipping.
    response = requests.post('https://api.esmatlas.com/foldSequence/v1/pdb/', headers=headers, data=sequence, verify=False)
    pdb_string = response.content.decode('utf-8')
    with open('predicted.pdb', 'w') as f:
        f.write(pdb_string)
    struct = bsio.load_structure('predicted.pdb', extra_fields=["b_factor"])
    b_value = round(struct.b_factor.mean(), 4)
    # Display protein structure
    st.subheader('Visualization of predicted protein structure')
    render_mol(pdb_string)
    # plDDT value is stored in the B-factor field
    st.subheader('plDDT')
    st.write('plDDT is a per-residue estimate of the confidence in prediction on a scale from 0-100.')
    st.info(f'plDDT: {b_value}')
    st.download_button(
        label="Download PDB",
        data=pdb_string,
        file_name='predicted.pdb',
        mime='text/plain',
    )
168,315 | import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio
import openai
from openai import OpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage
def record_audio(filename, duration, fs):
    """Record stereo audio from the default input device and save it as WAV.

    :param filename: Output .wav path.
    :param duration: Recording length in seconds.
    :param fs: Sample rate in Hz.
    """
    print("Recording audio...")
    # Blocking capture: rec() starts recording, wait() blocks until done.
    recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
    sd.wait()
    # sampwidth=2 -> 16-bit PCM samples.
    wavio.write(filename, recording, fs, sampwidth=2)
    print("Audio recorded and saved as", filename)
168,316 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
model = VGG16()
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
print(model.summary())
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
filename = "/content/drive/MyDrive/Image_Captioning_Project/Flickr8k.token.txt"
print("Loaded: %d" %len(descriptions))
print("Vocab size: %d" %len(vocab))
from pickle import dump
from pickle import load
filename = '/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.trainImages.txt'
print('Dataset: %d' % len(train))
print('Descriptions: train=%d' % len(train_descriptions))
print('Photos: train=%d' % len(train_features))
from tensorflow.keras.layers import add
import tensorflow as tf
filename = "/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.trainImages.txt"
print("Dataset: %d" %len(train))
print("train_descriptions= %d" %len(train_descriptions))
print("photos: train= %d" %len(train_feature))
print("Vocab size: %d" %vocab_size)
print('Description Length: %d' % max_length)
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
filename = "/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.trainImages.txt"
print("Dataset: %d" %len(train))
print("train_descriptions= %d" %len(train_descriptions))
print("photos: train= %d" %len(train_feature))
print("Vocab size: %d" %vocab_size)
print('Description Length: %d' % max_length)
filename = "/content/drive/MyDrive/Image_Captioning_Project/Flickr_8k.testImages.txt"
print("Dataset: %d" %len(test))
print("Description= %d" %len(test_description))
print("photos: test=%d" % len(test_features))
from keras.models import load_model
filename = "/content/drive/MyDrive/Image_Captioning_Project/model_18.h5"
model = load_model(filename)
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
model = load_model('/content/drive/MyDrive/Image_Captioning_Project/model_18.h5')
print(description)
print(result)
def extract_features(directory):
    """Extract a VGG16 feature vector for every image in *directory*.

    Returns a dict mapping image id (filename stem) to the feature array
    produced by the module-level truncated VGG16 ``model``.
    """
    features = dict()
    for name in listdir(directory):
        # Load and resize to the VGG16 input resolution.
        filename = directory + '/' + name
        image = load_img(filename, target_size=(224, 224))
        pixels = img_to_array(image)
        # Add the leading batch dimension expected by the network.
        pixels = pixels.reshape((1, pixels.shape[0], pixels.shape[1], pixels.shape[2]))
        pixels = preprocess_input(pixels)
        feature = model.predict(pixels, verbose=0)
        # Key on the filename without its extension.
        image_id = name.split('.')[0]
        features[image_id] = feature
        print('>%s' % name)
    return features
168,317 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def load_descriptions(doc):
    """Parse Flickr8k token text into ``{image_id: [description, ...]}``.

    Each input line is "<image_file> <description words...>"; the image
    filename extension is stripped from the id.

    Fix: the original tested ``len(line) < 2`` (raw character count),
    which let id-only lines through with an empty caption; the guard now
    checks the token count.
    """
    mapping = dict()
    for line in doc.split('\n'):
        # split line by white space
        tokens = line.split()
        # Skip blank lines and lines that carry an id but no description.
        if len(tokens) < 2:
            continue
        # take the first token as the image id, the rest as the description
        image_id, image_desc = tokens[0], tokens[1:]
        # remove filename extension from image id
        image_id = image_id.split('.')[0]
        # convert description tokens back to a string
        image_desc = ' '.join(image_desc)
        if image_id not in mapping:
            mapping[image_id] = list()
        mapping[image_id].append(image_desc)
    return mapping
168,318 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def clean_descriptions(descriptions):
    """Normalize every caption in place: lowercase, strip punctuation,
    then drop one-character and non-alphabetic tokens.

    Mutates *descriptions* ({key: [caption, ...]}); returns None.
    """
    # Translation table that deletes all ASCII punctuation in one pass.
    punct_table = str.maketrans('', '', string.punctuation)
    for desc_list in descriptions.values():
        for i, caption in enumerate(desc_list):
            words = caption.split()
            words = [w.lower().translate(punct_table) for w in words]
            # Remove hanging single letters ('a', 's') and numeric tokens.
            words = [w for w in words if len(w) > 1 and w.isalpha()]
            desc_list[i] = ' '.join(words)
168,319 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def to_vocabulary(descriptions):
    """Return the set of all distinct words across every description."""
    vocab = set()
    for desc_list in descriptions.values():
        for desc in desc_list:
            vocab.update(desc.split())
    return vocab
168,320 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
with open('tokenizer1.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def save_descriptions(descriptions, filename):
    """Write descriptions to *filename*, one "<image_id> <caption>" line each.

    :param descriptions: Mapping of image id to a list of captions.
    :param filename: Destination text-file path.

    Fix: the file is now opened via a context manager so the handle is
    closed (and buffers flushed) even if the write raises.
    """
    lines = list()
    for key, desc_list in descriptions.items():
        for desc in desc_list:
            lines.append(key + " " + desc)
    data = '\n'.join(lines)
    with open(filename, 'w') as file:
        file.write(data)
168,321 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
with open('tokenizer1.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def load_photo_features(features, dataset):
    """Load pickled photo features, keeping only ids present in *dataset*.

    :param features: Path to a pickle of {image_id: feature}.
    :param dataset: Iterable of image ids to retain.
    :return: Dict restricted to the requested ids.

    Fix: the pickle file handle is now closed via a context manager (the
    original passed an anonymous ``open()`` to ``load`` and leaked it).
    """
    with open(features, 'rb') as file:
        all_features = load(file)
    return {k: all_features[k] for k in dataset}
168,322 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
def to_lines(descriptions):
    """Flatten a {key: [desc, ...]} mapping into a single list of descriptions."""
    all_desc = list()
    for desc_list in descriptions.values():
        all_desc.extend(desc_list)
    return all_desc
from tensorflow.keras.layers import add
import tensorflow as tf
tokenizer = create_tokenizer(train_descriptions)
import pickle
def to_lines(descriptions):
    """Collect every description from the mapping into one flat list."""
    flattened = []
    for key in descriptions:
        flattened += descriptions[key]
    return flattened
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
tokenizer = create_tokenizer(train_descriptions)
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = load(open('/content/tokenizer1.pkl', 'rb'))
tokenizer.analyzer = None
def create_tokenizer(descriptions):
    """Fit a Keras Tokenizer on every description line and return it."""
    corpus = to_lines(descriptions)
    fitted = Tokenizer()
    fitted.fit_on_texts(corpus)
    return fitted
168,323 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
def to_lines(descriptions):
    """Flatten a {key: [desc, ...]} mapping into a single list of descriptions."""
    return [d for key in descriptions.keys() for d in descriptions[key]]
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
def to_lines(descriptions):
    """Collect every description from the mapping into one flat list."""
    collected = []
    for desc_list in descriptions.values():
        for desc in desc_list:
            collected.append(desc)
    return collected
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def max_length(description):
    """Return the word count of the longest caption in the description mapping."""
    return max(len(caption.split()) for caption in to_lines(description))
168,324 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
# Build VGG16 and strip the classifier head: re-wire the model so its output
# is the penultimate layer, turning it into an image feature extractor.
model = VGG16()
model.layers.pop()
# NOTE(review): in modern tf.keras, popping from model.layers may not alter the
# graph — the Model() re-wrap below is what actually truncates it; confirm the
# intended layer is selected by [-2] after the pop.
model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
print(model.summary())
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
print("Loaded: %d" %len(descriptions))
print("Vocab size: %d" %len(vocab))
from pickle import dump
from pickle import load
print('Dataset: %d' % len(train))
print('Descriptions: train=%d' % len(train_descriptions))
print('Photos: train=%d' % len(train_features))
from tensorflow.keras.layers import add
import tensorflow as tf
print("Dataset: %d" %len(train))
print("train_descriptions= %d" %len(train_descriptions))
print("photos: train= %d" %len(train_feature))
print("Vocab size: %d" %vocab_size)
print('Description Length: %d' % max_length)
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
print("Dataset: %d" %len(train))
print("train_descriptions= %d" %len(train_descriptions))
print("photos: train= %d" %len(train_feature))
print("Vocab size: %d" %vocab_size)
print('Description Length: %d' % max_length)
print("Dataset: %d" %len(test))
print("Description= %d" %len(test_description))
print("photos: test=%d" % len(test_features))
from keras.models import load_model
model = load_model(filename)
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
model = load_model('/content/drive/MyDrive/Image_Captioning_Project/model_18.h5')
print(description)
print(result)
def define_model(vocab_size, max_length):
    """Assemble the merge-architecture image-captioning model.

    A dense projection of the 1000-d image features is merged with an LSTM
    encoding of the partial caption; the result predicts the next word as a
    softmax over the vocabulary.
    """
    # Image-feature branch: dropout for regularisation, then a 256-d dense layer.
    image_input = Input(shape=(1000,))
    image_branch = Dense(256, activation='relu')(Dropout(0.5)(image_input))
    # Caption branch: embed the word indices (masking padding), then encode with an LSTM.
    seq_input = Input(shape=(max_length,))
    embedded = Embedding(vocab_size, output_dim=256, mask_zero=True)(seq_input)
    seq_branch = LSTM(256)(Dropout(0.5)(embedded))
    # Decoder: merge both branches and map to a distribution over the vocabulary.
    merged = add([image_branch, seq_branch])
    hidden = Dense(256, activation='relu')(merged)
    outputs = Dense(vocab_size, activation='softmax')(hidden)
    model = Model(inputs=[image_input, seq_input], outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    print(model.summary())
    return model
168,325 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
def create_sequences(tokenizer, max_length, desc_list, photo):
    """Build (image, input-sequence, next-word) training samples for one photo.

    The function body was lost; this reconstructs the standard implementation
    matching this signature and the caller in data_generator(): each caption is
    expanded into (prefix -> next word) pairs, prefixes padded to max_length
    and targets one-hot encoded over the vocabulary.

    Returns:
        (X1, X2, y): arrays of image features, padded input sequences, and
        one-hot next-word targets.
    """
    # Vocabulary size derived from the fitted tokenizer (+1 for padding index 0).
    vocab_size = len(tokenizer.word_index) + 1
    X1, X2, y = [], [], []
    for desc in desc_list:
        # Integer-encode the caption.
        seq = tokenizer.texts_to_sequences([desc])[0]
        # One training pair per word position: prefix predicts the next word.
        for i in range(1, len(seq)):
            in_seq, out_seq = seq[:i], seq[i]
            in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
            out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
            X1.append(photo)
            X2.append(in_seq)
            y.append(out_seq)
    return array(X1), array(X2), array(y)
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
photo = extract_features("/content/drive/MyDrive/Image_Captioning_Project/Images/101654506_8eb26cfb60.jpg")
def data_generator(descriptions, photos, tokenizer, max_length):
    """Infinite generator of ([image_features, input_seqs], next_words) batches.

    Loops over the dataset forever (as Keras fit_generator expects), yielding
    the training samples for one photo's captions per step.
    """
    while True:
        for image_id, captions in descriptions.items():
            # Pre-computed feature vector for this image.
            feature = photos[image_id][0]
            in_img, in_seq, out_word = create_sequences(tokenizer, max_length, captions, feature)
            yield [[in_img, in_seq], out_word]
168,326 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
def load_doc(filename):
    """Read an entire text file into memory and return its contents as a string."""
    doc_file = open(filename, 'r')
    contents = doc_file.read()
    doc_file.close()
    return contents
import nltk
doc = load_doc(filename)
from pickle import dump
def load_doc(filename):
    """Return the full text of the file at *filename*."""
    handle = open(filename, 'r')
    text = handle.read()
    handle.close()
    return text
from pickle import load
def load_doc(filename):
    """Read a whole text file and return its contents as a single string.

    Bug fixed: the original wrote `file.close` without parentheses, so the
    method was never called and the file handle leaked. A `with` block now
    guarantees the file is closed even if read() raises.
    """
    with open(filename, 'r') as doc_file:
        return doc_file.read()
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
def load_doc(filename):
    """Load a document from disk as one string."""
    stream = open(filename, 'r')
    try:
        return stream.read()
    finally:
        stream.close()
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def load_set(filename):
    """Load the image identifiers listed in a split file into a set.

    Each non-empty line holds a photo filename; the identifier is the part
    before the first '.' (the filename stem).
    """
    doc = load_doc(filename)
    identifiers = set()
    for line in doc.split('\n'):
        if not line:
            # Skip blank lines (e.g. a trailing newline at end of file).
            continue
        identifiers.add(line.split('.')[0])
    return identifiers
168,327 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
def to_lines(descriptions):
    """Flatten a {image_id: [caption, ...]} mapping into a flat list of captions.

    The original body was lost in concatenation; reconstructed to match the
    sibling to_lines() definitions used by create_tokenizer()/max_length().
    """
    all_captions = []
    for captions in descriptions.values():
        all_captions.extend(captions)
    return all_captions
from tensorflow.keras.layers import add
import tensorflow as tf
tokenizer = create_tokenizer(train_descriptions)
import pickle
def to_lines(descriptions):
    """Return every caption from a {image_id: [caption, ...]} dict as one list.

    Body reconstructed (lost in concatenation) to match the sibling
    to_lines() implementations in this file.
    """
    captions_flat = []
    for captions in descriptions.values():
        captions_flat.extend(captions)
    return captions_flat
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
tokenizer = create_tokenizer(train_descriptions)
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
tokenizer = load(open('/content/tokenizer1.pkl', 'rb'))
tokenizer.analyzer = None
def create_tokenizer(descriptions):
    """Fit a Keras Tokenizer on every caption in the description mapping."""
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(to_lines(descriptions))
    return tokenizer
168,328 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
def to_lines(descriptions):
    """Flatten the {image_id: [caption, ...]} mapping into a list of captions.

    Body reconstructed — the original was truncated to a bare def header.
    """
    result = []
    for captions in descriptions.values():
        result.extend(captions)
    return result
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
def to_lines(descriptions):
    """Collect all caption strings from a {image_id: [caption, ...]} dict.

    Body reconstructed — the original was truncated to a bare def header.
    """
    collected = []
    for captions in descriptions.values():
        collected.extend(captions)
    return collected
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
def max_length(descriptions):
    """Length in words of the longest caption across the whole dataset."""
    lengths = (len(caption.split()) for caption in to_lines(descriptions))
    return max(lengths)
168,329 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
print(model.summary())
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
print("Loaded: %d" %len(descriptions))
print("Vocab size: %d" %len(vocab))
from pickle import dump
from pickle import load
print('Dataset: %d' % len(train))
print('Descriptions: train=%d' % len(train_descriptions))
print('Photos: train=%d' % len(train_features))
from tensorflow.keras.layers import add
import tensorflow as tf
print("Dataset: %d" %len(train))
print("train_descriptions= %d" %len(train_descriptions))
print("photos: train= %d" %len(train_feature))
print("Vocab size: %d" %vocab_size)
print('Description Length: %d' % max_length)
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
def generate_desc(model, tokenizer, photo, max_length):
    """Generate a caption for a photo feature vector by greedy decoding.

    Starts from the 'startseq' token and repeatedly appends the single most
    probable next word, stopping when 'endseq' is produced, a predicted index
    has no word mapping, or max_length steps have elapsed.
    """
    caption = 'startseq'
    for _ in range(max_length):
        # Encode the caption so far and pad it to the model's fixed length.
        encoded = tokenizer.texts_to_sequences([caption])[0]
        encoded = pad_sequences([encoded], maxlen=max_length)
        # Greedy choice: index of the highest-probability next word.
        probs = model.predict([photo, encoded], verbose=0)
        next_word = word_for_id(np.argmax(probs), tokenizer)
        if next_word is None:
            # Predicted index is not in the tokenizer's vocabulary; stop.
            break
        caption += ' ' + next_word
        if next_word == 'endseq':
            break
    return caption
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
print("Dataset: %d" %len(train))
print("train_descriptions= %d" %len(train_descriptions))
print("photos: train= %d" %len(train_feature))
print("Vocab size: %d" %vocab_size)
print('Description Length: %d' % max_length)
print("Dataset: %d" %len(test))
print("Description= %d" %len(test_description))
print("photos: test=%d" % len(test_features))
from keras.models import load_model
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
print(description)
print(result)
# Local copy of NLTK's corpus-level BLEU implementation; relies on
# modified_precision, closest_ref_length, brevity_penalty, SmoothingFunction,
# Fraction, Counter and math being in scope (imported elsewhere in the file).
def corpus_bleu(
    list_of_references,
    hypotheses,
    weights=(0.25, 0.25, 0.25, 0.25),
    smoothing_function=None,
    auto_reweigh=False,
):
    """
    Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
    the hypotheses and their respective references.
    Instead of averaging the sentence level BLEU scores (i.e. macro-average
    precision), the original BLEU metric (Papineni et al. 2002) accounts for
    the micro-average precision (i.e. summing the numerators and denominators
    for each hypothesis-reference(s) pairs before the division).
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'military', 'always',
    ...         'obeys', 'the', 'commands', 'of', 'the', 'party']
    >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'military', 'will', 'forever',
    ...          'heed', 'Party', 'commands']
    >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'military', 'forces', 'always',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'Party']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'army', 'always', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'party']
    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']
    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
    0.5920...
    The example below show that corpus_bleu() is different from averaging
    sentence_bleu() for hypotheses
    >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
    >>> score2 = sentence_bleu([ref2a], hyp2)
    >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
    0.6223...
    Custom weights may be supplied to fine-tune the BLEU score further.
    A tuple of float weights for unigrams, bigrams, trigrams and so on can be given.
    >>> weights = (0.1, 0.3, 0.5, 0.1)
    >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS
    0.5818...
    This particular weight gave extra value to trigrams.
    Furthermore, multiple weights can be given, resulting in multiple BLEU scores.
    >>> weights = [
    ...     (0.5, 0.5),
    ...     (0.333, 0.333, 0.334),
    ...     (0.25, 0.25, 0.25, 0.25),
    ...     (0.2, 0.2, 0.2, 0.2, 0.2)
    ... ]
    >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS
    [0.8242..., 0.7067..., 0.5920..., 0.4719...]
    :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
    :type list_of_references: list(list(list(str)))
    :param hypotheses: a list of hypothesis sentences
    :type hypotheses: list(list(str))
    :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights)
    :type weights: tuple(float) / list(tuple(float))
    :param smoothing_function:
    :type smoothing_function: SmoothingFunction
    :param auto_reweigh: Option to re-normalize the weights uniformly.
    :type auto_reweigh: bool
    :return: The corpus-level BLEU score.
    :rtype: float
    """
    # Before proceeding to compute BLEU, perform sanity checks.
    p_numerators = Counter()  # Key = ngram order, and value = no. of ngram matches.
    p_denominators = Counter()  # Key = ngram order, and value = no. of ngram in ref.
    hyp_lengths, ref_lengths = 0, 0
    assert len(list_of_references) == len(hypotheses), (
        "The number of hypotheses and their reference(s) should be the " "same "
    )
    # A single weights tuple is wrapped into a one-element list so both the
    # single- and multi-weights code paths can be handled uniformly below.
    try:
        weights[0][0]
    except TypeError:
        weights = [weights]
    max_weight_length = max(len(weight) for weight in weights)
    # Iterate through each hypothesis and their corresponding references.
    for references, hypothesis in zip(list_of_references, hypotheses):
        # For each order of ngram, calculate the numerator and
        # denominator for the corpus-level modified precision.
        for i in range(1, max_weight_length + 1):
            p_i = modified_precision(references, hypothesis, i)
            p_numerators[i] += p_i.numerator
            p_denominators[i] += p_i.denominator
        # Calculate the hypothesis length and the closest reference length.
        # Adds them to the corpus-level hypothesis and reference counts.
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        ref_lengths += closest_ref_length(references, hyp_len)
    # Calculate corpus-level brevity penalty.
    bp = brevity_penalty(ref_lengths, hyp_lengths)
    # Collects the various precision values for the different ngram orders.
    p_n = [
        Fraction(p_numerators[i], p_denominators[i], _normalize=False)
        for i in range(1, max_weight_length + 1)
    ]
    # Returns 0 if there's no matching n-grams
    # We only need to check for p_numerators[1] == 0, since if there's
    # no unigrams, there won't be any higher order ngrams.
    if p_numerators[1] == 0:
        return 0 if len(weights) == 1 else [0] * len(weights)
    # If there's no smoothing, set use method0 from SmoothingFunction class.
    if not smoothing_function:
        smoothing_function = SmoothingFunction().method0
    # Smoothen the modified precision.
    # Note: smoothing_function() may convert values into floats;
    # it tries to retain the Fraction object as much as the
    # smoothing method allows.
    # NOTE: `references`/`hypothesis` here are the leftovers from the final
    # loop iteration above — this mirrors NLTK's own implementation.
    p_n = smoothing_function(
        p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
    )
    bleu_scores = []
    for weight in weights:
        # Uniformly re-weighting based on maximum hypothesis lengths if largest
        # order of n-grams < 4 and weights is set at default.
        if auto_reweigh:
            if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25):
                weight = (1 / hyp_lengths,) * hyp_lengths
        # Weighted geometric mean of the (smoothed) precisions, scaled by bp.
        s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0)
        s = bp * math.exp(math.fsum(s))
        bleu_scores.append(s)
    return bleu_scores[0] if len(weights) == 1 else bleu_scores
def evaluate_model(model, descriptions, photos, tokenizer, max_length):
    """Score the captioning model on a dataset with corpus-level BLEU-1..4.

    For every photo, greedily generates a caption and compares it against all
    reference captions, then prints the four BLEU scores.
    """
    actual = []
    predicted = []
    for image_id, captions in descriptions.items():
        # Generate a hypothesis caption for this photo.
        hypothesis = generate_desc(model, tokenizer, photos[image_id], max_length)
        # References are tokenized per caption; hypothesis is tokenized too.
        actual.append([caption.split() for caption in captions])
        predicted.append(hypothesis.split())
    print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
    print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
    print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
    print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))
168,330 | from os import listdir
from numpy import array
from keras.models import Model
from pickle import dump
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from tensorflow.keras.layers import Add
from keras.callbacks import ModelCheckpoint
from keras.applications.vgg16 import VGG16, preprocess_input
# Build VGG16 and re-wire it so the penultimate layer is the output,
# producing image feature vectors instead of class probabilities.
model = VGG16()
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
from os import listdir
from pickle import dump
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from keras.models import Model
import string
from nltk.tokenize import word_tokenize
import nltk
from pickle import dump
from pickle import load
from tensorflow.keras.layers import add
import tensorflow as tf
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
from nltk.translate.bleu_score import corpus_bleu
import tensorflow as tf
from keras.models import load_model
model = load_model(filename)
from pickle import load
from numpy import argmax
from tensorflow.keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
from pickle import load
from tensorflow.keras.preprocessing.text import Tokenizer
model = load_model('/content/drive/MyDrive/Image_Captioning_Project/model_18.h5')
def extract_features(filename):
    """Extract a VGG16 penultimate-layer feature vector for one image file.

    Args:
        filename: path to an image readable by keras load_img.

    Returns:
        The feature array produced by the truncated VGG16 model.
    """
    # Building VGG16 loads a large pretrained weight file; the original
    # rebuilt it on every call. Cache the truncated model on the function
    # object so repeated calls reuse one instance.
    if not hasattr(extract_features, '_model'):
        base = VGG16()
        base.layers.pop()
        # NOTE(review): layers.pop() may be a no-op in modern tf.keras; the
        # Model() re-wrap with layers[-2] is what selects the feature layer.
        extract_features._model = Model(inputs=base.inputs, outputs=base.layers[-2].output)
    model = extract_features._model
    # Load and preprocess the photo into the (1, 224, 224, 3) tensor VGG expects.
    image = load_img(filename, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    # Forward pass: the model output is the feature vector.
    feature = model.predict(image, verbose=0)
    return feature
168,331 | import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
from PIL import Image
from pickle import load
def word_for_id(integer, tokenizer):
    """Reverse-lookup: map a tokenizer word index back to its word, or None."""
    return next(
        (word for word, index in tokenizer.word_index.items() if index == integer),
        None,
    )
def generate_caption(model, tokenizer, photo, max_length):
    """Produce a caption for a photo feature vector via greedy decoding.

    Decoding begins at 'startseq' and appends the most likely word on each
    step, stopping at 'endseq', an unmappable index, or after max_length steps.
    """
    tokens = ['startseq']
    for _ in range(max_length):
        # Integer-encode the caption built so far, padded to fixed length.
        seq = tokenizer.texts_to_sequences([' '.join(tokens)])[0]
        seq = pad_sequences([seq], maxlen=max_length)
        # Pick the highest-probability next word (greedy).
        distribution = model.predict([photo, seq], verbose=0)
        predicted = word_for_id(np.argmax(distribution), tokenizer)
        if predicted is None:
            # The predicted index has no word mapping; stop decoding.
            break
        tokens.append(predicted)
        if predicted == 'endseq':
            break
    return ' '.join(tokens)
168,332 | import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
from PIL import Image
from pickle import load
# Module-level feature extractor: VGG16 truncated at its penultimate layer,
# built once at import time and reused by extract_features() below.
vgg_model = VGG16()
vgg_model.layers.pop()
vgg_model = Model(inputs=vgg_model.inputs, outputs=vgg_model.layers[-2].output)
def extract_features(filename):
    """Compute VGG16 features for one image using the module-level vgg_model."""
    # Load the photo at VGG16's expected resolution and convert to an array.
    pixels = img_to_array(load_img(filename, target_size=(224, 224)))
    # Add the leading batch dimension: (1, height, width, channels).
    pixels = pixels.reshape((1,) + pixels.shape)
    # Apply VGG16's channel preprocessing.
    pixels = preprocess_input(pixels)
    return vgg_model.predict(pixels, verbose=0)
168,333 | import streamlit as st
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
from PIL import Image
from pickle import load
def remove_start_end_tokens(caption):
    """Strip the 'startseq'/'endseq' markers (case-insensitive) from a caption."""
    markers = {'startseq', 'endseq'}
    kept = [token for token in caption.split() if token.lower() not in markers]
    return ' '.join(kept)
168,334 | from flask import Flask, render_template, request
import pandas as pd
import numpy as np
import sklearn
import os
import pickle
import warnings
def home():
    """Render the landing page (home.html) of the crop-recommendation app."""
    return render_template('home.html')
168,335 | from flask import Flask, render_template, request
import pandas as pd
import numpy as np
import sklearn
import os
import pickle
import warnings
loaded_model = pickle.load(open("model.pkl", 'rb'))
def predict():
    """Handle the crop-recommendation form POST and render the result.

    Reads the seven soil/weather features from the submitted form, runs the
    pickled classifier, and maps the numeric class back to a crop name.
    """
    try:
        N = int(request.form['Nitrogen'])
        # 'Phosporus' matches the form field name in the template (do not "fix" the spelling here).
        P = int(request.form['Phosporus'])
        K = int(request.form['Potassium'])
        temp = float(request.form['Temperature'])
        humidity = float(request.form['Humidity'])
        ph = float(request.form['pH'])
        rainfall = float(request.form['Rainfall'])
    except (KeyError, ValueError):
        # Missing or non-numeric fields previously crashed with a 500 error;
        # report a friendly message instead.
        return render_template('home.html', prediction="Invalid input: please enter a numeric value for every field.")
    feature_list = [N, P, K, temp, humidity, ph, rainfall]
    single_pred = np.array(feature_list).reshape(1, -1)
    prediction = loaded_model.predict(single_pred)
    # Model classes are 1-based integers; map back to crop names.
    crop_dict = {1: "Rice", 2: "Maize", 3: "Jute", 4: "Cotton", 5: "Coconut", 6: "Papaya", 7: "Orange",
                 8: "Apple", 9: "Muskmelon", 10: "Watermelon", 11: "Grapes", 12: "Mango", 13: "Banana",
                 14: "Pomegranate", 15: "Lentil", 16: "Blackgram", 17: "Mungbean", 18: "Mothbeans",
                 19: "Pigeonpeas", 20: "Kidneybeans", 21: "Chickpea", 22: "Coffee"}
    if prediction[0] in crop_dict:
        crop = crop_dict[prediction[0]]
        result = "{} is the best crop to be cultivated right there".format(
            crop)
    else:
        result = "Sorry, we could not determine the best crop to be cultivated with the provided data."
    return render_template('home.html', prediction=result)
168,336 | from src.ingest_data import ingest_data
from src.preprocessing import data_preprocessing
from src.hyperparameters import search_hyperparameters
from catboost import CatBoostClassifier
import pickle
def ingest_data(path: str = r"E:\dl\Breast-Cancer-Survival-Prediction\data\data.csv") -> pd.DataFrame:
    """Load the breast-cancer survival dataset from a CSV file.

    Args:
        path: CSV file to read. Defaults to the original hard-coded location,
            so existing zero-argument callers keep working.

    Returns:
        The dataset as a pandas DataFrame.
    """
    # Raw string fixes the invalid escape sequences (\d, \B) in the original
    # non-raw Windows path literal.
    return pd.read_csv(path)
def data_preprocessing(df: pd.DataFrame) -> tuple:
    """Split the survival dataset and fit the preprocessing pipeline.

    Drops the date columns, builds imputation/scaling for numeric features and
    imputation/one-hot encoding for categorical ones, fits the transformer on
    the training split only, pickles it to 'model/pipe.pkl' for inference, and
    label-encodes the 'Patient_Status' target.

    Returns:
        (X_train, X_test, y_train, y_test) — transformed feature arrays and
        encoded target arrays.
    """
    # Dates are not used as model features.
    df = df.drop(['Date_of_Surgery','Date_of_Last_Visit'],axis=1)
    numerical_columns = [column for column in df.columns if df[column].dtype in ['int64','float64']]
    # The target column is excluded from the categorical feature list.
    categorical_columns = [column for column in df.columns if df[column].dtype == 'object' and column != 'Patient_Status']
    numerical_preprocessor = Pipeline(steps=[
        ("imputer",SimpleImputer(strategy='mean')),
        ("scaler",StandardScaler())
    ])
    categorical_preprocessor = Pipeline(steps=[
        ("imputer",SimpleImputer(strategy='most_frequent')),
        ("onehot",OneHotEncoder(handle_unknown='ignore'))
    ])
    preprocessor = ColumnTransformer(
        transformers=[
            ("numerical",numerical_preprocessor,numerical_columns),
            ("categorical",categorical_preprocessor,categorical_columns)
        ])
    X = df.drop("Patient_Status",axis=1)
    y = df.Patient_Status
    X_train,X_test,y_train,y_test = train_test_split(X,
                                                     y,
                                                     test_size=0.2,
                                                     random_state=42)
    # Fit on train only, then apply to test — avoids leaking test statistics.
    X_train = preprocessor.fit_transform(X_train)
    X_test = preprocessor.transform(X_test)
    # Persist the fitted transformer so inference can reuse it.
    with open('model/pipe.pkl','wb') as file:
        pickle.dump(preprocessor,file)
    label_encoder = LabelEncoder()
    y_train = label_encoder.fit_transform(y_train)
    y_test = label_encoder.transform(y_test)
    return X_train,X_test,y_train,y_test
def search_hyperparameters()-> Dict:
    """Run an Optuna study and log results to Comet.

    Returns:
        Dict: the hyper-parameters whose keys start with ``pp_``
        (preprocessing parameters only — see NOTE below).
    """
    # NOTE(review): `objective` is defined elsewhere; assumed to return the
    # metric being maximized — confirm against its definition.
    study = optuna.create_study(direction='maximize')
    study.optimize(objective,n_trials=20)
    best_params = study.best_params
    best_value = study.best_value
    # SECURITY NOTE(review): API key is hardcoded in source — move it to an
    # environment variable or secrets store.
    experiment = Experiment(
        api_key="qaUy62jElVin2dR5B7isdybJF",
        project_name="Brest Cancer Survival Prediction",
    )
    # NOTE(review): this second Experiment is created but never assigned or
    # used — presumably leftover; confirm before removing.
    Experiment(api_key="qaUy62jElVin2dR5B7isdybJF",auto_output_logging="default")
    # split best_params into preprocessing and model hyper-parameters
    best_preprocessing_hyperparams = {key: value for key, value in best_params.items() if key.startswith('pp_')}
    # NOTE(review): best_model_hyperparams is computed but never returned,
    # yet the caller feeds this function's result into CatBoostClassifier —
    # verify which dict is actually intended.
    best_model_hyperparams = {
        key: value for key, value in best_params.items() if not key.startswith('pp_')}
    logger.info("Best Parameters:")
    for key, value in best_params.items():
        logger.info(f"{key}: {value}")
    logger.info(f"Best brier score: {best_value}")
    experiment.log_metric('Cross_validation_MAE', best_value)
    return best_preprocessing_hyperparams
    #return study.best_params
def train_model():
    """End-to-end training: ingest, preprocess, tune, fit, and persist the model."""
    raw_df = ingest_data()
    features_train, features_test, target_train, target_test = data_preprocessing(raw_df)
    best_params = search_hyperparameters()
    classifier = CatBoostClassifier(**best_params, silent=True)
    classifier.fit(features_train, target_train)
    # Persist the fitted model alongside the preprocessor.
    with open('model/model.pkl', 'wb') as model_file:
        pickle.dump(classifier, model_file)
168,337 | import logging
from typing import Optional
Optional: _SpecialForm = ...
def get_console_logger(name: Optional[str] = 'project') -> logging.Logger:
    """Return a DEBUG-level logger with a single console handler.

    The handler is attached only on first use: if the named logger already
    has handlers, it is returned untouched (no duplicate output).
    """
    log = logging.getLogger(name)
    if log.handlers:
        return log  # already configured
    log.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(
        logging.Formatter('%(asctime)s-%(name)s-%(levelname)s-%(message)s')
    )
    log.addHandler(handler)
    return log
168,338 | from flask import Flask, render_template, request
import openai
def index():
    """Render the chat UI landing page."""
    return render_template("index.html")
168,339 | from flask import Flask, render_template, request
import openai
openai.api_key = 'YOUR_API_KEY'
def api():
    """Forward the POSTed chat message to OpenAI and return the reply.

    Returns:
        The assistant message object on success, otherwise an error string.
    """
    # Get the message from the POST request
    message = request.json.get("message")
    # Send the message to OpenAI's API and receive the response
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": message}
        ]
    )
    # PEP 8: identity check against None (`is not None`), not `!= None`.
    if completion.choices[0].message is not None:
        return completion.choices[0].message
    else:
        return 'Failed to Generate response!'
168,340 | from fastapi import FastAPI, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
import torch
from transformers import pipeline
from utils_sum import preprocess_transcript, divide_chunks, Item, clean_transcript
import datetime
import logging
import sys
import nltk
import configparser
# Log to summarize.log, truncating the file on each service start.
logging.basicConfig(level = logging.INFO, filename ='summarize.log',
                    filemode = 'w', format='%(asctime)s - %(levelname)s - %(message)s')
try:
    logging.info('Module Initialization')
    # Configuration parser
    config = configparser.ConfigParser()
    config.read("./config_summarize.ini")
    model_name = config['SUMMARIZE']['MODEL_NAME']
    # Minimum sentence length (characters) kept during preprocessing.
    num_word = int(config['SUMMARIZE']['WORDS'])
except Exception:
    # Fail fast when the config file is missing or malformed.
    raise HTTPException(status_code = 500, detail = 'PRELIMINARY INITIALIZATION FAILED')
class Item(BaseModel):
    """Request body for the summarization endpoint."""
    title: str
    # Transcript must be at least 30 characters (pydantic validation).
    transcript: constr(min_length = 30)
    model: str
def clean_transcript(transcript_text):
    """Strip timestamp spans (e.g. ``12.34-56.78``), newlines and backslashes."""
    try:
        timestamp_pattern = r"\d+.\d+-\d+.\d+"
        without_timestamps = re.sub(timestamp_pattern, "", transcript_text)
        # Drop literal newlines first, then stray backslashes.
        return without_timestamps.replace("\n", "").replace("\\", "")
    except Exception:
        raise HTTPException(status_code = 500, detail = 'TRANSCRIPT CLEANING ERROR')
def preprocess_transcript(transcript_text, num_word_th=0):
    """Sentence-tokenize each line and keep sentences longer than ``num_word_th`` characters."""
    try:
        assert(type(transcript_text)==list)
        kept_sentences = []
        for line in transcript_text:
            kept_sentences.extend(
                sentence
                for sentence in sent_tokenize(line)
                if len(sentence) > num_word_th
            )
        return kept_sentences
    except Exception:
        raise HTTPException(status_code = 500, detail = 'PREPROCESSING ERROR')
def divide_chunks(l, n=chunk_size):
    """Yield successive ``n``-sized slices of ``l`` (the last may be shorter)."""
    try:
        for offset in range(0, len(l), n):
            chunk = l[offset:offset + n]
            yield chunk
    except Exception:
        raise HTTPException(status_code = 500, detail = 'DIVIDING CHUNKS ERROR')
async def read_text(data: Item):
    """Summarize a transcript chunk-by-chunk and return a JSON payload.

    Cleans and sentence-filters the transcript, splits it into chunks,
    summarizes each chunk with the module-level ``summarizer`` pipeline,
    and joins the partial summaries. 404 on unknown model name.
    """
    logging.info('Executing Prediction API')
    if data.model == model_name:
        time = datetime.datetime.now()
        logging.info('Performing transcript cleaning')
        cleaned_transcript = clean_transcript(data.transcript)
        logging.info('Preprocessing transcript')
        transcript_txt_lines = preprocess_transcript([cleaned_transcript], num_word_th = num_word)
        logging.info('Dividing the transcript into chunks')
        transcript_txt_chunks = divide_chunks(transcript_txt_lines)
        try:
            logging.info('Executing prediction module')
            final_sum = []
            for chunk_lines in transcript_txt_chunks:
                chunk_text = ' '.join(chunk_lines)
                # Per-chunk summary length bounds derived from word count.
                MAX_LENGTH = len(chunk_text.split()) // 10 + 1  # 10%
                MIN_LENGTH = len(chunk_text.split()) // 50 + 1  # 2%
                # NOTE(review): `summarizer` is presumably a transformers
                # summarization pipeline created at module level — confirm.
                chunk_sum = summarizer(chunk_text, max_length = MAX_LENGTH, min_length = MIN_LENGTH)
                final_sum.append(chunk_sum[0].get('summary_text'))
            summary_text = ' '.join(final_sum)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='ERROR IN EXTRACTING SUMMARY MODULE')
        try:
            logging.info('Returning the output')
            content = {'TITLE' : data.title, 'SUMMARY' : summary_text, 'TIMESTAMP' : time}
            content = jsonable_encoder(content)
            return JSONResponse(content)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='ERROR IN DISPLAYING THE OUTPUT')
    else:
        raise HTTPException(status_code=404, detail='INVALID MODEL NAME - AVAILABLE MODEL : <bart-large-cnn>')
168,341 | import requests
import time
HOST_URL = "localhost"
API_VERSION = "v1.0"
PREDICTION_PORT = 8001
def client_script():
    """POST a sample transcript to the summarization service and print the outcome.

    Relies on module-level ``HOST_URL``/``PREDICTION_PORT``/``API_VERSION``
    and ``transcript_data`` (assumed defined elsewhere in this script).
    """
    # Prediction Module
    try:
        st = time.time()
        result = requests.post(f'http://{HOST_URL}:{PREDICTION_PORT}/{API_VERSION}/prediction',
                               headers={'Content-type': 'application/json'},
                               json={'title': 'sample_title', 'model': 'bart-large-cnn',
                                     'transcript': transcript_data}
                               )
        et = time.time()
        if result.status_code in (200, 201):
            print(result.json())
            print(f'Inference Time : {et-st}')
        elif result.status_code == 422:
            print('INVALID INPUT - TITLE, TRANSCRIPT(MIN 30 CHARACTERS) AND MODEL NAME SHOULD BE IN STRING FORMAT')
        elif result.status_code == 500:
            print('MODULE EXECUTION ERROR')
        elif result.status_code == 404:
            print('INVALID MODEL NAME')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print('CLIENT MODULE EXECUTION ERROR')
168,342 | from fastapi import FastAPI, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from utils_key import Item, final_processing, over_all_key, divide_chunks, NER_transcript, nounKey_nerKey_summary_chunk, clean_transcript, camel
from keybert import KeyBERT
from keyphrase_vectorizers import KeyphraseCountVectorizer
import nltk
from nltk.tokenize import sent_tokenize
import spacy
import datetime
import logging
import sys
import configparser
# Log to keyhash.log, truncating the file on each service start.
logging.basicConfig(level = logging.INFO, filename ='keyhash.log',
                    filemode = 'w', format='%(asctime)s - %(levelname)s - %(message)s')
try:
    # Load the KeyBERT model from a local directory (no network download).
    kw_extractor = KeyBERT('./files') # Place the files in the same folder
except Exception:
    logging.exception(sys.exc_info())
    raise HTTPException(status_code = 500, detail = 'MODEL INITIALIZATION FAILED')
class Item(BaseModel):
    """Request body for the keyword-extraction endpoint."""
    title: str
    # Both text fields require at least 30 characters (pydantic validation).
    summary: constr(min_length = 30)
    transcript: constr(min_length = 30)
    model: str
def clean_transcript(transcript_text):
    """Remove ``start-end`` timestamp markers, newlines and backslashes."""
    try:
        no_stamps = re.sub(r"\d+.\d+-\d+.\d+", "", transcript_text)
        # Delete newline and backslash characters in a single pass.
        return no_stamps.translate({ord("\n"): None, ord("\\"): None})
    except Exception:
        raise HTTPException(status_code = 500, detail = 'TRANSCRIPT CLEANING ERROR')
def divide_chunks(l, n=chunk_size):
    """Yield ``l`` in consecutive slices of length ``n`` (last may be shorter)."""
    try:
        for slice_start in range(0, len(l), n):
            yield l[slice_start:slice_start + n]
    except Exception:
        raise HTTPException(status_code = 500, detail = 'ERROR IN DIVIDE_CHUNKS MODULE')
def nounKey_nerKey_summary_chunk(summary_chunk, noun_keywords, NER_Keywords):
    """Append noun-chunk and named-entity keywords found in one summary chunk.

    Mutates and returns ``noun_keywords`` / ``NER_Keywords``. NER entries are
    tagged as ``text_(label)``.
    """
    try:
        # NOTE(review): `nlp` is presumably a module-level spaCy pipeline — confirm.
        doc = nlp(summary_chunk)
        for noun_chunk in doc.noun_chunks:
            # Strip punctuation to test whether the chunk is purely numeric.
            text = re.sub(r"[%,:/!@#$^&*()+=|_'?><,.`~-]", "", str(noun_chunk))
            text = text.strip()
            if text.isnumeric():
                continue
            if noun_chunk.root.pos_ in ["PROPN", "NOUN"]:
                # Keep stopword-free chunks, or "the ..." chunks whose tail is stopword-free.
                if not any(word.is_stop for word in noun_chunk):
                    noun_keywords.append(noun_chunk.text)
                elif (not any(word.is_stop for word in noun_chunk[1:])) and noun_chunk.text.split(" ")[0].lower() == "the":
                    noun_keywords.append(noun_chunk.text)
        for ent in doc.ents:
            if ent.label_ in [ 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'NORP', 'ORG', 'PERSON', 'PRODUCT', 'WORK_OF_ART']:
                NER_Keywords.append(f"{ent.text}_({ent.label_.lower()})")
        # Drops entries that are a single punctuation character.
        NER_Keywords = [word for word in NER_Keywords if word not in string.punctuation]
        return noun_keywords, NER_Keywords
    except Exception:
        raise HTTPException(status_code = 500, detail = 'ERROR IN CHUNK_KEYWORDS MODULE')
def over_all_key(keybert_diversity_phrases, NER_Keywords, noun_keywords):
    """Merge KeyBERT, NER and noun keywords into one deduplicated lowercase list.

    Returns a ``(combined_keywords, ner_keywords)`` pair; NER entries are
    stripped of their ``_(label)`` suffix.
    """
    try:
        lowered_phrases = list({phrase.lower() for phrase in keybert_diversity_phrases})
        # Drop the "_(label)" suffix appended during NER extraction.
        stripped_ner = [entry.split("_")[0] for entry in {ner.lower() for ner in NER_Keywords}]
        lowered_nouns = list({noun.lower() for noun in noun_keywords})
        combined = list(set(lowered_phrases + stripped_ner + lowered_nouns))
        return combined, stripped_ner
    except Exception:
        raise HTTPException(status_code = 500, detail = 'ERROR IN OVER_ALL_KEYWORDS MODULE')
def NER_transcript(transcript_text, over_all_keywords, ner_keywords):
    """Extend the keyword lists (in place) with named entities from the transcript.

    New entities are appended to ``ner_keywords``; any entity missing from
    ``over_all_keywords`` is inserted at its front. Both lists are returned.
    """
    try:
        # NOTE(review): `nlp` is presumably a module-level spaCy pipeline — confirm.
        doc = nlp(transcript_text)
        relevant_labels = ['EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'NORP', 'ORG', 'PERSON', 'PRODUCT', 'WORK_OF_ART']
        # Plain loops instead of the original side-effect list comprehensions
        # (a comprehension used only for .append/.insert is an anti-pattern).
        for ent in doc.ents:
            if ent.label_ in relevant_labels and ent.text not in ner_keywords:
                ner_keywords.append(f"{ent.text}")
        for nr in ner_keywords:
            if nr not in over_all_keywords:
                over_all_keywords.insert(0, nr)
        return over_all_keywords, ner_keywords
    except Exception:
        raise HTTPException(status_code = 500, detail = 'ERROR IN TRANSCRIPT_KEYWORDS MODULE')
def final_processing(over_all_keywords, ner_keywords_trans, keywords):
    """Produce display strings and the residual noun-keyword set.

    Returns ``(all-keywords string, noun-only set, NER string)``; NER entries
    are stripped of their ``_(label)`` suffix first.
    """
    try:
        stripped_ner = []
        for entry in ner_keywords_trans:
            stripped_ner.append(entry.split("_")[0])
        # Nouns = everything that is neither an NER entity nor a KeyBERT phrase.
        residual_nouns = set(over_all_keywords) - set(stripped_ner) - set(keywords)
        return ", ".join(over_all_keywords), residual_nouns, ", ".join(stripped_ner)
    except Exception:
        raise HTTPException(status_code = 500, detail = 'ERROR IN FINAL_PROCESSING MODULE')
def camel(keybert_diversity_phrases):
    """Turn keyword phrases into a comma-separated ``#Hashtag`` string.

    Phrases of three or more words are CamelCased into one token; shorter
    phrases keep only their first word (matching the original behaviour).
    """
    try:
        unique_lowered = {phrase.lower() for phrase in keybert_diversity_phrases}
        hashtag_bases = []
        for phrase in unique_lowered:
            parts = phrase.split(' ')
            if len(parts) > 2:
                hashtag_bases.append("".join(part.title() for part in parts))
            else:
                hashtag_bases.append(parts[0])
        return ", ".join(f"#{base}" for base in set(hashtag_bases))
    except Exception:
        raise HTTPException(status_code = 500, detail = 'ERROR IN CAMEL_CASE MODULE')
def sent_tokenize(text, language="english"):
    """
    Return a sentence-tokenized copy of *text*,
    using NLTK's recommended sentence tokenizer
    (currently :class:`.PunktSentenceTokenizer`
    for the specified language).

    :param text: text to split into sentences
    :param language: the model name in the Punkt corpus
    :return: list of sentence strings
    """
    # NOTE(review): `load` is presumably nltk.data.load (this function mirrors
    # NLTK's own implementation) — confirm the import in the full module.
    tokenizer = load(f"tokenizers/punkt/{language}.pickle")
    return tokenizer.tokenize(text)
async def read_text(data: Item):
    """Extract keywords, named entities, noun keywords and hashtags.

    Runs KeyBERT (unigram and 2-3 gram passes) plus spaCy noun/NER extraction
    over the summary, augments with transcript-level NER, and returns the
    merged keyword sets as JSON. 404 on unknown model name.
    """
    if data.model == model_name:
        time = datetime.datetime.now()
        logging.info('Executing keyword module')
        summary_text = data.summary
        transcript_text = data.transcript
        # clean transcript
        transcript_text = clean_transcript(transcript_text)
        try:
            summary_line_list = sent_tokenize(summary_text)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='SENTENCE TOKENIZING ERROR')
        # Dividing into chunks
        summary_line_chunks_list = divide_chunks(summary_line_list)
        summary_chunks_txt_list = [' '.join(i) for i in summary_line_chunks_list]
        try:
            keybert_diversity_phrases = []
            NER_Keywords = []
            noun_keywords = []
            for new_text in summary_chunks_txt_list:
                # Pass 1: single-noun keywords above a 0.2 relevance score.
                try:
                    keywords_n = kw_extractor.extract_keywords(new_text, vectorizer=KeyphraseCountVectorizer(pos_pattern='<N.*>'), use_mmr=True, diversity=1.0,
                                                               keyphrase_ngram_range=(1, 1), stop_words='english', top_n=50)
                    keywords_noun = [i for i in keywords_n if i[1] > 0.2]
                    for i, _ in keywords_noun:
                        keybert_diversity_phrases.append(i)
                except:
                    # NOTE(review): bare except deliberately treats "no keywords"
                    # as non-fatal; consider narrowing the exception type.
                    logging.info('No keywords extracted in this loop')
                # Pass 2: multi-noun (2-3 gram) keyphrases above 0.2.
                try:
                    keywords2_nn = kw_extractor.extract_keywords(new_text, vectorizer=KeyphraseCountVectorizer(pos_pattern='<N.+>+<N.+>'), use_mmr=True, diversity=1.0,
                                                                 keyphrase_ngram_range=(2, 3), stop_words='english', top_n=50)
                    keywords_nnounn = [i for i in keywords2_nn if i[1] > 0.2]
                    for i, _ in keywords_nnounn:
                        keybert_diversity_phrases.append(i)
                except:
                    logging.info('No keywords extracted in this loop')
                # Extracting noun and NER from chunk
                noun_keywords, NER_Keywords = nounKey_nerKey_summary_chunk(new_text, noun_keywords, NER_Keywords)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='KEYWORD EXTRACTION ERROR')
        # Overall keywords
        over_all_keywords, ner_keywords = over_all_key(keybert_diversity_phrases, NER_Keywords, noun_keywords)
        # keywords from transcript
        over_all_keywords, ner_keywords_trans = NER_transcript(transcript_text, over_all_keywords, ner_keywords)
        # postprocessing final keywords
        over_all_keywords, noun_keywords_all, ner_keywords_all = final_processing(over_all_keywords, ner_keywords_trans, keybert_diversity_phrases)
        # Hashtag
        hashtag_output = camel(keybert_diversity_phrases)
        # returning the result
        try:
            content = {'Title' : data.title, 'NER Keywords' : ner_keywords_all,
                       'Noun Keywords' : noun_keywords_all, 'Over_all_keywords' : over_all_keywords,
                       'hashtag' : hashtag_output, 'timestamp' : time
                       }
            content = jsonable_encoder(content)
            return JSONResponse(content)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='ERROR IN RETURNING THE OUTPUT')
    else:
        raise HTTPException(status_code=404, detail='INVALID MODEL NAME - AVAILABLE MODEL : <all-mpnet-base-v2>')
168,343 | import requests
import time
HOST_URL = "localhost"
API_VERSION = "v1.0"
PREDICTION_PORT = 8001
def client_script():
    """POST a transcript+summary to the keyword service and print the outcome.

    Relies on module-level ``HOST_URL``/``PREDICTION_PORT``/``API_VERSION`` and
    ``transcript_data``/``summary_data`` (assumed defined elsewhere).
    """
    # Prediction Module
    try:
        st = time.time()
        result = requests.post(f'http://{HOST_URL}:{PREDICTION_PORT}/{API_VERSION}/prediction',
                               headers={'Content-type': 'application/json'},
                               json={'title': 'test title', 'model': 'all-mpnet-base-v2',
                                     'transcript': transcript_data, 'summary' : summary_data
                                     }
                               )
        et = time.time()
        if result.status_code in (200, 201):
            print(result.json())
            print(f'Inference Time : {et-st}')
        elif result.status_code == 422:
            print('INVALID INPUT - TITLE, SUMMARY(MIN 30 CHARACTERS), TRANSCRIPT(MIN 30 CHARACTERS) AND MODEL NAME SHOULD BE IN STRING FORMAT')
        elif result.status_code == 500:
            print('MODULE EXECUTION ERROR')
        elif result.status_code == 404:
            print('INVALID MODEL NAME')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print('CLIENT MODULE EXECUTION ERROR')
168,344 | from fastapi import FastAPI, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from utils_topic import Item, get_similarities_model1
from sentence_transformers import SentenceTransformer
import datetime
import logging
import sys
import numpy as np
import ast
import configparser
# Log to topic.log, truncating the file on each service start.
logging.basicConfig(level = logging.INFO, filename ='topic.log',
                    filemode = 'w', format='%(asctime)s - %(levelname)s - %(message)s')
try:
    logging.info('Initialization')
    # Configuration Parser
    config = configparser.ConfigParser()
    config.read("./config_topic.ini")
    model_name = config['TOPIC']['MODEL_NAME']
    # Candidate topics are stored as a Python-literal list in the ini file.
    candidate_labels= ast.literal_eval(config['TOPIC']['TOPIC_MAIN'])
except Exception:
    logging.exception(sys.exc_info())
    raise HTTPException(status_code = 500, detail = 'PRELIMINARY INITIALIZATION FAILED')
try:
    # Topic Module
    logging.info('transformer module initialization')
    model1 = SentenceTransformer('./files') # Place files in the same folder
except Exception:
    logging.exception(sys.exc_info())
    raise HTTPException(status_code = 500, detail = 'MODEL INITIALIZATION FAILED')
try:
    # Topic Embeddings: precompute once so each request only embeds the summary.
    logging.info('Getting embeddings for topics')
    embedded_topic = []
    for i in range(len(candidate_labels)):
        embed = model1.encode(candidate_labels[i])
        embedded_topic.append(embed)
except Exception:
    logging.exception(sys.exc_info())
    raise HTTPException(status_code = 500, detail = 'TOPIC EMBEDDING FAILED')
class Item(BaseModel):
    """Request body for the topic-scoring endpoint."""
    title: str
    # Summary must be at least 20 characters (pydantic validation).
    summary: constr(min_length=20)
    model: str
def get_similarities_model1(embed_text, embed_topic):
    """Score every (topic, text) embedding pair and softmax the scaled scores."""
    try:
        # Scaling sharpens the softmax distribution over topics.
        scale_factor = 20
        pair_scores = [
            np.round(cosine_similarity(text_vec, topic_vec), 3) * scale_factor
            for topic_vec in embed_topic
            for text_vec in embed_text
        ]
        return softmax(np.array(pair_scores))
    except Exception:
        raise HTTPException(status_code = 500, detail = 'RESULT SCORING FAILED')
async def read_text(data: Item):
    """Score the summary against the precomputed topic embeddings.

    Embeds the summary, compares it against the module-level
    ``embedded_topic`` vectors, and returns topics sorted by score.
    404 on unknown model name.
    """
    logging.info('Prediction endpoint execution')
    if data.model == model_name:
        time = datetime.datetime.now()
        logging.info('Executing prediction module')
        # NOTE(review): `global` is unnecessary for read-only access — harmless.
        global embedded_topic
        try:
            logging.info('Getting embeddings for summary')
            embedded_text = []
            embed = model1.encode(data.summary)
            embedded_text.append(embed)
            embedded_text = np.array(embedded_text)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code = 500, detail = 'SUMMARY EMBEDDING FAILED')
        logging.info('Getting similarity score for topics and summary')
        score = get_similarities_model1(embedded_text, embedded_topic)
        score = [round(x, 3) for x in score]
        # Highest-scoring topics first.
        topic_score_list = sorted(zip(candidate_labels, score), key=lambda x: x[1], reverse=True)
        try:
            # returning the result
            logging.info('returning the result')
            content = {'TITLE' : data.title,'TOPICS' : dict(topic_score_list), 'TIMESTAMP' : time
                       }
            content = jsonable_encoder(content)
            return JSONResponse(content)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='ERROR IN RETURNING THE OUTPUT')
    else:
        # NOTE(review): logging.exception here has no active exception to log.
        logging.exception(sys.exc_info())
        raise HTTPException(status_code=404, detail='INVALID MODEL NAME - AVAILABLE MODEL : <all-mpnet-base-v2>')
168,345 | import requests
import time
HOST_URL = "localhost"
API_VERSION = "v1.0"
PREDICTION_PORT = 8001
def client_script():
    """POST a summary to the topic-scoring service and print the outcome.

    Relies on module-level ``HOST_URL``/``PREDICTION_PORT``/``API_VERSION``
    and ``summary_data`` (assumed defined elsewhere in this script).
    """
    # Prediction Module
    try:
        st = time.time()
        result = requests.post(f'http://{HOST_URL}:{PREDICTION_PORT}/{API_VERSION}/prediction',
                               headers={'Content-type': 'application/json'},
                               json={'title': 'sample title', 'model': 'all-mpnet-base-v2', 'summary' : summary_data}
                               )
        et = time.time()
        if result.status_code in (200, 201):
            print(result.json())
            print(f'Inference Time : {et-st}')
        elif result.status_code == 422:
            print('INVALID INPUT - TITLE, SUMMARY(MIN 20 CHARACTERS) AND MODEL NAME SHOULD BE IN STRING FORMAT')
        elif result.status_code == 401:
            print('TOKEN EXPIRED')
        elif result.status_code == 498:
            print('INVALID TOKEN')
        elif result.status_code == 500:
            print('MODULE EXECUTION ERROR')
        elif result.status_code == 404:
            print('INVALID MODEL NAME')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print('CLIENT MODULE EXECUTION ERROR')
168,346 | from fastapi import FastAPI, HTTPException
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from utils_zero import Item_zeroshot
from transformers import pipeline
import torch
import datetime
import logging
import sys
import ast
import configparser
# Log to topic_zeroshot.log, truncating the file on each service start.
logging.basicConfig(level = logging.INFO, filename = './topic_zeroshot.log',
                    filemode = 'w', format='%(asctime)s - %(levelname)s - %(message)s')
try:
    logging.info('Initialization')
    # Configuration Parser
    config = configparser.ConfigParser()
    config.read("./config_zero.ini")
    model_name = config['TOPIC_ZERO']['ZERO_NAME']
    # Default candidate topics stored as a Python-literal list in the ini file.
    candidate_labels = ast.literal_eval(config['TOPIC_ZERO']['TOPIC_MAIN'])
except Exception:
    logging.exception(sys.exc_info())
    raise HTTPException(status_code = 500, detail = 'PRELIMINARY INITIALIZATION FAILED')
try:
    # Zeroshot Module: use GPU 0 when available, otherwise CPU.
    logging.info('Pipeline initialization')
    num_of_gpus = torch.cuda.device_count()
    if num_of_gpus:
        model = pipeline('zero-shot-classification', model = './files', device = 0)
    else:
        model = pipeline('zero-shot-classification', model = './files')
except Exception:
    logging.exception(sys.exc_info())
    raise HTTPException(status_code = 500, detail = 'MODEL INITIALIZATION FAILED')
class Item_zeroshot(BaseModel):
    """Request body for the zero-shot topic-classification endpoint."""
    title: str
    # Summary must be at least 20 characters (pydantic validation).
    summary: constr(min_length=20)
    # Optional custom label set: between 2 and 10 strings; falls back to the
    # configured candidate labels when omitted.
    labels: Optional[conlist(str, min_items=2, max_items=10)] = None
    model: str
async def read_text(data: Item_zeroshot):
    """Zero-shot classify the summary against caller-supplied or default labels.

    Returns label/score pairs sorted by score. 404 on unknown model name.
    """
    logging.info('Prediction endpoint execution')
    if data.model == model_name:
        time = datetime.datetime.now()
        # Caller-provided labels override the configured defaults.
        if data.labels is not None:
            prediction_labels = data.labels
        else:
            prediction_labels = candidate_labels
        try:
            logging.info('Executing zeroshot module')
            result = model(data.summary, prediction_labels)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='ERROR IN ZEROSHOT MODULE')
        try:
            # Highest-scoring labels first.
            sorted_score = sorted(zip(result['labels'], result['scores']), key = lambda x : x[1], reverse = True)
            content = {'RESULT' : sorted_score, 'TIMESTAMP' : time}
            content = jsonable_encoder(content)
            return JSONResponse(content)
        except Exception:
            logging.exception(sys.exc_info())
            raise HTTPException(status_code=500, detail='ERROR IN RETURNING THE OUTPUT')
    else:
        # NOTE(review): logging.exception here has no active exception to log.
        logging.exception(sys.exc_info())
        raise HTTPException(status_code=404, detail='INVALID MODEL NAME - AVAILABLE MODEL : <bart-large-mnli>')
168,347 | import requests
import time
ZERO_URL = "localhost"
API_VERSION = "v1.0"
ZEROSHOT_PORT = 8001
def client_script():
    """POST a summary with custom labels to the zero-shot service and print the outcome.

    Relies on module-level ``ZERO_URL``/``ZEROSHOT_PORT``/``API_VERSION`` and
    ``summary_data`` (assumed defined elsewhere in this script).
    """
    # Prediction Module
    try:
        st = time.time()
        result = requests.post(f'http://{ZERO_URL}:{ZEROSHOT_PORT}/{API_VERSION}/prediction',
                               headers={'Content-type': 'application/json'},
                               json={'title' : 'test title', 'model': 'bart-large-mnli',
                                     'summary': summary_data, 'labels' : ['sports','music']}
                               )
        et = time.time()
        if result.status_code in (200, 201):
            print(result.json())
            print(f'Inference Time : {et - st}')
        elif result.status_code == 422:
            print('INVALID INPUT - TITLE, SUMMARY(MIN 20 CHARACTERS) AND MODEL NAME SHOULD BE IN STRING FORMAT')
        elif result.status_code == 500:
            print('MODULE EXECUTION ERROR')
        elif result.status_code == 404:
            print('INVALID MODEL NAME')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print('ZEROSHOT MODULE CLIENT ERROR')
168,348 | import os
import requests
from bs4 import BeautifulSoup
from datetime import datetime
if not os.path.exists(main_directory):
os.makedirs(main_directory, exist_ok=True)
if not os.path.exists(subdirectory_path):
os.makedirs(subdirectory_path, exist_ok=True)
response = requests.get(url)
class BeautifulSoup(Tag):
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
element_classes=None, **kwargs):
def deprecated_argument(old_name, new_name):
def _clone(self):
def __getstate__(self):
def __setstate__(self, state):
def _decode_markup(cls, markup):
def _markup_is_url(cls, markup):
def _markup_resembles_filename(cls, markup):
def _feed(self):
def reset(self):
def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
sourceline=None, sourcepos=None, **kwattrs):
def string_container(self, base_class=None):
def new_string(self, s, subclass=None):
def insert_before(self, *args):
def insert_after(self, *args):
def popTag(self):
def pushTag(self, tag):
def endData(self, containerClass=None):
def object_was_parsed(self, o, parent=None, most_recent_element=None):
def _linkage_fixer(self, el):
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
sourcepos=None, namespaces=None):
def handle_endtag(self, name, nsprefix=None):
def handle_data(self, data):
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal", iterator=None):
def save_content_to_file(url, folder, filename):
    """Fetch ``url`` and write its <h1> titles and <p> paragraphs to a text file."""
    try:
        response = requests.get(url)
        if response.status_code != 200:
            print("Failed to retrieve the page:", url)
            return
        soup = BeautifulSoup(response.text, 'html.parser')
        target_path = os.path.join(folder, filename)
        with open(target_path, 'w', encoding='utf-8') as out:
            # Each <h1> becomes a "Title:" line followed by blank separator lines.
            for heading in soup.find_all(['h1']):
                out.write("Title: " + heading.text + '\n' * 5)
            for para in soup.find_all('p'):
                out.write(para.text + '\n')
    except Exception as e:
        print("An error occurred:", e)
168,349 | import streamlit as st
import requests
import sounddevice as sd
import wavio
from langchain import OpenAI
import os
from openai import OpenAI
def record_audio(filename, duration, fs):
    """Record ``duration`` seconds of stereo audio at sample rate ``fs`` into a WAV file."""
    print("Recording audio...")
    frame_count = int(duration * fs)
    captured = sd.rec(frame_count, samplerate=fs, channels=2)
    sd.wait()  # block until the recording finishes
    wavio.write(filename, captured, fs, sampwidth=2)
    print("Audio recorded and saved as", filename)
168,350 | from setuptools import find_packages, setup
from typing import List
HYPEN_E_DOT = "-e ."
List = _Alias()
The provided code snippet includes necessary dependencies for implementing the `get_requirements` function. Write a Python function `def get_requirements(file_path: str) -> List[str]` to solve the following problem:
this function will return the list of requirements
Here is the function:
def get_requirements(file_path: str) -> List[str]:
    '''
    Return the list of requirements read from ``file_path``.

    Each line has its newline characters removed, and the editable-install
    marker ("-e .") is dropped if present so it is not passed to setuptools.
    '''
    # Self-contained marker constant; also removes the dead `requirements = []`
    # initialization the original overwrote immediately.
    hyphen_e_dot = "-e ."
    with open(file_path) as file_obj:
        requirements = [req.replace("\n", "") for req in file_obj.readlines()]
    if hyphen_e_dot in requirements:
        requirements.remove(hyphen_e_dot)
    return requirements
168,351 | import os
import sys
import datetime
import numpy as np
import pandas as pd
import dill
import pickle
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from src.exception import CustomException
from src.logger import logging
class CustomException(Exception):
    """Project-wide exception carrying a formatted script/line error message."""
    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        # error_message_detail() embeds the failing script name and line number.
        self.error_message = error_message_detail(
            error_message, error_detail=error_detail)
    def __str__(self):
        return self.error_message
def date_transform(date):
    """Return the number of days elapsed from ``date`` until today.

    Args:
        date: date string in the form ``"Jan 05, '21"``.

    Returns:
        int: days between the parsed date and today (negative for future dates).

    Raises:
        CustomException: if the string does not match the expected format.
    """
    try:
        # Parse once and take the date directly — the original
        # strptime -> strftime -> strptime round-trip was redundant.
        listed_on = datetime.datetime.strptime(date, "%b %d, '%y").date()
        return (datetime.date.today() - listed_on).days
    except Exception as e:
        raise CustomException(e, sys)
raise CustomException(e, sys) | null |
168,352 | import os
import sys
import datetime
import numpy as np
import pandas as pd
import dill
import pickle
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from src.exception import CustomException
from src.logger import logging
def remove_outliers_iqr(df):
    """Drop rows containing values outside the 1.5*IQR whiskers.

    If the lower whisker for 'exactPrice' would be negative (impossible for a
    price), the per-column minimum is used as the lower bound instead.
    """
    first_quartile = df.quantile(0.25)
    third_quartile = df.quantile(0.75)
    inter_quartile_range = third_quartile - first_quartile
    lower_bound = first_quartile - 1.5 * inter_quartile_range
    upper_bound = third_quartile + 1.5 * inter_quartile_range
    if lower_bound['exactPrice'] < 0:
        lower_bound = df.min()
    # A row is an outlier if ANY of its values falls outside the bounds.
    outlier_mask = ((df < lower_bound) | (df > upper_bound)).any(axis=1)
    return df[~outlier_mask]
168,353 | import os
import sys
import datetime
import numpy as np
import pandas as pd
import dill
import pickle
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from src.exception import CustomException
from src.logger import logging
class CustomException(Exception):
    """Project-wide exception carrying a formatted script/line error message."""
    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        # error_message_detail() embeds the failing script name and line number.
        self.error_message = error_message_detail(
            error_message, error_detail=error_detail)
    def __str__(self):
        return self.error_message
def save_object(file_path, obj):
    """Pickle ``obj`` to ``file_path``, creating parent directories as needed."""
    try:
        # Ensure the target directory exists before writing.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, "wb") as sink:
            pickle.dump(obj, sink)
    except Exception as e:
        raise CustomException(e, sys)
168,354 | import os
import sys
import datetime
import numpy as np
import pandas as pd
import dill
import pickle
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from src.exception import CustomException
from src.logger import logging
class CustomException(Exception):
    """Project-wide exception carrying a formatted script/line error message."""
    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        # error_message_detail() embeds the failing script name and line number.
        self.error_message = error_message_detail(
            error_message, error_detail=error_detail)
    def __str__(self):
        return self.error_message
import logging
logging.basicConfig(
filename=LOG_FILE_PATH,
format="[ %(asctime)s ] %(lineno)d %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
def evaluate_models(X_train, y_train, X_test, y_test, models, param):
    """Grid-search each candidate model and report its test R² score.

    Args:
        models: dict name -> estimator instance.
        param: dict name -> grid-search parameter grid for that estimator.

    Returns:
        dict: model name -> R² score on the test split.

    Raises:
        CustomException: wrapping any failure during search or evaluation.
    """
    try:
        report = {}
        for i in range(len(list(models))):
            model = list(models.values())[i]
            para = param[list(models.keys())[i]]
            # 3-fold grid search, then refit on the full training split with
            # the best parameters found.
            gs = GridSearchCV(model, para, cv=3)
            gs.fit(X_train, y_train)
            model.set_params(**gs.best_params_)
            model.fit(X_train, y_train)
            y_train_pred = model.predict(X_train)
            y_test_pred = model.predict(X_test)
            # Train score is computed for logging context; only the test
            # score goes into the report.
            train_model_score = r2_score(y_train, y_train_pred)
            test_model_score = r2_score(y_test, y_test_pred)
            report[list(models.keys())[i]] = test_model_score
            logging.info(f"utils model trainer : {report} , { model.get_params()}")
        return report
    except Exception as e:
        raise CustomException(e, sys)
168,355 | import os
import sys
import datetime
import numpy as np
import pandas as pd
import dill
import pickle
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from src.exception import CustomException
from src.logger import logging
class CustomException(Exception):
    """Project-wide exception carrying a formatted script/line error message."""
    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        # error_message_detail() embeds the failing script name and line number.
        self.error_message = error_message_detail(
            error_message, error_detail=error_detail)
    def __str__(self):
        return self.error_message
def load_object(file_path):
    """Unpickle and return the object stored at ``file_path``."""
    try:
        with open(file_path, "rb") as source:
            return pickle.load(source)
    except Exception as e:
        raise CustomException(e, sys)
168,356 | import os
import sys
import datetime
import numpy as np
import pandas as pd
import dill
import pickle
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from src.exception import CustomException
from src.logger import logging
def format_indian_currency(num, currency_symbol='₹'):
    """Format a numeric string as Indian currency.

    NOTE(review): this function's own body appears truncated in this source —
    as written it only defines the nested helper and implicitly returns None.
    Confirm against the original file.
    """
    def output_within_range(number):
        """Return ``number`` as a "low - high" string rounded to the nearest
        thousand and ten-thousand (e.g. a 15k-20k style price band)."""
        # Calculate the output within the range of 15000 to 20000
        number1 = number/pow(10, 3)
        number1 = np.round(number1) * pow(10, 3)
        number2 = number/pow(10, 4)
        number2 = np.round(number2) * pow(10, 4)
        # Convert the output to a string and return it
        # str(...)[:-2] drops the trailing ".0" from the rounded floats.
        num1 = format_indian_currency(str(number1)[:-2])
        num2 = format_indian_currency(str(number2)[:-2])
        if number1 >= number2:
            output_str = f"{num2} - {num1}"
        else:
            output_str = f"{num1} - {num2}"
        # NOTE(review): debug print left in — presumably leftover; confirm.
        print(number)
        # print(format_indian_currency(str(number1)[:-2]))
        return output_str
168,357 | import sys
from src.logger import logging
def error_message_detail(error, error_detail: sys):
    """Build a diagnostic string naming the failing script, line number and error.

    ``error_detail`` is expected to be the ``sys`` module, so the currently
    handled exception's traceback can be read via ``exc_info()``.
    """
    _, _, traceback_obj = error_detail.exc_info()
    script_name = traceback_obj.tb_frame.f_code.co_filename
    return "Error ocurred in python script name [{0}] line number [{1}] error message [{2}]".format(
        script_name, traceback_obj.tb_lineno, str(error)
    )
168,358 | import numpy as np
from flask import Flask, jsonify, request, render_template
from src.pipeline.predict_pipeline import CustomData, PredictRecommendPipeline
from src.pipeline.scraping_pipeline import ImageScrappingPipeline
from math import trunc
from src.logger import logging
import os
def city_arr():
    """Collect the distinct city names from the global ``city_loc`` pairs.

    Each entry of ``city_loc`` is indexed as (city, locality); only the
    city component is kept.
    """
    return {entry[0] for entry in city_loc}
def get_city_arr():
    """Flask endpoint: respond with every known city as a JSON array."""
    return jsonify(list(city_arr()))
168,359 | import numpy as np
from flask import Flask, jsonify, request, render_template
from src.pipeline.predict_pipeline import CustomData, PredictRecommendPipeline
from src.pipeline.scraping_pipeline import ImageScrappingPipeline
from math import trunc
from src.logger import logging
import os
def main_arr(city):
    """Return the distinct localities recorded for *city*, skipping the
    placeholder value "Missing"."""
    localities = {pair[1] for pair in city_loc
                  if pair[0] == city and pair[1] != "Missing"}
    return list(localities)
def get_main_arr(selected_city):
    """Flask endpoint: respond with *selected_city*'s localities as JSON."""
    return jsonify(main_arr(selected_city))
168,360 | import numpy as np
from flask import Flask, jsonify, request, render_template
from src.pipeline.predict_pipeline import CustomData, PredictRecommendPipeline
from src.pipeline.scraping_pipeline import ImageScrappingPipeline
from math import trunc
from src.logger import logging
import os
# Dropdown option lists rendered by the property-price form templates.
propType = ['Multistorey Apartment', 'Residential House',
            'Builder Floor Apartment', 'Villa', 'Studio Apartment',
            'Penthouse']  # supported property categories
RoS = ['Rent', 'Sale']  # transaction types
BHK = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]  # bedroom counts, kept as strings for the form
Furnishing = ['Semi-Furnished', 'Furnished', 'Unfurnished']  # furnishing states
class PredictRecommendPipeline:
    """Loads persisted artifacts to price a property and to fetch similar
    listings for recommendation."""

    def __init__(self):
        pass

    def predict(self, features):
        """Predict a formatted price range for the property in *features*.

        Args:
            features: DataFrame-like input whose 'RentOrSale' column selects
                which of the two trained models is loaded.

        Returns:
            The "low - high" range string built by ``output_within_range``.

        Raises:
            CustomException: wrapping any load/transform/predict failure.
        """
        try:
            # print(features["RentOrSale"], "hiii")
            # print(features["RentOrSale"] == "Sale", "hiii")
            # Rent and Sale prices come from separately trained models.
            if (features["RentOrSale"] == "Rent").all():
                model_path = "artifacts/model_rent.pkl"
            else:
                model_path = "artifacts/model.pkl"
            preprocessor_path = "artifacts/preprocessor.pkl"
            print("Before Loading")
            model = load_object(file_path=model_path)
            preprocessor = load_object(file_path=preprocessor_path)
            print("After Loading")
            fea_df = pd.DataFrame(features, columns=['propertyType', 'locality', 'furnishing',
                                                     'city', 'bedrooms', 'bathrooms', 'RentOrSale', 'exactPrice'])
            data_scaled = preprocessor.transform(fea_df)
            # The last transformed column is dropped before prediction --
            # presumably it corresponds to 'exactPrice' (the target); TODO
            # confirm the preprocessor's output column order.
            preds = model.predict(data_scaled[:, :-1])
            prediction = round(preds[0])
            result = output_within_range(prediction)
            return result
        except Exception as e:
            raise CustomException(e, sys)

    def recommend(self, features):
        """Return listings similar to the single property row in *features*.

        NOTE(review): ``Recommender`` is used without instantiation --
        confirm ``get_similar_houses`` is a static/class method.

        Raises:
            CustomException: wrapping any read/recommendation failure.
        """
        try:
            Data_path = "artifacts/recommend_data.csv"
            data = pd.read_csv(Data_path)
            recommend = Recommender
            # Only the first row's field values are used for similarity.
            similar_houses = recommend.get_similar_houses(
                features['propertyType'].loc[0], features['locality'].loc[0], features['furnishing'].loc[0],
                features['city'].loc[0], features['bedrooms'].loc[0], features['bathrooms'].loc[0], features['RentOrSale'].loc[0], dataset=data)
            # similar_houses = recommend.get_similar_houses(
            #     str(features['propertyType']), str(
            #         features['locality']), str(features['furnishing']),
            #     str(features['city']), str(features['bedrooms']), str(features['bathrooms']), str(features['RentOrSale']), dataset=data)
            # similar_houses = recommend.get_similar_houses(
            #     'Multistorey Apartment', 'Narendrapur', 'Semi-Furnished', 'Kolkata', '3', '3', 'Rent', data, n=6)
            # pass
            return similar_houses
        except Exception as e:
            raise CustomException(e, sys)
class CustomData:
    """Holds one property-form submission and converts it into the
    single-row DataFrame shape the prediction pipeline expects."""

    def __init__(self,
                 propertyType: str,
                 locality: str,
                 furnishing: str,
                 city: str,
                 bedrooms: str,
                 bathrooms: str,
                 RentOrSale: str,
                 exactPrice: str):
        (self.propertyType, self.locality, self.furnishing, self.city,
         self.bedrooms, self.bathrooms, self.RentOrSale, self.exactPrice) = (
            propertyType, locality, furnishing, city,
            bedrooms, bathrooms, RentOrSale, exactPrice)

    def get_data_as_data_frame(self):
        """Return the stored fields as a one-row pandas DataFrame.

        Raises:
            CustomException: wrapping any DataFrame-construction failure.
        """
        try:
            row = {
                "propertyType": [self.propertyType],
                "locality": [self.locality],
                "furnishing": [self.furnishing],
                "city": [self.city],
                "bedrooms": [self.bedrooms],
                "bathrooms": [self.bathrooms],
                "RentOrSale": [self.RentOrSale],
                "exactPrice": [self.exactPrice],
            }
            return pd.DataFrame(row)
        except Exception as err:
            raise CustomException(err, sys)
def trunc(__x: SupportsFloat) -> int: ...
def home():
    """Flask view for the property form.

    GET renders the blank form with the dropdown option lists; POST prices
    the submitted property and renders similar listings alongside the
    predicted range.
    """
    if request.method == "GET":
        return render_template('index.html', PropType=propType, BHK=BHK, Furnish=Furnishing, Ros=RoS)
    else:
        print("submitted")
        # Collect the form fields into the pipeline's input container.
        data = CustomData(
            propertyType=request.form.get('propertyType'),
            locality=request.form.get('locality'),
            furnishing=request.form.get('furnishing'),
            city=request.form.get('city'),
            # NOTE(review): bedrooms AND bathrooms are both read from the
            # single 'BHK' form field -- confirm this is intentional.
            bedrooms=request.form.get('BHK'),
            bathrooms=request.form.get('BHK'),
            RentOrSale=request.form.get('RentOrSale'),
            exactPrice=" "
        )
        pred_df = data.get_data_as_data_frame()
        print(pred_df)
        print("Before Prediction")
        predict_recommend_pipeline = PredictRecommendPipeline()
        # print("Mid Prediction")
        result = predict_recommend_pipeline.predict(pred_df)
        # logging.info(f"{result} Prediction Result")
        print("after Prediction")
        print(pred_df, "DataFrame")
        print(result, "result")
        recommend = predict_recommend_pipeline.recommend(pred_df)
        # Mean of the recommender's distance column, shown as a truncated
        # whole-number percentage -- assumes distances lie in [0, 1];
        # TODO confirm against the Recommender implementation.
        similarity = (recommend["distances"].mean())*100
        similarity = trunc(similarity)
        similarity = str(similarity)+"%"
        # img_pipeline = ImageScrappingPipeline
        # recommend = img_pipeline.get_images(recommend)
        # logging.info(
        #     f" {recommend} Recommended properties with {similarity} % similarity")
        print(recommend)
        return render_template('home.html', PropType=propType, BHK=BHK, Furnish=Furnishing, Ros=RoS, result=result, dataset=recommend, similar=similarity)
168,361 | import pandas as pd
import numpy as np
from pyscript import Element
from js import document, window
import pickle
import warnings
def get_predictions():
    """PyScript browser handler: read the loan form from the DOM, encode the
    fields the way the model was trained, and display the prediction.

    Returns:
        str: "will" or "will not" (loan approval verdict), also written
        into the page's ``.result`` element.
    """
    # Pull the raw form values straight from the browser DOM.
    data = {
        "ApplicantIncome": document.querySelector("#ApplicantIncome").value,
        "CoapplicantIncome": document.querySelector("#CoapplicantIncome").value,
        "Credit_History": document.querySelector('input[name="Credit_History"]:checked').value,
        "Dependents": document.querySelector("#Dependents").value,
        "Education": document.querySelector('input[name="Education"]:checked').value,
        "Gender": document.querySelector('input[name="Gender"]:checked').value,
        "LoanAmount": document.querySelector("#LoanAmount").value,
        "Loan_Amount_Term": document.querySelector("#LoanAmountTerm").value,
        "Married": document.querySelector('input[name="Married"]:checked').value,
        "Property_Area": document.querySelector("#Property_Area").value,
        "Self_Employed": document.querySelector('input[name="Self_Employed"]:checked').value
    }
    # print("Data", data)
    # Label-encode the categorical answers to the integers the model expects.
    Gender = 1 if data["Gender"]=="male" else 0
    Married = 1 if data["Married"]=="yes" else 0
    # "3" here stands for "3 or more" dependents.
    if data["Dependents"]=="0":
        Dependents = 0
    elif data["Dependents"]=="1":
        Dependents = 1
    elif data["Dependents"]=="2":
        Dependents = 2
    else:
        Dependents = 3
    Education = 0 if data["Education"]=="Graduate" else 1
    Self_Employed = 1 if data["Self_Employed"]=="s_yes" else 0
    # Monetary amounts are log-transformed, matching the training features.
    LoanAmount = np.log(int(data["LoanAmount"]))
    Loan_Amount_Term = np.log(int(data["Loan_Amount_Term"]))
    Credit_History = 1 if data["Credit_History"]=="c_yes" else 0
    Property_Area = 0
    if data["Property_Area"]=="Rural":
        Property_Area = 0
    elif data["Property_Area"]=="Semiurban":
        Property_Area = 1
    else:
        Property_Area = 2
    TotalIncome = np.log(int(data["ApplicantIncome"])+int(data["CoapplicantIncome"]))
    # Feature order must match the model's training order.
    predictionData = [Gender,Married,Dependents,Education,Self_Employed,LoanAmount,Loan_Amount_Term,Credit_History,Property_Area,TotalIncome]
    result = loaded_model.predict([predictionData])
    if result[0]==1:
        result = "will"
    else:
        result = "will not"
    # Reveal the result panel and show the verdict.
    document.querySelector(".prediction").hidden = False
    document.querySelector(".result").innerText = result
    return result
168,362 | import os, sys
from os.path import dirname as up
from utils.common_libraries import *
from utils.constants import *
The provided code snippet includes necessary dependencies for implementing the `load_image_from_url` function. Write a Python function `def load_image_from_url(url: str, new_size: tuple = None)` to solve the following problem:
Loads an image from a given URL and optionally resizes it. :param url: The URL of the image to load. :type url: str :param new_size: The new size of the image, if resizing is desired. Defaults to None. :type new_size: tuple, optional :return: The loaded image, possibly resized. :rtype: PIL.Image.Image
Here is the function:
def load_image_from_url(url: str, new_size: tuple = None):
    """Fetch an image over HTTP and return it as a PIL image.

    :param url: Location of the image to download.
    :param new_size: Optional (width, height); when given, the image is
        resized before being returned.
    :return: The downloaded (and possibly resized) image.
    :rtype: PIL.Image.Image
    :raises requests.HTTPError: if the server replies with a non-2xx status.
    """
    response = requests.get(url)
    response.raise_for_status()  # fail loudly on a bad download
    image = Image.open(BytesIO(response.content))
    return image if new_size is None else image.resize(new_size)
168,363 | import os, sys
from os.path import dirname as up
from utils.common_libraries import *
from utils.constants import *
The provided code snippet includes necessary dependencies for implementing the `authenticate_google_service_account_credentials` function. Write a Python function `def authenticate_google_service_account_credentials()` to solve the following problem:
Authenticate with Google using a service account. Reads the Google application credentials from an environment variable and creates a service account credential object. Returns: service_account.Credentials: The service account credentials. Raises: RuntimeError: If authentication or logging of the authentication fails.
Here is the function:
def authenticate_google_service_account_credentials():
    """Validate the Google service-account credential file.

    Loads the JSON pointed to by ``GOOGLE_APPLICATION_CREDENTIALS`` and
    builds a credentials object from it as a smoke test.

    Raises:
        RuntimeError: if the variable is unset, the file is missing, or the
            JSON cannot be parsed.
    """
    credentials_path = GOOGLE_APPLICATION_CREDENTIALS
    if not credentials_path:
        raise RuntimeError(
            "Authentication failed: 'GOOGLE_APPLICATION_CREDENTIALS' environment variable is not set"
        )
    try:
        with open(credentials_path, "r") as source:
            # Constructing the credentials proves the file is usable.
            service_account.Credentials.from_service_account_info(json.load(source))
        logging.info("Successfully authenticated with Google service account.")
        print("Successfully authenticated with Google service account.")
    except (FileNotFoundError, json.JSONDecodeError) as e:
        raise RuntimeError(f"Authentication failed: {e}")
168,364 | import os, sys
from os.path import dirname as up
from utils.common_libraries import *
from utils.constants import *
The provided code snippet includes necessary dependencies for implementing the `configure_google_ai_api` function. Write a Python function `def configure_google_ai_api()` to solve the following problem:
Configures the Google AI API with the provided API key. Args: api_key (str): The API key for Google AI API. Returns: The configuration object for Google AI API.
Here is the function:
def configure_google_ai_api():
    """Configure the Google generative-AI client with the project API key.

    Lists the available models as a credentials smoke test (listing raises
    on a bad key) and returns whatever ``genai.configure`` produced.

    Raises:
        Exception: re-raised after logging if configuration or listing fails.
    """
    try:
        configuration = genai.configure(api_key=GOOGLE_AI_STUDIO)
        ## Printing the models as part of successful authentication otherwise will throw error
        for model in genai.list_models():
            print(model.name)
            print(model.supported_generation_methods)
        logging.info("Successfully configured Google AI API.")
        print("Successfully configured Google AI API.")
        return configuration
    except Exception as exc:
        logging.error(f"Failed to configure Google AI API: {exc}")
        raise
168,365 | from spacy.lang.en import English
from spacy import displacy
import re
import pandas as pd
import spacy
import random
nlp = English()
# spaCy v2 API: attach the rule-based sentence splitter to the pipeline.
nlp.add_pipe(nlp.create_pipe('sentencizer'))
# NOTE(review): `outer_list` and `doc` are not defined anywhere in this
# file -- presumably built earlier in the original notebook/script; confirm.
TRAIN_DATA = outer_list
print(outer_list)
# Dump every recognised entity with its character span and label.
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
# Render the entities as inline HTML (Jupyter-style visualisation).
spacy.displacy.render(doc, style="ent", page="true")
def train_spacy(data,iterations):
    """Train a blank spaCy (v2 API) NER model on annotated examples.

    Args:
        data: list of (text, {"entities": [(start, end, label), ...]}) pairs.
        iterations: number of passes over the (shuffled) training data.

    Returns:
        The trained ``Language`` pipeline containing only the NER component.
    """
    TRAIN_DATA = data
    nlp = spacy.blank('en')  # create blank Language class
    # create the built-in pipeline components and add them to the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner, last=True)
    # add labels
    # Register every entity label seen in the annotations before training.
    for _, annotations in TRAIN_DATA:
        for ent in annotations.get('entities'):
            ner.add_label(ent[2])
    # get names of other pipes to disable them during training
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    with nlp.disable_pipes(*other_pipes):  # only train NER
        optimizer = nlp.begin_training()
        for itn in range(iterations):
            print("Starting iteration " + str(itn))
            # Shuffle each epoch so the update order differs per pass.
            random.shuffle(TRAIN_DATA)
            losses = {}
            for text, annotations in TRAIN_DATA:
                nlp.update(
                    [text],  # batch of texts
                    [annotations],  # batch of annotations
                    drop=0.2,  # dropout - make it harder to memorise data
                    sgd=optimizer,  # callable to update weights
                    losses=losses)
            print(losses)
    return nlp
168,366 | from setuptools import find_packages, setup
def get_requirements(file_path):
    """Read a pip requirements file and return its entries as a list.

    The editable-install marker ``-e .`` (which only makes sense to
    ``pip install -r``) is dropped so setuptools never sees it.
    """
    with open(file_path) as req_file:
        packages = [line.replace("\n", "") for line in req_file.readlines()]
    if "-e ." in packages:
        packages.remove("-e .")
    return packages
168,367 | import os
import sys
import pickle
import dill
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from src.exception import CustomException
class CustomException(Exception):
    """Exception carrying a file/line annotated message built by
    ``error_message_detail`` from the active traceback."""

    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        detailed = error_message_detail(error_message, error_detail=error_detail)
        self.error_message = detailed

    def __str__(self):
        return self.error_message
def save_object(file_path, obj):
    """Serialise *obj* to *file_path* with dill, creating parent directories.

    Raises:
        CustomException: wrapping any filesystem or pickling error.
    """
    try:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'wb') as out_file:
            dill.dump(obj, out_file)
    except Exception as exc:
        raise CustomException(exc, sys)
168,368 | import os
import sys
import pickle
import dill
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from src.exception import CustomException
class CustomException(Exception):
    """Project-wide exception whose message embeds the failing script name
    and line number (formatted by ``error_message_detail``)."""

    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        # Pre-render the detailed message once, at raise time.
        self.error_message = error_message_detail(
            error_message, error_detail=error_detail)

    def __str__(self):
        # Shown when the exception is printed or logged.
        return self.error_message
def load_object(file_path):
    """Load and return a dill-serialised object from *file_path*.

    Raises:
        CustomException: wrapping any read or deserialisation error.
    """
    try:
        with open(file_path, 'rb') as in_file:
            return dill.load(in_file)
    except Exception as exc:
        raise CustomException(exc, sys)
168,369 | import os
import sys
import pickle
import dill
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from src.exception import CustomException
class CustomException(Exception):
    """Exception subtype used throughout the pipeline; its text is the
    script/line-annotated message produced by ``error_message_detail``."""

    def __init__(self, error_message, error_detail: sys):
        super().__init__(error_message)
        # error_detail is expected to be the `sys` module (traceback source).
        self.error_message = error_message_detail(
            error_message, error_detail=error_detail)

    def __str__(self):
        return self.error_message
def evaluate_model(X_train, y_train, X_valid, y_valid, models, params):
    """Grid-search each candidate model and score it on the validation split.

    Args:
        X_train, y_train: training features and target.
        X_valid, y_valid: validation features and target.
        models: dict mapping model name -> unfitted estimator.
        params: dict mapping model name -> grid-search parameter grid.

    Returns:
        dict: model name -> validation r2 score (with best grid params fit).

    Raises:
        CustomException: wrapping any failure during search, fit, or scoring.
    """
    try:
        report = {}
        print("Training Shape:", X_train.shape)
        print("Validation Shape:", X_valid.shape)
        for name, model in models.items():
            # Tune, then refit the estimator with the best grid params.
            gs = GridSearchCV(model, params[name], cv=3)
            gs.fit(X_train, y_train)
            model.set_params(**gs.best_params_)
            model.fit(X_train, y_train)
            # Only the validation r2 is reported (the original computed an
            # unused training score, dropped here).
            report[name] = r2_score(y_valid, model.predict(X_valid))
        return report
    except Exception as e:
        # Bug fix: CustomException requires the `sys` module as its second
        # argument (matches every other call site in this project).
        raise CustomException(e, sys)
168,370 | import sys
def error_message_details(error, error_details: sys):
    """Format *error* with the script name and line number taken from the
    active traceback (``error_details`` is the ``sys`` module)."""
    # exc_info() -> (type, value, traceback); only the traceback is needed.
    _, _, traceback_obj = error_details.exc_info()
    script_name = traceback_obj.tb_frame.f_code.co_filename
    return "Error occured in Python script [{0}] at line number [{1}] error message [{2}]".format(
        script_name, traceback_obj.tb_lineno, str(error)
    )
168,371 | from flask import Flask, request, render_template
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from src.pipeline.prediction_pipeline import InputData, PreditctPipeline
def index():
    """Flask view: serve the landing page."""
    return render_template('index.html')
168,372 | from flask import Flask, request, render_template
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from src.pipeline.prediction_pipeline import InputData, PreditctPipeline
class PreditctPipeline:
    """Runs the persisted preprocessor + regression model over new input.

    (Name kept as-is -- including the typo -- because callers import it.)
    """

    def __init__(self):
        pass

    def predict(self, features):
        """Scale *features* with the saved preprocessor and return the
        model's predictions.

        Raises:
            CustomException: wrapping any load/transform/predict failure.
        """
        try:
            model = load_object(
                file_path=os.path.join("artifacts", "model.pkl"))
            preprocessor = load_object(
                file_path=os.path.join("artifacts", "preprocessor.pkl"))
            print(model)
            return model.predict(preprocessor.transform(features))
        except Exception as exc:
            raise CustomException(exc, sys)
class InputData:
    """Insurance-form input for one person, convertible to a typed
    single-row DataFrame for the prediction pipeline."""

    def __init__(self,
                 age: int,
                 children: int,
                 bmi: float,
                 sex: str,
                 smoker: str,
                 region: str):
        (self.age, self.children, self.bmi,
         self.sex, self.smoker, self.region) = (age, children, bmi,
                                                sex, smoker, region)

    def get_data_as_dataFrame(self):
        """Return a one-row DataFrame with columns cast to their model dtypes.

        The web form delivers every field as a string, hence the explicit
        astype() conversion.

        Raises:
            CustomException: wrapping any construction/conversion failure.
        """
        try:
            frame = pd.DataFrame({
                "age": [self.age],
                "children": [self.children],
                "bmi": [self.bmi],
                "sex": [self.sex],
                "smoker": [self.smoker],
                "region": [self.region]
            })
            frame = frame.astype({
                'age': int,
                'children': int,
                'bmi': float,
                'sex': str,
                'smoker': str,
                'region': str
            })
            print(frame.info())
            return frame
        except Exception as exc:
            raise CustomException(exc, sys)
def predict_data():
    """Flask view: GET renders the insurance form, POST runs the prediction
    pipeline over the submitted fields and re-renders with the result."""
    if request.method == 'GET':
        return render_template('home.html')
    else:
        # Wrap the raw form strings; InputData casts them to model dtypes.
        data = InputData(
            age=request.form.get('age'),
            children=request.form.get('children'),
            bmi=request.form.get('bmi'),
            sex=request.form.get('sex'),
            smoker=request.form.get('smoker'),
            region=request.form.get('region'),
        )
        data_df = data.get_data_as_dataFrame()
        print(data_df)
        predict_pipeline = PreditctPipeline()
        predictions = predict_pipeline.predict(data_df)
        # Show only the first (and only) prediction, rounded for display.
        return render_template('home.html', results=round(predictions[0],2))
168,373 | import os
from wsgiref import simple_server
from flask import Flask, request, render_template, jsonify
from flask import Response
from flask_cors import CORS, cross_origin
from src.ML_pipelines.stage_03_prediction import prediction
import pickle
from src.training_Validation_Insertion import train_validation
def home():
    """Flask view: serve the input form page."""
    return render_template('index.html')
168,374 | import os
from wsgiref import simple_server
from flask import Flask, request, render_template, jsonify
from flask import Response
from flask_cors import CORS, cross_origin
from src.ML_pipelines.stage_03_prediction import prediction
import pickle
from src.training_Validation_Insertion import train_validation
model = pickle.load(open(filename, 'rb'))
def prediction(qty_slash_url, length_url, qty_dot_domain, qty_dot_directory, qty_hyphen_directory, file_length,
               qty_underline_directory, asn_ip, time_domain_activation, time_domain_expiration, ttl_hostname, model):
    """Classify one URL feature vector with *model*.

    Args:
        qty_slash_url ... ttl_hostname: the eleven scalar URL features, in
            the order the model expects.
        model: fitted classifier exposing ``predict``.

    Returns:
        The first (only) predicted label.
    """
    # Assemble the feature vector (fixes the original's shadowing of the
    # `list` builtin and drops the no-op `model = model`).
    features = [qty_slash_url, length_url, qty_dot_domain, qty_dot_directory,
                qty_hyphen_directory, file_length, qty_underline_directory,
                asn_ip, time_domain_activation, time_domain_expiration,
                ttl_hostname]
    # list = [40, 1000, 20, 15, 20, 10, 10, 10, 15, 10, 10]
    features = np.array(features).reshape(-1, 1)
    # NOTE(review): the scaler is fit on this single request's 11 values
    # (as a column vector), i.e. the features are standardised against each
    # other rather than against training statistics -- behavior preserved
    # from the original; confirm it matches how the model was trained.
    scaled = np.array(StandardScaler().fit_transform(features)).T
    result = model.predict(scaled)
    return result[0]
def predict():
    """Flask view: read the eleven URL features from the submitted form,
    classify the URL, and re-render the page with "malicious" or
    "legitimate"."""
    try:
        #Getting user input details
        # qty_slash_url = request.json["qty_slash_url"]
        qty_slash_url = request.form.get("qty_slash_url")
        # length_url = request.json["length_url"]
        length_url = request.form.get("length_url")
        # qty_dot_domain = request.json["qty_dot_domain"]
        qty_dot_domain = request.form.get("qty_dot_domain")
        # qty_dot_directory = request.json["qty_dot_directory"]
        qty_dot_directory = request.form.get("qty_dot_directory")
        #qty_hyphen_directory = request.json["qty_hyphen_directory"]
        qty_hyphen_directory = request.form.get("qty_hyphen_directory")
        #file_length = request.json["file_length"]
        file_length = request.form.get("file_length")
        #qty_underline_directory = request.json["qty_underline_directory"]
        qty_underline_directory = request.form.get("qty_underline_directory")
        # asn_ip = request.json["asn_ip"]
        asn_ip = request.form.get("asn_ip")
        #time_domain_activation = request.json["time_domain_activation"]
        time_domain_activation = request.form.get("time_domain_activation")
        #time_domain_expiration = request.json["time_domain_expiration"]
        time_domain_expiration = request.form.get("time_domain_expiration")
        #ttl_hostname = request.json["ttl_hostname"]
        ttl_hostname = request.form.get("ttl_hostname")
        # Run the classifier over the collected features.
        result=prediction(qty_slash_url,length_url,qty_dot_domain,qty_dot_directory,qty_hyphen_directory,file_length,
                          qty_underline_directory,asn_ip,time_domain_activation,time_domain_expiration,ttl_hostname,
                          model)
        #print(result)
        # Label 1 -> malicious, anything else -> legitimate.
        result1 = "malicious" if result==1 else "legitimate"
        # NOTE(review): `r` is built but the template response below is what
        # is actually returned -- confirm whether the JSON path is still needed.
        r =Response(response=result1, status=200,mimetype='application/json')
        return render_template("index.html",prediction_text="{}".format(result1))
        # return r
    except ValueError:
        return Response("Error Occurred! %s" % ValueError)
    except KeyError:
        return Response("Error Occurred! %s" % KeyError)
    except Exception as e:
        return Response("Error Occurred! %s" % e)
168,375 | import os
from wsgiref import simple_server
from flask import Flask, request, render_template, jsonify
from flask import Response
from flask_cors import CORS, cross_origin
from src.ML_pipelines.stage_03_prediction import prediction
import pickle
from src.training_Validation_Insertion import train_validation
class train_validation:
    """Orchestrates the full training workflow: raw-file validation, data
    transformation, database load, CSV export, preprocessing, and model
    selection/tuning. Every step is logged to Training_Main_Log.txt."""

    def __init__(self, path):
        # Collaborators for each stage of the pipeline.
        self.raw_data = Raw_Data_validation(path)
        self.dataTransform = dataTransform()
        self.dBOperation = dBOperation()
        # NOTE(review): this log handle is opened here and only closed at the
        # end of train_validation(); it leaks if that method is never called.
        self.file_object = open("Training_Logs/Training_Main_Log.txt", 'a+')
        self.log_writer = logger.App_Logger()

    def train_validation(self):
        """Run every pipeline stage in order, logging progress; re-raises
        any failure unchanged."""
        try:
            self.log_writer.log(self.file_object, 'Start of Validation on files!!')
            # extracting values from prediction schema
            column_names, noofcolumns = self.raw_data.valuesFromSchema()
            # validating column length in the file
            self.raw_data.validateColumnLength(noofcolumns)
            # validating if any column has all values missing
            self.raw_data.validateMissingValuesInWholeColumn()
            self.log_writer.log(self.file_object, "Raw Data Validation Complete!!")
            ########################################################################
            self.log_writer.log(self.file_object, "Starting Data Transforamtion!!")
            # below function adds quotes to the '?' values in some columns.
            self.dataTransform.addQuotesToStringValuesInColumn()
            # replacing blanks in the csv file with "Null" values to insert in table
            self.dataTransform.replaceMissingWithNull()
            self.log_writer.log(self.file_object, "DataTransformation Completed!!!")
            ########################################################################
            self.log_writer.log(self.file_object,
                                "Creating Training_Database and tables on the basis of given schema!!!")
            # create database with given name, if present open the connection! Create table with columns given in schema
            self.dBOperation.createTableDb(column_names)
            self.log_writer.log(self.file_object, "Table creation Completed !!")
            #######################################################################
            self.log_writer.log(self.file_object, "Insertion of Data into Table started!!!!")
            # insert csv files in the table
            self.dBOperation.insertIntoTableGoodData()
            self.log_writer.log(self.file_object, "Insertion in Table completed!!!")
            self.log_writer.log(self.file_object, "Deleting Good Data Folder!!!")
            #######################################################################
            # Delete the good data folder after loading files in table
            self.raw_data.deleteExistingGoodDataTrainingFolder()
            self.log_writer.log(self.file_object, "Good_Data folder deleted!!!")
            self.log_writer.log(self.file_object, "Moving bad files to Archive and deleting Bad_Data folder!!!")
            #######################################################################
            # Move the bad files to archive folder
            self.raw_data.moveBadFilesToArchiveBad()
            self.log_writer.log(self.file_object, "Bad files moved to archive!! Bad folder Deleted!!")
            self.log_writer.log(self.file_object, "Validation Operation completed!!")
            self.log_writer.log(self.file_object, "Extracting csv file from table")
            #export data in table to csvfile
            self.dBOperation.selectingDatafromtableintocsv()
            self.log_writer.log(self.file_object, "CSV file extracted from database")
            ########################################################################
            self.log_writer.log(self.file_object, "Data preprocessing started")
            # Resolve params.yaml relative to the package root (two levels up).
            p = Path(__file__).parents[2]
            path = str(p) + "\params.yaml"
            # NOTE(review): argparse is re-used here so the preprocessing stage
            # receives the same --config interface it has when run standalone.
            args = argparse.ArgumentParser()
            args.add_argument("--config", default=path)
            parsed_args = args.parse_args()
            data_preprocessing(config_path=parsed_args.config)
            self.log_writer.log(self.file_object, "Data preprocessing completed")
            ########################################################################
            self.log_writer.log(self.file_object, "Model selection and hyperparameter tuning started")
            model_selection_and_tuning(config_path=parsed_args.config)
            self.log_writer.log(self.file_object, "Model selection and hyperparameter tuning completed")
            ########################################################################
            self.file_object.close()
        except Exception as e:
            raise e
def train():
    """Flask endpoint: run the whole validation + training pipeline over
    the raw data folder and report the outcome as a Response."""
    try:
        raw_path = os.path.join("data/", "raw/")
        # Build the orchestrator and execute every stage in sequence.
        train_validation(raw_path).train_validation()
    except ValueError:
        return Response("Error Occurred! %s" % ValueError)
    except KeyError:
        return Response("Error Occurred! %s" % KeyError)
    except Exception as e:
        return Response("Error Occurred! %s" % e)
    return Response("Training successful!!")
168,376 | import argparse
from src.application_logging.logger import App_Logger
from src.utils.common_utils import read_params, save_model,find_best_model
import pandas as pd
from pathlib import Path
from sklearn import linear_model
from sklearn import ensemble
import sklearn.svm
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
from sklearn.metrics import confusion_matrix,classification_report,accuracy_score,roc_auc_score
class App_Logger:
    """Minimal logger: writes "YYYY-MM-DD/HH:MM:SS\\t\\tmessage" lines to a
    caller-supplied, already-open file object."""

    def __init__(self):
        pass

    def log(self, file_object, log_message):
        """Append one timestamped *log_message* line to *file_object*."""
        self.now = datetime.now()
        self.date = self.now.date()
        self.current_time = self.now.strftime("%H:%M:%S")
        line = str(self.date) + "/" + str(self.current_time) + "\t\t" + log_message + "\n"
        file_object.write(line)
def read_params(config_path: str) -> dict:
    """Load the YAML configuration at *config_path* and return it as a dict.

    Shared helper used by every ML pipeline stage (preprocessing, model
    selection, ...) to read params.yaml.
    """
    with open(config_path) as stream:
        return yaml.safe_load(stream)
def save_model(model, filename):
    """Persist *model* under ``models/<filename>/<filename>.pkl``.

    If a directory for *filename* already exists, the entire ``models/``
    tree is wiped first so no stale per-cluster models survive a retrain.

    Raises:
        Exception: re-raised from any filesystem or pickling failure.
    """
    model_directory = 'models/'
    try:
        target_dir = os.path.join(model_directory, filename)
        if os.path.isdir(target_dir):
            # Drop every previously saved model, then recreate this slot.
            shutil.rmtree(model_directory)
        os.makedirs(target_dir)
        with open(target_dir + '/' + filename + '.pkl', 'wb') as sink:
            pickle.dump(model, sink)
    except Exception as e:
        raise e
def find_best_model(X_train,y_train):
    """Run a 10-trial multi-objective Optuna study and return the best
    hyperparameters.

    Both objectives -- accuracy and roc_auc_score, as computed by the
    project's ``objective`` function -- are maximised using the NSGA-II
    genetic sampler.

    NOTE(review): ``objective(trial, X_train, y_train)`` is defined
    elsewhere in the project; confirm it returns a 2-tuple of scores.

    Args:
        X_train: training features.
        y_train: training target.

    Returns:
        dict: parameters of the first Pareto-optimal trial.

    Raises:
        Exception: re-raised unchanged from any Optuna/objective failure.
    """
    try:
        sampler = optuna.samplers.NSGAIISampler()
        # Bind the training data into the Optuna objective callback.
        func = lambda trial: objective(trial, X_train,y_train)
        study = optuna.create_study(directions=["maximize", "maximize"], sampler=sampler)
        study.optimize(func, n_trials=10)
        # best_trials is the Pareto front; take the first trial's params.
        trial = study.best_trials
        param = trial[0].params
        return param
    except Exception as e:
        raise e
class Path(PurePath):
def __new__(cls: Type[_P], *args: Union[str, _PathLike], **kwargs: Any) -> _P: ...
def __enter__(self: _P) -> _P: ...
def __exit__(
self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]
) -> Optional[bool]: ...
def cwd(cls: Type[_P]) -> _P: ...
def stat(self) -> os.stat_result: ...
def chmod(self, mode: int) -> None: ...
def exists(self) -> bool: ...
def glob(self: _P, pattern: str) -> Generator[_P, None, None]: ...
def group(self) -> str: ...
def is_dir(self) -> bool: ...
def is_file(self) -> bool: ...
if sys.version_info >= (3, 7):
def is_mount(self) -> bool: ...
def is_symlink(self) -> bool: ...
def is_socket(self) -> bool: ...
def is_fifo(self) -> bool: ...
def is_block_device(self) -> bool: ...
def is_char_device(self) -> bool: ...
def iterdir(self: _P) -> Generator[_P, None, None]: ...
def lchmod(self, mode: int) -> None: ...
def lstat(self) -> os.stat_result: ...
def mkdir(self, mode: int = ..., parents: bool = ..., exist_ok: bool = ...) -> None: ...
# Adapted from builtins.open
# Text mode: always returns a TextIOWrapper
def open(
self,
mode: OpenTextMode = ...,
buffering: int = ...,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
newline: Optional[str] = ...,
) -> TextIOWrapper: ...
# Unbuffered binary mode: returns a FileIO
def open(
self, mode: OpenBinaryMode, buffering: Literal[0], encoding: None = ..., errors: None = ..., newline: None = ...
) -> FileIO: ...
# Buffering is on: return BufferedRandom, BufferedReader, or BufferedWriter
def open(
self,
mode: OpenBinaryModeUpdating,
buffering: Literal[-1, 1] = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
) -> BufferedRandom: ...
def open(
self,
mode: OpenBinaryModeWriting,
buffering: Literal[-1, 1] = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
) -> BufferedWriter: ...
def open(
self,
mode: OpenBinaryModeReading,
buffering: Literal[-1, 1] = ...,
encoding: None = ...,
errors: None = ...,
newline: None = ...,
) -> BufferedReader: ...
# Buffering cannot be determined: fall back to BinaryIO
def open(
self, mode: OpenBinaryMode, buffering: int, encoding: None = ..., errors: None = ..., newline: None = ...
) -> BinaryIO: ...
# Fallback if mode is not specified
def open(
self,
mode: str,
buffering: int = ...,
encoding: Optional[str] = ...,
errors: Optional[str] = ...,
newline: Optional[str] = ...,
) -> IO[Any]: ...
    # --- Rename / remove / link stubs, gated on the Python version that
    # introduced or changed each API ---
    def owner(self) -> str: ...
    if sys.version_info >= (3, 9):
        # readlink() was added to pathlib in Python 3.9.
        def readlink(self: _P) -> _P: ...
    if sys.version_info >= (3, 8):
        # Since 3.8 rename()/replace() return the new Path instead of None.
        def rename(self: _P, target: Union[str, PurePath]) -> _P: ...
        def replace(self: _P, target: Union[str, PurePath]) -> _P: ...
    else:
        def rename(self, target: Union[str, PurePath]) -> None: ...
        def replace(self, target: Union[str, PurePath]) -> None: ...
    def resolve(self: _P, strict: bool = ...) -> _P: ...
    def rglob(self: _P, pattern: str) -> Generator[_P, None, None]: ...
    def rmdir(self) -> None: ...
    def symlink_to(self, target: Union[str, Path], target_is_directory: bool = ...) -> None: ...
    def touch(self, mode: int = ..., exist_ok: bool = ...) -> None: ...
    if sys.version_info >= (3, 8):
        # missing_ok parameter was added to unlink() in Python 3.8.
        def unlink(self, missing_ok: bool = ...) -> None: ...
    else:
        def unlink(self) -> None: ...
    # NOTE(review): `home` takes `cls`; upstream this is a classmethod — its
    # `@classmethod` decorator looks stripped by extraction. Confirm.
    def home(cls: Type[_P]) -> _P: ...
    # --- Convenience read/write and comparison stubs ---
    def absolute(self: _P) -> _P: ...
    def expanduser(self: _P) -> _P: ...
    def read_bytes(self) -> bytes: ...
    def read_text(self, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> str: ...
    def samefile(self, other_path: Union[str, bytes, int, Path]) -> bool: ...
    def write_bytes(self, data: bytes) -> int: ...
    def write_text(self, data: str, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> int: ...
    if sys.version_info >= (3, 8):
        # link_to() existed only in 3.8-3.11 (later replaced by hardlink_to).
        def link_to(self, target: Union[str, bytes, os.PathLike[str]]) -> None: ...
The provided code snippet includes necessary dependencies for implementing the `model_selection_and_tuning` function. Write a Python function `def model_selection_and_tuning(config_path)` to solve the following problem:
Method Name: model_selection_and_tuning Description: Selects the best model from all classification models, based on the best accuracy and AUC-ROC score, and performs hyperparameter tuning Output: Best model selected for each cluster On Failure: Raise Exception Written By: Saurabh Naik Version: 1.0 Revisions: None
Here is the function:
def model_selection_and_tuning(config_path):
    """
    Select the best classification model, train it, evaluate it, and save it.

    Reads the preprocessed train/test CSVs named in the params file, asks
    ``find_best_model`` for the winning classifier and its tuned
    hyperparameters (compared on accuracy and AUC-ROC), fits that model,
    logs the test metrics, and persists the model via ``save_model``.

    Parameters
    ----------
    config_path : str
        Path to the params.yaml file understood by ``read_params``.

    Raises
    ------
    ValueError
        If ``find_best_model`` returns an unrecognised classifier name.
    Exception
        Any other failure is logged to ModelSelectionAndTuningLog.txt and
        re-raised.

    Written By: Saurabh Naik
    Version: 1.0
    Revisions: None
    """
    logger = App_Logger()
    project_root = Path(__file__).parents[2]
    log_path = str(project_root) + "/src/Training_Logs/ModelSelectionAndTuningLog.txt"
    # `with` guarantees the log handle is closed on every path; the original
    # opened it twice (success and error paths) and never closed it.
    with open(log_path, "a+") as file:
        try:
            logger.log(file, "Model Selection and tuning process Started ")
            # Data locations and target column come from params.yaml.
            config = read_params(config_path)
            target_col = config["base"]["target_col"]
            train_data_path = config["preprocessed_data"]["train_data"]
            test_data_path = config["preprocessed_data"]["test_data"]
            logger.log(file, 'Reading training data')
            clustered_train_data = pd.read_csv(str(project_root) + str(train_data_path))
            logger.log(file, 'Reading test data')
            clustered_test_data = pd.read_csv(str(project_root) + str(test_data_path))
            logger.log(file, 'Reading of train and test data done successfully!! Now model selection begins')
            # Split features and target for selection / evaluation.
            X_train1 = clustered_train_data.drop(labels=target_col, axis=1)
            X_test1 = clustered_test_data.drop(labels=target_col, axis=1)
            y_train1 = clustered_train_data[[target_col]]
            y_test1 = clustered_test_data[[target_col]]
            param = find_best_model(X_train1, y_train1)
            logger.log(file, 'Hyperparameter tuning is completed and after comparing auc_roc_score '
                             'and accuracy score of models we going to select model having hyperparameters: ' + str(param))
            # Everything except the 'classifier' key is a hyperparameter.
            args1 = {key: val for key, val in param.items() if key != 'classifier'}
            # Dispatch table replaces the if/elif chain. An unknown classifier
            # name now fails with a clear ValueError instead of the original's
            # NameError on an undefined `classifier_obj`.
            dispatch = {
                'LogReg': (linear_model.LogisticRegression, 'LogisticRegression'),
                'RandomForest': (sklearn.ensemble.RandomForestClassifier, 'RandomForest'),
                'SVC': (sklearn.svm.SVC, 'SVC'),
                'NaiveBayes': (sklearn.naive_bayes.GaussianNB, 'NaiveBayes'),
                'decision-tree': (sklearn.tree.DecisionTreeClassifier, 'DecisionTree'),
                'xgb': (xgb.XGBClassifier, 'XGBoost'),
            }
            try:
                model_cls, model_name = dispatch[param['classifier']]
            except KeyError:
                raise ValueError(
                    "Unknown classifier returned by find_best_model: %r" % param['classifier'])
            classifier_obj = model_cls(**args1)
            classifier_obj.fit(X_train1, y_train1)
            logger.log(file, 'model trained Successfully!! Now testing begins')
            y_pred = classifier_obj.predict(X_test1)
            # Log the full evaluation suite on the held-out test split.
            logger.log(file, 'confusion_matrix ' + str(confusion_matrix(y_test1, y_pred)))
            logger.log(file, 'accuracy_score ' + str(accuracy_score(y_test1, y_pred)))
            logger.log(file, 'roc_auc_score ' + str(roc_auc_score(y_test1, y_pred)))
            logger.log(file, 'classification_report ' + str(classification_report(y_test1, y_pred)))
            logger.log(file, 'model tested successfully!!')
            logger.log(file, 'Starting to Save ML model')
            save_model(classifier_obj, model_name)
            logger.log(file, model_name + ' Model Saved')
            logger.log(file, 'Model Selection and tuning Completed')
        except Exception as e:
            logger.log(file, "error encountered due to: %s" % e)
            raise
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.