id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
153,611 | import os
import gradio as gr
import re
import uuid
from PIL import Image, ImageDraw, ImageOps
import numpy as np
import argparse
import inspect
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import torch
from PIL import Image, ImageDraw, ImageOps
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
if __name__ == '__main__':
    # CLI: --load maps comma-separated "ClassName_device" pairs to the models
    # that should be loaded, e.g. "VisualQuestionAnswering_cuda:0".
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', type=str, default="VisualQuestionAnswering_cuda:0")
    parser.add_argument('--port', type=int, default=1015)
    args = parser.parse_args()
    # Parse "Name_device" entries into {class name: device string}.
    load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')}
    # NOTE(review): build_chatbot_tools and ConversationBot must already be
    # defined when this block runs; in this snippet they appear below it —
    # confirm the ordering in the full file.
    tools = build_chatbot_tools(load_dict)
    bot = ConversationBot(tools)
    # Gradio UI: a main chat window and an auxiliary window side by side.
    with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
        with gr.Row():
            chatbot = gr.Chatbot(elem_id="chatbot", label="CATchat").style(height=1000,scale=0.5)
            auxwindow = gr.Chatbot(elem_id="chatbot", label="Aux Window").style(height=1000,scale=0.5)
        # Per-window conversation state, carried across interactions.
        state = gr.State([])
        aux_state = gr.State([])
        with gr.Row():
            with gr.Column(scale=0.7):
                txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(
                    container=False)
            with gr.Column(scale=0.15, min_width=0):
                clear = gr.Button("Clear")
            with gr.Column(scale=0.15, min_width=0):
                btn = gr.UploadButton("Upload", file_types=["image"])
        # Wire events: text submit (then clear the textbox), image upload,
        # and the Clear button resetting memory, both windows, and both states.
        txt.submit(bot.run_text, [txt, state, aux_state], [chatbot, state, aux_state, auxwindow])
        txt.submit(lambda: "", None, txt)
        btn.upload(bot.run_image, [btn, state, txt, aux_state], [chatbot, state, txt, aux_state, auxwindow])
        clear.click(bot.memory.clear)
        clear.click(lambda: [], None, chatbot)
        clear.click(lambda: [], None, auxwindow)
        clear.click(lambda: [], None, state)
        clear.click(lambda: [], None, aux_state)
    demo.launch(server_name="0.0.0.0", server_port=args.port, share=True)
def build_chatbot_tools(load_dict):
    """Instantiate the requested foundation models and wrap their inference
    methods as LangChain tools.

    Args:
        load_dict: mapping of {class name: device string}, e.g.
            {"VisualQuestionAnswering": "cuda:0"}.

    Returns:
        list[Tool]: one tool per ``inference*`` method found on a loaded model.
    """
    print(f"Initializing ChatBot, load_dict={load_dict}")
    models = {}
    # Load Basic Foundation Models: classes are looked up by name in globals().
    for class_name, device in load_dict.items():
        models[class_name] = globals()[class_name](device=device)
    # Load Template Foundation Models: classes flagged template_model=True are
    # built from already-loaded models whose class names match their __init__
    # parameter names.
    for class_name, module in globals().items():
        if getattr(module, 'template_model', False):
            template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'}
            loaded_names = set([type(e).__name__ for e in models.values()])
            if template_required_names.issubset(loaded_names):
                models[class_name] = globals()[class_name](
                    **{name: models[name] for name in template_required_names})
    tools = []
    for instance in models.values():
        # Every method named inference* becomes a tool; the method is expected
        # to carry .name and .description attributes — confirm in model classes.
        for e in dir(instance):
            if e.startswith('inference'):
                func = getattr(instance, e)
                tools.append(Tool(name=func.name, description=func.description, func=func))
    return tools
153,612 | import asyncio
from gpt_researcher.utils.llm import *
from gpt_researcher.scraper.scraper import Scraper
from gpt_researcher.master.prompts import *
import json
The provided code snippet includes necessary dependencies for implementing the `get_retriever` function. Write a Python function `def get_retriever(retriever)` to solve the following problem:
Gets the retriever Args: retriever: retriever name Returns: retriever: Retriever class
Here is the function:
def get_retriever(retriever):
"""
Gets the retriever
Args:
retriever: retriever name
Returns:
retriever: Retriever class
"""
match retriever:
case "tavily":
from gpt_researcher.retrievers import TavilySearch
retriever = TavilySearch
case "tavily_news":
from gpt_researcher.retrievers import TavilyNews
retriever = TavilyNews
case "google":
from gpt_researcher.retrievers import GoogleSearch
retriever = GoogleSearch
case "searx":
from gpt_researcher.retrievers import SearxSearch
retriever = SearxSearch
case "serpapi":
raise NotImplementedError("SerpApiSearch is not fully implemented yet.")
from gpt_researcher.retrievers import SerpApiSearch
retriever = SerpApiSearch
case "googleSerp":
from gpt_researcher.retrievers import SerperSearch
retriever = SerperSearch
case "duckduckgo":
from gpt_researcher.retrievers import Duckduckgo
retriever = Duckduckgo
case "BingSearch":
from gpt_researcher.retrievers import BingSearch
retriever = BingSearch
case _:
raise Exception("Retriever not found.")
return retriever | Gets the retriever Args: retriever: retriever name Returns: retriever: Retriever class |
153,613 | import asyncio
from gpt_researcher.utils.llm import *
from gpt_researcher.scraper.scraper import Scraper
from gpt_researcher.master.prompts import *
import json
The provided code snippet includes necessary dependencies for implementing the `choose_agent` function. Write a Python function `async def choose_agent(query, cfg)` to solve the following problem:
Chooses the agent automatically Args: query: original query cfg: Config Returns: agent: Agent name agent_role_prompt: Agent role prompt
Here is the function:
async def choose_agent(query, cfg):
    """Automatically pick a research agent for the given query.

    Args:
        query: original user query.
        cfg: Config supplying model and provider settings.

    Returns:
        tuple[str, str]: (agent name, agent role prompt). A generic default
        pair is returned when the LLM call or JSON parsing fails.
    """
    fallback = (
        "Default Agent",
        "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.",
    )
    try:
        raw = await create_chat_completion(
            model=cfg.smart_llm_model,
            messages=[
                {"role": "system", "content": f"{auto_agent_instructions()}"},
                {"role": "user", "content": f"task: {query}"},
            ],
            temperature=0,
            llm_provider=cfg.llm_provider,
        )
        parsed = json.loads(raw)
        return parsed["server"], parsed["agent_role_prompt"]
    except Exception:
        # Any failure (network, bad JSON, missing keys) degrades gracefully.
        return fallback
153,614 | import asyncio
from gpt_researcher.utils.llm import *
from gpt_researcher.scraper.scraper import Scraper
from gpt_researcher.master.prompts import *
import json
The provided code snippet includes necessary dependencies for implementing the `get_sub_queries` function. Write a Python function `async def get_sub_queries(query, agent_role_prompt, cfg)` to solve the following problem:
Gets the sub queries Args: query: original query agent_role_prompt: agent role prompt cfg: Config Returns: sub_queries: List of sub queries
Here is the function:
async def get_sub_queries(query, agent_role_prompt, cfg):
    """
    Gets the sub queries to research for the given query.

    Args:
        query: original query.
        agent_role_prompt: system prompt describing the agent persona.
        cfg: Config supplying model, provider, and max_iterations settings.

    Returns:
        sub_queries: List of sub queries (parsed from the model's JSON reply).
    """
    # Fall back to a single search query when max_iterations is unset/zero.
    max_research_iterations = cfg.max_iterations if cfg.max_iterations else 1
    response = await create_chat_completion(
        model=cfg.smart_llm_model,
        messages=[
            {"role": "system", "content": f"{agent_role_prompt}"},
            {"role": "user", "content": generate_search_queries_prompt(query, max_iterations=max_research_iterations)}],
        temperature=0,
        llm_provider=cfg.llm_provider
    )
    # The model is instructed to reply with a JSON list of query strings;
    # a malformed reply will raise here and propagate to the caller.
    sub_queries = json.loads(response)
    return sub_queries
153,615 | import asyncio
from gpt_researcher.utils.llm import *
from gpt_researcher.scraper.scraper import Scraper
from gpt_researcher.master.prompts import *
import json
class Scraper:
    """
    Scraper class to extract the content from a list of links.

    NOTE(review): relies on ``requests``, ``functools.partial``,
    ``ThreadPoolExecutor`` and the concrete scraper classes
    (PyMuPDFScraper, ArxivScraper, ...) being imported elsewhere in this
    module — confirm against the full file.
    """
    def __init__(self, urls, user_agent, scraper):
        """
        Initialize the Scraper class.

        Args:
            urls: list of urls to scrape.
            user_agent: User-Agent header value applied to all requests.
            scraper: name of the default scraper backend (a SCRAPER_CLASSES key).
        """
        self.urls = urls
        self.session = requests.Session()
        self.session.headers.update({"User-Agent": user_agent})
        self.scraper = scraper

    def run(self):
        """
        Extracts the content from the links in parallel (up to 20 threads).

        Returns:
            list[dict]: {"url", "raw_content"} entries for links that yielded content.
        """
        partial_extract = partial(self.extract_data_from_link, session=self.session)
        with ThreadPoolExecutor(max_workers=20) as executor:
            contents = executor.map(partial_extract, self.urls)
        # Drop links whose extraction failed (raw_content is None).
        res = [content for content in contents if content["raw_content"] is not None]
        return res

    def extract_data_from_link(self, link, session):
        """
        Extracts the data from a single link; any failure yields raw_content=None.
        """
        content = ""
        try:
            Scraper = self.get_scraper(link)  # local name shadows the class
            scraper = Scraper(link, session)
            content = scraper.scrape()
            # Treat very short extractions as failures (likely boilerplate).
            if len(content) < 100:
                return {"url": link, "raw_content": None}
            return {"url": link, "raw_content": content}
        except Exception as e:
            return {"url": link, "raw_content": None}

    def get_scraper(self, link):
        """
        Determine the appropriate scraper class for a link.

        PDF links use PyMuPDFScraper, arxiv.org links use ArxivScraper, and
        everything else falls back to the backend named by self.scraper.

        Args:
            link: URL of a webpage or PDF file.

        Returns:
            The scraper class to use for this link.

        Raises:
            Exception: if the resolved key is not in SCRAPER_CLASSES.
        """
        SCRAPER_CLASSES = {
            "pdf": PyMuPDFScraper,
            "arxiv": ArxivScraper,
            "newspaper": NewspaperScraper,
            "bs": BeautifulSoupScraper,
            "web_base_loader": WebBaseLoaderScraper,
        }
        scraper_key = None
        if link.endswith(".pdf"):
            scraper_key = "pdf"
        elif "arxiv.org" in link:
            scraper_key = "arxiv"
        else:
            scraper_key = self.scraper
        scraper_class = SCRAPER_CLASSES.get(scraper_key)
        if scraper_class is None:
            raise Exception("Scraper not found.")
        return scraper_class
The provided code snippet includes necessary dependencies for implementing the `scrape_urls` function. Write a Python function `def scrape_urls(urls, cfg=None)` to solve the following problem:
Scrapes the urls Args: urls: List of urls cfg: Config (optional) Returns: text: str
Here is the function:
def scrape_urls(urls, cfg=None):
    """
    Scrapes the urls.

    Args:
        urls: List of urls.
        cfg: Config (optional); supplies user_agent and scraper backend.

    Returns:
        list[dict]: one {"url", "raw_content"} entry per successfully scraped
        url; empty list when scraping fails entirely.
    """
    content = []
    user_agent = cfg.user_agent if cfg else "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"
    # Bug fix: cfg.scraper was read unconditionally, so calling with the
    # documented default cfg=None always raised inside the try and returned [].
    # Default to the BeautifulSoup backend when no config is given.
    scraper_backend = cfg.scraper if cfg else "bs"
    try:
        content = Scraper(urls, user_agent, scraper_backend).run()
    except Exception as e:
        print(f"{Fore.RED}Error in scrape_urls: {e}{Style.RESET_ALL}")
    return content
153,616 | import asyncio
from gpt_researcher.utils.llm import *
from gpt_researcher.scraper.scraper import Scraper
from gpt_researcher.master.prompts import *
import json
async def summarize_url(query, raw_data, agent_role_prompt, cfg):
    """
    Summarizes raw page text with respect to the query.

    Args:
        query: the research query guiding the summary.
        raw_data: raw scraped text to summarize.
        agent_role_prompt: system prompt describing the agent persona.
        cfg: Config supplying model/provider settings.

    Returns:
        summary: str — the model's summary, or "" if the completion failed.
    """
    summary = ""
    try:
        summary = await create_chat_completion(
            model=cfg.fast_llm_model,
            messages=[
                {"role": "system", "content": f"{agent_role_prompt}"},
                {"role": "user", "content": f"{generate_summary_prompt(query, raw_data)}"}],
            temperature=0,
            llm_provider=cfg.llm_provider
        )
    except Exception as e:
        # Best-effort: log the error and fall through to the empty summary.
        print(f"{Fore.RED}Error in summarize: {e}{Style.RESET_ALL}")
    return summary
async def stream_output(type, output, websocket=None, logging=True):
    """
    Streams output to the websocket (if any) and optionally echoes it locally.

    Args:
        type: message type tag forwarded to the client (e.g. "logs").
        output: the text payload.
        websocket: optional websocket; when absent the output is only printed.
        logging: when True, always echo the output to stdout as well.

    Returns:
        None
    """
    should_print = logging or not websocket
    if should_print:
        print(output)
    if websocket:
        await websocket.send_json({"type": type, "output": output})
The provided code snippet includes necessary dependencies for implementing the `summarize` function. Write a Python function `async def summarize(query, content, agent_role_prompt, cfg, websocket=None)` to solve the following problem:
Asynchronously summarizes a list of URLs. Args: query (str): The search query. content (list): List of dictionaries with 'url' and 'raw_content'. agent_role_prompt (str): The role prompt for the agent. cfg (object): Configuration object. Returns: list: A list of dictionaries with 'url' and 'summary'.
Here is the function:
async def summarize(query, content, agent_role_prompt, cfg, websocket=None):
    """
    Asynchronously summarizes a list of URLs.

    Args:
        query (str): The search query.
        content (list): List of dictionaries with 'url' and 'raw_content'.
        agent_role_prompt (str): The role prompt for the agent.
        cfg (object): Configuration object.
        websocket: optional websocket for streaming progress logs.

    Returns:
        list: A list of dictionaries with 'url' and 'summary'.
    """
    # Function to handle each summarization task for a chunk
    async def handle_task(url, chunk):
        summary = await summarize_url(query, chunk, agent_role_prompt, cfg)
        if summary:
            await stream_output("logs", f"🌐 Summarizing url: {url}", websocket)
            await stream_output("logs", f"📃 {summary}", websocket)
        return url, summary

    # Function to split raw content into chunks of 10,000 words
    def chunk_content(raw_content, chunk_size=10000):
        words = raw_content.split()
        for i in range(0, len(words), chunk_size):
            yield ' '.join(words[i:i+chunk_size])

    # Process URLs one by one, but summarize each URL's chunks in parallel.
    concatenated_summaries = []
    for item in content:
        url = item['url']
        raw_content = item['raw_content']
        # Create tasks for all chunks of the current URL
        chunk_tasks = [handle_task(url, chunk) for chunk in chunk_content(raw_content)]
        # Run chunk tasks concurrently
        chunk_summaries = await asyncio.gather(*chunk_tasks)
        # Aggregate and concatenate the (non-empty) summaries for this URL.
        summaries = [summary for _, summary in chunk_summaries if summary]
        concatenated_summary = ' '.join(summaries)
        concatenated_summaries.append({'url': url, 'summary': concatenated_summary})
    return concatenated_summaries
153,617 | import asyncio
from gpt_researcher.utils.llm import *
from gpt_researcher.scraper.scraper import Scraper
from gpt_researcher.master.prompts import *
import json
The provided code snippet includes necessary dependencies for implementing the `generate_report` function. Write a Python function `async def generate_report(query, context, agent_role_prompt, report_type, websocket, cfg)` to solve the following problem:
generates the final report Args: query: context: agent_role_prompt: report_type: websocket: cfg: Returns: report:
Here is the function:
async def generate_report(query, context, agent_role_prompt, report_type, websocket, cfg):
    """
    Generates the final report.

    Args:
        query: the research question.
        context: aggregated research context for the report.
        agent_role_prompt: system prompt describing the agent persona.
        report_type: report flavor key understood by get_report_by_type.
        websocket: websocket used for streaming the report to the client.
        cfg: Config supplying model, format, and token settings.

    Returns:
        report: str — the generated report ("" if the completion failed).
    """
    # Pick the prompt builder that matches the requested report type.
    generate_prompt = get_report_by_type(report_type)
    report = ""
    try:
        report = await create_chat_completion(
            model=cfg.smart_llm_model,
            messages=[
                {"role": "system", "content": f"{agent_role_prompt}"},
                {"role": "user", "content": f"{generate_prompt(query, context, cfg.report_format, cfg.total_words)}"}],
            temperature=0,
            llm_provider=cfg.llm_provider,
            stream=True,
            websocket=websocket,
            max_tokens=cfg.smart_token_limit
        )
    except Exception as e:
        # Best-effort: log the error and return the empty report.
        print(f"{Fore.RED}Error in generate_report: {e}{Style.RESET_ALL}")
    return report
153,618 | from datetime import datetime
The provided code snippet includes necessary dependencies for implementing the `generate_search_queries_prompt` function. Write a Python function `def generate_search_queries_prompt(question, max_iterations=3)` to solve the following problem:
Generates the search queries prompt for the given question. Args: question (str): The question to generate the search queries prompt for Returns: str: The search queries prompt for the given question
Here is the function:
def generate_search_queries_prompt(question, max_iterations=3):
    """Generates the search queries prompt for the given question.

    Args:
        question (str): The question to generate the search queries prompt for.
        max_iterations (int): Number of search queries to request.

    Returns:
        str: The search queries prompt for the given question.
    """
    # Bug fix: the first two segments were concatenated with no separator,
    # producing '..."question"Use the current date...'; a newline now splits them.
    return f'Write {max_iterations} google search queries to search online that form an objective opinion from the following: "{question}"\n' \
           f'Use the current date if needed: {datetime.now().strftime("%B %d, %Y")}.\n' \
           f'You must respond with a list of strings in the following format: ["query 1", "query 2", "query 3"].'
153,619 | from datetime import datetime
def generate_report_prompt(question, context, report_format="apa", total_words=1000):
    """Generates the report prompt for the given question and research summary.

    Args:
        question (str): The question to generate the report prompt for.
        context (str): The research summary to generate the report prompt for.
        report_format (str): Citation/report format name (e.g. "apa").
        total_words (int): Minimum word count requested of the model.

    Returns:
        str: The report prompt for the given question and research summary.
    """
    return f'Information: """{context}"""\n\n' \
           f'Using the above information, answer the following' \
           f' query or task: "{question}" in a detailed report --' \
           " The report should focus on the answer to the query, should be well structured, informative," \
           f" in depth and comprehensive, with facts and numbers if available and a minimum of {total_words} words.\n" \
           "You should strive to write the report as long as you can using all relevant and necessary information provided.\n" \
           "You must write the report with markdown syntax.\n " \
           f"Use an unbiased and journalistic tone. \n" \
           "You MUST determine your own concrete and valid opinion based on the given information. Do NOT deter to general and meaningless conclusions.\n" \
           f"You MUST write all used source urls at the end of the report as references, and make sure to not add duplicated sources, but only one reference for each.\n" \
           f"You MUST write the report in {report_format} format.\n " \
           f"Cite search results using inline notations. Only cite the most \
        relevant results that answer the query accurately. Place these citations at the end \
        of the sentence or paragraph that reference them.\n"\
           f"Please do your best, this is very important to my career. " \
           f"Assume that the current date is {datetime.now().strftime('%B %d, %Y')}"
def generate_resource_report_prompt(question, context, report_format="apa", total_words=1000):
    """Generates the resource report prompt for the given question and research summary.

    Args:
        question (str): The question to generate the resource report prompt for.
        context (str): The research summary to generate the resource report prompt for.
        report_format (str): Accepted for signature parity with the other
            prompt builders; not used in the prompt text.
        total_words (int): Accepted for signature parity; NOTE(review): the
            prompt hard-codes a 700-word minimum instead of using this value.

    Returns:
        str: The resource report prompt for the given question and research summary.
    """
    return f'"""{context}"""\n\nBased on the above information, generate a bibliography recommendation report for the following' \
           f' question or topic: "{question}". The report should provide a detailed analysis of each recommended resource,' \
           ' explaining how each source can contribute to finding answers to the research question.\n' \
           'Focus on the relevance, reliability, and significance of each source.\n' \
           'Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax.\n' \
           'Include relevant facts, figures, and numbers whenever available.\n' \
           'The report should have a minimum length of 700 words.\n' \
           'You MUST include all relevant source urls.'
def generate_custom_report_prompt(query_prompt, context, report_format="apa", total_words=1000):
    """Build a custom report prompt: the quoted context followed by the
    caller-supplied prompt.

    report_format and total_words are accepted for signature parity with the
    other prompt builders but are not used here.
    """
    return '"{}"\n\n{}'.format(context, query_prompt)
def generate_outline_report_prompt(question, context, report_format="apa", total_words=1000):
    """Generates the outline report prompt for the given question and research summary.

    Args:
        question (str): The question to generate the outline report prompt for.
        context (str): The research summary to generate the outline report prompt for.
        report_format (str): Accepted for signature parity; not used in the prompt text.
        total_words (int): Accepted for signature parity; NOTE(review): the
            prompt hard-codes a 1,200-word minimum instead of using this value.

    Returns:
        str: The outline report prompt for the given question and research summary.
    """
    return f'"""{context}""" Using the above information, generate an outline for a research report in Markdown syntax' \
           f' for the following question or topic: "{question}". The outline should provide a well-structured framework' \
           ' for the research report, including the main sections, subsections, and key points to be covered.' \
           ' The research report should be detailed, informative, in-depth, and a minimum of 1,200 words.' \
           ' Use appropriate Markdown syntax to format the outline and ensure readability.'
def get_report_by_type(report_type):
    """Return the prompt-builder function for the given report type.

    Args:
        report_type: one of 'research_report', 'resource_report',
            'outline_report', 'custom_report'.

    Returns:
        The corresponding prompt-builder function.

    Raises:
        KeyError: if report_type is not a known type.
    """
    report_type_mapping = {
        'research_report': generate_report_prompt,
        'resource_report': generate_resource_report_prompt,
        'outline_report': generate_outline_report_prompt,
        'custom_report': generate_custom_report_prompt
    }
    return report_type_mapping[report_type]
153,620 | from datetime import datetime
The provided code snippet includes necessary dependencies for implementing the `generate_summary_prompt` function. Write a Python function `def generate_summary_prompt(query, data)` to solve the following problem:
Generates the summary prompt for the given question and text. Args: question (str): The question to generate the summary prompt for text (str): The text to generate the summary prompt for Returns: str: The summary prompt for the given question and text
Here is the function:
def generate_summary_prompt(query, data):
    """Build the summarization prompt for a chunk of scraped text.

    Args:
        query (str): The task/query the summary should address.
        data (str): The raw text to summarize.

    Returns:
        str: The summary prompt for the given query and text.
    """
    task_instructions = (
        f' Using the above text, summarize it based on the following task or query: "{query}".\n'
        ' If the query cannot be answered using the text, YOU MUST summarize the text in short.\n'
        ' Include all factual information such as numbers, stats, quotes, etc if available. '
    )
    return f'{data}\n' + task_instructions
153,621 | import asyncio
import datetime
from typing import List, Dict
from fastapi import WebSocket
from gpt_researcher.master.agent import GPTResearcher
class GPTResearcher:
    """
    GPT Researcher: orchestrates agent selection, web research, and report
    generation for a single query.
    """
    def __init__(self, query, report_type="research_report", source_urls=None, config_path=None, websocket=None):
        """
        Initialize the GPT Researcher class.

        Args:
            query: the research question.
            report_type: kind of report to produce (e.g. "research_report").
            source_urls: optional fixed list of urls to research instead of searching.
            config_path: optional path to a JSON config file.
            websocket: optional websocket for streaming progress to a client.
        """
        self.query = query
        self.agent = None  # chosen agent name, set by run()
        self.role = None   # chosen agent role prompt, set by run()
        self.report_type = report_type
        self.websocket = websocket
        self.cfg = Config(config_path)
        self.retriever = get_retriever(self.cfg.retriever)
        self.context = []
        self.source_urls = source_urls
        self.memory = Memory(self.cfg.embedding_provider)
        self.visited_urls = set()

    async def run(self):
        """
        Runs the GPT Researcher end to end.

        Returns:
            The generated report text.
        """
        print(f"🔎 Running research for '{self.query}'...")
        # Generate Agent
        self.agent, self.role = await choose_agent(self.query, self.cfg)
        await stream_output("logs", self.agent, self.websocket)
        # If specified, the researcher will use the given urls as the context for the research.
        if self.source_urls:
            self.context = await self.get_context_by_urls(self.source_urls)
        else:
            self.context = await self.get_context_by_search(self.query)
        # Write Research Report
        if self.report_type == "custom_report":
            self.role = self.cfg.agent_role if self.cfg.agent_role else self.role
        await stream_output("logs", f"✍️ Writing {self.report_type} for research task: {self.query}...", self.websocket)
        report = await generate_report(query=self.query, context=self.context,
                                       agent_role_prompt=self.role, report_type=self.report_type,
                                       websocket=self.websocket, cfg=self.cfg)
        # Bug fix: time.sleep(2) blocked the event loop inside this coroutine
        # (and `time` was not imported in this module); yield to the loop
        # asynchronously instead.
        await asyncio.sleep(2)
        return report

    async def get_context_by_urls(self, urls):
        """
        Scrapes and compresses the context from the given urls.
        """
        new_search_urls = await self.get_new_urls(urls)
        await stream_output("logs",
                            f"🧠 I will conduct my research based on the following urls: {new_search_urls}...",
                            self.websocket)
        scraped_sites = scrape_urls(new_search_urls, self.cfg)
        return await self.get_similar_content_by_query(self.query, scraped_sites)

    async def get_context_by_search(self, query):
        """
        Generates the context for the research task by searching the query
        and scraping the results.

        Returns:
            context: List of context strings, one per sub-query.
        """
        context = []
        # Generate sub-queries, always including the original query itself.
        sub_queries = await get_sub_queries(query, self.role, self.cfg) + [query]
        await stream_output("logs",
                            f"🧠 I will conduct my research based on the following queries: {sub_queries}...",
                            self.websocket)
        # Run each sub-query sequentially and collect its compressed context.
        for sub_query in sub_queries:
            await stream_output("logs", f"\n🔎 Running research for '{sub_query}'...", self.websocket)
            scraped_sites = await self.scrape_sites_by_query(sub_query)
            content = await self.get_similar_content_by_query(sub_query, scraped_sites)
            await stream_output("logs", f"📃 {content}", self.websocket)
            context.append(content)
        return context

    async def get_new_urls(self, url_set_input):
        """Gets the not-yet-visited urls from the given url set and marks them visited.

        Args: url_set_input (set[str]): The url set to get the new urls from.
        Returns: list[str]: The new urls from the given url set.
        """
        new_urls = []
        for url in url_set_input:
            if url not in self.visited_urls:
                await stream_output("logs", f"✅ Adding source url to research: {url}\n", self.websocket)
                self.visited_urls.add(url)
                new_urls.append(url)
        return new_urls

    async def scrape_sites_by_query(self, sub_query):
        """
        Searches a sub-query and scrapes the resulting (previously unvisited) urls.

        Args:
            sub_query: the search query to run.

        Returns:
            The scraped content results.
        """
        # Get Urls
        retriever = self.retriever(sub_query)
        search_results = retriever.search(max_results=self.cfg.max_search_results_per_query)
        new_search_urls = await self.get_new_urls([url.get("href") for url in search_results])
        # Scrape Urls
        # await stream_output("logs", f"📝Scraping urls {new_search_urls}...\n", self.websocket)
        await stream_output("logs", f"🤔Researching for relevant information...\n", self.websocket)
        scraped_content_results = scrape_urls(new_search_urls, self.cfg)
        return scraped_content_results

    async def get_similar_content_by_query(self, query, pages):
        """Compress the scraped pages down to the content most relevant to the query."""
        await stream_output("logs", f"📃 Getting relevant content based on query: {query}...", self.websocket)
        # Summarize Raw Data
        context_compressor = ContextCompressor(documents=pages, embeddings=self.memory.get_embeddings())
        # Run Tasks
        return context_compressor.get_context(query, max_results=8)
The provided code snippet includes necessary dependencies for implementing the `run_agent` function. Write a Python function `async def run_agent(task, report_type, websocket)` to solve the following problem:
Run the agent.
Here is the function:
async def run_agent(task, report_type, websocket):
    """Run the research agent for a task and stream total run time to the client."""
    start = datetime.datetime.now()
    # No custom JSON config file path is supplied; GPTResearcher uses defaults.
    researcher = GPTResearcher(query=task, report_type=report_type, source_urls=None,
                               config_path=None, websocket=websocket)
    report = await researcher.run()
    elapsed = datetime.datetime.now() - start
    await websocket.send_json({"type": "logs", "output": f"\nTotal run time: {elapsed}\n"})
    return report
153,622 | from __future__ import annotations
import json
from fastapi import WebSocket
from langchain.adapters import openai as lc_openai
from colorama import Fore, Style
from typing import Optional
from gpt_researcher.master.prompts import auto_agent_instructions
async def create_chat_completion(
    messages: list,  # type: ignore
    model: Optional[str] = None,
    temperature: float = 1.0,
    max_tokens: Optional[int] = None,
    llm_provider: Optional[str] = None,
    stream: Optional[bool] = False,
    websocket: WebSocket | None = None,
) -> str:
    """Create a chat completion via the configured LLM provider.

    Args:
        messages (list[dict[str, str]]): The messages to send to the chat completion.
        model (str, optional): The model to use. Defaults to None (required; see below).
        temperature (float, optional): Sampling temperature. Defaults to 1.0.
        max_tokens (int, optional): The max tokens to use. Defaults to None.
        stream (bool, optional): Whether to stream the response. Defaults to False.
        llm_provider (str, optional): The LLM provider to use.
        websocket (WebSocket): The websocket used in the current request.

    Returns:
        str: The response from the chat completion.

    Raises:
        ValueError: if model is None or max_tokens exceeds 8001.
    """
    # validate input
    if model is None:
        raise ValueError("Model cannot be None")
    if max_tokens is not None and max_tokens > 8001:
        raise ValueError(f"Max tokens cannot be more than 8001, but got {max_tokens}")
    # create response
    # NOTE(review): this loop returns on its first iteration, so no retry
    # ever occurs and the error/raise path below is unreachable — confirm
    # whether failures of send_chat_completion_request were meant to be retried.
    for attempt in range(10):  # maximum of 10 attempts
        response = await send_chat_completion_request(
            messages, model, temperature, max_tokens, stream, llm_provider, websocket
        )
        return response
    logging.error("Failed to get response from OpenAI API")
    raise RuntimeError("Failed to get response from OpenAI API")
import logging
def auto_agent_instructions():
    """Return the system prompt that instructs the LLM to choose a research
    agent ("server") and role prompt for a task, replying as JSON.

    NOTE(review): the example JSON in the prompt contains minor key-quoting
    typos ("agent_role_prompt: and "server:) — presumably tolerated by the
    model, but confirm before relying on strict parsing.
    """
    return """
This task involves researching a given topic, regardless of its complexity or the availability of a definitive answer. The research is conducted by a specific server, defined by its type and role, with each server requiring distinct instructions.
Agent
The server is determined by the field of the topic and the specific name of the server that could be utilized to research the topic provided. Agents are categorized by their area of expertise, and each server type is associated with a corresponding emoji.
examples:
task: "should I invest in apple stocks?"
response:
{
"server": "💰 Finance Agent",
"agent_role_prompt: "You are a seasoned finance analyst AI assistant. Your primary goal is to compose comprehensive, astute, impartial, and methodically arranged financial reports based on provided data and trends."
}
task: "could reselling sneakers become profitable?"
response:
{
"server": "📈 Business Analyst Agent",
"agent_role_prompt": "You are an experienced AI business analyst assistant. Your main objective is to produce comprehensive, insightful, impartial, and systematically structured business reports based on provided business data, market trends, and strategic analysis."
}
task: "what are the most interesting sites in Tel Aviv?"
response:
{
"server: "🌍 Travel Agent",
"agent_role_prompt": "You are a world-travelled AI tour guide assistant. Your main purpose is to draft engaging, insightful, unbiased, and well-structured travel reports on given locations, including history, attractions, and cultural insights."
}
"""
The provided code snippet includes necessary dependencies for implementing the `choose_agent` function. Write a Python function `def choose_agent(smart_llm_model: str, llm_provider: str, task: str) -> dict` to solve the following problem:
Determines what server should be used Args: task (str): The research question the user asked smart_llm_model (str): the llm model to be used llm_provider (str): the llm provider used Returns: server - The server that will be used agent_role_prompt (str): The prompt for the server
Here is the function:
def choose_agent(smart_llm_model: str, llm_provider: str, task: str) -> dict:
    """Determines what server (agent) should be used for a research task.

    Args:
        task (str): The research question the user asked.
        smart_llm_model (str): the llm model to be used.
        llm_provider (str): the llm provider used.

    Returns:
        dict: {"server": ..., "agent_role_prompt": ...}; a default pair is
        returned if the completion or JSON parsing fails.
    """
    try:
        # NOTE(review): the create_chat_completion visible elsewhere in this
        # file is async; called without await it would hand json.loads a
        # coroutine and always fall through to the fallback below. This
        # variant presumably targets a synchronous implementation from its
        # own module — confirm which create_chat_completion is imported here.
        response = create_chat_completion(
            model=smart_llm_model,
            messages=[
                {"role": "system", "content": f"{auto_agent_instructions()}"},
                {"role": "user", "content": f"task: {task}"}],
            temperature=0,
            llm_provider=llm_provider
        )
        agent_dict = json.loads(response)
        print(f"Agent: {agent_dict.get('server')}")
        return agent_dict
    except Exception as e:
        print(f"{Fore.RED}Error in choose_agent: {e}{Style.RESET_ALL}")
        return {"server": "Default Agent",
                "agent_role_prompt": "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text."}
153,623 | import urllib
from typing import Dict, Generator, Optional
from selenium.webdriver.remote.webdriver import WebDriver
from config import Config
from gpt_researcher_old.retriever.llm_utils import create_chat_completion
import os
from md2pdf.core import md2pdf
def write_to_file(filename: str, text: str) -> None:
def md_to_pdf(input_file, output_file):
async def write_md_to_pdf(task: str, path: str, text: str) -> str:
    """Write *text* to <path>/<task>.md, render it to PDF, and return the
    URL-quoted PDF path.

    Args:
        task (str): Base file name (typically the research task).
        path (str): Output directory.
        text (str): Markdown content to persist.

    Returns:
        str: URL-encoded path to the generated PDF.
    """
    file_path = f"{path}/{task}"
    write_to_file(f"{file_path}.md", text)
    md_to_pdf(f"{file_path}.md", f"{file_path}.pdf")
    print(f"{task} written to {file_path}.pdf")
    # Quote the path so it is safe to embed in a URL (task names may contain
    # spaces or other reserved characters).
    encoded_file_path = urllib.parse.quote(f"{file_path}.pdf")
    return encoded_file_path
153,624 | import urllib
from typing import Dict, Generator, Optional
from selenium.webdriver.remote.webdriver import WebDriver
from config import Config
from gpt_researcher_old.retriever.llm_utils import create_chat_completion
import os
from md2pdf.core import md2pdf
def read_txt_files(directory):
    """Concatenate the contents of every .txt file in *directory*.

    Args:
        directory: Path of the directory to scan (non-recursive; listing
            order is whatever os.listdir returns).

    Returns:
        str: Contents of all .txt files, each followed by a newline; empty
        string when there are none.
    """
    all_text = ''
    for filename in os.listdir(directory):
        if filename.endswith('.txt'):
            # Decode as UTF-8 explicitly: the original relied on the platform
            # default encoding, which breaks on UTF-8 files under Windows.
            with open(os.path.join(directory, filename), 'r', encoding='utf-8') as file:
                all_text += file.read() + '\n'
    return all_text
153,625 | from __future__ import annotations
import logging
import asyncio
from pathlib import Path
from sys import platform
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from fastapi import WebSocket
from scraping import scrape_skills, processing as summary
from scraping.processing.html import extract_hyperlinks, format_hyperlinks
from concurrent.futures import ThreadPoolExecutor
from scraping.processing.text import summarize_text
executor = ThreadPoolExecutor()
def scrape_text_with_selenium(selenium_web_browser: str, user_agent: str, url: str) -> tuple[WebDriver, str]:
    """Scrape text from a website using selenium
    Args:
        url (str): The url of the website to scrape
        selenium_web_browser (str): The web browser used to scrape ("chrome",
            "safari" or "firefox"; any other value raises KeyError below)
        user_agent (str): The user agent used when scraping
    Returns:
        Tuple[WebDriver, str]: The webdriver (left open for the caller to
            reuse/close) and the text scraped from the website
    """
    logging.getLogger("selenium").setLevel(logging.CRITICAL)
    options_available = {
        "chrome": ChromeOptions,
        "safari": SafariOptions,
        "firefox": FirefoxOptions,
    }
    # KeyError here means an unsupported browser name was configured.
    options = options_available[selenium_web_browser]()
    options.add_argument(f"user-agent={user_agent}")
    options.add_argument("--headless")
    options.add_argument("--enable-javascript")
    if selenium_web_browser == "firefox":
        driver = webdriver.Firefox(options=options)
    elif selenium_web_browser == "safari":
        # Requires a bit more setup on the users end
        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = webdriver.Safari(options=options)
    else:
        # Default branch is Chrome; extra flags keep headless Chrome stable
        # in containerized Linux environments.
        if platform == "linux" or platform == "linux2":
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--remote-debugging-port=9222")
        options.add_argument("--no-sandbox")
        options.add_experimental_option("prefs", {"download_restrictions": 3})
        driver = webdriver.Chrome(options=options)
    print(f"scraping url {url}...")
    driver.get(url)
    # Wait (up to 10s) for the page body to exist before reading content.
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, "body"))
    )
    # check if url is a pdf or arxiv link
    if url.endswith(".pdf"):
        text = scrape_skills.scrape_pdf_with_pymupdf(url)
    elif "arxiv" in url:
        # parse the document number from the url
        doc_num = url.split("/")[-1]
        text = scrape_skills.scrape_pdf_with_arxiv(doc_num)
    else:
        # Get the HTML content directly from the browser's DOM
        page_source = driver.execute_script("return document.body.outerHTML;")
        soup = BeautifulSoup(page_source, "html.parser")
        for script in soup(["script", "style"]):
            script.extract()
        # text = soup.get_text()
        # NOTE(review): get_text() is not defined in this snippet — presumably
        # a module-level helper replacing soup.get_text(); confirm it is in scope.
        text = get_text(soup)
        # Normalize whitespace: strip each line, split into phrases, and
        # rejoin only the non-empty pieces, one per line.
        lines = (line.strip() for line in text.splitlines())
        chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
        text = "\n".join(chunk for chunk in chunks if chunk)
    return driver, text
def add_header(driver: WebDriver) -> None:
    """Inject the progress-overlay script into the page loaded in *driver*.

    Args:
        driver (WebDriver): The webdriver to run the script in.

    Returns:
        None
    """
    # NOTE(review): FILE_DIR is not defined in this snippet — presumably a
    # module-level constant pointing at the package directory; confirm.
    # Use a context manager so the file handle is closed promptly; the
    # original leaked the handle returned by open().
    with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_js:
        driver.execute_script(overlay_js.read())
def summarize_text(
    fast_llm_model: str, summary_token_limit: int, llm_provider: str, url: str, text: str, question: str, driver: Optional[WebDriver] = None
) -> str:
    """Summarize *text* chunk by chunk with the fast LLM, then merge the
    chunk summaries into one final, question-focused summary.

    Args:
        fast_llm_model (str): The fast LLM model e.g gpt3.5-turbo-16k
        summary_token_limit (int): Max tokens for each summary completion
        llm_provider (str): The llm provider
        url (str): The url the text came from (used for logging/scrolling)
        text (str): The text to summarize
        question (str): The question the summary should answer
        driver (Optional[WebDriver]): When given, the page is scrolled in
            step with the chunk being summarized (visual progress feedback)

    Returns:
        str: The final summary of the text, or an error string for empty input
    """
    if not text:
        return "Error: No text to summarize"
    summaries = []
    chunks = list(split_text(text))
    # Guard: split_text() may yield nothing (e.g. whitespace-only input);
    # the original then divided by len(chunks) and raised ZeroDivisionError.
    if not chunks:
        return "Error: No text to summarize"
    scroll_ratio = 1 / len(chunks)
    print(f"Summarizing url: {url} with total chunks: {len(chunks)}")
    for i, chunk in enumerate(chunks):
        if driver:
            # Scroll proportionally so the user can watch progress in the browser.
            scroll_to_percentage(driver, scroll_ratio * i)
        messages = [create_message(chunk, question)]
        summary = create_chat_completion(
            model=fast_llm_model,
            messages=messages,
            max_tokens=summary_token_limit,
            llm_provider=llm_provider
        )
        summaries.append(summary)
    # Second pass: condense the per-chunk summaries into a single answer.
    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]
    final_summary = create_chat_completion(
        model=fast_llm_model,
        messages=messages,
        max_tokens=summary_token_limit,
        llm_provider=llm_provider,
    )
    print("Final summary length: ", len(combined_summary))
    print(final_summary)
    return final_summary
The provided code snippet includes necessary dependencies for implementing the `async_browse` function. Write a Python function `async def async_browse( selenium_web_browser: str, user_agent: str, fast_llm_model: str, summary_token_limit: str, llm_provider: str, url: str, question: str, websocket: WebSocket ) -> str` to solve the following problem:
Browse a website and return the answer and links to the user Args: selenium_web_browser (str): The web browser used for scraping user_agent (str): The user agent used when scraping url (str): The url of the website to browse question (str): The question asked by the user websocket (WebSocketManager): The websocket manager Returns: str: The answer and links to the user
Here is the function:
async def async_browse(
    selenium_web_browser: str,
    user_agent: str,
    fast_llm_model: str,
    summary_token_limit: str,
    llm_provider: str,
    url: str, question: str,
    websocket: WebSocket
) -> str:
    """Scrape *url* in worker threads and report a question-focused summary.

    Progress messages go to *websocket* when one is supplied, otherwise to
    stdout.

    Args:
        selenium_web_browser (str): The web browser used for scraping
        user_agent (str): The user agent used when scraping
        fast_llm_model (str): Model used for summarization
        summary_token_limit (str): Token budget for each summary completion
        llm_provider (str): The llm provider
        url (str): The url of the website to browse
        question (str): The question asked by the user
        websocket (WebSocket): Progress channel; may be None

    Returns:
        str: Summary of the page prefixed with its source url, or an error
        string when scraping/summarizing fails.
    """
    loop = asyncio.get_event_loop()
    pool = ThreadPoolExecutor(max_workers=8)
    print(f"Scraping url {url} with question {question}")

    browse_note = f"🔎 Browsing the {url} for relevant about: {question}..."
    if websocket:
        await websocket.send_json({"type": "logs", "output": browse_note})
    else:
        print(browse_note)

    try:
        # Selenium and the LLM calls are blocking, so run each stage off-loop.
        driver, text = await loop.run_in_executor(
            pool, scrape_text_with_selenium, selenium_web_browser, user_agent, url
        )
        await loop.run_in_executor(pool, add_header, driver)
        summary_text = await loop.run_in_executor(
            pool, summarize_text, fast_llm_model, summary_token_limit, llm_provider, url, text, question, driver
        )

        gathered_note = f"📝 Information gathered from url {url}: {summary_text}"
        if websocket:
            await websocket.send_json({"type": "logs", "output": gathered_note})
        else:
            print(gathered_note)

        return f"Information gathered from url {url}: {summary_text}"
    except Exception as e:
        print(f"An error occurred while processing the url {url}: {e}")
        return f"Error processing the url {url}: {e}"
153,626 | from __future__ import annotations
import logging
import asyncio
from pathlib import Path
from sys import platform
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.safari.options import Options as SafariOptions
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from fastapi import WebSocket
from scraping import scrape_skills, processing as summary
from scraping.processing.html import extract_hyperlinks, format_hyperlinks
from concurrent.futures import ThreadPoolExecutor
from scraping.processing.text import summarize_text
def scrape_text_with_selenium(selenium_web_browser: str, user_agent: str, url: str) -> tuple[WebDriver, str]:
    """Open *url* in the requested headless browser and extract its text.

    Args:
        selenium_web_browser (str): "chrome", "safari" or "firefox"; any
            other value raises KeyError
        user_agent (str): The user agent used when scraping
        url (str): The url of the website to scrape

    Returns:
        tuple[WebDriver, str]: The (still-open) webdriver and the text
        scraped from the website
    """
    logging.getLogger("selenium").setLevel(logging.CRITICAL)

    # Build the browser-specific Options object; KeyError means an
    # unsupported browser name was configured.
    browser_options = {
        "chrome": ChromeOptions,
        "safari": SafariOptions,
        "firefox": FirefoxOptions,
    }[selenium_web_browser]()
    browser_options.add_argument(f"user-agent={user_agent}")
    browser_options.add_argument("--headless")
    browser_options.add_argument("--enable-javascript")

    if selenium_web_browser == "firefox":
        driver = webdriver.Firefox(options=browser_options)
    elif selenium_web_browser == "safari":
        # Safari needs one-time manual setup on the user's machine:
        # https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = webdriver.Safari(options=browser_options)
    else:
        # Chrome is the default; extra flags keep headless Chrome stable
        # in containerized Linux environments.
        if platform in ("linux", "linux2"):
            browser_options.add_argument("--disable-dev-shm-usage")
            browser_options.add_argument("--remote-debugging-port=9222")
        browser_options.add_argument("--no-sandbox")
        browser_options.add_experimental_option("prefs", {"download_restrictions": 3})
        driver = webdriver.Chrome(options=browser_options)

    print(f"scraping url {url}...")
    driver.get(url)
    # Wait (up to 10s) for the page body to exist before reading content.
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, "body"))
    )

    if url.endswith(".pdf"):
        text = scrape_skills.scrape_pdf_with_pymupdf(url)
    elif "arxiv" in url:
        # The arXiv document number is the last path segment of the url.
        text = scrape_skills.scrape_pdf_with_arxiv(url.split("/")[-1])
    else:
        # Read the rendered DOM (not the raw response) so JS-built content
        # is included, then strip script/style noise.
        rendered_html = driver.execute_script("return document.body.outerHTML;")
        soup = BeautifulSoup(rendered_html, "html.parser")
        for noise in soup(["script", "style"]):
            noise.extract()
        # NOTE(review): get_text() is not defined in this snippet —
        # presumably a module-level helper replacing soup.get_text(); confirm.
        text = get_text(soup)
        # Normalize whitespace: strip each line, split into phrases, keep
        # only the non-empty pieces, one per line.
        stripped_lines = (raw.strip() for raw in text.splitlines())
        phrases = (piece.strip() for raw in stripped_lines for piece in raw.split(" "))
        text = "\n".join(piece for piece in phrases if piece)

    return driver, text
def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:
    """Collect and format the hyperlinks on the page currently loaded.

    Args:
        driver (WebDriver): Webdriver holding the page to harvest links from
        url (str): Base url used to resolve relative hrefs

    Returns:
        list[str]: The formatted hyperlinks scraped from the website
    """
    dom = BeautifulSoup(driver.page_source, "html.parser")
    # Drop script/style subtrees so only real document links remain.
    for noise in dom(["script", "style"]):
        noise.extract()
    return format_hyperlinks(extract_hyperlinks(dom, url))
def close_browser(driver: WebDriver) -> None:
    """Close the browser
    Args:
        driver (WebDriver): The webdriver to close
    Returns:
        None
    """
    # quit() ends the whole browser process (all windows/sessions),
    # not just the current tab.
    driver.quit()
def add_header(driver: WebDriver) -> None:
    """Inject the progress-overlay script into the page loaded in *driver*.

    Args:
        driver (WebDriver): The webdriver to run the script in.

    Returns:
        None
    """
    # NOTE(review): FILE_DIR is not defined in this snippet — presumably a
    # module-level constant pointing at the package directory; confirm.
    # Use a context manager so the file handle is closed promptly; the
    # original leaked the handle returned by open().
    with open(f"{FILE_DIR}/js/overlay.js", "r") as overlay_js:
        driver.execute_script(overlay_js.read())
def summarize_text(
    fast_llm_model: str, summary_token_limit: int, llm_provider: str, url: str, text: str, question: str, driver: Optional[WebDriver] = None
) -> str:
    """Summarize *text* chunk by chunk with the fast LLM, then merge the
    chunk summaries into one final, question-focused summary.

    Args:
        fast_llm_model (str): The fast LLM model e.g gpt3.5-turbo-16k
        summary_token_limit (int): Max tokens for each summary completion
        llm_provider (str): The llm provider
        url (str): The url the text came from (used for logging/scrolling)
        text (str): The text to summarize
        question (str): The question the summary should answer
        driver (Optional[WebDriver]): When given, the page is scrolled in
            step with the chunk being summarized (visual progress feedback)

    Returns:
        str: The final summary of the text, or an error string for empty input
    """
    if not text:
        return "Error: No text to summarize"
    summaries = []
    chunks = list(split_text(text))
    # Guard: split_text() may yield nothing (e.g. whitespace-only input);
    # the original then divided by len(chunks) and raised ZeroDivisionError.
    if not chunks:
        return "Error: No text to summarize"
    scroll_ratio = 1 / len(chunks)
    print(f"Summarizing url: {url} with total chunks: {len(chunks)}")
    for i, chunk in enumerate(chunks):
        if driver:
            # Scroll proportionally so the user can watch progress in the browser.
            scroll_to_percentage(driver, scroll_ratio * i)
        messages = [create_message(chunk, question)]
        summary = create_chat_completion(
            model=fast_llm_model,
            messages=messages,
            max_tokens=summary_token_limit,
            llm_provider=llm_provider
        )
        summaries.append(summary)
    # Second pass: condense the per-chunk summaries into a single answer.
    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]
    final_summary = create_chat_completion(
        model=fast_llm_model,
        messages=messages,
        max_tokens=summary_token_limit,
        llm_provider=llm_provider,
    )
    print("Final summary length: ", len(combined_summary))
    print(final_summary)
    return final_summary
The provided code snippet includes necessary dependencies for implementing the `browse_website` function. Write a Python function `def browse_website(url: str, question: str) -> tuple[str, WebDriver]` to solve the following problem:
Browse a website and return the answer and links to the user Args: url (str): The url of the website to browse question (str): The question asked by the user Returns: Tuple[str, WebDriver]: The answer and links to the user and the webdriver
Here is the function:
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
    """Browse a website and return the answer and links to the user
    Args:
        url (str): The url of the website to browse
        question (str): The question asked by the user
    Returns:
        Tuple[str, WebDriver]: The answer and links to the user and the webdriver
    """
    if not url:
        return "A URL was not specified, cancelling request to browse website.", None
    # NOTE(review): the scrape_text_with_selenium defined above takes
    # (selenium_web_browser, user_agent, url) — calling it with url alone
    # would raise TypeError. Likewise summary.summarize_text is called with
    # (url, text, question, driver), a different signature than the local
    # summarize_text. Presumably this function pairs with an older module
    # variant — confirm which one is imported here.
    driver, text = scrape_text_with_selenium(url)
    add_header(driver)
    summary_text = summary.summarize_text(url, text, question, driver)
    links = scrape_links_with_selenium(driver, url)
    # Limit links to 5
    if len(links) > 5:
        links = links[:5]
    # write_to_file('research-{0}.txt'.format(url), summary_text + "\nSource Links: {0}\n\n".format(links))
    close_browser(driver)
    # NOTE(review): the driver returned here has already been quit by
    # close_browser — callers cannot use it; verify this is intentional.
    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver
153,627 | from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
import json
import os
from gpt_researcher.utils.websocket_manager import WebSocketManager
from .utils import write_md_to_pdf
# FastAPI app serving the research frontend.
app = FastAPI()
# "/site" exposes the whole frontend directory; "/static" its asset subtree.
app.mount("/site", StaticFiles(directory="./frontend"), name="site")
app.mount("/static", StaticFiles(directory="./frontend/static"), name="static")
def startup_event():
    """Ensure the outputs directory exists and serve it as static files.

    Returns:
        None
    """
    # exist_ok avoids the check-then-create race of the original
    # isdir()/makedirs() pair (another process could create the directory
    # between the two calls and crash startup).
    os.makedirs("outputs", exist_ok=True)
    app.mount("/outputs", StaticFiles(directory="outputs"), name="outputs")
153,628 | from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
import json
import os
from gpt_researcher.utils.websocket_manager import WebSocketManager
from .utils import write_md_to_pdf
templates = Jinja2Templates(directory="./frontend")
async def read_root(request: Request):
    """Serve the frontend index page with an empty report placeholder."""
    return templates.TemplateResponse('index.html', {"request": request, "report": None})
153,629 | from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from pydantic import BaseModel
import json
import os
from gpt_researcher.utils.websocket_manager import WebSocketManager
from .utils import write_md_to_pdf
manager = WebSocketManager()
async def write_md_to_pdf(text: str) -> str:
    """Converts Markdown text to a PDF file and returns the file path.
    Args:
        text (str): Markdown text to convert.
    Returns:
        str: The encoded file path of the generated PDF, or "" when the
        Markdown-to-PDF conversion fails.
    """
    # NOTE(review): uuid and urllib are not imported in this snippet (only
    # json and os are) — confirm they are in scope at module level.
    # Random hex name avoids collisions between concurrent reports.
    task = uuid.uuid4().hex
    file_path = f"outputs/{task}"
    # NOTE(review): write_to_file is awaited here, implying an async helper;
    # a sync write_to_file exists elsewhere in this codebase — confirm which
    # one is imported.
    await write_to_file(f"{file_path}.md", text)
    try:
        md2pdf(f"{file_path}.pdf",
               md_content=None,
               md_file_path=f"{file_path}.md",
               css_file_path=None,
               base_url=None)
        print(f"Report written to {file_path}.pdf")
    except Exception as e:
        # Conversion failure is non-fatal: report it and return an empty path.
        print(f"Error in converting Markdown to PDF: {e}")
        return ""
    # Quote so the path is safe to embed in a URL.
    encoded_file_path = urllib.parse.quote(f"{file_path}.pdf")
    return encoded_file_path
async def websocket_endpoint(websocket: WebSocket):
    """Accept a websocket connection and serve research requests until the
    client disconnects.

    Protocol: the client sends a text frame beginning with "start" followed
    by a JSON payload containing "task" and "report_type"; the server
    streams the report, converts it to PDF and replies with the file path.
    """
    await manager.connect(websocket)
    try:
        while True:
            data = await websocket.receive_text()
            if data.startswith("start"):
                # data[6:] assumes the exact prefix 'start ' (6 chars
                # including a separator) — a bare 'start{json}' frame would
                # lose its first character. TODO confirm the client always
                # sends the separator.
                json_data = json.loads(data[6:])
                task = json_data.get("task")
                report_type = json_data.get("report_type")
                if task and report_type:
                    report = await manager.start_streaming(task, report_type, websocket)
                    path = await write_md_to_pdf(report)
                    await websocket.send_json({"type": "path", "output": path})
                else:
                    # Malformed request: log and keep the connection open.
                    print("Error: not enough parameters provided.")
    except WebSocketDisconnect:
        await manager.disconnect(websocket)
153,630 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
_T = TypeVar("_T")

def __dataclass_transform__(
    *,
    eq_default: bool = True,
    order_default: bool = False,
    kw_only_default: bool = False,
    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (),
) -> Callable[[_T], _T]:
    """Runtime no-op stand-in for the dataclass_transform decorator factory.

    The flag arguments exist only for signature compatibility with type
    checkers; the returned decorator hands the decorated object back
    unchanged.
    """
    def _identity(decorated: _T) -> _T:
        return decorated

    return _identity
153,631 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
NoArgAnyCallable = Callable[[], Any]
def Field(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    exclude: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    include: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    const: Optional[bool] = None,
    gt: Optional[float] = None,
    ge: Optional[float] = None,
    lt: Optional[float] = None,
    le: Optional[float] = None,
    multiple_of: Optional[float] = None,
    max_digits: Optional[int] = None,
    decimal_places: Optional[int] = None,
    min_items: Optional[int] = None,
    max_items: Optional[int] = None,
    unique_items: Optional[bool] = None,
    min_length: Optional[int] = None,
    max_length: Optional[int] = None,
    allow_mutation: bool = True,
    regex: Optional[str] = None,
    discriminator: Optional[str] = None,
    repr: bool = True,
    primary_key: Union[bool, UndefinedType] = Undefined,
    foreign_key: Any = Undefined,
    unique: Union[bool, UndefinedType] = Undefined,
    nullable: Union[bool, UndefinedType] = Undefined,
    index: Union[bool, UndefinedType] = Undefined,
    sa_type: Union[Type[Any], UndefinedType] = Undefined,
    sa_column_args: Union[Sequence[Any], UndefinedType] = Undefined,
    sa_column_kwargs: Union[Mapping[str, Any], UndefinedType] = Undefined,
    schema_extra: Optional[Dict[str, Any]] = None,
) -> Any:
    """Typing-only signature for declaring a field with individual
    SQLAlchemy column options (primary_key, foreign_key, ...) but no
    pre-built sa_column.  Presumably decorated with @overload upstream —
    the decorator is not visible in this snippet; the implementation is
    the non-stub Field() definition.
    """
    ...
153,632 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
NoArgAnyCallable = Callable[[], Any]
def Field(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    exclude: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    include: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    const: Optional[bool] = None,
    gt: Optional[float] = None,
    ge: Optional[float] = None,
    lt: Optional[float] = None,
    le: Optional[float] = None,
    multiple_of: Optional[float] = None,
    max_digits: Optional[int] = None,
    decimal_places: Optional[int] = None,
    min_items: Optional[int] = None,
    max_items: Optional[int] = None,
    unique_items: Optional[bool] = None,
    min_length: Optional[int] = None,
    max_length: Optional[int] = None,
    allow_mutation: bool = True,
    regex: Optional[str] = None,
    discriminator: Optional[str] = None,
    repr: bool = True,
    sa_column: Union[Column, UndefinedType] = Undefined,  # type: ignore
    schema_extra: Optional[Dict[str, Any]] = None,
) -> Any:
    """Typing-only signature for declaring a field with a fully-built
    sa_column (which then excludes the individual column options).
    Presumably decorated with @overload upstream — the decorator is not
    visible in this snippet; the implementation is the non-stub Field()
    definition.
    """
    ...
153,633 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
NoArgAnyCallable = Callable[[], Any]
class FieldInfo(PydanticFieldInfo):
    """Pydantic ``FieldInfo`` extended with SQLAlchemy column configuration.

    The SQLAlchemy-related keyword arguments are popped off before
    delegating to Pydantic and stored as attributes.  ``sa_column`` is
    mutually exclusive with every other SQLAlchemy option: a fully-built
    Column already carries all of that configuration.
    """

    def __init__(self, default: Any = Undefined, **kwargs: Any) -> None:
        # Pop with Undefined as the default so the exclusivity checks below
        # only fire when the caller actually passed the option.  The previous
        # defaults of False for primary_key/unique made the "is not Undefined"
        # guards trigger on every sa_column usage, so sa_column= always raised.
        primary_key = kwargs.pop("primary_key", Undefined)
        nullable = kwargs.pop("nullable", Undefined)
        foreign_key = kwargs.pop("foreign_key", Undefined)
        unique = kwargs.pop("unique", Undefined)
        index = kwargs.pop("index", Undefined)
        sa_type = kwargs.pop("sa_type", Undefined)
        sa_column = kwargs.pop("sa_column", Undefined)
        sa_column_args = kwargs.pop("sa_column_args", Undefined)
        sa_column_kwargs = kwargs.pop("sa_column_kwargs", Undefined)
        if sa_column is not Undefined:
            if sa_column_args is not Undefined:
                raise RuntimeError(
                    "Passing sa_column_args is not supported when "
                    "also passing a sa_column"
                )
            if sa_column_kwargs is not Undefined:
                raise RuntimeError(
                    "Passing sa_column_kwargs is not supported when "
                    "also passing a sa_column"
                )
            if primary_key is not Undefined:
                raise RuntimeError(
                    "Passing primary_key is not supported when "
                    "also passing a sa_column"
                )
            if nullable is not Undefined:
                raise RuntimeError(
                    "Passing nullable is not supported when " "also passing a sa_column"
                )
            if foreign_key is not Undefined:
                raise RuntimeError(
                    "Passing foreign_key is not supported when "
                    "also passing a sa_column"
                )
            if unique is not Undefined:
                raise RuntimeError(
                    "Passing unique is not supported when also passing a sa_column"
                )
            if index is not Undefined:
                raise RuntimeError(
                    "Passing index is not supported when also passing a sa_column"
                )
            if sa_type is not Undefined:
                raise RuntimeError(
                    "Passing sa_type is not supported when also passing a sa_column"
                )
        super().__init__(default=default, **kwargs)
        # Attributes default to Undefined (not False) when unset, so later
        # column-building code can distinguish "not given" from "given False".
        self.primary_key = primary_key
        self.nullable = nullable
        self.foreign_key = foreign_key
        self.unique = unique
        self.index = index
        self.sa_type = sa_type
        self.sa_column = sa_column
        self.sa_column_args = sa_column_args
        self.sa_column_kwargs = sa_column_kwargs
def Field(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: Optional[str] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    exclude: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    include: Union[
        AbstractSet[Union[int, str]], Mapping[Union[int, str], Any], Any
    ] = None,
    const: Optional[bool] = None,
    gt: Optional[float] = None,
    ge: Optional[float] = None,
    lt: Optional[float] = None,
    le: Optional[float] = None,
    multiple_of: Optional[float] = None,
    max_digits: Optional[int] = None,
    decimal_places: Optional[int] = None,
    min_items: Optional[int] = None,
    max_items: Optional[int] = None,
    unique_items: Optional[bool] = None,
    min_length: Optional[int] = None,
    max_length: Optional[int] = None,
    allow_mutation: bool = True,
    regex: Optional[str] = None,
    discriminator: Optional[str] = None,
    repr: bool = True,
    primary_key: Union[bool, UndefinedType] = Undefined,
    foreign_key: Any = Undefined,
    unique: Union[bool, UndefinedType] = Undefined,
    nullable: Union[bool, UndefinedType] = Undefined,
    index: Union[bool, UndefinedType] = Undefined,
    sa_type: Union[Type[Any], UndefinedType] = Undefined,
    sa_column: Union[Column, UndefinedType] = Undefined,  # type: ignore
    sa_column_args: Union[Sequence[Any], UndefinedType] = Undefined,
    sa_column_kwargs: Union[Mapping[str, Any], UndefinedType] = Undefined,
    schema_extra: Optional[Dict[str, Any]] = None,
) -> Any:
    """Declare a model field, forwarding Pydantic validation metadata and
    SQLAlchemy column configuration into a single FieldInfo instance.

    Returns:
        Any: The constructed FieldInfo (typed as Any so annotations on the
        model attribute win for type checkers).
    """
    # Collect every keyword in one mapping so the FieldInfo call site stays
    # flat and easy to audit against the signature above.
    field_kwargs: Dict[str, Any] = {
        "default_factory": default_factory,
        "alias": alias,
        "title": title,
        "description": description,
        "exclude": exclude,
        "include": include,
        "const": const,
        "gt": gt,
        "ge": ge,
        "lt": lt,
        "le": le,
        "multiple_of": multiple_of,
        "max_digits": max_digits,
        "decimal_places": decimal_places,
        "min_items": min_items,
        "max_items": max_items,
        "unique_items": unique_items,
        "min_length": min_length,
        "max_length": max_length,
        "allow_mutation": allow_mutation,
        "regex": regex,
        "discriminator": discriminator,
        "repr": repr,
        "primary_key": primary_key,
        "foreign_key": foreign_key,
        "unique": unique,
        "nullable": nullable,
        "index": index,
        "sa_type": sa_type,
        "sa_column": sa_column,
        "sa_column_args": sa_column_args,
        "sa_column_kwargs": sa_column_kwargs,
    }
    current_schema_extra = schema_extra or {}
    # Double-star expansion (not dict.update) so a schema_extra key that
    # collides with a named argument raises TypeError, exactly as before.
    field_info = FieldInfo(default, **field_kwargs, **current_schema_extra)
    post_init_field_info(field_info)
    return field_info
153,634 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
def Relationship(
    *,
    back_populates: Optional[str] = None,
    link_model: Optional[Any] = None,
    sa_relationship_args: Optional[Sequence[Any]] = None,
    sa_relationship_kwargs: Optional[Mapping[str, Any]] = None,
) -> Any:
    """Signature stub for ``Relationship`` (variant taking relationship
    args/kwargs but no pre-built ``sa_relationship``).

    NOTE(review): the body is an ellipsis — this looks like one of several
    ``@overload`` signatures whose decorator is outside this excerpt; confirm
    against the full module.
    """
    ...
153,635 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
def Relationship(
    *,
    back_populates: Optional[str] = None,
    link_model: Optional[Any] = None,
    sa_relationship: Optional[RelationshipProperty[Any]] = None,
) -> Any:
    """Signature stub for ``Relationship`` (variant taking a pre-built
    SQLAlchemy ``sa_relationship`` instead of args/kwargs).

    NOTE(review): the body is an ellipsis — likely an ``@overload`` signature
    whose decorator is outside this excerpt; confirm against the full module.
    """
    ...
... | null |
153,636 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
class RelationshipInfo(Representation):
    """Carrier for the configuration declared via ``Relationship()``.

    Stores the declared options so the model metaclass can later build the
    actual SQLAlchemy relationship from them.

    Fix: as excerpted, ``__init__`` had a signature but no body (a syntax
    error) — the attribute assignments and the mutual-exclusion check have
    been restored.
    """

    def __init__(
        self,
        *,
        back_populates: Optional[str] = None,
        link_model: Optional[Any] = None,
        sa_relationship: Optional[RelationshipProperty] = None,  # type: ignore
        sa_relationship_args: Optional[Sequence[Any]] = None,
        sa_relationship_kwargs: Optional[Mapping[str, Any]] = None,
    ) -> None:
        # A pre-built sa_relationship takes over the full SQLAlchemy
        # relationship construction, so it cannot be combined with extra
        # constructor args/kwargs.
        if sa_relationship is not None:
            if sa_relationship_args is not None or sa_relationship_kwargs is not None:
                raise RuntimeError(
                    "Passing sa_relationship_args or sa_relationship_kwargs is not "
                    "supported when also passing a sa_relationship"
                )
        self.back_populates = back_populates
        self.link_model = link_model
        self.sa_relationship = sa_relationship
        self.sa_relationship_args = sa_relationship_args
        self.sa_relationship_kwargs = sa_relationship_kwargs


def Relationship(
    *,
    back_populates: Optional[str] = None,
    link_model: Optional[Any] = None,
    sa_relationship: Optional[RelationshipProperty[Any]] = None,
    sa_relationship_args: Optional[Sequence[Any]] = None,
    sa_relationship_kwargs: Optional[Mapping[str, Any]] = None,
) -> Any:
    """Declare a relationship attribute on a model.

    Returns a :class:`RelationshipInfo` marker holding the given options;
    the return type is ``Any`` so type checkers accept assigning it to an
    attribute of the related model type.
    """
    relationship_info = RelationshipInfo(
        back_populates=back_populates,
        link_model=link_model,
        sa_relationship=sa_relationship,
        sa_relationship_args=sa_relationship_args,
        sa_relationship_kwargs=sa_relationship_kwargs,
    )
    return relationship_info
153,637 | import ipaddress
import uuid
import weakref
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from enum import Enum
from pathlib import Path
from typing import (
AbstractSet,
Any,
Callable,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo as PydanticFieldInfo
from sqlalchemy import (
Boolean,
Column,
Date,
DateTime,
Float,
ForeignKey,
Integer,
Interval,
Numeric,
inspect,
)
from sqlalchemy import Enum as sa_Enum
from sqlalchemy.orm import (
Mapped,
RelationshipProperty,
declared_attr,
registry,
relationship,
)
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.decl_api import DeclarativeMeta
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.sql.schema import MetaData
from sqlalchemy.sql.sqltypes import LargeBinary, Time
from typing_extensions import Literal, deprecated, get_origin
from ._compat import ( # type: ignore[attr-defined]
IS_PYDANTIC_V2,
BaseConfig,
ModelField,
ModelMetaclass,
Representation,
SQLModelConfig,
Undefined,
UndefinedType,
_calculate_keys,
finish_init,
get_annotations,
get_config_value,
get_field_metadata,
get_model_fields,
get_relationship_to,
get_type_from_field,
init_pydantic_private_attrs,
is_field_noneable,
is_table_model_class,
post_init_field_info,
set_config_value,
sqlmodel_init,
sqlmodel_validate,
)
from .sql.sqltypes import GUID, AutoString
def get_sqlalchemy_type(field: Any) -> Any:
    """Map a (Pydantic) model field to a SQLAlchemy column type.

    An explicit ``sa_type`` set on the field wins; otherwise the field's
    Python type is matched against known SQLAlchemy types. Several of the
    ``issubclass`` checks below are order-sensitive (see inline notes).

    Raises ``ValueError`` if no matching SQLAlchemy type is found.
    """
    # Pydantic v2 passes FieldInfo directly; v1 wraps it in ModelField.
    if IS_PYDANTIC_V2:
        field_info = field
    else:
        field_info = field.field_info
    # Explicit user-specified column type overrides all inference.
    sa_type = getattr(field_info, "sa_type", Undefined)  # noqa: B009
    if sa_type is not Undefined:
        return sa_type
    type_ = get_type_from_field(field)
    metadata = get_field_metadata(field)
    # Check enums first as an enum can also be a str, needed by Pydantic/FastAPI
    if issubclass(type_, Enum):
        return sa_Enum(type_)
    if issubclass(type_, str):
        # A declared max_length bounds the generated string column.
        max_length = getattr(metadata, "max_length", None)
        if max_length:
            return AutoString(length=max_length)
        return AutoString
    if issubclass(type_, float):
        return Float
    # bool must be checked before int: bool is a subclass of int in Python.
    if issubclass(type_, bool):
        return Boolean
    if issubclass(type_, int):
        return Integer
    # datetime must be checked before date: datetime is a subclass of date.
    if issubclass(type_, datetime):
        return DateTime
    if issubclass(type_, date):
        return Date
    if issubclass(type_, timedelta):
        return Interval
    if issubclass(type_, time):
        return Time
    if issubclass(type_, bytes):
        return LargeBinary
    if issubclass(type_, Decimal):
        # Carry the field's digit constraints into NUMERIC precision/scale.
        return Numeric(
            precision=getattr(metadata, "max_digits", None),
            scale=getattr(metadata, "decimal_places", None),
        )
    # IP addresses/networks and filesystem paths are stored as strings.
    if issubclass(type_, ipaddress.IPv4Address):
        return AutoString
    if issubclass(type_, ipaddress.IPv4Network):
        return AutoString
    if issubclass(type_, ipaddress.IPv6Address):
        return AutoString
    if issubclass(type_, ipaddress.IPv6Network):
        return AutoString
    if issubclass(type_, Path):
        return AutoString
    if issubclass(type_, uuid.UUID):
        return GUID
    raise ValueError(f"{type_} has no matching SQLAlchemy type")
def get_column_from_field(field: Any) -> Column: # type: ignore
    """Build the SQLAlchemy ``Column`` for a model field.

    An explicitly provided ``sa_column`` is returned unchanged; otherwise
    the column is assembled from the field's inferred type plus its
    ``primary_key`` / ``index`` / ``nullable`` / ``foreign_key`` /
    ``unique`` settings and default value. ``Undefined`` is used as the
    sentinel for "not set on the field".
    """
    # Pydantic v2 passes FieldInfo directly; v1 wraps it in ModelField.
    if IS_PYDANTIC_V2:
        field_info = field
    else:
        field_info = field.field_info
    sa_column = getattr(field_info, "sa_column", Undefined)
    if isinstance(sa_column, Column):
        # Fully user-provided column: all other field settings are ignored.
        return sa_column
    sa_type = get_sqlalchemy_type(field)
    primary_key = getattr(field_info, "primary_key", Undefined)
    if primary_key is Undefined:
        primary_key = False
    index = getattr(field_info, "index", Undefined)
    if index is Undefined:
        index = False
    # Primary keys are never nullable; otherwise derive from Optional-ness.
    nullable = not primary_key and is_field_noneable(field)
    # Override derived nullability if the nullable property is set explicitly
    # on the field
    field_nullable = getattr(field_info, "nullable", Undefined) # noqa: B009
    if field_nullable is not Undefined:
        assert not isinstance(field_nullable, UndefinedType)
        nullable = field_nullable
    args = []
    foreign_key = getattr(field_info, "foreign_key", Undefined)
    if foreign_key is Undefined:
        foreign_key = None
    unique = getattr(field_info, "unique", Undefined)
    if unique is Undefined:
        unique = False
    if foreign_key:
        assert isinstance(foreign_key, str)
        args.append(ForeignKey(foreign_key))
    kwargs = {
        "primary_key": primary_key,
        "nullable": nullable,
        "index": index,
        "unique": unique,
    }
    sa_default = Undefined
    # A default_factory (callable) takes precedence over a plain default.
    # NOTE(review): this is a truthiness check, not an `is not None` check —
    # presumably default_factory is either None or a callable; confirm.
    if field_info.default_factory:
        sa_default = field_info.default_factory
    elif field_info.default is not Undefined:
        sa_default = field_info.default
    if sa_default is not Undefined:
        kwargs["default"] = sa_default
    # Extra args/kwargs declared on the field are applied last, so they can
    # extend (args) or override (kwargs) the derived ones.
    sa_column_args = getattr(field_info, "sa_column_args", Undefined)
    if sa_column_args is not Undefined:
        args.extend(list(cast(Sequence[Any], sa_column_args)))
    sa_column_kwargs = getattr(field_info, "sa_column_kwargs", Undefined)
    if sa_column_kwargs is not Undefined:
        kwargs.update(cast(Dict[Any, Any], sa_column_kwargs))
    return Column(sa_type, *args, **kwargs) # type: ignore
153,638 | from typing import Any, TypeVar
class _DefaultPlaceholder:
"""
You shouldn't use this class directly.
It's used internally to recognize when a default value has been overwritten, even
if the overridden default value was truthy.
"""
def __init__(self, value: Any):
self.value = value
def __bool__(self) -> bool:
return bool(self.value)
def __eq__(self, o: object) -> bool:
return isinstance(o, _DefaultPlaceholder) and o.value == self.value
_TDefaultType = TypeVar("_TDefaultType")
The provided code snippet includes the necessary dependencies for implementing the `Default` function. Write a Python function `def Default(value: _TDefaultType) -> _TDefaultType` that solves the following problem:
You shouldn't use this function directly. It's used internally to recognize when a default value has been overwritten, even if the overridden default value was truthy.
Here is the function:
def Default(value: _TDefaultType) -> _TDefaultType:
    """
    You shouldn't use this function directly.

    Internal helper: wraps *value* in a ``_DefaultPlaceholder`` so later code
    can detect whether a default value has been overridden, even when the
    overriding value is truthy.
    """
    placeholder = _DefaultPlaceholder(value)
    return placeholder  # type: ignore
153,639 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def all_(expr: Union[_ColumnExpressionArgument[_T], _T]) -> CollectionAggregate[bool]:
    """Typed passthrough to :func:`sqlalchemy.all_`."""
    return sqlalchemy.all_(expr) # type: ignore[arg-type]
153,640 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
def and_(
    initial_clause: Union[Literal[True], _ColumnExpressionArgument[bool], bool],
    *clauses: Union[_ColumnExpressionArgument[bool], bool],
) -> ColumnElement[bool]:
    """Typed passthrough to :func:`sqlalchemy.and_`."""
    return sqlalchemy.and_(initial_clause, *clauses) # type: ignore[arg-type]
153,641 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def any_(expr: Union[_ColumnExpressionArgument[_T], _T]) -> CollectionAggregate[bool]:
    """Typed passthrough to :func:`sqlalchemy.any_`."""
    return sqlalchemy.any_(expr) # type: ignore[arg-type]
153,642 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def asc(
    column: Union[_ColumnExpressionOrStrLabelArgument[_T], _T],
) -> UnaryExpression[_T]:
    """Typed passthrough to :func:`sqlalchemy.asc`."""
    return sqlalchemy.asc(column) # type: ignore[arg-type]
153,643 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
def collate(
    expression: Union[_ColumnExpressionArgument[str], str], collation: str
) -> BinaryExpression[str]:
    """Typed passthrough to :func:`sqlalchemy.collate`."""
    return sqlalchemy.collate(expression, collation) # type: ignore[arg-type]
153,644 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def between(
    expr: Union[_ColumnExpressionOrLiteralArgument[_T], _T],
    lower_bound: Any,
    upper_bound: Any,
    symmetric: bool = False,
) -> BinaryExpression[bool]:
    """Typed passthrough to :func:`sqlalchemy.between`."""
    return sqlalchemy.between(expr, lower_bound, upper_bound, symmetric=symmetric) # type: ignore[arg-type]
153,645 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def not_(clause: Union[_ColumnExpressionArgument[_T], _T]) -> ColumnElement[_T]:
    """Typed passthrough to :func:`sqlalchemy.not_`."""
    return sqlalchemy.not_(clause) # type: ignore[arg-type]
153,646 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
def case(
    *whens: Union[
        Tuple[Union[_ColumnExpressionArgument[bool], bool], Any], Mapping[Any, Any]
    ],
    value: Optional[Any] = None,
    else_: Optional[Any] = None,
) -> Case[Any]:
    """Typed passthrough to :func:`sqlalchemy.case`."""
    return sqlalchemy.case(*whens, value=value, else_=else_) # type: ignore[arg-type]
153,647 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def cast(
    expression: Union[_ColumnExpressionOrLiteralArgument[Any], Any],
    type_: "_TypeEngineArgument[_T]",
) -> Cast[_T]:
    """Typed passthrough to :func:`sqlalchemy.cast`.

    NOTE(review): ``_TypeEngineArgument`` is a forward reference not imported
    in this excerpt — presumably imported elsewhere in the module; confirm.
    """
    return sqlalchemy.cast(expression, type_) # type: ignore[arg-type]
153,648 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def try_cast(
    expression: Union[_ColumnExpressionOrLiteralArgument[Any], Any],
    type_: "_TypeEngineArgument[_T]",
) -> TryCast[_T]:
    """Typed passthrough to :func:`sqlalchemy.try_cast`.

    NOTE(review): ``_TypeEngineArgument`` is a forward reference not imported
    in this excerpt — presumably imported elsewhere in the module; confirm.
    """
    return sqlalchemy.try_cast(expression, type_) # type: ignore[arg-type]
153,649 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def desc(
    column: Union[_ColumnExpressionOrStrLabelArgument[_T], _T],
) -> UnaryExpression[_T]:
    """Typed passthrough to :func:`sqlalchemy.desc`."""
    return sqlalchemy.desc(column) # type: ignore[arg-type]
153,650 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def distinct(expr: Union[_ColumnExpressionArgument[_T], _T]) -> UnaryExpression[_T]:
    """Typed passthrough to :func:`sqlalchemy.distinct`."""
    return sqlalchemy.distinct(expr) # type: ignore[arg-type]
153,651 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def bitwise_not(expr: Union[_ColumnExpressionArgument[_T], _T]) -> UnaryExpression[_T]:
    """Typed passthrough to :func:`sqlalchemy.bitwise_not`."""
    return sqlalchemy.bitwise_not(expr) # type: ignore[arg-type]
153,652 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
def extract(field: str, expr: Union[_ColumnExpressionArgument[Any], Any]) -> Extract:
    """Typed passthrough to :func:`sqlalchemy.extract`."""
    return sqlalchemy.extract(field, expr) # type: ignore[arg-type]
153,653 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def funcfilter(
    func: FunctionElement[_T], *criterion: Union[_ColumnExpressionArgument[bool], bool]
) -> FunctionFilter[_T]:
    """Typed passthrough to :func:`sqlalchemy.funcfilter`."""
    return sqlalchemy.funcfilter(func, *criterion) # type: ignore[arg-type]
153,654 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def label(
    name: str,
    element: Union[_ColumnExpressionArgument[_T], _T],
    type_: Optional["_TypeEngineArgument[_T]"] = None,
) -> Label[_T]:
    """Typed passthrough to :func:`sqlalchemy.label`.

    NOTE(review): ``_TypeEngineArgument`` is a forward reference not imported
    in this excerpt — presumably imported elsewhere in the module; confirm.
    """
    return sqlalchemy.label(name, element, type_=type_) # type: ignore[arg-type]
153,655 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def nulls_first(
    column: Union[_ColumnExpressionArgument[_T], _T],
) -> UnaryExpression[_T]:
    """Typed passthrough to :func:`sqlalchemy.nulls_first`."""
    return sqlalchemy.nulls_first(column) # type: ignore[arg-type]
153,656 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def nulls_last(column: Union[_ColumnExpressionArgument[_T], _T]) -> UnaryExpression[_T]:
    """Typed passthrough to :func:`sqlalchemy.nulls_last`."""
    return sqlalchemy.nulls_last(column) # type: ignore[arg-type]
153,657 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
def or_(
    initial_clause: Union[Literal[False], _ColumnExpressionArgument[bool], bool],
    *clauses: Union[_ColumnExpressionArgument[bool], bool],
) -> ColumnElement[bool]:
    """Typed passthrough to :func:`sqlalchemy.or_`.

    Fix: removed the stale ``# type: ignore[empty-body]`` from the ``def``
    line — the function has a real body, so that suppression was unused and
    would be flagged under mypy's ``warn_unused_ignores``.
    """
    return sqlalchemy.or_(initial_clause, *clauses)  # type: ignore[arg-type]
153,658 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def over(
element: FunctionElement[_T],
partition_by: Optional[
Union[
Iterable[Union[_ColumnExpressionArgument[Any], Any]],
_ColumnExpressionArgument[Any],
Any,
]
] = None,
order_by: Optional[
Union[
Iterable[Union[_ColumnExpressionArgument[Any], Any]],
_ColumnExpressionArgument[Any],
Any,
]
] = None,
range_: Optional[Tuple[Optional[int], Optional[int]]] = None,
rows: Optional[Tuple[Optional[int], Optional[int]]] = None,
) -> Over[_T]:
return sqlalchemy.over(
element, partition_by=partition_by, order_by=order_by, range_=range_, rows=rows
) # type: ignore[arg-type] | null |
153,659 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
def tuple_(
*clauses: Union[_ColumnExpressionArgument[Any], Any],
types: Optional[Sequence["_TypeEngineArgument[Any]"]] = None,
) -> Tuple[Any, ...]:
return sqlalchemy.tuple_(*clauses, types=types) # type: ignore[return-value] | null |
153,660 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def type_coerce(
expression: Union[_ColumnExpressionOrLiteralArgument[Any], Any],
type_: "_TypeEngineArgument[_T]",
) -> TypeCoerce[_T]:
return sqlalchemy.type_coerce(expression, type_) # type: ignore[arg-type] | null |
153,661 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def within_group(
element: FunctionElement[_T], *order_by: Union[_ColumnExpressionArgument[Any], Any]
) -> WithinGroup[_T]:
return sqlalchemy.within_group(element, *order_by) # type: ignore[arg-type] | null |
153,662 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class SelectOfScalar(SelectBase[_T]):
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
def select(__ent0: _TCCA[_T0]) -> SelectOfScalar[_T0]:
... | null |
153,663 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class SelectOfScalar(SelectBase[_T]):
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select(__ent0: _TScalar_0) -> SelectOfScalar[_TScalar_0]: # type: ignore
... | null |
153,664 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
) -> Select[Tuple[_T0, _T1]]:
... | null |
153,665 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
) -> Select[Tuple[_T0, _TScalar_1]]:
... | null |
153,666 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
) -> Select[Tuple[_TScalar_0, _T1]]:
... | null |
153,667 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
) -> Select[Tuple[_TScalar_0, _TScalar_1]]:
... | null |
153,668 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
) -> Select[Tuple[_T0, _T1, _T2]]:
... | null |
153,669 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
entity_2: _TScalar_2,
) -> Select[Tuple[_T0, _T1, _TScalar_2]]:
... | null |
153,670 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T2 = TypeVar("_T2")
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
__ent2: _TCCA[_T2],
) -> Select[Tuple[_T0, _TScalar_1, _T2]]:
... | null |
153,671 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
entity_2: _TScalar_2,
) -> Select[Tuple[_T0, _TScalar_1, _TScalar_2]]:
... | null |
153,672 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
) -> Select[Tuple[_TScalar_0, _T1, _T2]]:
... | null |
153,673 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
entity_2: _TScalar_2,
) -> Select[Tuple[_TScalar_0, _T1, _TScalar_2]]:
... | null |
153,674 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T2 = TypeVar("_T2")
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
__ent2: _TCCA[_T2],
) -> Select[Tuple[_TScalar_0, _TScalar_1, _T2]]:
... | null |
153,675 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
entity_2: _TScalar_2,
) -> Select[Tuple[_TScalar_0, _TScalar_1, _TScalar_2]]:
... | null |
153,676 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
) -> Select[Tuple[_T0, _T1, _T2, _T3]]:
... | null |
153,677 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
entity_3: _TScalar_3,
) -> Select[Tuple[_T0, _T1, _T2, _TScalar_3]]:
... | null |
153,678 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T3 = TypeVar("_T3")
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
entity_2: _TScalar_2,
__ent3: _TCCA[_T3],
) -> Select[Tuple[_T0, _T1, _TScalar_2, _T3]]:
... | null |
153,679 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_T1 = TypeVar("_T1")
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
entity_2: _TScalar_2,
entity_3: _TScalar_3,
) -> Select[Tuple[_T0, _T1, _TScalar_2, _TScalar_3]]:
... | null |
153,680 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
) -> Select[Tuple[_T0, _TScalar_1, _T2, _T3]]:
... | null |
153,681 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T2 = TypeVar("_T2")
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
__ent2: _TCCA[_T2],
entity_3: _TScalar_3,
) -> Select[Tuple[_T0, _TScalar_1, _T2, _TScalar_3]]:
... | null |
153,682 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T3 = TypeVar("_T3")
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
entity_2: _TScalar_2,
__ent3: _TCCA[_T3],
) -> Select[Tuple[_T0, _TScalar_1, _TScalar_2, _T3]]:
... | null |
153,683 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_T0 = TypeVar("_T0")
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
__ent0: _TCCA[_T0],
entity_1: _TScalar_1,
entity_2: _TScalar_2,
entity_3: _TScalar_3,
) -> Select[Tuple[_T0, _TScalar_1, _TScalar_2, _TScalar_3]]:
... | null |
153,684 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
) -> Select[Tuple[_TScalar_0, _T1, _T2, _T3]]:
... | null |
153,685 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
entity_3: _TScalar_3,
) -> Select[Tuple[_TScalar_0, _T1, _T2, _TScalar_3]]:
... | null |
153,686 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T3 = TypeVar("_T3")
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
entity_2: _TScalar_2,
__ent3: _TCCA[_T3],
) -> Select[Tuple[_TScalar_0, _T1, _TScalar_2, _T3]]:
... | null |
153,687 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T1 = TypeVar("_T1")
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
__ent1: _TCCA[_T1],
entity_2: _TScalar_2,
entity_3: _TScalar_3,
) -> Select[Tuple[_TScalar_0, _T1, _TScalar_2, _TScalar_3]]:
... | null |
153,688 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T2 = TypeVar("_T2")
_T3 = TypeVar("_T3")
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
) -> Select[Tuple[_TScalar_0, _TScalar_1, _T2, _T3]]:
... | null |
153,689 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T2 = TypeVar("_T2")
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
__ent2: _TCCA[_T2],
entity_3: _TScalar_3,
) -> Select[Tuple[_TScalar_0, _TScalar_1, _T2, _TScalar_3]]:
... | null |
153,690 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
_TCCA = Union[
TypedColumnsClauseRole[_T],
SQLCoreOperations[_T],
Type[_T],
]
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_T3 = TypeVar("_T3")
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
entity_2: _TScalar_2,
__ent3: _TCCA[_T3],
) -> Select[Tuple[_TScalar_0, _TScalar_1, _TScalar_2, _T3]]:
... | null |
153,691 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
_TScalar_0 = TypeVar(
"_TScalar_0",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_1 = TypeVar(
"_TScalar_1",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_2 = TypeVar(
"_TScalar_2",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
_TScalar_3 = TypeVar(
"_TScalar_3",
Column, # type: ignore
Sequence, # type: ignore
Mapping, # type: ignore
UUID,
datetime,
float,
int,
bool,
bytes,
str,
None,
)
def select( # type: ignore
entity_0: _TScalar_0,
entity_1: _TScalar_1,
entity_2: _TScalar_2,
entity_3: _TScalar_3,
) -> Select[Tuple[_TScalar_0, _TScalar_1, _TScalar_2, _TScalar_3]]:
... | null |
153,692 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
class Select(SelectBase[_T]):
inherit_cache = True
class SelectOfScalar(SelectBase[_T]):
inherit_cache = True
def select(*entities: Any) -> Union[Select, SelectOfScalar]: # type: ignore
if len(entities) == 1:
return SelectOfScalar(*entities)
return Select(*entities) | null |
153,693 | from datetime import datetime
from typing import (
Any,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from uuid import UUID
import sqlalchemy
from sqlalchemy import (
Column,
ColumnElement,
Extract,
FunctionElement,
FunctionFilter,
Label,
Over,
TypeCoerce,
WithinGroup,
)
from sqlalchemy.orm import InstrumentedAttribute, Mapped
from sqlalchemy.sql._typing import (
_ColumnExpressionArgument,
_ColumnExpressionOrLiteralArgument,
_ColumnExpressionOrStrLabelArgument,
)
from sqlalchemy.sql.elements import (
BinaryExpression,
Case,
Cast,
CollectionAggregate,
ColumnClause,
SQLCoreOperations,
TryCast,
UnaryExpression,
)
from sqlalchemy.sql.expression import Select as _Select
from sqlalchemy.sql.roles import TypedColumnsClauseRole
from sqlalchemy.sql.type_api import TypeEngine
from typing_extensions import Literal, Self
_T = TypeVar("_T")
def col(column_expression: _T) -> Mapped[_T]:
if not isinstance(column_expression, (ColumnClause, Column, InstrumentedAttribute)):
raise RuntimeError(f"Not a SQLAlchemy column: {column_expression}")
return column_expression # type: ignore | null |
153,694 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def get_config_value(
*, model: InstanceOrType["SQLModel"], parameter: str, default: Any = None
) -> Any:
return model.model_config.get(parameter, default) | null |
153,695 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def set_config_value(
*,
model: InstanceOrType["SQLModel"],
parameter: str,
value: Any,
) -> None:
model.model_config[parameter] = value # type: ignore[literal-required] | null |
153,696 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def get_model_fields(model: InstanceOrType[BaseModel]) -> Dict[str, "FieldInfo"]:
return model.model_fields | null |
153,697 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def get_fields_set(
object: InstanceOrType["SQLModel"],
) -> Union[Set[str], Callable[[BaseModel], Set[str]]]:
return object.model_fields_set | null |
153,698 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def init_pydantic_private_attrs(new_object: InstanceOrType["SQLModel"]) -> None:
object.__setattr__(new_object, "__pydantic_fields_set__", set())
object.__setattr__(new_object, "__pydantic_extra__", None)
object.__setattr__(new_object, "__pydantic_private__", None) | null |
153,699 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
def get_annotations(class_dict: Dict[str, Any]) -> Dict[str, Any]:
return class_dict.get("__annotations__", {}) | null |
153,700 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
NoneType = type(None)
def _is_union_type(t: Any) -> bool:
return t is UnionType or t is Union
def get_relationship_to(
name: str,
rel_info: "RelationshipInfo",
annotation: Any,
) -> Any:
origin = get_origin(annotation)
use_annotation = annotation
# Direct relationships (e.g. 'Team' or Team) have None as an origin
if origin is None:
if isinstance(use_annotation, ForwardRef):
use_annotation = use_annotation.__forward_arg__
else:
return use_annotation
# If Union (e.g. Optional), get the real field
elif _is_union_type(origin):
use_annotation = get_args(annotation)
if len(use_annotation) > 2:
raise ValueError(
"Cannot have a (non-optional) union as a SQLAlchemy field"
)
arg1, arg2 = use_annotation
if arg1 is NoneType and arg2 is not NoneType:
use_annotation = arg2
elif arg2 is NoneType and arg1 is not NoneType:
use_annotation = arg1
else:
raise ValueError(
"Cannot have a Union of None and None as a SQLAlchemy field"
)
# If a list, then also get the real field
elif origin is list:
use_annotation = get_args(annotation)[0]
return get_relationship_to(
name=name, rel_info=rel_info, annotation=use_annotation
) | null |
153,701 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
def _calculate_keys(
self: "SQLModel",
include: Optional[Mapping[Union[int, str], Any]],
exclude: Optional[Mapping[Union[int, str], Any]],
exclude_unset: bool,
update: Optional[Dict[str, Any]] = None,
) -> Optional[AbstractSet[str]]: # pragma: no cover
return None | null |
153,702 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
_TSQLModel = TypeVar("_TSQLModel", bound="SQLModel")
class ObjectWithUpdateWrapper:
obj: Any
update: Dict[str, Any]
def __getattribute__(self, __name: str) -> Any:
update = super().__getattribute__("update")
obj = super().__getattribute__("obj")
if __name in update:
return update[__name]
return getattr(obj, __name)
def partial_init() -> Generator[None, None, None]:
token = finish_init.set(False)
yield
finish_init.reset(token)
if IS_PYDANTIC_V2:
from pydantic import ConfigDict as BaseConfig
from pydantic._internal._fields import PydanticMetadata
from pydantic._internal._model_construction import ModelMetaclass
from pydantic._internal._repr import Representation as Representation
from pydantic_core import PydanticUndefined as Undefined
from pydantic_core import PydanticUndefinedType as UndefinedType
# Dummy for types, to make it importable
def is_table_model_class(cls: Type[Any]) -> bool:
config = getattr(cls, "model_config", {})
if config:
return config.get("table", False) or False
return False
# Dummy to make it importable
else:
from pydantic import BaseConfig as BaseConfig # type: ignore[assignment]
from pydantic.errors import ConfigError
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
SHAPE_SINGLETON,
ModelField,
)
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
Undefined as Undefined, # noqa
)
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
UndefinedType as UndefinedType,
)
from pydantic.main import ( # type: ignore[no-redef]
ModelMetaclass as ModelMetaclass,
)
from pydantic.main import validate_model
from pydantic.typing import resolve_annotations
from pydantic.utils import ROOT_KEY, ValueItems
from pydantic.utils import ( # type: ignore[no-redef]
Representation as Representation,
)
def is_table_model_class(cls: Type[Any]) -> bool:
config = getattr(cls, "__config__", None)
if config:
return getattr(config, "table", False)
return False
def sqlmodel_validate(
cls: Type[_TSQLModel],
obj: Any,
*,
strict: Union[bool, None] = None,
from_attributes: Union[bool, None] = None,
context: Union[Dict[str, Any], None] = None,
update: Union[Dict[str, Any], None] = None,
) -> _TSQLModel:
if not is_table_model_class(cls):
new_obj: _TSQLModel = cls.__new__(cls)
else:
# If table, create the new instance normally to make SQLAlchemy create
# the _sa_instance_state attribute
# The wrapper of this function should use with _partial_init()
with partial_init():
new_obj = cls()
# SQLModel Override to get class SQLAlchemy __dict__ attributes and
# set them back in after creating the object
old_dict = new_obj.__dict__.copy()
use_obj = obj
if isinstance(obj, dict) and update:
use_obj = {**obj, **update}
elif update:
use_obj = ObjectWithUpdateWrapper(obj=obj, update=update)
cls.__pydantic_validator__.validate_python(
use_obj,
strict=strict,
from_attributes=from_attributes,
context=context,
self_instance=new_obj,
)
# Capture fields set to restore it later
fields_set = new_obj.__pydantic_fields_set__.copy()
if not is_table_model_class(cls):
# If not table, normal Pydantic code, set __dict__
new_obj.__dict__ = {**old_dict, **new_obj.__dict__}
else:
# Do not set __dict__, instead use setattr to trigger SQLAlchemy
# instrumentation
for key, value in {**old_dict, **new_obj.__dict__}.items():
setattr(new_obj, key, value)
# Restore fields set
object.__setattr__(new_obj, "__pydantic_fields_set__", fields_set)
# Get and set any relationship objects
if is_table_model_class(cls):
for key in new_obj.__sqlmodel_relationships__:
value = getattr(use_obj, key, Undefined)
if value is not Undefined:
setattr(new_obj, key, value)
return new_obj | null |
153,703 | import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
if IS_PYDANTIC_V2:
from pydantic import ConfigDict as BaseConfig
from pydantic._internal._fields import PydanticMetadata
from pydantic._internal._model_construction import ModelMetaclass
from pydantic._internal._repr import Representation as Representation
from pydantic_core import PydanticUndefined as Undefined
from pydantic_core import PydanticUndefinedType as UndefinedType
# Dummy for types, to make it importable
def is_table_model_class(cls: Type[Any]) -> bool:
config = getattr(cls, "model_config", {})
if config:
return config.get("table", False) or False
return False
# Dummy to make it importable
def sqlmodel_table_construct(
*,
self_instance: _TSQLModel,
values: Dict[str, Any],
_fields_set: Union[Set[str], None] = None,
) -> _TSQLModel:
# Copy from Pydantic's BaseModel.construct()
# Ref: https://github.com/pydantic/pydantic/blob/v2.5.2/pydantic/main.py#L198
# Modified to not include everything, only the model fields, and to
# set relationships
# SQLModel override to get class SQLAlchemy __dict__ attributes and
# set them back in after creating the object
# new_obj = cls.__new__(cls)
cls = type(self_instance)
old_dict = self_instance.__dict__.copy()
# End SQLModel override
fields_values: Dict[str, Any] = {}
defaults: Dict[
str, Any
] = {} # keeping this separate from `fields_values` helps us compute `_fields_set`
for name, field in cls.model_fields.items():
if field.alias and field.alias in values:
fields_values[name] = values.pop(field.alias)
elif name in values:
fields_values[name] = values.pop(name)
elif not field.is_required():
defaults[name] = field.get_default(call_default_factory=True)
if _fields_set is None:
_fields_set = set(fields_values.keys())
fields_values.update(defaults)
_extra: Union[Dict[str, Any], None] = None
if cls.model_config.get("extra") == "allow":
_extra = {}
for k, v in values.items():
_extra[k] = v
# SQLModel override, do not include everything, only the model fields
# else:
# fields_values.update(values)
# End SQLModel override
# SQLModel override
# Do not set __dict__, instead use setattr to trigger SQLAlchemy
# object.__setattr__(new_obj, "__dict__", fields_values)
# instrumentation
for key, value in {**old_dict, **fields_values}.items():
setattr(self_instance, key, value)
# End SQLModel override
object.__setattr__(self_instance, "__pydantic_fields_set__", _fields_set)
if not cls.__pydantic_root_model__:
object.__setattr__(self_instance, "__pydantic_extra__", _extra)
if cls.__pydantic_post_init__:
self_instance.model_post_init(None)
elif not cls.__pydantic_root_model__:
# Note: if there are any private attributes, cls.__pydantic_post_init__ would exist
# Since it doesn't, that means that `__pydantic_private__` should be set to None
object.__setattr__(self_instance, "__pydantic_private__", None)
# SQLModel override, set relationships
# Get and set any relationship objects
for key in self_instance.__sqlmodel_relationships__:
value = values.get(key, Undefined)
if value is not Undefined:
setattr(self_instance, key, value)
# End SQLModel override
return self_instance
else:
from pydantic import BaseConfig as BaseConfig # type: ignore[assignment]
from pydantic.errors import ConfigError
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
SHAPE_SINGLETON,
ModelField,
)
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
Undefined as Undefined, # noqa
)
from pydantic.fields import ( # type: ignore[attr-defined, no-redef]
UndefinedType as UndefinedType,
)
from pydantic.main import ( # type: ignore[no-redef]
ModelMetaclass as ModelMetaclass,
)
from pydantic.main import validate_model
from pydantic.typing import resolve_annotations
from pydantic.utils import ROOT_KEY, ValueItems
from pydantic.utils import ( # type: ignore[no-redef]
Representation as Representation,
)
def is_table_model_class(cls: Type[Any]) -> bool:
    """Return True if *cls* is configured as a SQLModel table model.

    A class counts as a table model when it carries a pydantic v1 style
    ``__config__`` object whose ``table`` attribute is truthy; classes with
    no ``__config__`` (or a falsy one) are plain data models.
    """
    config = getattr(cls, "__config__", None)
    return getattr(config, "table", False) if config else False
def sqlmodel_init(*, self: "SQLModel", data: Dict[str, Any]) -> None:
    """Initialize *self* from *data* while keeping pre-existing ``__dict__`` state.

    Table models are populated through ``sqlmodel_table_construct`` (which
    assigns via ``setattr``); all other models go through pydantic's normal
    ``validate_python`` path. Whatever was already on the instance's
    ``__dict__`` before initialization is merged back underneath the freshly
    set values so it is not lost.
    """
    preexisting = dict(self.__dict__)
    if is_table_model_class(self.__class__):
        sqlmodel_table_construct(
            self_instance=self,
            values=data,
        )
    else:
        self.__pydantic_validator__.validate_python(
            data,
            self_instance=self,
        )
    # New values take precedence over the pre-init ones; bypass any custom
    # __setattr__ by writing __dict__ directly.
    object.__setattr__(
        self,
        "__dict__",
        {**preexisting, **self.__dict__},
    )
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def get_config_value(
    *, model: InstanceOrType["SQLModel"], parameter: str, default: Any = None
) -> Any:
    """Read *parameter* from the model's pydantic v1 ``__config__``.

    Returns *default* when the config object does not define the attribute.
    """
    config = model.__config__  # type: ignore[union-attr]
    return getattr(config, parameter, default)
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def set_config_value(
    *,
    model: InstanceOrType["SQLModel"],
    parameter: str,
    value: Any,
) -> None:
    """Write *value* as *parameter* on the model's pydantic v1 ``__config__``."""
    config = model.__config__  # type: ignore
    setattr(config, parameter, value)
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def get_model_fields(model: InstanceOrType[BaseModel]) -> Dict[str, "FieldInfo"]:
    """Return the pydantic v1 field map (``__fields__``) of *model*."""
    fields = model.__fields__  # type: ignore
    return fields
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def get_fields_set(
    object: InstanceOrType["SQLModel"],
) -> Union[Set[str], Callable[[BaseModel], Set[str]]]:
    """Return the set of field names explicitly set on *object*.

    NOTE(review): the parameter name shadows the ``object`` builtin; it is
    kept unchanged for keyword-argument compatibility with existing callers.
    """
    fields_set = object.__fields_set__
    return fields_set
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
InstanceOrType = Union[T, Type[T]]
def init_pydantic_private_attrs(new_object: InstanceOrType["SQLModel"]) -> None:
    """Seed pydantic v1's private bookkeeping attributes on a raw instance.

    Uses ``object.__setattr__`` so that any custom ``__setattr__`` hooks are
    bypassed while the instance is still under construction.
    """
    empty_fields_set: Set[str] = set()
    object.__setattr__(new_object, "__fields_set__", empty_fields_set)
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
def get_annotations(class_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Resolve the raw ``__annotations__`` of a class namespace dict.

    Forward-reference strings are evaluated against the class's defining
    module via pydantic v1's ``resolve_annotations`` helper.
    """
    raw_annotations = class_dict.get("__annotations__", {})
    module_name = class_dict.get("__module__")
    return resolve_annotations(raw_annotations, module_name)  # type: ignore[no-any-return]
import types
from contextlib import contextmanager
from contextvars import ContextVar
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Dict,
ForwardRef,
Generator,
Mapping,
Optional,
Set,
Type,
TypeVar,
Union,
)
from pydantic import VERSION as PYDANTIC_VERSION
from pydantic import BaseModel
from pydantic.fields import FieldInfo
from typing_extensions import get_args, get_origin
# Version-compatibility shim: bind the pydantic-internal names this module
# needs (config base, undefined sentinel, metaclass, field machinery) from
# whichever major pydantic version is installed.
if IS_PYDANTIC_V2:
    from pydantic import ConfigDict as BaseConfig
    from pydantic._internal._fields import PydanticMetadata
    from pydantic._internal._model_construction import ModelMetaclass
    from pydantic._internal._repr import Representation as Representation
    from pydantic_core import PydanticUndefined as Undefined
    from pydantic_core import PydanticUndefinedType as UndefinedType

    # Dummy for types, to make it importable: pydantic v2 removed ModelField,
    # but the name must still exist for v1-only code paths that reference it.
    class ModelField:
        pass

    # SQLModel's model config for v2: pydantic's ConfigDict (a TypedDict)
    # extended with SQLModel-specific keys; total=False keeps them optional.
    class SQLModelConfig(BaseConfig, total=False):
        table: Optional[bool]
        registry: Optional[Any]
else:
    from pydantic import BaseConfig as BaseConfig  # type: ignore[assignment]
    from pydantic.errors import ConfigError
    from pydantic.fields import (  # type: ignore[attr-defined, no-redef]
        SHAPE_SINGLETON,
        ModelField,
    )
    from pydantic.fields import (  # type: ignore[attr-defined, no-redef]
        Undefined as Undefined,  # noqa
    )
    from pydantic.fields import (  # type: ignore[attr-defined, no-redef]
        UndefinedType as UndefinedType,
    )
    from pydantic.main import (  # type: ignore[no-redef]
        ModelMetaclass as ModelMetaclass,
    )
    from pydantic.main import validate_model
    from pydantic.typing import resolve_annotations
    from pydantic.utils import ROOT_KEY, ValueItems
    from pydantic.utils import (  # type: ignore[no-redef]
        Representation as Representation,
    )

    # SQLModel's model config for v1: a BaseConfig subclass carrying the same
    # SQLModel-specific attributes, defaulting to None.
    class SQLModelConfig(BaseConfig):  # type: ignore[no-redef]
        table: Optional[bool] = None  # type: ignore[misc]
        registry: Optional[Any] = None  # type: ignore[misc]
def get_relationship_to(
    name: str,
    rel_info: "RelationshipInfo",
    annotation: Any,
) -> Any:
    """Resolve the target model of a relationship from its type annotation.

    Infers a throwaway pydantic v1 ``ModelField`` for *annotation* and takes
    its inner type. If that type is a ``ForwardRef`` (a model referenced by
    name before it is defined), the referenced name string is returned
    instead of the type object.
    """
    inferred = ModelField.infer(  # type: ignore[attr-defined]
        name=name,
        value=rel_info,
        annotation=annotation,
        class_validators=None,
        config=SQLModelConfig,
    )
    target = inferred.type_
    if isinstance(target, ForwardRef):
        return target.__forward_arg__
    return target
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.