language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | shared/logging/src/airflow_shared/logging/types.py | {
"start": 938,
"end": 1762
} | class ____(FilteringBoundLogger, Protocol): # noqa: D101
name: str
def isEnabledFor(self, level: int): ...
def getEffectiveLevel(self) -> int: ...
# FilteringBoundLogger defines these methods with `event: str` -- in a few places in Airflow we do
# `self.log.exception(e)` or `self.log.info(rule_results_df)` so we correct the types to allow for this
# (as the code already did)
def debug(self, event: Any, *args: Any, **kw: Any) -> Any: ...
def info(self, event: Any, *args: Any, **kw: Any) -> Any: ...
def warning(self, event: Any, *args: Any, **kw: Any) -> Any: ...
def error(self, event: Any, *args: Any, **kw: Any) -> Any: ...
def exception(self, event: Any, *args: Any, **kw: Any) -> Any: ...
def log(self, level: int, event: Any, *args: Any, **kw: Any) -> Any: ...
| Logger |
python | Netflix__metaflow | metaflow/_vendor/click/exceptions.py | {
"start": 7282,
"end": 7745
} | class ____(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename, hint=None):
ui_filename = filename_to_ui(filename)
if hint is None:
hint = "unknown error"
ClickException.__init__(self, hint)
self.ui_filename = ui_filename
self.filename = filename
def format_message(self):
return "Could not open file {}: {}".format(self.ui_filename, self.message)
| FileError |
python | getsentry__sentry | src/sentry/issue_detection/performance_detection.py | {
"start": 2436,
"end": 25752
} | class ____:
"""
Wrapper that binds an Event and PerformanceProblem together and allow the problem to be saved
to and fetch from Nodestore
"""
def __init__(self, event: Event | GroupEvent, problem: PerformanceProblem):
self.event = event
self.problem = problem
@property
def identifier(self) -> str:
return self.build_identifier(self.event.event_id, self.problem.fingerprint)
@classmethod
def build_identifier(cls, event_id: str, problem_hash: str) -> str:
identifier = hashlib.md5(f"{problem_hash}:{event_id}".encode()).hexdigest()
return f"p-i-e:{identifier}"
@property
def evidence_hashes(self) -> dict[str, list[str]]:
evidence_ids = self.problem.to_dict()
evidence_hashes = {}
spans_by_id = {span["span_id"]: span for span in self.event.data.get("spans", [])}
trace = get_path(self.event.data, "contexts", "trace")
if trace:
spans_by_id[trace["span_id"]] = trace
for key in ["parent", "cause", "offender"]:
span_ids = evidence_ids.get(key + "_span_ids", []) or []
spans = [spans_by_id.get(id) for id in span_ids]
hashes = [span.get("hash") for span in spans if span]
evidence_hashes[key + "_span_hashes"] = hashes
return evidence_hashes
def save(self) -> None:
nodestore.backend.set(self.identifier, self.problem.to_dict())
@classmethod
def fetch(cls, event: Event, problem_hash: str) -> EventPerformanceProblem | None:
return cls.fetch_multi([(event, problem_hash)])[0]
@classmethod
def fetch_multi(
cls, items: Sequence[tuple[Event | GroupEvent, str]]
) -> list[EventPerformanceProblem | None]:
ids = [cls.build_identifier(event.event_id, problem_hash) for event, problem_hash in items]
results = nodestore.backend.get_multi(ids)
ret: list[EventPerformanceProblem | None] = []
for _id, (event, _) in zip(ids, items):
result = results.get(_id)
if result:
ret.append(cls(event, PerformanceProblem.from_dict(result)))
else:
ret.append(None)
return ret
# Facade in front of performance detection to limit impact of detection on our events ingestion
def detect_performance_problems(
data: dict[str, Any], project: Project, standalone: bool = False
) -> list[PerformanceProblem]:
try:
rate = options.get("performance.issues.all.problem-detection")
if rate and rate > random.random():
# Add an experimental tag to be able to find these spans in production while developing. Should be removed later.
sentry_sdk.set_tag("_did_analyze_performance_issue", "true")
with (
metrics.timer("performance.detect_performance_issue", sample_rate=0.01),
sentry_sdk.start_span(op="py.detect_performance_issue", name="none") as sdk_span,
):
return _detect_performance_problems(data, sdk_span, project, standalone=standalone)
except Exception:
logging.exception("Failed to detect performance problems")
return []
# Merges system defaults, with default project settings and saved project settings.
def get_merged_settings(project_id: int | None = None) -> dict[str | Any, Any]:
system_settings = {
"n_plus_one_db_count": options.get("performance.issues.n_plus_one_db.count_threshold"),
"n_plus_one_db_duration_threshold": options.get(
"performance.issues.n_plus_one_db.duration_threshold"
),
"slow_db_query_duration_threshold": options.get(
"performance.issues.slow_db_query.duration_threshold"
),
"render_blocking_fcp_min": options.get(
"performance.issues.render_blocking_assets.fcp_minimum_threshold"
),
"render_blocking_fcp_max": options.get(
"performance.issues.render_blocking_assets.fcp_maximum_threshold"
),
"render_blocking_fcp_ratio": options.get(
"performance.issues.render_blocking_assets.fcp_ratio_threshold"
),
"render_blocking_bytes_min": options.get(
"performance.issues.render_blocking_assets.size_threshold"
),
"consecutive_http_spans_max_duration_between_spans": options.get(
"performance.issues.consecutive_http.max_duration_between_spans"
),
"consecutive_http_spans_count_threshold": options.get(
"performance.issues.consecutive_http.consecutive_count_threshold"
),
"consecutive_http_spans_span_duration_threshold": options.get(
"performance.issues.consecutive_http.span_duration_threshold"
),
"consecutive_http_spans_min_time_saved_threshold": options.get(
"performance.issues.consecutive_http.min_time_saved_threshold"
),
"large_http_payload_size_threshold": options.get(
"performance.issues.large_http_payload.size_threshold"
),
"large_http_payload_filtered_paths": options.get(
"performance.issues.large_http_payload.filtered_paths"
),
"db_on_main_thread_duration_threshold": options.get(
"performance.issues.db_on_main_thread.total_spans_duration_threshold"
),
"file_io_on_main_thread_duration_threshold": options.get(
"performance.issues.file_io_on_main_thread.total_spans_duration_threshold"
),
"uncompressed_asset_duration_threshold": options.get(
"performance.issues.uncompressed_asset.duration_threshold"
),
"uncompressed_asset_size_threshold": options.get(
"performance.issues.uncompressed_asset.size_threshold"
),
"consecutive_db_min_time_saved_threshold": options.get(
"performance.issues.consecutive_db.min_time_saved_threshold"
),
"http_request_delay_threshold": options.get(
"performance.issues.http_overhead.http_request_delay_threshold"
),
"n_plus_one_api_calls_total_duration_threshold": options.get(
"performance.issues.n_plus_one_api_calls.total_duration"
),
"sql_injection_query_value_length_threshold": options.get(
"performance.issues.sql_injection.query_value_length_threshold"
),
"web_vitals_count": options.get("performance.issues.web_vitals.count_threshold"),
}
default_project_settings = (
projectoptions.get_well_known_default(
"sentry:performance_issue_settings",
project=project_id,
)
if project_id
else {}
)
project_option_settings = (
ProjectOption.objects.get_value(
project_id, "sentry:performance_issue_settings", default_project_settings
)
if project_id
else DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS
)
project_settings = {
**default_project_settings,
**project_option_settings,
} # Merge saved project settings into default so updating the default to add new settings works in the future.
return {**system_settings, **project_settings}
# Gets the thresholds to perform performance detection.
# Duration thresholds are in milliseconds.
# Allowed span ops are allowed span prefixes. (eg. 'http' would work for a span with 'http.client' as its op)
def get_detection_settings(
project_id: int | None = None, organization: Organization | None = None
) -> dict[DetectorType, dict[str, Any]]:
settings = get_merged_settings(project_id)
return {
DetectorType.SLOW_DB_QUERY: {
"duration_threshold": settings["slow_db_query_duration_threshold"], # ms
"allowed_span_ops": ["db"],
"detection_enabled": settings["slow_db_queries_detection_enabled"],
},
DetectorType.RENDER_BLOCKING_ASSET_SPAN: {
"fcp_minimum_threshold": settings["render_blocking_fcp_min"], # ms
"fcp_maximum_threshold": settings["render_blocking_fcp_max"], # ms
"fcp_ratio_threshold": settings["render_blocking_fcp_ratio"], # in the range [0, 1]
"minimum_size_bytes": settings["render_blocking_bytes_min"], # in bytes
"maximum_size_bytes": 1_000_000_000, # 1GB
"detection_enabled": settings["large_render_blocking_asset_detection_enabled"],
},
DetectorType.N_PLUS_ONE_DB_QUERIES: {
"count": settings["n_plus_one_db_count"],
"duration_threshold": settings["n_plus_one_db_duration_threshold"], # ms
"detection_enabled": settings["n_plus_one_db_queries_detection_enabled"],
},
DetectorType.EXPERIMENTAL_N_PLUS_ONE_DB_QUERIES: {
"count": settings["n_plus_one_db_count"],
"duration_threshold": settings["n_plus_one_db_duration_threshold"], # ms
"detection_enabled": settings["n_plus_one_db_queries_detection_enabled"],
},
DetectorType.CONSECUTIVE_DB_OP: {
# time saved by running all queries in parallel
"min_time_saved": settings["consecutive_db_min_time_saved_threshold"], # ms
# ratio between time saved and total db span durations
"min_time_saved_ratio": 0.1,
# The minimum duration of a single independent span in ms, used to prevent scenarios with a ton of small spans
"span_duration_threshold": 30, # ms
"consecutive_count_threshold": 2,
"detection_enabled": settings["consecutive_db_queries_detection_enabled"],
},
DetectorType.FILE_IO_MAIN_THREAD: {
# 16ms is when frame drops will start being evident
"duration_threshold": settings["file_io_on_main_thread_duration_threshold"],
"detection_enabled": settings["file_io_on_main_thread_detection_enabled"],
},
DetectorType.DB_MAIN_THREAD: {
# Basically the same as file io, but db instead, so continue using 16ms
"duration_threshold": settings["db_on_main_thread_duration_threshold"],
"detection_enabled": settings["db_on_main_thread_detection_enabled"],
},
DetectorType.N_PLUS_ONE_API_CALLS: {
"total_duration": settings["n_plus_one_api_calls_total_duration_threshold"], # ms
"concurrency_threshold": 5, # ms
"count": 10,
"allowed_span_ops": ["http.client"],
"detection_enabled": settings["n_plus_one_api_calls_detection_enabled"],
},
DetectorType.EXPERIMENTAL_N_PLUS_ONE_API_CALLS: {
"total_duration": settings["n_plus_one_api_calls_total_duration_threshold"], # ms
"concurrency_threshold": 15, # ms
"count": 5,
"allowed_span_ops": ["http.client"],
"detection_enabled": settings["n_plus_one_api_calls_detection_enabled"],
},
DetectorType.M_N_PLUS_ONE_DB: {
"total_duration_threshold": settings["n_plus_one_db_duration_threshold"], # ms
"minimum_occurrences_of_pattern": 3,
"max_sequence_length": 5,
"detection_enabled": settings["n_plus_one_db_queries_detection_enabled"],
},
DetectorType.EXPERIMENTAL_M_N_PLUS_ONE_DB_QUERIES: {
"total_duration_threshold": settings["n_plus_one_db_duration_threshold"], # ms
"minimum_occurrences_of_pattern": 3,
"max_sequence_length": 8,
"max_allowable_depth": 3, # This should not be user-configurable, to avoid O(n^2) complexity and load issues.
"min_percentage_of_db_spans": 0.05,
"detection_enabled": settings["n_plus_one_db_queries_detection_enabled"],
},
DetectorType.UNCOMPRESSED_ASSETS: {
"size_threshold_bytes": settings["uncompressed_asset_size_threshold"],
"duration_threshold": settings["uncompressed_asset_duration_threshold"], # ms
"allowed_span_ops": ["resource.css", "resource.script"],
"detection_enabled": settings["uncompressed_assets_detection_enabled"],
},
DetectorType.CONSECUTIVE_HTTP_OP: {
"span_duration_threshold": settings[
"consecutive_http_spans_span_duration_threshold"
], # ms
"min_time_saved": settings["consecutive_http_spans_min_time_saved_threshold"], # ms
"consecutive_count_threshold": settings["consecutive_http_spans_count_threshold"],
"max_duration_between_spans": settings[
"consecutive_http_spans_max_duration_between_spans"
], # ms
"detection_enabled": settings["consecutive_http_spans_detection_enabled"],
},
DetectorType.LARGE_HTTP_PAYLOAD: {
"payload_size_threshold": settings["large_http_payload_size_threshold"],
"detection_enabled": settings["large_http_payload_detection_enabled"],
"minimum_span_duration": 100, # ms
"organization": organization,
"filtered_paths": settings["large_http_payload_filtered_paths"],
},
DetectorType.HTTP_OVERHEAD: {
"http_request_delay_threshold": settings["http_request_delay_threshold"],
"detection_enabled": settings["http_overhead_detection_enabled"],
},
DetectorType.SQL_INJECTION: {
"detection_enabled": settings["db_query_injection_detection_enabled"],
"query_value_length_threshold": settings["sql_injection_query_value_length_threshold"],
},
DetectorType.QUERY_INJECTION: {
"detection_enabled": settings["db_query_injection_detection_enabled"]
},
}
DETECTOR_CLASSES: list[type[PerformanceDetector]] = [
ConsecutiveDBSpanDetector,
ConsecutiveHTTPSpanDetector,
DBMainThreadDetector,
SlowDBQueryDetector,
RenderBlockingAssetSpanDetector,
NPlusOneDBSpanDetector,
NPlusOneDBSpanExperimentalDetector,
FileIOMainThreadDetector,
NPlusOneAPICallsDetector,
NPlusOneAPICallsExperimentalDetector,
MNPlusOneDBSpanDetector,
MNPlusOneDBSpanExperimentalDetector,
UncompressedAssetSpanDetector,
LargeHTTPPayloadDetector,
HTTPOverheadDetector,
SQLInjectionDetector,
QueryInjectionDetector,
]
def _detect_performance_problems(
data: dict[str, Any], sdk_span: Any, project: Project, standalone: bool = False
) -> list[PerformanceProblem]:
event_id = data.get("event_id", None)
organization = project.organization
with sentry_sdk.start_span(op="function", name="get_detection_settings"):
detection_settings = get_detection_settings(project.id, organization)
if standalone or features.has("organizations:issue-detection-sort-spans", organization):
# The performance detectors expect the span list to be ordered/flattened in the way they
# are structured in the tree. This is an implicit assumption in the performance detectors.
# So we build a tree and flatten it depth first.
# TODO: See if we can update the detectors to work without this assumption so we can
# just pass it a list of spans.
with sentry_sdk.start_span(op="performance_detection", name="sort_spans"):
tree, segment_id = build_tree(data.get("spans", []))
data = {**data, "spans": flatten_tree(tree, segment_id)}
with sentry_sdk.start_span(op="initialize", name="PerformanceDetector"):
detectors: list[PerformanceDetector] = [
detector_class(detection_settings, data)
for detector_class in DETECTOR_CLASSES
if detector_class.is_detection_allowed_for_system()
]
for detector in detectors:
with sentry_sdk.start_span(
op="function", name=f"run_detector_on_data.{detector.type.value}"
):
run_detector_on_data(detector, data)
with sentry_sdk.start_span(op="function", name="report_metrics_for_detectors"):
# Metrics reporting only for detection, not created issues.
report_metrics_for_detectors(
data,
event_id,
detectors,
sdk_span,
organization,
project,
standalone=standalone,
)
problems: list[PerformanceProblem] = []
with sentry_sdk.start_span(op="performance_detection", name="is_creation_allowed"):
for detector in detectors:
if all(
[
detector.is_creation_allowed_for_organization(organization),
detector.is_creation_allowed_for_project(project),
]
):
problems.extend(detector.stored_problems.values())
else:
continue
unique_problems = set(problems)
if len(unique_problems) > 0:
metrics.incr(
"performance.performance_issue.performance_problem_emitted",
len(unique_problems),
sample_rate=1.0,
)
# TODO: Make sure upstream is all compatible with set before switching output type.
return list(unique_problems)
def run_detector_on_data(detector: PerformanceDetector, data: dict[str, Any]) -> None:
if not detector.is_event_eligible(data):
return
spans = data.get("spans", [])
for span in spans:
detector.visit_span(span)
detector.on_complete()
def build_tree(spans: Sequence[dict[str, Any]]) -> tuple[dict[str, Any], str | None]:
span_tree: dict[str, tuple[dict[str, Any], list[dict[str, Any]]]] = {}
segment_id = None
for span in spans:
span_id = span["span_id"]
is_root = span.get("is_segment", False)
if is_root:
segment_id = span_id
if span_id not in span_tree:
span_tree[span_id] = (span, [])
for span, _ in span_tree.values():
parent_id = span.get("parent_span_id")
if parent_id is not None and parent_id in span_tree:
_, children = span_tree[parent_id]
children.append(span)
return span_tree, segment_id
def dfs(
visited: set[str], flattened_spans: list[dict[str, Any]], tree: dict[str, Any], span_id: str
) -> None:
stack = [span_id]
while len(stack):
span_id = stack.pop()
span, children = tree[span_id]
if span_id not in visited:
flattened_spans.append(span)
tree.pop(span_id)
visited.add(span_id)
for child in sorted(children, key=lambda span: span["start_timestamp"], reverse=True):
if child["span_id"] not in visited:
stack.append(child["span_id"])
def flatten_tree(tree: dict[str, Any], segment_id: str | None) -> list[dict[str, Any]]:
visited: set[str] = set()
flattened_spans: list[dict[str, Any]] = []
if segment_id:
dfs(visited, flattened_spans, tree, segment_id)
# Catch all for orphan spans
remaining = sorted(tree.items(), key=lambda span: span[1][0]["start_timestamp"])
for span_id, _ in remaining:
if span_id not in visited:
dfs(visited, flattened_spans, tree, span_id)
return flattened_spans
# Reports metrics and creates spans for detection
def report_metrics_for_detectors(
event: dict[str, Any],
event_id: str | None,
detectors: Sequence[PerformanceDetector],
sdk_span: Any,
organization: Organization,
project: Project,
standalone: bool = False,
) -> None:
all_detected_problems = [i for d in detectors for i in d.stored_problems]
has_detected_problems = bool(all_detected_problems)
sdk_name = get_sdk_name(event)
try:
# Setting a tag isn't critical, the transaction doesn't exist sometimes, if it's called outside prod code (eg. load-mocks / tests)
set_tag = sdk_span.containing_transaction.set_tag
except AttributeError:
set_tag = lambda *args: None
if has_detected_problems:
set_tag("_pi_all_issue_count", len(all_detected_problems))
set_tag("_pi_sdk_name", sdk_name or "")
set_tag("is_standalone_spans", standalone)
metrics.incr(
"performance.performance_issue.aggregate",
len(all_detected_problems),
tags={"sdk_name": sdk_name, "is_standalone_spans": standalone},
)
if event_id:
set_tag("_pi_transaction", event_id)
tags = event.get("tags", [])
browser_name = next(
(tag[1] for tag in tags if tag is not None and tag[0] == "browser.name" and len(tag) == 2),
None,
)
allowed_browser_name = "Other"
if browser_name in [
"Chrome",
"Firefox",
"Safari",
"Electron",
"Chrome Mobile",
"Edge",
"Mobile Safari",
"Opera",
"Opera Mobile",
"Chrome Mobile WebView",
"Chrome Mobile iOS",
"Samsung Internet",
"Firefox Mobile",
]:
# Reduce cardinality in case there are custom browser name tags.
allowed_browser_name = browser_name
detected_tags = {
"sdk_name": sdk_name,
"is_early_adopter": bool(organization.flags.early_adopter),
"is_standalone_spans": standalone,
}
event_integrations = event.get("sdk", {}).get("integrations", []) or []
for integration_name in INTEGRATIONS_OF_INTEREST:
if integration_name in event_integrations:
detected_tags["integration_" + integration_name.lower()] = True
for allowed_sdk_name in SDKS_OF_INTEREST:
if allowed_sdk_name == sdk_name:
detected_tags["sdk_" + allowed_sdk_name.lower()] = True
for detector in detectors:
detector_key = detector.type.value
detected_problems = detector.stored_problems
detected_problem_keys = list(detected_problems.keys())
detected_tags[detector_key] = bool(len(detected_problem_keys))
if not detected_problem_keys:
continue
if detector.type in [DetectorType.UNCOMPRESSED_ASSETS]:
detected_tags["browser_name"] = allowed_browser_name
if detector.type in [DetectorType.CONSECUTIVE_HTTP_OP]:
detected_tags["is_frontend"] = is_event_from_browser_javascript_sdk(event)
first_problem = detected_problems[detected_problem_keys[0]]
if first_problem.fingerprint:
set_tag(f"_pi_{detector_key}_fp", first_problem.fingerprint)
span_id = first_problem.offender_span_ids[0]
set_tag(f"_pi_{detector_key}", span_id)
op_tags = {
"is_standalone_spans": standalone,
"is_creation_allowed": all(
[
detector.is_creation_allowed_for_organization(organization),
detector.is_creation_allowed_for_project(project),
]
),
}
for problem in detected_problems.values():
op = problem.op
op_tags[f"op_{op}"] = True
metrics.incr(
f"performance.performance_issue.{detector_key}",
len(detected_problem_keys),
tags=op_tags,
)
metrics.incr(
"performance.performance_issue.detected",
instance=str(has_detected_problems),
tags=detected_tags,
)
| EventPerformanceProblem |
python | dagster-io__dagster | python_modules/automation/automation/parse_spark_configs.py | {
"start": 508,
"end": 6181
} | class ____(Enum):
STRING = "StringSource"
INT = "IntSource"
FLOAT = "Float"
BOOL = "Bool"
MEMORY = "StringSource" # TODO: We should handle memory field types
TIME = "StringSource" # TODO: We should handle time field types
CONFIG_TYPES = {
#
# APPLICATION PROPERTIES
"spark.app.name": ConfigType.STRING,
"spark.driver.cores": ConfigType.INT,
"spark.driver.maxResultSize": ConfigType.MEMORY,
"spark.driver.memory": ConfigType.MEMORY,
"spark.driver.memoryOverhead": ConfigType.MEMORY,
"spark.executor.memory": ConfigType.MEMORY,
"spark.executor.pyspark.memory": ConfigType.MEMORY,
"spark.executor.memoryOverhead": ConfigType.MEMORY,
"spark.extraListeners": ConfigType.STRING,
"spark.local.dir": ConfigType.STRING,
"spark.logConf": ConfigType.BOOL,
# TODO: Validate against https://spark.apache.org/docs/latest/submitting-applications.html#master-urls
"spark.master": ConfigType.STRING,
# TODO: Validate against client/cluster *only*.
"spark.submit.deployMode": ConfigType.STRING,
"spark.log.callerContext": ConfigType.STRING,
"spark.driver.supervise": ConfigType.BOOL,
#
# RUNTIME ENVIRONMENT
"spark.driver.extraClassPath": ConfigType.STRING,
"spark.driver.extraJavaOptions": ConfigType.STRING,
"spark.driver.extraLibraryPath": ConfigType.STRING,
"spark.driver.userClassPathFirst": ConfigType.BOOL,
"spark.executor.extraClassPath": ConfigType.STRING,
"spark.executor.extraJavaOptions": ConfigType.STRING,
"spark.executor.extraLibraryPath": ConfigType.STRING,
"spark.executor.logs.rolling.maxRetainedFiles": ConfigType.INT,
"spark.executor.logs.rolling.enableCompression": ConfigType.BOOL,
"spark.executor.logs.rolling.maxSize": ConfigType.INT,
# TODO: Can only be 'time' or 'size'
"spark.executor.logs.rolling.strategy": ConfigType.STRING,
"spark.executor.logs.rolling.time.interval": ConfigType.STRING,
"spark.executor.userClassPathFirst": ConfigType.BOOL,
"spark.redaction.regex": ConfigType.STRING,
"spark.python.profile": ConfigType.BOOL,
# TODO: Should be a path?
"spark.python.profile.dump": ConfigType.STRING,
"spark.python.worker.memory": ConfigType.MEMORY,
"spark.python.worker.reuse": ConfigType.BOOL,
"spark.files": ConfigType.STRING,
"spark.submit.pyFiles": ConfigType.STRING,
"spark.jars": ConfigType.STRING,
"spark.jars.packages": ConfigType.STRING,
"spark.jars.excludes": ConfigType.STRING,
"spark.jars.ivy": ConfigType.STRING,
"spark.jars.ivySettings": ConfigType.STRING,
"spark.jars.repositories": ConfigType.STRING,
"spark.pyspark.driver.python": ConfigType.STRING,
"spark.pyspark.python": ConfigType.STRING,
#
# SHUFFLE BEHAVIOR
"spark.reducer.maxSizeInFlight": ConfigType.MEMORY,
"spark.reducer.maxReqsInFlight": ConfigType.INT,
"spark.reducer.maxBlocksInFlightPerAddress": ConfigType.INT,
"spark.maxRemoteBlockSizeFetchToMem": ConfigType.INT,
"spark.shuffle.compress": ConfigType.BOOL,
"spark.shuffle.file.buffer": ConfigType.MEMORY,
"spark.shuffle.io.maxRetries": ConfigType.INT,
"spark.shuffle.io.numConnectionsPerPeer": ConfigType.INT,
"spark.shuffle.io.preferDirectBufs": ConfigType.BOOL,
"spark.shuffle.io.retryWait": ConfigType.TIME,
"spark.shuffle.service.enabled": ConfigType.BOOL,
"spark.shuffle.service.port": ConfigType.INT,
"spark.shuffle.service.index.cache.size": ConfigType.MEMORY,
"spark.shuffle.maxChunksBeingTransferred": ConfigType.INT,
"spark.shuffle.sort.bypassMergeThreshold": ConfigType.INT,
"spark.shuffle.spill.compress": ConfigType.BOOL,
"spark.shuffle.accurateBlockThreshold": ConfigType.INT,
"spark.shuffle.registration.timeout": ConfigType.INT,
"spark.shuffle.registration.maxAttempts": ConfigType.INT,
#
# SPARK UI
### TODO
#
# COMPRESSION AND SERIALIZATION
### TODO
#
# MEMORY MANAGEMENT
"spark.memory.fraction": ConfigType.FLOAT,
"spark.memory.storageFraction": ConfigType.FLOAT,
"spark.memory.offHeap.enabled": ConfigType.BOOL,
"spark.memory.offHeap.size": ConfigType.INT,
"spark.memory.useLegacyMode": ConfigType.BOOL,
"spark.shuffle.memoryFraction": ConfigType.FLOAT,
"spark.storage.memoryFraction": ConfigType.FLOAT,
"spark.storage.unrollFraction": ConfigType.FLOAT,
"spark.storage.replication.proactive": ConfigType.BOOL,
"spark.cleaner.periodicGC.interval": ConfigType.TIME,
"spark.cleaner.referenceTracking": ConfigType.BOOL,
"spark.cleaner.referenceTracking.blocking": ConfigType.BOOL,
"spark.cleaner.referenceTracking.blocking.shuffle": ConfigType.BOOL,
"spark.cleaner.referenceTracking.cleanCheckpoints": ConfigType.BOOL,
#
# EXECUTION BEHAVIOR
"spark.broadcast.blockSize": ConfigType.MEMORY,
"spark.executor.cores": ConfigType.INT,
"spark.default.parallelism": ConfigType.INT,
"spark.executor.heartbeatInterval": ConfigType.TIME,
"spark.files.fetchTimeout": ConfigType.TIME,
"spark.files.useFetchCache": ConfigType.BOOL,
"spark.files.overwrite": ConfigType.BOOL,
"spark.files.maxPartitionBytes": ConfigType.INT,
"spark.files.openCostInBytes": ConfigType.INT,
"spark.hadoop.cloneConf": ConfigType.BOOL,
"spark.hadoop.validateOutputSpecs": ConfigType.BOOL,
"spark.storage.memoryMapThreshold": ConfigType.MEMORY,
# TODO: Can only be 1 or 2.
"spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version": ConfigType.INT,
#
# NETWORKING
### TODO
#
# SCHEDULING
### TODO
#
# DYNAMIC ALLOCATION
### TODO
}
| ConfigType |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/ecs.py | {
"start": 1935,
"end": 3441
} | class ____(EcsBaseSensor):
"""
Poll the cluster state until it reaches a terminal state; raises AirflowException with the failure reason.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/sensor:EcsClusterStateSensor`
:param cluster_name: The name of your cluster.
:param target_state: Success state to watch for. (Default: "ACTIVE")
:param failure_states: Fail if any of these states are reached before the
Success State. (Default: "FAILED" or "INACTIVE")
"""
template_fields: Sequence[str] = aws_template_fields("cluster_name", "target_state", "failure_states")
def __init__(
self,
*,
cluster_name: str,
target_state: EcsClusterStates | None = EcsClusterStates.ACTIVE,
failure_states: set[EcsClusterStates] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.target_state = target_state
self.failure_states = failure_states or {EcsClusterStates.FAILED, EcsClusterStates.INACTIVE}
def poke(self, context: Context):
cluster_state = EcsClusterStates(self.hook.get_cluster_state(cluster_name=self.cluster_name))
self.log.info("Cluster state: %s, waiting for: %s", cluster_state, self.target_state)
_check_failed(cluster_state, self.target_state, self.failure_states)
return cluster_state == self.target_state
| EcsClusterStateSensor |
python | scrapy__scrapy | tests/pipelines.py | {
"start": 42,
"end": 173
} | class ____:
def open_spider(self):
1 / 0
def process_item(self, item):
return item
| ZeroDivisionErrorPipeline |
python | ray-project__ray | rllib/examples/_old_api_stack/models/mobilenet_v2_with_lstm_models.py | {
"start": 452,
"end": 3177
} | class ____(RecurrentNetwork):
"""A conv. + recurrent keras net example using a pre-trained MobileNet."""
def __init__(
self, obs_space, action_space, num_outputs, model_config, name, cnn_shape
):
super(MobileV2PlusRNNModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
self.cell_size = 16
visual_size = cnn_shape[0] * cnn_shape[1] * cnn_shape[2]
state_in_h = tf.keras.layers.Input(shape=(self.cell_size,), name="h")
state_in_c = tf.keras.layers.Input(shape=(self.cell_size,), name="c")
seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)
inputs = tf.keras.layers.Input(shape=(None, visual_size), name="visual_inputs")
input_visual = inputs
input_visual = tf.reshape(
input_visual, [-1, cnn_shape[0], cnn_shape[1], cnn_shape[2]]
)
cnn_input = tf.keras.layers.Input(shape=cnn_shape, name="cnn_input")
cnn_model = tf.keras.applications.mobilenet_v2.MobileNetV2(
alpha=1.0,
include_top=True,
weights=None,
input_tensor=cnn_input,
pooling=None,
)
vision_out = cnn_model(input_visual)
vision_out = tf.reshape(
vision_out, [-1, tf.shape(inputs)[1], vision_out.shape.as_list()[-1]]
)
lstm_out, state_h, state_c = tf.keras.layers.LSTM(
self.cell_size, return_sequences=True, return_state=True, name="lstm"
)(
inputs=vision_out,
mask=tf.sequence_mask(seq_in),
initial_state=[state_in_h, state_in_c],
)
# Postprocess LSTM output with another hidden layer and compute values.
logits = tf.keras.layers.Dense(
self.num_outputs, activation=tf.keras.activations.linear, name="logits"
)(lstm_out)
values = tf.keras.layers.Dense(1, activation=None, name="values")(lstm_out)
# Create the RNN model
self.rnn_model = tf.keras.Model(
inputs=[inputs, seq_in, state_in_h, state_in_c],
outputs=[logits, values, state_h, state_c],
)
self.rnn_model.summary()
@override(RecurrentNetwork)
def forward_rnn(self, inputs, state, seq_lens):
model_out, self._value_out, h, c = self.rnn_model([inputs, seq_lens] + state)
return model_out, [h, c]
@override(ModelV2)
def get_initial_state(self):
return [
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
]
@override(ModelV2)
def value_function(self):
return tf.reshape(self._value_out, [-1])
| MobileV2PlusRNNModel |
python | ansible__ansible | test/units/cli/test_console.py | {
"start": 832,
"end": 1723
} | class ____(unittest.TestCase):
def test_parse(self):
cli = ConsoleCLI(['ansible test'])
cli.parse()
self.assertTrue(cli.parser is not None)
def test_module_args(self):
cli = ConsoleCLI(['ansible test'])
cli.parse()
res = cli.module_args('copy')
self.assertTrue(cli.parser is not None)
self.assertIn('src', res)
self.assertIn('backup', res)
self.assertIsInstance(res, list)
@patch('ansible.utils.display.Display.display')
def test_helpdefault(self, mock_display):
cli = ConsoleCLI(['ansible test'])
cli.parse()
cli.modules = set(['copy'])
cli.helpdefault('copy')
self.assertTrue(cli.parser is not None)
self.assertTrue(len(mock_display.call_args_list) > 0,
"display.display should have been called but was not")
| TestConsoleCLI |
python | tensorflow__tensorflow | tensorflow/python/data/ops/dataset_ops.py | {
"start": 185608,
"end": 185740
} | class ____(DatasetV2):
"""Abstract class representing a dataset with no inputs."""
def _inputs(self):
return []
| DatasetSource |
python | sympy__sympy | sympy/vector/vector.py | {
"start": 15412,
"end": 15684
} | class ____(BasisDependentZero, Vector):
"""
Class to denote a zero vector
"""
_op_priority = 12.1
_pretty_form = '0'
_latex_form = r'\mathbf{\hat{0}}'
def __new__(cls):
obj = BasisDependentZero.__new__(cls)
return obj
| VectorZero |
python | django__django | tests/aggregation/models.py | {
"start": 285,
"end": 509
} | class ____(models.Model):
name = models.CharField(max_length=255)
num_awards = models.IntegerField()
duration = models.DurationField(blank=True, null=True)
def __str__(self):
return self.name
| Publisher |
python | getsentry__sentry | src/sentry/dynamic_sampling/types.py | {
"start": 109,
"end": 343
} | class ____(models.TextChoices):
"""Defines the scope where target sample rates are configured in an
organization."""
ORGANIZATION = "organization", _("Organization")
PROJECT = "project", _("Project")
| DynamicSamplingMode |
python | pytorch__pytorch | test/export/test_converter.py | {
"start": 670,
"end": 51147
} | class ____(TestCase):
def setUp(self):
init_torchbind_implementations()
self.torch_bind_ops = [
torch.ops._TorchScriptTesting.queue_pop,
torch.ops._TorchScriptTesting.queue_push,
torch.ops._TorchScriptTesting.queue_size,
]
def tearDown(self):
return
def _check_equal_ts_ep_converter(
self,
M,
tracing_inputs,
option: Optional[list[str]] = None,
check_persistent=False,
lifted_tensor_constants=None,
runtime_inputs: Optional[list[Any]] = None,
) -> list[ExportedProgram]:
# By default, it tests both jit.trace and jit.script.
if option is None:
option = ["trace", "script"]
if check_persistent:
num_iterations = 10
else:
num_iterations = 1
ep_list = []
for opt in option:
if opt == "script":
# Separate two models for testing non-functional effects
if check_persistent:
original_ts_model = torch.jit.script(M())
ts_model = torch.jit.script(M())
eager_model = M()
else:
original_ts_model = torch.jit.script(M)
ts_model = torch.jit.script(M)
eager_model = M
elif opt == "trace":
if check_persistent:
original_ts_model = torch.jit.trace(M(), tracing_inputs)
ts_model = torch.jit.trace(M(), tracing_inputs)
eager_model = M()
else:
original_ts_model = torch.jit.trace(M, tracing_inputs)
ts_model = torch.jit.trace(M, tracing_inputs)
eager_model = M
else:
raise RuntimeError(f"Unrecognized mode for torch.jit: {opt}")
converter = TS2EPConverter(ts_model, tracing_inputs)
ep = converter.convert()
ep_list.append(ep)
if runtime_inputs is None:
runtime_inputs = []
for inp in [tracing_inputs] + runtime_inputs:
for _ in range(num_iterations):
orig_out, _ = pytree.tree_flatten(original_ts_model(*inp))
ep_out, _ = pytree.tree_flatten(ep.module()(*inp))
# Check module.
if isinstance(eager_model, torch.nn.Module):
expected_state_dict = OrderedDict()
expected_state_dict.update(ts_model.state_dict())
if lifted_tensor_constants:
expected_state_dict.update(lifted_tensor_constants)
self.assertEqual(
ep.state_dict.keys(),
expected_state_dict.keys(),
)
# Check results
self._check_tensor_list_equal(ep_out, orig_out)
return ep_list
def _check_tensor_list_equal(self, xs: list[torch.Tensor], ys: list[torch.Tensor]):
self.assertEqual(len(xs), len(ys))
for x, y in zip(xs, ys):
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(x.shape, y.shape)
self.assertTrue(torch.allclose(x, y))
else:
self.assertEqual(type(x), type(y))
self.assertEqual(x, y)
def test_ts2ep_converter_basic(self):
class MSingle(torch.nn.Module):
def forward(self, x, y):
return x + y
class MMulti(torch.nn.Module):
def forward(self, x, y):
x = x.cos() + 1
y = y.sin() - 1
return x, y
inp = (torch.ones(1, 3), torch.ones(1, 3))
runtime_inps = [
(torch.ones(1, 4), torch.ones(1, 4)),
(torch.ones(1, 5), torch.ones(1, 5)),
]
self._check_equal_ts_ep_converter(MSingle(), inp, runtime_inputs=runtime_inps)
self._check_equal_ts_ep_converter(MMulti(), inp, runtime_inputs=runtime_inps)
def test_ts2ep_converter_container_output(self):
# Output is a List.
class MOutputList(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
a = x * x
b = y + y
return [a, b]
# Output is a Tuple.
class MOutputTuple(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
a = x * x
b = y + y
return (a, b)
# Output is a Dict.
class MOutputDict(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
a = x * x
b = y + y
return {"data": {"mul": a, "add": b}}
inp = (torch.tensor(4), torch.tensor(4))
runtime_inputs = [
(torch.tensor(5), torch.tensor(5)),
(torch.tensor(1), torch.tensor(1)),
]
# Traced function must use immutable structure as output.
self._check_equal_ts_ep_converter(
MOutputList(), inp, ["script"], runtime_inputs=runtime_inputs
)
self._check_equal_ts_ep_converter(
MOutputTuple(), inp, runtime_inputs=runtime_inputs
)
self._check_equal_ts_ep_converter(
MOutputDict(), inp, ["script"], runtime_inputs=runtime_inputs
)
def test_aten_dim(self):
class Module(torch.nn.Module):
def forward(self, x):
num_dim = x.dim()
return torch.ones(num_dim)
inp = (torch.ones(1, 3),)
self._check_equal_ts_ep_converter(
Module(), inp, runtime_inputs=[(torch.ones(1, 5),)]
)
def test_aten_len(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor):
length = len(x)
return torch.ones(length)
# aten::len.Tensor
inp = (torch.ones(2, 3),)
self._check_equal_ts_ep_converter(Module(), inp)
class Module(torch.nn.Module):
def forward(self, x: list[int]):
length = len(x)
return torch.ones(length)
# aten::len.t
inp = ([1, 2, 3],)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
class Module(torch.nn.Module):
def forward(self, x: dict[int, str]):
length = len(x)
return torch.ones(length)
# aten::len.Dict_int
inp = ({1: "a", 2: "b", 3: "c"},)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
class Module(torch.nn.Module):
def forward(self, x: dict[bool, str]):
length = len(x)
return torch.ones(length)
# aten::len.Dict_bool
inp = ({True: "a", False: "b"},)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
class Module(torch.nn.Module):
def forward(self, x: dict[float, str]):
length = len(x)
return torch.ones(length)
# aten::len.Dict_float
inp = ({1.2: "a", 3.4: "b"},)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
class Module(torch.nn.Module):
def forward(self, x: dict[torch.Tensor, str]):
length = len(x)
return torch.ones(length)
# aten::len.Dict_Tensor
inp = ({torch.zeros(2, 3): "a", torch.ones(2, 3): "b"},)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
# aten::len.str and aten::len.Dict_str are not supported
# since torch._C._jit_flatten does not support str
# inp = ("abcdefg",)
# self._check_equal_ts_ep_converter(Module(), inp)
# inp = ({"a": 1, "b": 2},)
# self._check_equal_ts_ep_converter(Module(), inp)
def test_aten_add_t(self):
# python list append
class Module(torch.nn.Module):
def forward(self, x: list[torch.Tensor]):
out = []
out = out + x
a = torch.cat(out)
out = out + x
b = torch.cat(out)
return a, b
inp = ([torch.ones(2, 3), torch.ones(2, 3)],)
runtime_inputs = [
([torch.ones(4, 6), torch.ones(8, 6)],),
([torch.ones(4, 4), torch.ones(4, 4)],),
]
self._check_equal_ts_ep_converter(
Module(), inp, ["script"], runtime_inputs=runtime_inputs
)
def test_aten_to_dtype_with_mutating_storage(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
x = x.to(y.dtype)
torch.ops.aten.index_put_(x, [torch.tensor([0])], y)
return x
inp = (torch.ones(2, 3), torch.tensor([0, 0, 0]))
self._check_equal_ts_ep_converter(Module(), inp)
def test_prim_min(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
x_len = len(x)
y_len = len(y)
# prim::min.int
len_int = min(x_len, y_len)
# prim::min.float
len_float = int(min(x_len * 2.0, y_len * 2.0))
# prim::min.self_int
len_self_int = min([x_len, y_len])
# prim::min.self_float
len_self_float = int(min([x_len * 2.0, y_len * 2.0]))
# prim::min.float_int
len_float_int = int(min(x_len * 2.0, y_len))
# prim::min.int_float
len_int_float = int(min(x_len, y_len * 2.0))
return torch.ones(
len_int
+ len_float
+ len_self_int
+ len_self_float
+ len_float_int
+ len_int_float
)
inp = (torch.randn(10, 2), torch.randn(5))
self._check_equal_ts_ep_converter(Module(), inp)
def test_prim_max(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
x_len = len(x)
y_len = len(y)
# prim::max.int
len_int = max(x_len, y_len)
# prim::max.float
len_float = int(max(x_len * 2.0, y_len * 2.0))
# prim::max.self_int
len_self_int = max([x_len, y_len])
# prim::max.self_float
len_self_float = int(max([x_len * 2.0, y_len * 2.0]))
# prim::max.float_int
len_float_int = int(max(x_len * 2.0, y_len))
# prim::max.int_float
len_int_float = int(max(x_len, y_len * 2.0))
return torch.ones(
len_int
+ len_float
+ len_self_int
+ len_self_float
+ len_float_int
+ len_int_float
)
inp = (torch.randn(10, 2), torch.randn(5))
self._check_equal_ts_ep_converter(Module(), inp)
def test_aten___getitem___list(self):
class Module(torch.nn.Module):
def forward(self, x):
y = torch.split(x, 2)
return y[0]
inp = (torch.rand((3, 2)),)
runtime_inps = [(torch.rand((3, 8)),)]
self._check_equal_ts_ep_converter(Module(), inp, runtime_inputs=runtime_inps)
def test_aten___getitem___dict(self):
class Module(torch.nn.Module):
def forward(self, x):
y = torch.split(x, 2)
d_int = {0: y[0], 1: y[1]}
d_str = {"0": y[0], "1": y[1]}
d_bool = {True: y[0], False: y[1]}
d_float = {0.1: y[0], 2.3: y[1]}
return d_int[0], d_str["0"], d_bool[True], d_float[0.1]
inp = (torch.rand((3, 2)),)
self._check_equal_ts_ep_converter(Module(), inp)
def test_prim_device(self):
class Module(torch.nn.Module):
def forward(self, x):
device = x.device
return torch.ones(2, 3, device=device)
inp = (torch.rand(3, 4),)
self._check_equal_ts_ep_converter(Module(), inp)
@requires_cuda
def test_prim_device_cuda(self):
class Module(torch.nn.Module):
def forward(self, x):
device = x.device
return torch.ones(2, 3, device=device)
inp = (torch.rand((3, 4), device="cuda:0"),)
self._check_equal_ts_ep_converter(Module(), inp)
def test_prim_dtype(self):
class Module(torch.nn.Module):
def forward(self, x):
dtype = x.dtype
return torch.ones(2, 3, dtype=dtype)
for dtype in [
torch.float32,
torch.double,
]:
inp = (torch.rand((3, 4), dtype=dtype),)
self._check_equal_ts_ep_converter(Module(), inp)
for dtype in [
torch.uint8,
torch.int8,
torch.int32,
]:
inp = (torch.randint(high=128, size=(3, 4), dtype=dtype),)
self._check_equal_ts_ep_converter(Module(), inp)
def test_convert_if_basic(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
if x:
return y * y
else:
return y + y
inp = (torch.tensor(True), torch.tensor(4))
ep_list = self._check_equal_ts_ep_converter(M(), inp)
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(torch.tensor(False), torch.tensor(4)),
M()(torch.tensor(False), torch.tensor(4)),
)
def test_convert_if_tuple_out(self):
class M(torch.nn.Module):
def true_fn(self, y, z):
return (z * z, z + z)
def false_fn(self, y, z):
return (y * y * y, y + y)
def forward(self, x: torch.Tensor, y: torch.Tensor):
z = y * y
if x:
res = self.true_fn(y, z)
else:
res = self.false_fn(y, z)
return res[0] + res[1]
inp = (torch.tensor(True), torch.tensor(4))
ep_list = self._check_equal_ts_ep_converter(M(), inp)
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(torch.tensor(False), torch.tensor(4)),
M()(torch.tensor(False), torch.tensor(4)),
)
def test_convert_if_multiple_out(self):
class M(torch.nn.Module):
def true_fn(self, y, z):
return z * z
def false_fn(self, y, z):
return y * y * y
def forward(self, x: torch.Tensor, y: torch.Tensor):
z = y * y
if x:
res1 = self.true_fn(y, z)
res2 = y
else:
res1 = z
res2 = self.false_fn(y, z)
return res1 + res2
inp = (torch.tensor(True), torch.tensor(4))
ep_list = self._check_equal_ts_ep_converter(M(), inp)
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(torch.tensor(False), torch.tensor(4)),
M()(torch.tensor(False), torch.tensor(4)),
)
def test_profiler__record_function(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
handle = torch.ops.profiler._record_function_enter_new("foo", None)
y = x * 2 + 4
torch.ops.profiler._record_function_exit(handle)
return y
x = torch.randn(10, 10)
self._check_equal_ts_ep_converter(Module(), (x,))
def test_aten_floordiv(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x // 2
x = torch.randn(10, 10)
self._check_equal_ts_ep_converter(Module(), (x,))
def test_aten___is__(self):
class Module(torch.nn.Module):
def forward(
self, x: torch.Tensor, y: torch.Tensor
) -> tuple[bool, torch.Tensor]:
z = x + 1
return x is y, z
# Traced function must return output that has tensors.
inp = (torch.randn(10, 10), torch.rand(10, 10))
runtime_inps = [(torch.randn(20, 2), torch.rand(20, 2))]
self._check_equal_ts_ep_converter(
Module(), inp, ["script"], runtime_inputs=runtime_inps
)
def test_aten___isnot__(self):
class Module(torch.nn.Module):
def forward(
self, x: torch.Tensor, y: torch.Tensor
) -> tuple[bool, torch.Tensor]:
z = x + 1
return x is not y, z
# Traced function must return output that has tensors.
inp = (torch.randn(10, 10), torch.rand(10, 10))
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
def test_aten___not__(self):
class Module(torch.nn.Module):
def forward(
self, x: torch.Tensor, y: torch.Tensor
) -> tuple[bool, torch.Tensor]:
z = x + 1
return not (x is not y), z
# Traced function must return output that has tensors.
inp = (torch.randn(10, 10), torch.rand(10, 10))
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
def test_ts2ep_converter_unpack(self):
class MUnpackList(torch.nn.Module):
def forward(self, x):
x, y = torch.split(x, 2)
return x + y
class MUnpackTuple(torch.nn.Module):
def forward(self, x_tuple: tuple[torch.Tensor, torch.Tensor]):
x, y = x_tuple
x = x.cos()
return x + y
inp = (torch.ones(4),)
self._check_equal_ts_ep_converter(MUnpackList(), inp)
inp = ((torch.zeros(1, 4), torch.ones(1, 4)),)
self._check_equal_ts_ep_converter(MUnpackTuple(), inp)
@unittest.skipIf(
IS_WINDOWS,
"torch.cond doesn't go through torch.compile on windows"
"causing output not normalized as list",
)
def test_convert_retrace_nested_scripted_modules(self):
class Wrapper(torch.nn.Module):
def __init__(self, mod) -> None:
super().__init__()
self.mod = mod
def forward(self, x, y):
return self.mod(x, y)
class LinearM(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.linear = torch.nn.Linear(dim, dim)
def forward(self, x, y):
return self.linear(y)
class M(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
m = LinearM(dim)
m = torch.jit.script(m)
self.mod1 = m
self.mod2 = Wrapper(m)
def forward(self, x: torch.Tensor, y: torch.Tensor):
if x:
return -self.mod1(x, y) - self.mod2(x, y)
else:
return -self.mod1(x, y) + self.mod2(x, y)
class NestedM(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
m = M(dim)
m = torch.jit.script(m)
self.mod1 = m
self.mod2 = Wrapper(m)
def forward(self, x: torch.Tensor, y: torch.Tensor):
if x:
return self.mod1(x, y) + self.mod2(x, y)
else:
return self.mod1(x, y) - self.mod2(x, y)
inp = (
torch.tensor(True),
torch.randn([3, 3]),
)
self._check_equal_ts_ep_converter(NestedM(3), inp)
def test_convert_nn_module_with_nested_param(self):
class M(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.linear = torch.nn.Linear(dim, dim)
def forward(self, x: torch.Tensor):
return self.linear(x)
class NestedM(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.linear = torch.nn.Linear(dim, dim)
self.m = M(dim)
def forward(self, x: torch.Tensor):
return self.linear(self.m(x))
class SuperNestedM(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.linear = torch.nn.Linear(dim, dim)
self.m = NestedM(dim)
def forward(self, x: torch.Tensor):
return self.linear(self.m(x))
inp = (torch.ones(3),)
orig_m = NestedM(3)
self._check_equal_ts_ep_converter(orig_m, inp)
orig_m = SuperNestedM(3)
self._check_equal_ts_ep_converter(orig_m, inp)
def test_convert_nn_module_with_nested_buffer(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.nn.Buffer(torch.randn(1))
def forward(self, x: torch.Tensor):
return self.w + x
class NestedM(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m = M()
self.w = torch.nn.Buffer(torch.randn(1))
def forward(self, x: torch.Tensor):
return self.w + self.m(x)
class SuperNestedM(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m = NestedM()
self.w = torch.nn.Buffer(torch.randn(1))
def forward(self, x: torch.Tensor):
return self.w + self.m(x)
inp = (torch.ones(1),)
orig_m = NestedM()
self._check_equal_ts_ep_converter(orig_m, inp)
orig_m = SuperNestedM()
self._check_equal_ts_ep_converter(orig_m, inp)
def test_convert_nn_module_with_nested_if_and_buffer(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.nn.Buffer(torch.randn(1))
self.count = 1
def forward(self, x: torch.Tensor):
return self.w + x + self.count
class NestedM(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m1 = M()
self.m2 = M()
self.w = torch.nn.Buffer(torch.randn(1))
def forward(self, x: torch.Tensor):
if torch.sum(x) > 1:
return self.w + self.m1(x)
else:
return self.w + self.m2(x)
# Super nested, parameters need to be lifted
# multiple times.
class SuperNestedM(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m1 = NestedM()
self.m2 = NestedM()
self.w = torch.nn.Buffer(torch.randn(1))
def forward(self, x: torch.Tensor):
if torch.max(x) > 1:
return self.w + self.m1(x)
else:
return self.w + self.m2(x)
# Super nested module testing.
inp = (torch.ones(1),)
orig_m = SuperNestedM()
ep_list = self._check_equal_ts_ep_converter(orig_m, inp)
t = inp[0]
t -= 1
for ep in ep_list:
torch.testing.assert_close(
ep.module()(*inp),
orig_m(*inp),
)
@unittest.skipIf(
IS_WINDOWS,
"torch.cond doesn't go through torch.compile on windows"
"causing output not normalized as list",
)
def test_convert_nn_module_with_nested_if_and_param(self):
class M(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.linear = torch.nn.Linear(dim, dim)
def forward(self, x: torch.Tensor):
return self.linear(x)
class NestedM(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.m1 = M(dim)
self.m2 = M(dim)
self.linear = torch.nn.Linear(dim, dim)
def forward(self, x: torch.Tensor):
if torch.sum(x) > 1:
return self.linear(self.m1(x))
else:
return self.linear(self.m2(x))
# Super nested, parameters need to be lifted
# multiple times.
class SuperNestedM1(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.m1 = NestedM(dim)
self.m2 = NestedM(dim)
self.linear = torch.nn.Linear(dim, dim)
def forward(self, x: torch.Tensor):
if torch.max(x) > 1:
return self.linear(self.m1(x))
else:
return self.linear(self.m2(x))
# Super nested, even the input needs to be
# lifted recursively due to value propagation optimization.
class SuperNestedM2(torch.nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.m1 = NestedM(dim)
self.m2 = NestedM(dim)
self.linear = torch.nn.Linear(dim, dim)
def forward(self, x: torch.Tensor):
if torch.sum(x) > 1:
return self.linear(self.m1(x))
else:
return self.linear(self.m2(x))
# Basic module testing.
inp = (torch.ones(3),)
orig_m = M(3)
ep_list = self._check_equal_ts_ep_converter(orig_m, inp)
t = inp[0]
t -= 0.8
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(*inp),
orig_m(*inp),
)
# Nested module testing.
inp = (torch.ones(3),)
orig_m = NestedM(3)
ep_list = self._check_equal_ts_ep_converter(orig_m, inp)
t = inp[0]
t -= 0.8
# Skip jit.traced because it specializes on one path.
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(*inp),
orig_m(*inp),
)
# Super nested module testing.
inp = (torch.ones(3),)
orig_m = SuperNestedM1(3)
ep_list = self._check_equal_ts_ep_converter(orig_m, inp)
t = inp[0]
t -= 0.8
# Skip jit.traced because it specializes on one path.
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(*inp),
orig_m(*inp),
)
# Super nested module testing.
inp = (torch.ones(3),)
orig_m = SuperNestedM2(3)
ep_list = self._check_equal_ts_ep_converter(orig_m, inp)
t = inp[0]
t -= 0.8
# Skip jit.traced because it specializes on one path.
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(*inp),
orig_m(*inp),
)
def test_convert_if_duplicate_attr_names(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = 1
self.h = 2
def forward(self, x: torch.Tensor, y: int):
self.w = self.w * 10
self.h = self.h * 20
if y > 10:
res = self.w + x
else:
res = self.h + x
if y < 10:
res = self.w + res
else:
res = self.h + res
return res
inp = (torch.ones(3), 5)
self._check_equal_ts_ep_converter(M(), inp, option=["script"])
def test_ts2ep_converter_contains(self):
class MIn(torch.nn.Module):
def forward(self, x: torch.Tensor):
return x.dtype in [torch.float32, torch.float64]
class MNotIn(torch.nn.Module):
def forward(self, x: torch.Tensor):
return x.dtype in [torch.int8]
class MTensorIn(torch.nn.Module):
def forward(self, x: torch.Tensor, x_dict: dict[torch.Tensor, str]):
return x in x_dict
# Traced function must return output that has tensors.
inp = (torch.tensor(4),)
self._check_equal_ts_ep_converter(MIn(), inp, ["script"])
self._check_equal_ts_ep_converter(MNotIn(), inp, ["script"])
# TODO: update test to use reference for in.
inp = (torch.tensor(4), {torch.tensor(4): "foo"})
self._check_equal_ts_ep_converter(MTensorIn(), inp, ["script"])
inp = (torch.tensor(1), {torch.tensor(4): "foo"})
self._check_equal_ts_ep_converter(MTensorIn(), inp, ["script"])
def test_ts2ep_converter_custom_op(self):
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
torch._dynamo.config.capture_scalar_outputs = True
torch._dynamo.config.capture_dynamic_output_shape_ops = True
torch.library.define(
"mylib::foo",
"(Tensor x) -> Tensor",
lib=lib,
)
# PyTorch custorm op implementation
@torch.library.impl(
"mylib::foo",
"CompositeExplicitAutograd",
lib=lib,
)
def foo_impl(x):
return x + x
# Meta function of the custom op.
@torch.library.register_fake(
"mylib::foo",
lib=lib,
)
def foo_meta(x):
return x + x
class M(torch.nn.Module):
def forward(self, x):
return torch.ops.mylib.foo(x)
inp = (torch.randn(3, 3),)
m = M()
self._check_equal_ts_ep_converter(m, inp)
def test_convert_func_without_param(self):
def func1(x, y):
return x + y
def func2(x, y):
if x.sum() > 0:
return x + y
else:
return x - y
inp = (
torch.tensor(1),
torch.tensor(1),
)
self._check_equal_ts_ep_converter(func1, inp)
ep_list = self._check_equal_ts_ep_converter(func2, inp)
t = inp[0]
t -= 1
for ep in ep_list[1:]:
torch.testing.assert_close(
ep.module()(*inp),
func2(*inp),
)
def test_implicit_constant_to_tensor_handling(self):
def func1(x):
return x + 2
def func2(x, y):
return x * y / (x - 2 * y) + y
def func3(x):
return x + torch.tensor([3])
def func4():
val = torch.tensor(float("inf"))
return torch.full((10, 10), val)
def func5():
x = -1
return x * torch.ones(1, dtype=torch.float), torch.zeros(
1, dtype=torch.float
)
def func6(x1, x2, x3, x4):
return (
x1.numel(),
x1.size(),
x2.numel(),
x2.size(),
x3.numel(),
x3.size(),
x4.numel(),
x4.size(),
torch.ones(x1.numel()), # Just make sure downstream ops still work.
torch.ones(x1.size()), # Just make sure downstream ops still work.
)
class M1(torch.nn.Module):
def __init__(self, value):
super().__init__()
self.x = torch.tensor(value)
def forward(self):
return self.x.clone()
class M2(torch.nn.Module):
def forward(self, x):
return torch.tensor(4) + x
inp = (torch.randn([2, 2]),)
self._check_equal_ts_ep_converter(func1, inp)
inp = (torch.randn([2, 2]), torch.randn([2, 2]))
self._check_equal_ts_ep_converter(func2, inp)
inp = (torch.randn([2, 2]),)
self._check_equal_ts_ep_converter(func3, inp)
self._check_equal_ts_ep_converter(func4, ())
self._check_equal_ts_ep_converter(M1(5), ())
inp = (torch.randn(2),)
self._check_equal_ts_ep_converter(M2(), inp)
self._check_equal_ts_ep_converter(func5, ())
inp = (
torch.randn([2, 3, 4]).to(torch.int8),
torch.randn([2, 3, 4]).to(torch.int32),
torch.randn([2, 3, 4]).to(torch.float32),
torch.randn([2, 3, 4]).to(torch.float64),
)
self._check_equal_ts_ep_converter(func6, inp)
# TODO: Additional check once dynamic shape is supported.
# for ep in ep_list:
# self.assertEqual(
# ep.module()(
# torch.randn([1, 1, 1]).to(torch.int8),
# torch.randn([1, 1, 1]).to(torch.int32),
# torch.randn([1, 1, 1]).to(torch.float32),
# torch.randn([1, 1, 1]).to(torch.float64),
# )[0], 1
# )
def test_aten_tensor_dtype_int(self):
class M(torch.nn.Module):
def forward(self, x):
y = torch.tensor(1, dtype=torch.int32)
return y + x
ep_list = self._check_equal_ts_ep_converter(M(), (torch.tensor(1),))
for ep in ep_list:
self.assertEqual(len(ep.constants), 1)
def test_aten_tensor_prim_dtype(self):
class M(torch.nn.Module):
def forward(self, x):
y = torch.tensor(1, dtype=x.dtype)
return y + x
ep_list = self._check_equal_ts_ep_converter(M(), (torch.tensor(1),))
for ep in ep_list:
self.assertEqual(len(ep.constants), 1)
def test_aten_tensor_dynamic(self):
class M(torch.nn.Module):
def forward(self, x):
s = x.shape[0]
y = torch.tensor(s)
return y
ep_list = self._check_equal_ts_ep_converter(M(), (torch.ones(3),))
for ep in ep_list:
self.assertEqual(len(ep.constants), 0)
# TODO: Additional check once dynamic shape is supported.
# for ep in ep_list:
# torch.testing.assert_close(
# ep.module()(torch.ones(4)),
# M()(torch.ones(4)),
# )
class M(torch.nn.Module):
def forward(self, x):
s = x.shape[0]
y = torch.tensor([s, s * 2, 1])
return y
ep_list = self._check_equal_ts_ep_converter(M(), (torch.ones(3),))
# Trace directly inline a tensor constant.
for ep in ep_list[1:]:
self.assertEqual(len(ep.constants), 0)
# TODO: Additional check once dynamic shape is supported.
# for ep in ep_list:
# torch.testing.assert_close(
# ep.module()(torch.ones(4)),
# M()(torch.ones(4)),
# )
def test_prim_tolist(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor) -> list[int]:
return x.tolist()
inp = (torch.tensor([1, 2, 3]),)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor) -> list[list[int]]:
return x.tolist()
inp = (torch.tensor([[1, 2, 3], [4, 5, 6]]),)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
def test_get_tensor_constants(self):
# Since self.data is only read but not written, it is lifted as
# constant tensors.
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.data = torch.randn(3, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.data
class Goo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.data = torch.randn(3, 2)
self.foo = Foo()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x + self.data + self.foo.data + self.foo(x)
inp = (torch.randn(3, 2),)
goo = Goo()
self._check_equal_ts_ep_converter(goo, inp)
def test_prim_SetAttr(self):
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.data = torch.nn.Buffer(torch.ones(3, 2))
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.data = self.data + x
return x + x
inp = (torch.ones(3, 2),)
self._check_equal_ts_ep_converter(
Module, inp, ["script"], check_persistent=True
)
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.data = torch.nn.Buffer(torch.ones(3, 2))
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.data = self.data + x
return x + self.data
inp = (torch.ones(3, 2),)
self._check_equal_ts_ep_converter(
Module, inp, ["script"], check_persistent=True
)
# export lifts a tensor constant (self.data) as an input if it is not assigned.
# If it is assigned, export will error and ask users to register it as a buffer.
# In converter, we change tensor constants that are assigned as a buffer automatically,
# since it might be hard to manually register them as buffers.
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.data = torch.ones(3, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.data = self.data + x
return x + self.data
inp = (torch.ones(3, 2),)
self._check_equal_ts_ep_converter(
Module,
inp,
["script"],
check_persistent=True,
lifted_tensor_constants=OrderedDict([("data", torch.ones(3, 2))]),
)
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.count = 0
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.count += 1
return x + self.count
# check_persistent is False since export specializes on non-tensor constants
inp = (torch.ones(3, 2),)
self._check_equal_ts_ep_converter(
Module(), inp, ["script"], check_persistent=False
)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.count = 0
def forward(self, x):
count1 = self.count
self.count += 1
count2 = self.count
self.count += 1
count3 = self.count
return x + count1 + count2 + count3
inp = (torch.ones(1),)
self._check_equal_ts_ep_converter(M(), inp, ["script"], check_persistent=False)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w2 = torch.nn.Buffer(torch.ones(1))
def forward(self, x: torch.Tensor):
self.w2 += 1
return self.w2
inp = (torch.ones(1),)
self._check_equal_ts_ep_converter(M, inp, ["script"], check_persistent=True)
def test_raise_exception(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int) -> torch.Tensor:
if y > 0:
raise RuntimeError("test")
return x + y
# match non-strict export behavior that errors when the given input leads to
# RaiseException.
with self.assertRaisesRegex(torch.jit.Error, "builtins.RuntimeError"):
inp = (torch.randn(3, 2), 1)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
# Matching non-strict export behavior that only executes 1 if-branch according
# to the given input.
inp = (torch.randn(3, 2), 0)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int) -> torch.Tensor:
z = x
if y > 0:
raise RuntimeError("test")
# z = x
else:
z = x + y
return x + y + z
# match non-strict export behavior that errors when the given input leads to
# RaiseException.
with self.assertRaisesRegex(torch.jit.Error, "builtins.RuntimeError"):
inp = (torch.randn(3, 2), 1)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
# Matching non-strict export behavior that only executes 1 if-branch according
# to the given input.
inp = (torch.randn(3, 2), 0)
self._check_equal_ts_ep_converter(Module(), inp, ["script"])
def test_context_manager(self):
class ContextManager:
def __init__(self) -> None:
self.count = 0
return
def __enter__(self):
self.count += 1
return
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.count -= 1
return
class M(torch.nn.Module):
def forward(self, x, y):
with ContextManager():
res = x + y
return res
inp = (torch.ones(3, 3), torch.ones(3, 3))
self._check_equal_ts_ep_converter(M(), inp)
def test_hidden_input_name(self):
@torch.jit.script
def func1(x):
return x + 1
def func2(*args):
v = torch.cat(args, dim=1)
return v * v
inp = (torch.randn([1, 1]),)
self._check_equal_ts_ep_converter(func1, inp)
inp = (torch.ones(5, 5),)
# Cannot script again.
self._check_equal_ts_ep_converter(torch.ops.aten.relu, inp, ["trace"])
M = 2
Ns = [4, 2, 1]
empty = torch.tensor([], dtype=torch.double)
values = [empty] + [torch.randn(M, N) for N in Ns]
# Cannot script variable length inputs.
self._check_equal_ts_ep_converter(func2, tuple(values), ["trace"])
def test_ts2ep_multi_outputs_on_call_ops(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.pool = torch.nn.AdaptiveMaxPool2d((2, 2), return_indices=True)
def forward(self, x: torch.Tensor, y: torch.Tensor):
return (
torch.max(x, dim=0),
torch.topk(x, 3),
torch.sort(x, dim=0),
self.pool(y),
)
inp = (torch.randn([4, 4]), torch.randn([1, 1, 10, 10]))
self._check_equal_ts_ep_converter(M(), inp)
def test_aten_append_t(self):
class M(torch.nn.Module):
def forward(self, x: list[torch.Tensor]):
out = []
out.append(x[0] + x[1])
out.append(x[0] - x[1])
out1 = torch.cat(out)
out.append(x[0] * x[1])
out2 = torch.cat(out)
return out, out1, out2
inp = ([torch.ones(2, 3), torch.ones(2, 3)],)
# Trace already unrolls the list.
self._check_equal_ts_ep_converter(M(), inp, ["script"])
def test_convert_script_object(self):
class M1(torch.nn.Module):
def __init__(self):
super().__init__()
self.tq = _empty_tensor_queue()
def forward(self, x: torch.Tensor):
self.tq.push(x)
torch.ops._TorchScriptTesting.queue_push(self.tq, x.cos())
return torch.ops._TorchScriptTesting.queue_pop(self.tq), self.tq.pop()
inp = (torch.randn(2, 3),)
self._check_equal_ts_ep_converter(M1(), inp, ["script"])
def test_ts2ep_with_loop(self):
def func1(x, x_list: list[torch.Tensor]):
a, b, c = x, x, x
for _ in range(1, 5, 2):
for k in range(5):
a = a + a + k
b = b + b - k
x_list.append(x_list[k] + x_list[k + 1])
for k in range(5):
b = b + b - k
c = c + c * k
x_list.append(x_list[k] + x_list[k + 1] - x_list[k + 2])
return x, x_list
def func2(x): # noqa: F841
for i in range(x.size(0)):
x = x * x * i
return x
def func3(x): # noqa: F841
while x.sum() < 10:
x += x.sin()
return x
inp = (
torch.tensor(1),
[torch.ones([2, 2]), torch.ones([2, 2]) * 2],
)
runtime_inps = [
(
torch.tensor(1),
[torch.ones([8, 8]), torch.ones([8, 8]) * 2],
)
]
# Trace unrolls the loop.
self._check_equal_ts_ep_converter(
func1, inp, ["script"], runtime_inputs=runtime_inps
)
# TODO: (2/N)
# Trace unrolls the loop.
# self._check_equal_ts_ep_converter(func2, inp, ["script"])
# TODO: (3/N)
# Trace unrolls the loop.
# self._check_equal_ts_ep_converter(func3, inp, ["script"])
@unittest.skipIf(
IS_WINDOWS,
"Windows does not support qnnpack",
)
# qnnpack not supported on s390x
@xfailIfS390X
def test_ts2ep_convert_quantized_model1(self):
class Standalone(torch.nn.Module):
def __init__(self):
super().__init__()
self.quant = torch.ao.quantization.QuantStub()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.ReLU()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.relu(x)
x = self.dequant(x)
return x
def fuse_model(self):
torch.ao.quantization.fuse_modules(
self, [["conv2", "relu"]], inplace=True
)
with override_quantized_engine("qnnpack"):
model = Standalone()
model.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
model.fuse_model()
torch.ao.quantization.prepare(model, inplace=True)
model(torch.randn(4, 1, 4, 4))
torch.ao.quantization.convert(model, inplace=True)
# Use customized checking here, because state_dict of quantization will be
# modified by the quantization pass.
inp = (torch.randn(4, 1, 4, 4),)
original_ts_model = torch.jit.script(model)
ts_model = torch.jit.script(model)
converter = TS2EPConverter(ts_model, inp)
ep = converter.convert()
orig_out, _ = pytree.tree_flatten(original_ts_model(*inp))
ep_out, _ = pytree.tree_flatten(ep.module()(*inp))
self._check_tensor_list_equal(orig_out, ep_out)
# qnnpack/xnnpack not supported on s390x.
# it is required by
# torch.ops.prepacked.linear_clamp_prepack
# and
# torch.ops.prepacked.linear_clamp_run
@xfailIfS390X
def test_ts2ep_convert_quantized_model_with_opcontext(self):
class M(torch.nn.Module):
def __init__(self, linear_op):
super().__init__()
self.linear_op = linear_op
def forward(self, x):
x = torch.ops.prepacked.linear_clamp_run(x, self.linear_op)
return x
linear_op = torch.ops.prepacked.linear_clamp_prepack(
torch.randn(10, 10), torch.randn(10)
)
m = M(linear_op)
inp = (torch.randn(1, 10),)
self._check_equal_ts_ep_converter(m, inp, ["script"])
# qnnpack/xnnpack not supported on s390x.
# it is required by
# torch.ops.prepacked.linear_clamp_prepack
# and
# torch.ops.prepacked.linear_clamp_run
@xfailIfS390X
def test_ts2ep_convert_quantized_model_with_opcontext_and_constant(self):
class M(torch.nn.Module):
def __init__(self, linear_op):
super().__init__()
self.linear_op = linear_op
def forward(self, x):
x = torch.ops.prepacked.linear_clamp_run(
x + torch.ones(1), self.linear_op
)
return x
linear_op = torch.ops.prepacked.linear_clamp_prepack(
torch.randn(10, 10), torch.randn(10)
)
m = M(linear_op)
inp = (torch.randn(1, 10),)
self._check_equal_ts_ep_converter(m, inp, ["script"])
if __name__ == "__main__":
run_tests()
| TestConverter |
python | PrefectHQ__prefect | tests/server/api/test_server.py | {
"start": 14823,
"end": 19094
} | class ____:
def test_singleton_on_port(self):
server_8001 = SubprocessASGIServer(port=8001)
assert server_8001 is SubprocessASGIServer(port=8001)
server_random = SubprocessASGIServer()
assert server_random is SubprocessASGIServer()
assert server_8001 is not server_random
def test_find_available_port_returns_available_port(self):
server = SubprocessASGIServer()
port = server.find_available_port()
assert server.is_port_available(port)
assert 8000 <= port < 9000
def test_is_port_available_returns_true_for_available_port(self):
server = SubprocessASGIServer()
port = server.find_available_port()
assert server.is_port_available(port)
def test_is_port_available_returns_false_for_unavailable_port(self):
server = SubprocessASGIServer()
with contextlib.closing(
socket.socket(socket.AF_INET, socket.SOCK_STREAM)
) as sock:
sock.bind(("127.0.0.1", 12345))
assert not server.is_port_available(12345)
def test_start_is_idempotent(self, respx_mock, monkeypatch):
popen_mock = MagicMock()
monkeypatch.setattr("prefect.server.api.server.subprocess.Popen", popen_mock)
respx_mock.get("http://127.0.0.1:8000/api/health").respond(status_code=200)
server = SubprocessASGIServer(port=8000)
server.start()
server.start()
assert popen_mock.call_count == 1
def test_address_returns_correct_address(self):
server = SubprocessASGIServer(port=8000)
assert server.address == "http://127.0.0.1:8000"
def test_address_returns_correct_api_url(self):
server = SubprocessASGIServer(port=8000)
assert server.api_url == "http://127.0.0.1:8000/api"
@pytest.mark.skip(reason="This test is flaky and needs to be fixed")
def test_start_and_stop_server(self):
server = SubprocessASGIServer()
server.start()
health_response = httpx.get(f"{server.address}/api/health")
assert health_response.status_code == 200
server.stop()
with pytest.raises(httpx.RequestError):
httpx.get(f"{server.api_url}/health")
@pytest.mark.skip(reason="This test is flaky and needs to be fixed")
def test_run_as_context_manager(self):
with SubprocessASGIServer() as server:
health_response = httpx.get(f"{server.api_url}/health")
assert health_response.status_code == 200
with pytest.raises(httpx.RequestError):
httpx.get(f"{server.api_url}/health")
@pytest.mark.skip(reason="This test is flaky and needs to be fixed")
def test_run_a_flow_against_subprocess_server(self):
@flow
def f():
return 42
server = SubprocessASGIServer()
server.start()
with temporary_settings({PREFECT_API_URL: server.api_url}):
assert f() == 42
client = get_client(sync_client=True)
assert len(client.read_flow_runs()) == 1
server.stop()
def test_run_with_temp_db(self):
"""
This test ensures that the format of the database connection URL used for the default
test profile does not retain state between subprocess server runs.
"""
@flow
def f():
return 42
with temporary_settings(
{PREFECT_API_DATABASE_CONNECTION_URL: "sqlite+aiosqlite:///:memory:"}
):
SubprocessASGIServer._instances = {}
server = SubprocessASGIServer()
server.start(timeout=30)
with temporary_settings({PREFECT_API_URL: server.api_url}):
assert f() == 42
client = get_client(sync_client=True)
assert len(client.read_flow_runs()) == 1
server.stop()
# do it again to ensure the db is recreated
server = SubprocessASGIServer()
server.start(timeout=30)
with temporary_settings({PREFECT_API_URL: server.api_url}):
assert f() == 42
client = get_client(sync_client=True)
assert len(client.read_flow_runs()) == 1
server.stop()
| TestSubprocessASGIServer |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/codeeditor/tests/assets/autopep8_max_line.py | {
"start": 385,
"end": 580
} | class ____:
def __init__(self):
super().__init__()
self.x = 2
def method3(self):
pass
def method2(self):
pass
def method1(self):
pass
| Class1 |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 18313,
"end": 19407
} | class ____:
def test_init(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootRemovedEvent(doc, m, "setter", "invoker")
assert e.document == doc
assert e.model == m
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_kind(self) -> None:
assert bde.RootRemovedEvent.kind == "RootRemoved"
def test_to_serializable(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootRemovedEvent(doc, m, "setter", "invoker")
s = Serializer()
r = s.encode(e)
assert r == dict(kind=e.kind, model=m.ref)
assert s.buffers == []
def test_dispatch(self) -> None:
doc = Document()
m = SomeModel()
e = bde.RootRemovedEvent(doc, m, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
# SessionCallbackAdded --------------------------------------------------------
| TestRootRemovedEvent |
python | django__django | django/template/backends/dummy.py | {
"start": 261,
"end": 1325
} | class ____(BaseEngine):
app_dirname = "template_strings"
def __init__(self, params):
params = params.copy()
options = params.pop("OPTIONS").copy()
if options:
raise ImproperlyConfigured("Unknown options: {}".format(", ".join(options)))
super().__init__(params)
def from_string(self, template_code):
return Template(template_code)
def get_template(self, template_name):
tried = []
for template_file in self.iter_template_filenames(template_name):
try:
with open(template_file, encoding="utf-8") as fp:
template_code = fp.read()
except FileNotFoundError:
tried.append(
(
Origin(template_file, template_name, self),
"Source does not exist",
)
)
else:
return Template(template_code)
raise TemplateDoesNotExist(template_name, tried=tried, backend=self)
| TemplateStrings |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 50586,
"end": 54391
} | class ____(BuiltinFunctionT):
_id = "abs"
_inputs = [("value", INT256_T)]
_return_type = INT256_T
def _try_fold(self, node):
validate_call_args(node, 1)
value = node.args[0].get_folded_value()
if not isinstance(value, vy_ast.Int):
raise UnfoldableNode
value = abs(value.value)
return vy_ast.Int.from_node(node, value=value)
def build_IR(self, expr, context):
value = Expr.parse_value_expr(expr.args[0], context)
sub = [
"with",
"orig",
value,
[
"if",
["slt", "orig", 0],
# clamp orig != -2**255 (because it maps to itself under negation)
["seq", ["assert", ["ne", "orig", ["sub", 0, "orig"]]], ["sub", 0, "orig"]],
"orig",
],
]
return IRnode.from_list(sub, typ=INT256_T)
# CREATE* functions
CREATE2_SENTINEL = dummy_node_for_type(BYTES32_T)
# create helper functions
# generates CREATE op sequence + zero check for result
def _create_ir(value, buf, length, salt, revert_on_failure=True):
args = [value, buf, length]
create_op = "create"
if salt is not CREATE2_SENTINEL:
create_op = "create2"
args.append(salt)
ret = IRnode.from_list(ensure_eval_once("create_builtin", [create_op, *args]))
if not revert_on_failure:
return ret
with ret.cache_when_complex("addr") as (b1, addr):
ret = IRnode.from_list(["seq", check_create_operation(addr), addr])
return b1.resolve(ret)
# calculate the gas used by create for a given number of bytes
def _create_addl_gas_estimate(size, should_use_create2):
ret = 200 * size
if should_use_create2:
ret += SHA3_PER_WORD * ceil32(size) // 32
return ret
def eip1167_bytecode():
# NOTE cyclic import?
from vyper.ir.compile_ir import assembly_to_evm
loader_asm = [
"PUSH1",
0x2D,
"RETURNDATASIZE",
"DUP2",
"PUSH1",
0x09,
"RETURNDATASIZE",
"CODECOPY",
"RETURN",
]
forwarder_pre_asm = [
"CALLDATASIZE",
"RETURNDATASIZE",
"RETURNDATASIZE",
"CALLDATACOPY",
"RETURNDATASIZE",
"RETURNDATASIZE",
"RETURNDATASIZE",
"CALLDATASIZE",
"RETURNDATASIZE",
"PUSH20", # [address to delegate to]
]
forwarder_post_asm = [
"GAS",
"DELEGATECALL",
"RETURNDATASIZE",
"DUP3",
"DUP1",
"RETURNDATACOPY",
"SWAP1",
"RETURNDATASIZE",
"SWAP2",
"PUSH1",
0x2B, # jumpdest of whole program.
"JUMPI",
"REVERT",
"JUMPDEST",
"RETURN",
]
return (
assembly_to_evm(loader_asm)[0],
assembly_to_evm(forwarder_pre_asm)[0],
assembly_to_evm(forwarder_post_asm)[0],
)
# "standard" initcode for code which can be larger than 256 bytes.
# returns the code starting from 0x0b with len `codesize`.
# NOTE: it assumes codesize <= 2**24.
def _create_preamble(codesize):
from vyper.ir.compile_ir import assembly_to_evm
evm_len = 0x0B # 11 bytes
asm = [
# use PUSH3 to be able to deal with larger contracts
"PUSH3",
# blank space for codesize
0x00,
0x00,
0x00,
"RETURNDATASIZE",
"DUP2",
"PUSH1",
evm_len,
"RETURNDATASIZE",
"CODECOPY",
"RETURN",
]
evm = assembly_to_evm(asm)[0]
assert len(evm) == evm_len, evm
shl_bits = (evm_len - 4) * 8 # codesize needs to go right after the PUSH3
# mask codesize into the aforementioned "blank space"
return ["or", bytes_to_int(evm), shl(shl_bits, codesize)], evm_len
| Abs |
python | mlflow__mlflow | mlflow/gateway/providers/mlflow.py | {
"start": 1393,
"end": 2065
} | class ____(BaseModel):
predictions: list[list[StrictFloat]]
@field_validator("predictions", mode="before")
def validate_predictions(cls, predictions):
if isinstance(predictions, list) and not predictions:
raise ValueError("The input list is empty")
if isinstance(predictions, list) and all(
isinstance(item, list) and not item for item in predictions
):
raise ValueError("One or more lists in the returned prediction response are empty")
elif all(isinstance(item, float) for item in predictions):
return [predictions]
else:
return predictions
| EmbeddingsResponse |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk3cairo.py | {
"start": 151,
"end": 1312
} | class ____(FigureCanvasCairo, FigureCanvasGTK3):
def on_draw_event(self, widget, ctx):
if self._idle_draw_id:
GLib.source_remove(self._idle_draw_id)
self._idle_draw_id = 0
self.draw()
with (self.toolbar._wait_cursor_for_draw_cm() if self.toolbar
else nullcontext()):
allocation = self.get_allocation()
# Render the background before scaling, as the allocated size here is in
# logical pixels.
Gtk.render_background(
self.get_style_context(), ctx,
0, 0, allocation.width, allocation.height)
scale = self.device_pixel_ratio
# Scale physical drawing to logical size.
ctx.scale(1 / scale, 1 / scale)
self._renderer.set_context(ctx)
# Set renderer to physical size so it renders in full resolution.
self._renderer.width = allocation.width * scale
self._renderer.height = allocation.height * scale
self._renderer.dpi = self.figure.dpi
self.figure.draw(self._renderer)
@_BackendGTK3.export
| FigureCanvasGTK3Cairo |
python | walkccc__LeetCode | solutions/2552. Count Increasing Quadruplets/2552.py | {
"start": 0,
"end": 693
} | class ____:
def countQuadruplets(self, nums: list[int]) -> int:
ans = 0
# dp[j] := the number of triplets (i, j, k) where i < j < k and nums[i] < nums[k] <
# nums[j]. Keep this information for l to use later.
dp = [0] * len(nums)
# k can be treated as l.
for k in range(2, len(nums)):
numLessThanK = 0
# j can be treated as i.
for j in range(k):
if nums[j] < nums[k]:
numLessThanK += 1 # nums[i] < nums[k]
# nums[j] < nums[l], so we should add dp[j] since we find a new
# quadruplets for (i, j, k, l).
ans += dp[j]
elif nums[j] > nums[k]:
dp[j] += numLessThanK
return ans
| Solution |
python | python-attrs__attrs | tests/test_make.py | {
"start": 24468,
"end": 34939
} | class ____:
"""
Tests for keyword-only attributes.
"""
def test_adds_keyword_only_arguments(self):
"""
Attributes can be added as keyword-only.
"""
@attr.s
class C:
a = attr.ib()
b = attr.ib(default=2, kw_only=True)
c = attr.ib(kw_only=True)
d = attr.ib(default=attr.Factory(lambda: 4), kw_only=True)
c = C(1, c=3)
assert c.a == 1
assert c.b == 2
assert c.c == 3
assert c.d == 4
def test_ignores_kw_only_when_init_is_false(self):
"""
Specifying ``kw_only=True`` when ``init=False`` is essentially a no-op.
"""
@attr.s
class C:
x = attr.ib(init=False, default=0, kw_only=True)
y = attr.ib()
c = C(1)
assert c.x == 0
assert c.y == 1
def test_keyword_only_attributes_presence(self):
"""
Raises `TypeError` when keyword-only arguments are
not specified.
"""
@attr.s
class C:
x = attr.ib(kw_only=True)
with pytest.raises(TypeError) as e:
C()
assert (
"missing 1 required keyword-only argument: 'x'"
) in e.value.args[0]
def test_keyword_only_attributes_unexpected(self):
"""
Raises `TypeError` when unexpected keyword argument passed.
"""
@attr.s
class C:
x = attr.ib(kw_only=True)
with pytest.raises(TypeError) as e:
C(x=5, y=10)
assert "got an unexpected keyword argument 'y'" in e.value.args[0]
def test_keyword_only_attributes_can_come_in_any_order(self):
"""
Mandatory vs non-mandatory attr order only matters when they are part
of the __init__ signature and when they aren't kw_only (which are
moved to the end and can be mandatory or non-mandatory in any order,
as they will be specified as keyword args anyway).
"""
@attr.s
class C:
a = attr.ib(kw_only=True)
b = attr.ib(kw_only=True, default="b")
c = attr.ib(kw_only=True)
d = attr.ib()
e = attr.ib(default="e")
f = attr.ib(kw_only=True)
g = attr.ib(kw_only=True, default="g")
h = attr.ib(kw_only=True)
i = attr.ib(init=False)
c = C("d", a="a", c="c", f="f", h="h")
assert c.a == "a"
assert c.b == "b"
assert c.c == "c"
assert c.d == "d"
assert c.e == "e"
assert c.f == "f"
assert c.g == "g"
assert c.h == "h"
def test_keyword_only_attributes_allow_subclassing(self):
"""
Subclass can define keyword-only attributed without defaults,
when the base class has attributes with defaults.
"""
@attr.s
class Base:
x = attr.ib(default=0)
@attr.s
class C(Base):
y = attr.ib(kw_only=True)
c = C(y=1)
assert c.x == 0
assert c.y == 1
def test_keyword_only_class_level(self):
"""
`kw_only` can be provided at the attr.s level, converting all
attributes to `kw_only.`
"""
@attr.s(kw_only=True)
class C:
x = attr.ib()
y = attr.ib(kw_only=True)
with pytest.raises(TypeError):
C(0, y=1)
c = C(x=0, y=1)
assert c.x == 0
assert c.y == 1
def test_keyword_only_class_level_subclassing(self):
"""
When `force_kw_only=True`, subclass `kw_only` propagates to attrs
inherited from the base, allowing non-default following default.
"""
@attr.s
class Base:
x = attr.ib(default=0)
@attr.s(kw_only=True)
class C(Base):
y = attr.ib()
with pytest.raises(TypeError):
C(1)
with pytest.raises(TypeError):
C(0, y=1)
c = C(x=0, y=1)
assert c.x == 0
assert c.y == 1
@attr.s(kw_only=True, force_kw_only=False)
class C(Base):
y = attr.ib()
c = C(0, y=1)
assert c.x == 0
assert c.y == 1
def test_init_false_attribute_after_keyword_attribute(self):
"""
A positional attribute cannot follow a `kw_only` attribute,
but an `init=False` attribute can because it won't appear
in `__init__`
"""
@attr.s
class KwArgBeforeInitFalse:
kwarg = attr.ib(kw_only=True)
non_init_function_default = attr.ib(init=False)
non_init_keyword_default = attr.ib(
init=False, default="default-by-keyword"
)
@non_init_function_default.default
def _init_to_init(self):
return self.kwarg + "b"
c = KwArgBeforeInitFalse(kwarg="a")
assert c.kwarg == "a"
assert c.non_init_function_default == "ab"
assert c.non_init_keyword_default == "default-by-keyword"
def test_init_false_attribute_after_keyword_attribute_with_inheritance(
self,
):
"""
A positional attribute cannot follow a `kw_only` attribute,
but an `init=False` attribute can because it won't appear
in `__init__`. This test checks that we allow this
even when the `kw_only` attribute appears in a parent class
"""
@attr.s
class KwArgBeforeInitFalseParent:
kwarg = attr.ib(kw_only=True)
@attr.s
class KwArgBeforeInitFalseChild(KwArgBeforeInitFalseParent):
non_init_function_default = attr.ib(init=False)
non_init_keyword_default = attr.ib(
init=False, default="default-by-keyword"
)
@non_init_function_default.default
def _init_to_init(self):
return self.kwarg + "b"
c = KwArgBeforeInitFalseChild(kwarg="a")
assert c.kwarg == "a"
assert c.non_init_function_default == "ab"
assert c.non_init_keyword_default == "default-by-keyword"
def test_attr_level_kw_only_and_class_level_kw_only(self):
"""
If a field explicitly sets 'kw_only=False', it should not be made
keyword-only even if the class sets 'kw_only=True'.
"""
@attr.s(kw_only=True)
class OldClassOldBehavior:
yes = attr.ib()
no = attr.ib(kw_only=False)
@attr.s(kw_only=True, force_kw_only=False)
class OldClassNewBehavior:
yes = attr.ib()
no = attr.ib(kw_only=False)
@attr.define(kw_only=True, force_kw_only=True)
class NewClassOldBehavior:
yes = attr.field()
no = attr.field(kw_only=False)
@attr.define(kw_only=True)
class NewClassNewBehavior:
yes = attr.field()
no = attr.field(kw_only=False)
for cls in [OldClassNewBehavior, NewClassNewBehavior]:
fs = fields_dict(cls)
assert fs["yes"].kw_only is True
assert fs["no"].kw_only is False
for cls in [OldClassOldBehavior, NewClassOldBehavior]:
fs = fields_dict(cls)
assert fs["yes"].kw_only is True
assert fs["no"].kw_only is True
def test_kw_only_inheritance(self):
"""
Comprehensive test about how `kw_only` works when there's multiple
levels of inheritance with different `kw_only` settings.
"""
@attr.define()
class A:
a = attr.field()
a_t = attr.field(kw_only=True)
a_f = attr.field(kw_only=False)
@attr.define(kw_only=True)
class B(A):
b = attr.field()
b_t = attr.field(kw_only=True)
b_f = attr.field(kw_only=False)
@attr.define(kw_only=False)
class C(B):
c = attr.field()
c_t = attr.field(kw_only=True)
c_f = attr.field(kw_only=False)
fs = fields_dict(A)
assert fs["a"].kw_only is False
assert fs["a_t"].kw_only is True
assert fs["a_f"].kw_only is False
fs = fields_dict(B)
assert fs["a"].kw_only is False
assert fs["a_t"].kw_only is True
assert fs["a_f"].kw_only is False
assert fs["b"].kw_only is True
assert fs["b_t"].kw_only is True
assert fs["b_f"].kw_only is False
fs = fields_dict(C)
assert fs["a"].kw_only is False
assert fs["a_t"].kw_only is True
assert fs["a_f"].kw_only is False
assert fs["b"].kw_only is True
assert fs["b_t"].kw_only is True
assert fs["b_f"].kw_only is False
assert fs["c"].kw_only is False
assert fs["c_t"].kw_only is True
assert fs["c_f"].kw_only is False
def test_kw_only_inheritance_force_kw_only(self):
"""
Similar to above, but when `force_kw_only` (pre-25.3.0 behavior).
"""
@attr.define(force_kw_only=True)
class A:
a = attr.field()
a_t = attr.field(kw_only=True)
a_f = attr.field(kw_only=False)
@attr.define(kw_only=True, force_kw_only=True)
class B(A):
b = attr.field()
b_t = attr.field(kw_only=True)
b_f = attr.field(kw_only=False)
@attr.define(kw_only=False, force_kw_only=True)
class C(B):
c = attr.field()
c_t = attr.field(kw_only=True)
c_f = attr.field(kw_only=False)
fs = fields_dict(A)
assert fs["a"].kw_only is False
assert fs["a_t"].kw_only is True
assert fs["a_f"].kw_only is False
fs = fields_dict(B)
assert fs["a"].kw_only is True
assert fs["a_t"].kw_only is True
assert fs["a_f"].kw_only is True
assert fs["b"].kw_only is True
assert fs["b_t"].kw_only is True
assert fs["b_f"].kw_only is True
fs = fields_dict(C)
assert fs["a"].kw_only is False
assert fs["a_t"].kw_only is True
assert fs["a_f"].kw_only is False
assert fs["b"].kw_only is True
assert fs["b_t"].kw_only is True
assert fs["b_f"].kw_only is True
assert fs["c"].kw_only is False
assert fs["c_t"].kw_only is True
assert fs["c_f"].kw_only is False
@attr.s
| TestKeywordOnlyAttributes |
python | getsentry__sentry | tests/sentry/web/frontend/test_doc_integration_avatar.py | {
"start": 200,
"end": 735
} | class ____(APITestCase):
endpoint = "sentry-doc-integration-avatar-url"
def test_headers(self) -> None:
doc = self.create_doc_integration(name="spiderman", has_avatar=True)
url = reverse(self.endpoint, args=[doc.avatar.get().ident])
response = self.client.get(url)
assert response.status_code == 200
assert response["Cache-Control"] == FOREVER_CACHE
assert response.get("Vary") == "Accept-Language, Cookie"
assert response.get("Set-Cookie") is None
| DocIntegrationAvatartest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-salesforce/unit_tests/config_builder.py | {
"start": 122,
"end": 1280
} | class ____:
def __init__(self) -> None:
self._config = {
"client_id": "fake_client_id",
"client_secret": "fake_client_secret",
"refresh_token": "fake_refresh_token",
"start_date": "2010-01-18T21:18:20Z",
"is_sandbox": False,
"wait_timeout": 15,
}
def start_date(self, start_date: datetime) -> "ConfigBuilder":
self._config["start_date"] = start_date.strftime("%Y-%m-%dT%H:%M:%SZ")
return self
def stream_slice_step(self, stream_slice_step: str) -> "ConfigBuilder":
self._config["stream_slice_step"] = stream_slice_step
return self
def client_id(self, client_id: str) -> "ConfigBuilder":
self._config["client_id"] = client_id
return self
def client_secret(self, client_secret: str) -> "ConfigBuilder":
self._config["client_secret"] = client_secret
return self
def refresh_token(self, refresh_token: str) -> "ConfigBuilder":
self._config["refresh_token"] = refresh_token
return self
def build(self) -> Mapping[str, Any]:
return self._config
| ConfigBuilder |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py | {
"start": 1888,
"end": 2627
} | class ____(
AutoMaterializeRuleEvaluationData,
NamedTuple(
"_ParentUpdatedRuleEvaluationData",
[
("updated_asset_keys", frozenset[AssetKey]),
("will_update_asset_keys", frozenset[AssetKey]),
],
),
):
@property
def metadata(self) -> MetadataMapping:
return {
**{
f"updated_parent_{i + 1}": MetadataValue.asset(k)
for i, k in enumerate(sorted(self.updated_asset_keys))
},
**{
f"will_update_parent_{i + 1}": MetadataValue.asset(k)
for i, k in enumerate(sorted(self.will_update_asset_keys))
},
}
@whitelist_for_serdes
| ParentUpdatedRuleEvaluationData |
python | ray-project__ray | rllib/utils/tests/test_actor_manager.py | {
"start": 2009,
"end": 16030
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_sync_call_healthy_only(self):
"""Test synchronous remote calls to only healthy actors."""
actors = [Actor.remote(i) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results = []
for _ in range(10):
results.extend(manager.foreach_actor(lambda w: w.call()).ignore_errors())
# Wait for actors to recover.
wait_for_restore()
# Notice that since we only fire calls against healthy actors,
# we wouldn't be aware that the actors have been recovered.
# So once an actor is taken out of the lineup (10% chance),
# it will not go back in, and we should have few results here.
# Basically takes us 7 calls to kill all the actors.
# Note that we can hardcode 10 here because we are using deterministic
# sequences of random numbers.
self.assertEqual(len(results), 7)
manager.clear()
def test_sync_call_all_actors(self):
"""Test synchronous remote calls to all actors, regardless of their states."""
actors = [Actor.remote(i) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results = []
for _ in range(10):
# Make sure we have latest states of all actors.
results.extend(
manager.foreach_actor(lambda w: w.call(), healthy_only=False)
)
# Wait for actors to recover.
wait_for_restore()
# We fired against all actors regardless of their status.
# So we should get 40 results back.
self.assertEqual(len(results), 40)
# Since the actors are always restored before next round of calls,
# we should get more results back.
# Some of these calls still failed, but 15 good results in total.
# Note that we can hardcode 15 here because we are using deterministic
# sequences of random numbers.
self.assertEqual(len([r for r in results if r.ok]), 15)
manager.clear()
def test_sync_call_return_obj_refs(self):
"""Test synchronous remote calls to all actors asking for raw ObjectRefs."""
actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results = list(
manager.foreach_actor(
lambda w: w.call(),
healthy_only=False,
return_obj_refs=True,
)
)
# We fired against all actors regardless of their status.
# So we should get 40 results back.
self.assertEqual(len(results), 4)
for r in results:
# Each result is an ObjectRef.
self.assertTrue(r.ok)
self.assertTrue(isinstance(r.get(), ray.ObjectRef))
manager.clear()
def test_sync_call_fire_and_forget(self):
"""Test synchronous remote calls with 0 timeout_seconds."""
actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results1 = []
for _ in range(10):
manager.probe_unhealthy_actors(mark_healthy=True)
results1.extend(
manager.foreach_actor(lambda w: w.call(), timeout_seconds=0)
)
# Wait for actors to recover.
wait_for_restore()
# Timeout is 0, so we returned immediately.
# We may get a couple of results back if the calls are fast,
# but that is not important.
results2 = [
r.get()
for r in manager.foreach_actor(
lambda w: w.call(), healthy_only=False
).ignore_errors()
]
# Results from blocking calls show the # of calls happend on
# each remote actor. 11 calls to each actor in total.
self.assertEqual(results2, [11, 11, 11, 11])
manager.clear()
def test_sync_call_same_actor_multiple_times(self):
"""Test multiple synchronous remote calls to the same actor."""
actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
# 2 synchronous call to actor 0.
results = manager.foreach_actor(
lambda w: w.call(),
remote_actor_ids=[0, 0],
)
# Returns 1 and 2, representing the first and second calls to actor 0.
self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])
manager.clear()
def test_async_call_same_actor_multiple_times(self):
"""Test multiple asynchronous remote calls to the same actor."""
actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
# 2 asynchronous call to actor 0.
num_of_calls = manager.foreach_actor_async(
lambda w: w.call(),
remote_actor_ids=[0, 0],
)
self.assertEqual(num_of_calls, 2)
# Now, let's actually fetch the results.
results = manager.fetch_ready_async_reqs(timeout_seconds=None)
# Returns 1 and 2, representing the first and second calls to actor 0.
self.assertEqual([r.get() for r in results.ignore_errors()], [1, 2])
manager.clear()
def test_sync_call_not_ignore_error(self):
"""Test synchronous remote calls that returns errors."""
actors = [Actor.remote(i) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results = []
for _ in range(10):
manager.probe_unhealthy_actors(mark_healthy=True)
results.extend(manager.foreach_actor(lambda w: w.call()))
# Wait for actors to recover.
wait_for_restore()
# Some calls did error out.
self.assertTrue(any([not r.ok for r in results]))
manager.clear()
def test_sync_call_not_bringing_back_actors(self):
"""Test successful remote calls will not bring back actors unless told to."""
actors = [Actor.remote(i) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results = manager.foreach_actor(lambda w: w.call())
# Some calls did error out.
self.assertTrue(any([not r.ok for r in results]))
# Wait for actors to recover.
wait_for_restore()
manager.probe_unhealthy_actors(mark_healthy=False)
# Restored actors were not marked healthy (`mark_healthy=False` above).
# Only 2 healthy actors.
self.assertEqual(manager.num_healthy_actors(), 2)
manager.clear()
def test_async_call(self):
"""Test asynchronous remote calls work."""
actors = [Actor.remote(i) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
results = []
for _ in range(10):
manager.foreach_actor_async(lambda w: w.call())
results.extend(manager.fetch_ready_async_reqs(timeout_seconds=None))
# Wait for actors to recover.
wait_for_restore()
# Note that we can hardcode the numbers here because of the deterministic
# lists of random numbers we use.
# 7 calls succeeded, 4 failed.
# The number of results back is much lower than 40, because we do not probe
# the actors with this test. As soon as an actor errors out, it will get
# taken out of the lineup forever.
self.assertEqual(len([r for r in results if r.ok]), 7)
self.assertEqual(len([r for r in results if not r.ok]), 4)
manager.clear()
    def test_async_calls_get_dropped_if_inflight_requests_over_limit(self):
        """Test asynchronous remote calls get dropped if too many in-flight calls."""
        actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
        manager = FaultTolerantActorManager(
            actors=actors,
            # Cap each actor at two concurrent outstanding requests.
            max_remote_requests_in_flight_per_actor=2,
        )
        # 2 asynchronous call to actor 1.
        num_of_calls = manager.foreach_actor_async(
            lambda w: w.call(),
            remote_actor_ids=[0, 0],
        )
        self.assertEqual(num_of_calls, 2)
        # Now, let's try to make another async call to actor 1.
        num_of_calls = manager.foreach_actor_async(
            lambda w: w.call(),
            healthy_only=False,
            remote_actor_ids=[0],
        )
        # We actually made 0 calls: the actor is already at its in-flight
        # limit, so the request is dropped rather than queued.
        self.assertEqual(num_of_calls, 0)
        manager.clear()
def test_healthy_only_works_for_list_of_functions(self):
"""Test healthy only mode works when a list of funcs are provided."""
actors = [Actor.remote(i) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
# Mark first and second actor as unhealthy.
manager.set_actor_state(1, False)
manager.set_actor_state(2, False)
def f(id, _):
return id
func = [functools.partial(f, i) for i in range(4)]
manager.foreach_actor_async(func, healthy_only=True)
results = manager.fetch_ready_async_reqs(timeout_seconds=None)
# Should get results back from calling actor 0 and 3.
self.assertEqual([r.get() for r in results], [0, 3])
manager.clear()
def test_probe_unhealthy_actors(self):
"""Test probe brings back unhealthy actors."""
actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
manager = FaultTolerantActorManager(actors=actors)
# Mark first and second actor as unhealthy.
manager.set_actor_state(1, False)
manager.set_actor_state(2, False)
# These actors are actually healthy.
manager.probe_unhealthy_actors(mark_healthy=True)
# Both actors are now healthy.
self.assertEqual(len(manager.healthy_actor_ids()), 4)
    def test_tags(self):
        """Test that tags work for async calls."""
        actors = [Actor.remote(i, maybe_crash=False) for i in range(4)]
        manager = FaultTolerantActorManager(actors=actors)
        manager.foreach_actor_async(lambda w: w.ping(), tag="pingpong")
        manager.foreach_actor_async(lambda w: w.call(), tag="call")
        # Fetch each batch by its tag; each round touches all 4 actors.
        results_ping_pong = manager.fetch_ready_async_reqs(
            tags="pingpong", timeout_seconds=10.0
        )
        results_call = manager.fetch_ready_async_reqs(tags="call", timeout_seconds=2.0)
        self.assertEqual(len(results_ping_pong), 4)
        self.assertEqual(len(results_call), 4)
        for result in results_ping_pong:
            data = result.get()
            self.assertEqual(data, "pong")
            self.assertEqual(result.tag, "pingpong")
        for result in results_call:
            data = result.get()
            # call() presumably returns a per-actor invocation counter: the
            # asserted value goes 1, 2, 3, 4 across the four rounds below.
            self.assertEqual(data, 1)
            self.assertEqual(result.tag, "call")
        # test with default tag
        manager.foreach_actor_async(lambda w: w.ping())
        manager.foreach_actor_async(lambda w: w.call())
        time.sleep(1)
        results = manager.fetch_ready_async_reqs(timeout_seconds=5)
        # 4 ping results + 4 call results.
        self.assertEqual(len(results), 8)
        for result in results:
            data = result.get()
            self.assertEqual(result.tag, None)
            if isinstance(data, str):
                self.assertEqual(data, "pong")
            elif isinstance(data, int):
                self.assertEqual(data, 2)
            else:
                raise ValueError("data is not str or int")
        # test with custom tags
        manager.foreach_actor_async(lambda w: w.ping(), tag="pingpong")
        manager.foreach_actor_async(lambda w: w.call(), tag="call")
        time.sleep(1)
        results = manager.fetch_ready_async_reqs(
            timeout_seconds=5, tags=["pingpong", "call"]
        )
        self.assertEqual(len(results), 8)
        for result in results:
            data = result.get()
            if isinstance(data, str):
                self.assertEqual(data, "pong")
                self.assertEqual(result.tag, "pingpong")
            elif isinstance(data, int):
                self.assertEqual(data, 3)
                self.assertEqual(result.tag, "call")
            else:
                raise ValueError("data is not str or int")
        # test with incorrect tags
        manager.foreach_actor_async(lambda w: w.ping(), tag="pingpong")
        manager.foreach_actor_async(lambda w: w.call(), tag="call")
        time.sleep(1)
        # A tag nothing was submitted under matches no pending requests.
        results = manager.fetch_ready_async_reqs(timeout_seconds=5, tags=["incorrect"])
        self.assertEqual(len(results), 0)
        # now test that passing no tags still gives back all of the results
        results = manager.fetch_ready_async_reqs(timeout_seconds=5)
        self.assertEqual(len(results), 8)
        for result in results:
            data = result.get()
            if isinstance(data, str):
                self.assertEqual(data, "pong")
                self.assertEqual(result.tag, "pingpong")
            elif isinstance(data, int):
                self.assertEqual(data, 4)
                self.assertEqual(result.tag, "call")
            else:
                raise ValueError("result is not str or int")
    def test_foreach_actor_async_fetch_ready(self):
        """Test foreach_actor_async_fetch_ready works."""
        actors = [Actor.remote(i, maybe_crash=False) for i in range(2)]
        manager = FaultTolerantActorManager(actors=actors)
        # First invocation presumably only kicks off the async pings (nothing
        # is ready yet) -- TODO confirm against the manager implementation.
        manager.foreach_actor_async_fetch_ready(lambda w: w.ping(), tag="ping")
        time.sleep(5)
        # By now the first round has completed; the second invocation returns
        # those results (one per actor).
        results = manager.foreach_actor_async_fetch_ready(
            lambda w: w.ping(), tag="ping"
        )
        self.assertEqual(len(results), 2)
if __name__ == "__main__":
    # Run this test module through pytest when executed directly; propagate
    # pytest's exit code to the shell.
    import pytest

    sys.exit(pytest.main(["-v", __file__]))
| TestActorManager |
python | pypa__pipenv | pipenv/vendor/click/exceptions.py | {
"start": 7577,
"end": 8096
} | class ____(UsageError):
"""Raised if an option is generally supplied but the use of the option
was incorrect. This is for instance raised if the number of arguments
for an option is not correct.
.. versionadded:: 4.0
:param option_name: the name of the option being used incorrectly.
"""
def __init__(
self, option_name: str, message: str, ctx: t.Optional["Context"] = None
) -> None:
super().__init__(message, ctx)
self.option_name = option_name
| BadOptionUsage |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_mcp_tool_use_block.py | {
"start": 220,
"end": 444
} | class ____(BaseModel):
id: str
input: Dict[str, object]
name: str
"""The name of the MCP tool"""
server_name: str
"""The name of the MCP server"""
type: Literal["mcp_tool_use"]
| BetaMCPToolUseBlock |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 21243,
"end": 22785
} | class ____(XYGlyph):
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
x = NumberSpec(default=field("x"), help="""
The x-coordinates to locate the image anchors.
""")
y = NumberSpec(default=field("y"), help="""
The y-coordinates to locate the image anchors.
""")
dw = DistanceSpec(default=field("dw"), help="""
The widths of the plot regions that the images will occupy.
.. note::
This is not the number of pixels that an image is wide.
That number is fixed by the image itself.
""")
dh = DistanceSpec(default=field("dh"), help="""
The height of the plot region that the image will occupy.
.. note::
This is not the number of pixels that an image is tall.
That number is fixed by the image itself.
""")
image_props = Include(ImageProps, help="""
The {prop} values for the images.
""")
dilate = Bool(False, help="""
Whether to always round fractional pixel locations in such a way
as to make the images bigger.
This setting may be useful if pixel rounding errors are causing
images to have a gap between them, when they should appear flush.
""")
origin = Enum(ImageOrigin, default="bottom_left", help="""
Defines the coordinate space of an image.
""")
anchor = Anchor(default="bottom_left", help="""
Position of the image should be anchored at the `x`, `y` coordinates.
""")
| ImageBase |
python | run-llama__llama_index | llama-index-core/llama_index/core/tools/types.py | {
"start": 7207,
"end": 7942
} | class ____(AsyncBaseTool):
"""
Adapter class that allows a synchronous tool to be used as an async tool.
"""
def __init__(self, tool: BaseTool):
self.base_tool = tool
@property
def metadata(self) -> ToolMetadata:
return self.base_tool.metadata
def call(self, input: Any) -> ToolOutput:
return self.base_tool(input)
async def acall(self, input: Any) -> ToolOutput:
return await asyncio.to_thread(self.call, input)
def adapt_to_async_tool(tool: BaseTool) -> AsyncBaseTool:
"""
Converts a synchronous tool to an async tool.
"""
if isinstance(tool, AsyncBaseTool):
return tool
else:
return BaseToolAsyncAdapter(tool)
| BaseToolAsyncAdapter |
python | joke2k__faker | faker/providers/lorem/pl_PL/__init__.py | {
"start": 68,
"end": 37257
} | class ____(LoremProvider):
"""Implement lorem provider for ``pl_PL`` locale.
Source: https://pl.wiktionary.org/wiki/Indeks%3APolski_-_Najpopularniejsze_s%C5%82owa_1-2000
"""
word_list = (
"w",
"z",
"być",
"na",
"i",
"do",
"nie",
"który",
"lub",
"to",
"się",
"o",
"mieć",
"coś",
"ten",
"dotyczyć",
"on",
"od",
"co",
"język",
"po",
"że",
"ktoś",
"przez",
"osoba",
"miasto",
"jeden",
"jak",
"za",
"ja",
"rok",
"a",
"bardzo",
"swój",
"dla",
"taki",
"człowiek",
"cecha",
"kobieta",
"mój",
"część",
"związany",
"móc",
"dwa",
"ona",
"związać",
"ze",
"mały",
"jakiś",
"miejsce",
"inny",
"duży",
"bez",
"czas",
"ale",
"czy",
"jako",
"sposób",
"rodzaj",
"Polska",
"rodzina",
"tylko",
"mieszkaniec",
"dzień",
"praca",
"przed",
"dom",
"dziecko",
"ty",
"pod",
"tak",
"woda",
"np.",
"już",
"rzeka",
"zostać",
"dobry",
"życie",
"państwo",
"mówić",
"pierwszy",
"nasz",
"cały",
"nad",
"wiele",
"zwierzę",
"przy",
"roślina",
"ta",
"u",
"jego",
"gatunek",
"nowy",
"chcieć",
"sobie",
"wielki",
"często",
"trzy",
"kolor",
"używać",
"musieć",
"kraj",
"robić",
"strona",
"każdy",
"wysoki",
"nazwa",
"mężczyzna",
"grupa",
"my",
"stary",
"sam",
"stan",
"drugi",
"zrobić",
"iść",
"oraz",
"polski",
"litera",
"kto",
"prawo",
"drzewo",
"ptak",
"książka",
"świat",
"samochód",
"rzecz",
"stolica",
"między",
"droga",
"należeć",
"mieszkanka",
"słowo",
"gdy",
"głowa",
"pies",
"młody",
"symbol",
"oni",
"bo",
"ziemia",
"aby",
"owoc",
"liczba",
"wiek",
"nie-",
"kilka",
"zły",
"środek",
"znajdować się",
"raz",
"dobrze",
"pan",
"kiedy",
"okres",
"pochodzić",
"ojciec",
"długi",
"ręka",
"itp.",
"odnosić się",
"dużo",
"podczas",
"biały",
"albo",
"ruch",
"jaki",
"przedmiot",
"służyć",
"matka",
"we",
"znak",
"ci",
"siebie",
"liczba atomowa",
"jeszcze",
"niż",
"cztery",
"wszystko",
"widzieć",
"żona",
"koń",
"szkoła",
"ciało",
"stać",
"kupić",
"zawsze",
"forma",
"sprawa",
"Rosja",
"wieś",
"góra",
"wyspa",
"oko",
"działanie",
"twój",
"występować",
"koniec",
"rząd",
"pięć",
"pokój",
"nauka",
"gdzie",
"kwiat",
"choroba",
"zwykle",
"powiedzieć",
"mieszkać",
"wiedzieć",
"imię",
"prowadzić",
"element",
"dać",
"godzina",
"żyć",
"ryba",
"wszyscy",
"zawierać",
"pracować",
"by",
"alfabet",
"członek",
"syn",
"jednostka",
"herb",
"brat",
"las",
"urządzenie",
"miesiąc",
"dziewczyna",
"obszar",
"grać",
"różny",
"teren",
"piękny",
"jeść",
"nic",
"brak",
"żeby",
"lubić",
"dany",
"budynek",
"położyć",
"czerwony",
"cel",
"stopień",
"siła",
"światło",
"leżeć",
"dawać",
"gra",
"sztuka",
"czarny",
"one",
"jej",
"wino",
"chodzić",
"statek",
"krótki",
"śmierć",
"wartość",
"dźwięk",
"sytuacja",
"teraz",
"główny",
"zajmować się",
"wykonywać",
"związek",
"ważny",
"ostatni",
"1000",
"tam",
"noc",
"dziś",
"pierwiastek chemiczny",
"wojna",
"noga",
"sklep",
"skóra",
"pani",
"własny",
"materiał",
"niektóry",
"tworzyć",
"system",
"znany",
"także",
"wykonać",
"niebo",
"święty",
"władza",
"wczoraj",
"film",
"twarz",
"flaga",
"morze",
"nawet",
"mięso",
"głos",
"Europa",
"?",
"pieniądz",
"powierzchnia",
"proces",
"tydzień",
"posiadać",
"ilość",
"obwód",
"działać",
"północny",
"region",
"jeśli",
"trwać",
"szybko",
"Bóg",
"silny",
"!",
"lecz",
"zielony",
"określony",
"król",
"pole",
"przyjaciel",
"1",
"dwadzieścia",
"serce",
"sześć",
"słońce",
"pisać",
"kot",
"drzwi",
"znać",
"początek",
"tysiąc",
"mleko",
"południowy",
"obraz",
"nosić",
"wiatr",
"niski",
"tekst",
"pić",
"zmiana",
"dawny",
"ulica",
"kierunek",
"linia",
"jechać",
"wyraz",
"stanowić",
"charakterystyczny",
"składać się",
"tu",
"uważać",
"siedem",
"miłość",
"podobny",
"więc",
"żołnierz",
"siostra",
"córka",
"też",
"chleb",
"zacząć",
"koło",
"granica",
"powietrze",
"pewien",
"włos",
"charakter",
"punkt",
"dzisiaj",
"ludzie",
"mało",
"liść",
"(…)",
"znaleźć",
"kościół",
"badanie",
"niewielki",
"wziąć",
"prosty",
"krew",
"mąż",
"–",
"wolny",
"kawa",
"problem",
"pójść",
"powodować",
"czyjś",
"drewno",
"kształt",
"stać się",
"właściwy",
"trzeci",
"znaczenie",
"brzeg",
"historia",
"ich",
"zasada",
"brać",
"dziesięć",
"powinien",
"żaden",
"jezioro",
"okno",
"kultura",
"niemiecki",
"ostry",
"but",
"stosować",
"ogień",
"nigdy",
"zbiór",
"samolot",
"ból",
"osiem",
"można",
"gwiazda",
"walka",
"Ukraina",
"prawdziwy",
"ciężki",
"zespół",
"drogi",
"pracownik",
"Francja",
"myśleć",
"zachowanie",
"polegać",
"uwaga",
"pomoc",
"przypominać",
"grecki",
"Niemcy",
"ząb",
"ile",
"informacja",
"chwila",
"deszcz",
"istnieć",
"nauczyciel",
"żółty",
"chory",
"piwo",
"według",
"dostać",
"uczeń",
"jedzenie",
"śnieg",
"jednak",
"również",
"ani",
"zwłaszcza",
"utwór",
"czysty",
"firma",
"siedzieć",
"francuski",
"łączyć",
"południe",
"zbyt",
"trudny",
"urząd",
"stół",
"lekarz",
"muzyka",
"czynność",
"układ okresowy",
"pociąg",
"jasny",
"klasa",
"męski",
"kamień",
"pierwiastek",
"ubranie",
"ściana",
"postać",
"pełny",
"organizm",
"5",
"księżyc",
"gmina",
"rosnąć",
"w celu",
"wydawać",
"źródło",
"funkcja",
"położenie",
"typ",
"starożytny",
"jutro",
"dziewięć",
"trzeba",
"społeczny",
"prawy",
"program",
"pojazd",
"może",
"historyczny",
"2",
"substancja",
"wszystkie",
"piec",
"układ",
"bóg",
"polityczny",
"chłopiec",
"cena",
"słaby",
"głupi",
"ludzki",
"trzymać",
"zupa",
"około",
"mieszkanie",
"zdanie",
"naczynie",
"uprawiać",
"północ",
"kraina",
"numer",
"para",
"dokument",
"uczucie",
"prawda",
"złoty",
"za pomocą",
"elektryczny",
"dziedzina",
"zachodni",
"alkohol",
"trochę",
"prowincja",
"prosić",
"list",
"bliski",
"komputer",
"towar",
"szybki",
"spać",
"niebieski",
"aż",
"przypadek",
"organizacja",
"herbata",
"szeroki",
"kawałek",
"czytać",
"obejmować",
"wojskowy",
"narzędzie",
"przyjść",
"myśl",
"ogród",
"Włochy",
"całość",
"wieczór",
"lód",
"wiedza",
"powiat",
"połowa",
"angielski",
"głównie",
"zjawisko",
"chłopak",
"wpływ",
"mowa",
"naturalny",
"morski",
"produkt",
"lewy",
"prawie",
"lek",
"miejscowość",
"napój",
"wschodni",
"księga",
"stopa",
"drobny",
"ciasto",
"kuchnia",
"plan",
"powstać",
"pełen",
"wokół",
"kochać",
"palec",
"zobaczyć",
"poprzez",
"maszyna",
"dziadek",
"wielkość",
"nos",
"złoto",
"pewny",
"partia",
"większość",
"obiekt",
"publiczny",
"pismo",
"wybitny",
"wszystek",
"błąd",
"broń",
"sen",
"trzydzieści",
"gruby",
"spotkanie",
"tkanina",
"smak",
"gość",
"potrawa",
"pytanie",
"produkcja",
"wy",
"razem",
"obywatel",
"jajko",
"3",
"zima",
"nazywać",
"policja",
"nikt",
"słodki",
"dopływ",
"butelka",
"energia",
"składać",
"łóżko",
"urodzenie",
"zdrowie",
"odmiana",
"zdjęcie",
"mocny",
"poza",
"4",
"lekki",
"czynić",
"przeciwny",
"duch",
"sąd",
"przeznaczyć",
"zapach",
"stały",
"Afryka",
"styl",
"karta",
"wypadek",
"babcia",
"wojsko",
"wodny",
"równy",
"rola",
"rejon",
"wybrzeże",
"naród",
"wiadomość",
"kość",
"tytuł",
"cukier",
"barwa",
"żywy",
"szczyt",
"rozwój",
"sieć",
"30",
"ponad",
"lato",
"warstwa",
"jabłko",
"wyrażać",
"bogaty",
"odbywać się",
"podstawowy",
"cześć",
"z powodu",
"ponieważ",
"wyjść",
"poziom",
"wyglądać",
"śpiewać",
"oznaczać",
"rozmowa",
"ciemny",
"papier",
"900",
"palić",
"lud",
"długość",
"usta",
"ucho",
"urodzić",
"wewnątrz",
"wśród",
"przedstawiciel",
"środkowy",
"obok",
"dzieło",
"arabski",
"krowa",
"taniec",
"rano",
"grzyb",
"długo",
"wydarzenie",
"pięćdziesiąt",
"włoski",
"słuchać",
"ser",
"właśnie",
"stanowisko",
"odpowiedni",
"korona",
"rower",
"święto",
"czekać",
"szukać",
"100",
"religia",
"piłka",
"opinia",
"wynik",
"pozycja",
"pochodzenie",
"metoda",
"ciepły",
"potem",
"udział",
"Hiszpania",
"rozumieć",
"6",
"wspólny",
"środowisko",
"całkowicie",
"budowa",
"ramię",
"gazeta",
"zabawa",
"nie ma",
"szczęście",
"pomieszczenie",
"strach",
"fala",
"patrzeć",
"odcień",
"temperatura",
"warunek",
"zdolność",
"sól",
"rosyjski",
"podróż",
"wykorzystywać",
"Ziemia",
"religijny",
"centrum",
"zbierać",
"zupełnie",
"przestrzeń",
"pas",
"połączenie",
"wobec",
"stawać się",
"potrzeba",
"narodowy",
"liczyć",
"otwarty",
"wejść",
"pozbawić",
"masa",
"głęboki",
"ono",
"wywoływać",
"zachód",
"wschód",
"powód",
"Azja",
"administracyjny",
"temat",
"odpowiadać",
"szpital",
"zajmować",
"czterdzieści",
"sto",
"sobą",
"pogląd",
"chronić",
"wysokość",
"słownik",
"rodzic",
"świnia",
"zaczynać",
"moneta",
"możliwość",
"mama",
"gdzieś",
"egzamin",
"pogoda",
"chemiczny",
"gorący",
"zadanie",
"więzienie",
"zakład",
"ofiara",
"obiad",
"wąski",
"zamek",
"moc",
"stosunek",
"natura",
"8",
"zazwyczaj",
"założyć",
"skrzydło",
"otrzymać",
"oficjalny",
"chmura",
"ten sam",
"złożyć",
"wewnętrzny",
"wspaniały",
"przyczyna",
"miły",
"dziki",
"kara",
"listopad",
"komórka",
"instytucja",
"skała",
"ogromny",
"wygląd",
"sześćdziesiąt",
"możliwy",
"wąż",
"umrzeć",
"określać",
"amerykański",
"płynąć",
"walczyć",
"nóż",
"nagle",
"instrument",
"20",
"rynek",
"Grecja",
"umowa",
"niedziela",
"szczęśliwy",
"tutaj",
"zmieniać",
"węgiel",
"sylaba",
"Warszawa",
"ładny",
"europejski",
"czwarty",
"styczeń",
"hiszpański",
"posługiwać się",
"papieros",
"fizyczny",
"dach",
"zimny",
"ogon",
"trawa",
"telefon",
"płyn",
"przedstawiać",
"metal",
"dlaczego",
"próbować",
"10",
"7",
"sportowy",
"oddział",
"obecnie",
"9",
"miara",
"prezydent",
"pierś",
"rodowity",
"stworzyć",
"dział",
"dusza",
"wierzyć",
"domowy",
"właściciel",
"wyrób",
"autobus",
"ponownie",
"gaz",
"właściwość",
"rada",
"rzymski",
"bieg",
"zgoda",
"obowiązek",
"owca",
"zamieszkiwać",
"przyjąć",
"muzyczny",
"przyrząd",
"piąty",
"szczególnie",
"kupować",
"istota",
"stracić",
"artykuł",
"ochrona",
"te",
"napisać",
"specjalista",
"ku",
"górski",
"należy",
"określenie",
"pomiędzy",
"Rzym",
"ssak",
"zwolennik",
"odpowiedź",
"działalność",
"miejski",
"wcześnie",
"zdobyć",
"górny",
"uniwersytet",
"bić",
"wymagać",
"miękki",
"źle",
"40",
"państwowy",
"ludność",
"minuta",
"cierpieć",
"ogół",
"naprawdę",
"blisko",
"surowy",
"dodatek",
"radość",
"akcja",
"w kształcie",
"polityka",
"obcy",
"ziemniak",
"podstawa",
"przemysł",
"udać się",
"brzuch",
"suchy",
"krzew",
"terytorium",
"wolność",
"czyli",
"klucz",
"Jan",
"kolejny",
"uczyć się",
"postępowanie",
"sok",
"50",
"łatwo",
"jeździć",
"decyzja",
"naukowy",
"szanowny",
"warzywo",
"nadzieja",
"wrzesień",
"kierować",
"student",
"kąt",
"seksualny",
"piasek",
"drewniany",
"obchodzić",
"wróg",
"przeciwko",
"żeński",
"potrafić",
"pamięć",
"teatr",
"dwudziesty",
"znowu",
"potrzebować",
"owad",
"cienki",
"ziarno",
"moment",
"wiosna",
"wydać",
"literatura",
"tradycyjny",
"leczenie",
"poważny",
"siedemdziesiąt",
"silnik",
"spokój",
"luty",
"biedny",
"czuć",
"drużyna",
"dialekt",
"dzięki",
"grudzień",
"jedyny",
"pragnienie",
"siedziba",
"służba",
"wiara",
"pióro",
"wzrost",
"proszę",
"osiemdziesiąt",
"społeczeństwo",
"dokładnie",
"przykład",
"szacunek",
"marzec",
"róg",
"połączyć",
"uderzenie",
"zwyczaj",
"podawać",
"mocno",
"zwykły",
"kolega",
"międzynarodowy",
"sala",
"nadawać",
"tamten",
"szósty",
"lekcja",
"pomagać",
"republika",
"zjeść",
"typowy",
"modlitwa",
"dół",
"dlatego",
"rasa",
"użycie",
"dziewięćdziesiąt",
"bok",
"zatoka",
"wiersz",
"Szwecja",
"japoński",
"gałąź",
"wrogi",
"przyjmować",
"więcej",
"łatwy",
"atak",
"wychodzić",
"wtedy",
"płyta",
"milion",
"padać",
"kanał",
"poniedziałek",
"wzór",
"twardy",
"podatek",
"rzucać",
"świeży",
"bilet",
"zakładać",
"złapać",
"przyszłość",
"przyjęcie",
"zewnętrzny",
"zamknąć",
"przynosić",
"obecny",
"strój",
"popularny",
"późno",
"płaski",
"struktura",
"pieniądze",
"projekt",
"doświadczenie",
"szyja",
"rozmawiać",
"literacki",
"okolica",
"mur",
"małżeństwo",
"bitwa",
"kwiecień",
"maj",
"specjalny",
"poruszać się",
"sąsiad",
"organ",
"pamiętać",
"uczyć",
"termin",
"bank",
"pusty",
"pół",
"wchodzić",
"czyn",
"Japonia",
"przeciw",
"wczesny",
"wejście",
"ciągle",
"bać się",
"Stany Zjednoczone",
"delikatny",
"wilk",
"kula",
"r.",
"wnętrze",
"prąd",
"sprzedawać",
"port",
"spokojny",
"waga",
"sztuczny",
"Polak",
"jajo",
"dym",
"pszczoła",
"technika",
"współczesny",
"widoczny",
"krok",
"próba",
"gęsty",
"miód",
"ciepło",
"mapa",
"kapelusz",
"otwór",
"lew",
"osioł",
"gwałtowny",
"siódmy",
"budować",
"los",
"telewizja",
"spowodować",
"dłoń",
"akt",
"mysz",
"jesień",
"składnik",
"słyszeć",
"zgodnie",
"zdrowy",
"masło",
"gwiazdozbiór",
"kino",
"podać",
"zmienić",
"przechodzić",
"fabryka",
"dość",
"daleko",
"z krwi i kości",
"płeć",
"chiński",
"sędzia",
"pokryć",
"lotniczy",
"nazwisko",
"bądź",
"Słowacja",
"umieć",
"majątek",
"ocena",
"pływać",
"komputerowy",
"dziać się",
"ósmy",
"autor",
"sierpień",
"łagodny",
"zakres",
"wybrać",
"następny",
"odległość",
"most",
"policjant",
"panować",
"zawód",
"zwrot",
"wybór",
"Chiny",
"internetowy",
"wytwarzać",
"lipiec",
"bohater",
"prasa",
"Czechy",
"80",
"fakt",
"piosenka",
"mąka",
"badać",
"sobota",
"piątek",
"znajdować",
"straszny",
"waluta",
"pojechać",
"otwierać",
"umiejętność",
"ślub",
"restauracja",
"przedsiębiorstwo",
"towarzystwo",
"model",
"no",
"prywatny",
"reakcja",
"okazja",
"porządek",
"opowiadać",
"przeciwnik",
"mięsień",
"zysk",
"sprzedaż",
"zabić",
"różnica",
"klasztor",
"osiągnąć",
"niebezpieczny",
"pisarz",
"wrócić",
"skład",
"m.in.",
"sprawiać",
"chrześcijański",
"zapomnieć",
"gniew",
"planeta",
"postawić",
"przejść",
"kurs",
"przygotować",
"dzielić",
"dzielnica",
"kierowca",
"własność",
"królowa",
"korzeń",
"artysta",
"stawiać",
"jakość",
"przyjemność",
"średni",
"ludowy",
"całkowity",
"Dania",
"biblioteka",
"dopiero",
"zero",
"gniazdo",
"pieśń",
"urzędnik",
"przestać",
"dziura",
"Anglia",
"mózg",
"liczny",
"uderzać",
"efekt",
"rozmiar",
"przyjemny",
"norma",
"pozwalać",
"rana",
"korzyść",
"tańczyć",
"kosztować",
"Słońce",
"podział",
"samica",
"przepis",
"hotel",
"rzadko",
"wykonanie",
"brzydki",
"otworzyć",
"armia",
"kiedyś",
"brązowy",
"rzeczywistość",
"prędkość",
"szef",
"ciecz",
"kaczka",
"szkolny",
"dokonywać",
"fałszywy",
"koszula",
"tyle",
"rzeczownik",
"złożony",
"zawodnik",
"-",
"tradycja",
"śniadanie",
"usługa",
"skończyć",
"Białoruś",
"znów",
"handel",
"mieć na imię",
"królestwo",
"jądro",
"powstawać",
"okrągły",
"spodnie",
"powoli",
"godny",
"jeżeli",
"ślad",
"przedstawienie",
"olej",
"jazda",
"dyskusja",
"wyrażenie",
"daleki",
"sądzić",
"Ameryka",
"tracić",
"znosić",
"profesor",
"świątynia",
"szary",
"piłka nożna",
"zboże",
"uderzyć",
"wola",
"srebro",
"dolina",
"w postaci",
"różowy",
"zamykać",
"wrogość",
"Indie",
"dziwny",
"czasem",
"temu",
"wtorek",
"oglądać",
"sport",
"małpa",
"spotkać",
"zdarzenie",
"wódka",
"wrażenie",
"kalendarz",
"pomysł",
"odczuwać",
"koszt",
"plemię",
"bydło",
"strumień",
"skutek",
"książę",
"całkiem",
"papież",
"dodawać",
"brudny",
"przyszły",
"mecz",
"scena",
"wolno",
"klient",
"opisać",
"szereg",
"ciąża",
"coraz",
"złodziej",
"Izrael",
"głód",
"otaczać",
"władca",
"transport",
"w formie",
"niebezpieczeństwo",
"słoneczny",
"figura",
"wszelki",
"wysiłek",
"kolano",
"niech",
"tłuszcz",
"zakończenie",
"mi",
"ksiądz",
"żelazo",
"łuk",
"mebel",
"Afganistan",
"nieszczęście",
"wskazywać",
"plaża",
"fragment",
"zaś",
"metr",
"kościelny",
"samochodowy",
"zachowywać się",
"obrona",
"danie",
"wierny",
"amharski",
"lista",
"żart",
"ogólny",
"kontrola",
"budzić",
"90",
"tłum",
"naj-",
"kontakt",
"czasownik",
"gotowy",
"Jezus",
"koza",
"zbiornik",
"obserwować",
"grób",
"stacja",
"robotnik",
"czerwiec",
"październik",
"konstrukcja",
"choć",
"wyjście",
"minerał",
"kosz",
"60",
"cebula",
"samiec",
"sos",
"zmarły",
"ojczyzna",
"bycie",
"szkoda",
"niszczyć",
"majuskuła",
"przejaw",
"zniszczyć",
"niedźwiedź",
"pokazywać",
"gospodarka",
"zbudować",
"dodatkowy",
"park",
"opłata",
"wysoko",
"Egipt",
"zegar",
"wujek",
"dawno",
"studia",
"cesarz",
"wizyta",
"przyprawa",
"łódź",
"powszechny",
"robota",
"metalowy",
"biec",
"dobro",
"dzisiejszy",
"obóz",
"żydowski",
"USA",
"Chrystus",
"oddawać",
"widok",
"marka",
"pojęcie",
"miecz",
"krzyż",
"tajemnica",
"chłop",
"Austria",
"lecieć",
"bezpieczeństwo",
"królewski",
"śmiech",
"postawa",
"sukces",
"zgodny",
"płaszcz",
"Turcja",
"przeszkoda",
"prostytutka",
"operacja",
"wywołać",
"narząd",
"futro",
"świeca",
"Australia",
"prawny",
"wciąż",
"Szwajcaria",
"powieść",
"gotować",
"szczególny",
"rozwiązanie",
"relacja",
"studiować",
"stado",
"w czasie",
"kontynent",
"przychodzić",
"lis",
"strefa",
"70",
"wypowiedź",
"dziewiąty",
"idea",
"kura",
"grunt",
"farba",
"wóz",
"epoka",
"lęk",
"smutny",
"kolejowy",
"dodać",
"uchodzić",
"przygotowywać",
"przynieść",
"umysł",
"suma",
"interes",
"produkować",
"Boże Narodzenie",
"wieża",
"handlowy",
"gdyby",
"Kraków",
"utrzymywać",
"urodziny",
"natychmiast",
"uciekać",
"chociaż",
"słoń",
"prezent",
"odwaga",
"ciężar",
"płacić",
"podłoga",
"atmosfera",
"wspólnota",
"zwycięstwo",
"treść",
"zainteresowanie",
"zamiast",
"tor",
"artystyczny",
"dwanaście",
"zdolny",
"pojedynczy",
"przejście",
"moralny",
"reguła",
"naukowiec",
"osobisty",
"mnóstwo",
"wybory",
"jedynie",
"wada",
"sygnał",
"wykonywanie",
"wybierać",
"umieszczać",
"mistrz",
"nagły",
"dno",
"pomarańczowy",
"telewizyjny",
"radio",
"przerwa",
"matematyka",
"klub",
"środa",
"muzeum",
"finansowy",
"malować",
"opieka",
"Żyd",
"ośrodek",
"krzesło",
"ukraiński",
"kolej",
"kłopot",
"ryż",
"cień",
"szwedzki",
"usuwać",
"katolicki",
"cierpienie",
"znaczny",
"umożliwiać",
"Rumunia",
"poznać",
"wynosić",
"pijany",
"zakończyć",
"intensywny",
"kostka",
"świadczyć",
"wydawać się",
"godność",
"Unia Europejska",
"orzeł",
"burza",
"chrześcijaństwo",
"błoto",
"biskup",
"gardło",
"szkło",
"polityk",
"umieścić",
"pozostać",
"czwartek",
"piętro",
"odkryć",
"powstanie",
"zakon",
"oddech",
"nastrój",
"teoria",
"doskonały",
"dolny",
"spadek",
"zawartość",
"zatrzymać",
"aktor",
"grzech",
"otrzymywać",
"anioł",
"szklanka",
"ciekawy",
"pomóc",
"pomidor",
"smutek",
"Wielka Brytania",
"pora",
"śmiać się",
"abugida",
"odcinek",
"nasiono",
"pokarm",
"zimno",
"wieczorem",
"wracać",
"azjatycki",
"wysłać",
"sprzęt",
"posiłek",
"ozdobny",
"impreza",
"potrzebny",
"znaczyć",
"łyżka",
"narkotyk",
"biuro",
"parlament",
"obywatelka",
"babka",
"zabawka",
"dorosły",
"ćwiczenie",
"ocean",
"nadmierny",
"niezwykły",
"bieda",
"użytkownik",
"polować",
"dyrektor",
"procent",
"ziemski",
"spór",
"żaba",
"starać się",
"w wyniku",
"pacjent",
"Litwa",
"wycieczka",
"istotny",
"lampa",
"mgła",
"Węgry",
"późny",
"dziewczynka",
"lina",
"w ciągu",
"mocz",
"motyl",
"półwysep",
"staw",
"przybyć",
"duński",
"nieprzyjemny",
"wakacje",
"przestępstwo",
"centralny",
"odzież",
"głośny",
"wysyłać",
"wina",
"pożar",
"pasek",
"przyjaźń",
"koncert",
"zarówno",
"turecki",
"na zewnątrz",
"kilometr",
"zapalenie",
"tani",
"pytać",
"św.",
"dane",
"poeta",
"łąka",
"trudność",
"ciotka",
"seks",
"bar",
"pasmo",
"zaraz",
"ubogi",
"po prostu",
"igła",
"cmentarz",
"dziób",
"róża",
"pozostawać",
"zawodowy",
"tablica",
"klimat",
"cisza",
"okropny",
"włosy",
"wzdłuż",
"medycyna",
"bawić się",
"wzrok",
"w.",
"bogini",
"wioska",
"letni",
"chyba",
"poczta",
"deska",
"hodować",
"wreszcie",
"przyjechać",
"filmowy",
"kończyć",
"psychiczny",
"uzyskać",
"rachunek",
"minister",
"dowód",
"lata",
"mrówka",
"radiowy",
"średniowieczny",
"mądry",
"przeprowadzać",
"kolacja",
"jakby",
"pragnąć",
"sądowy",
"ustawa",
"zaufanie",
"wojenny",
"obowiązywać",
"promień",
"Kościół",
"dać się",
"kult",
"traktować",
"czapka",
"ciągnąć",
"paliwo",
"—",
"diabeł",
"Holandia",
"broda",
"w końcu",
"powolny",
"muzyk",
"korzystać",
"sowa",
"dokładny",
"czoło",
"zając",
"na przykład",
"płakać",
"podnieść",
"wybuch",
"spaść",
"byk",
"budowla",
"zgromadzenie",
"odważny",
"czynnik",
"zeszły",
"wesoły",
"pająk",
"opuścić",
"ciemność",
"kij",
"pałac",
"archipelag",
"pojawiać się",
"panna",
"gęś",
"nauczycielka",
"zajęcie",
"trudno",
"pustynia",
"kieszeń",
"fotografia",
"tytoń",
"upadek",
"wyrok",
"istnienie",
"zanim",
"wyścig",
"chęć",
"świecić",
"częściowo",
"dokonać",
"żywność",
"sukienka",
"obrót",
"toponim",
"wpaść",
"podróżować",
"kolumna",
"rodzinny",
"poprzedni",
"Niemiec",
"pisanie",
"oddać",
"rzadki",
"bułgarski",
"otoczenie",
"kobiecy",
"kolorowy",
"kartka",
"urodzić się",
"piętnaście",
"uznawać",
"okręt",
"trzydziesty",
"wniosek",
"głupiec",
"strata",
"większy",
"podnosić",
"nocny",
"wywodzić się",
"filozofia",
"inaczej",
"Pan",
"ozdoba",
"uciec",
"martwy",
"hałas",
"lotnisko",
"tył",
"łaciński",
"położony",
"pełnić",
"kwestia",
"tarcza",
"0",
"skłonność",
"go",
"talerz",
"wygrać",
"Morze Śródziemne",
"minuskuła",
"szlachetny",
"poruszać",
"jadalny",
"jedenaście",
"nieść",
"szkodliwy",
"użyć",
"lot",
"wystawa",
"pokonać",
"przebywać",
"przeszłość",
"adres",
"wisieć",
"oś",
"zmęczony",
"katastrofa",
"zamiar",
"bogactwo",
"niechęć",
"poduszka",
"rak",
"jednocześnie",
"dziecięcy",
"wstyd",
"białoruski",
"rozpocząć",
"rzucić",
"ulegać",
"policzek",
"wzgórze",
"hasło",
"lustro",
"wkrótce",
"narodowość",
"pojawić się",
"skala",
"zapis",
"stowarzyszenie",
"zgadzać się",
"rezultat",
"oba",
"przecież",
"czeski",
"tłumaczyć",
"rysunek",
"kłaść",
"aktywny",
"gołąb",
"praktyka",
"okoliczność",
"trwały",
"oczekiwać",
"ryzyko",
"dostęp",
"wyłącznie",
"czekolada",
"oczywiście",
"dalej",
"dar",
"włożyć",
"zrozumieć",
"postępować",
"srebrny",
"doprowadzić",
"analiza",
"mierzyć",
"banknot",
"głupota",
"głupek",
"słowacki",
"plama",
"uśmiech",
"konflikt",
"gleba",
"gospodarczy",
"plecy",
"następować",
"zaburzenie",
"blady",
"spadać",
"plac",
"cichy",
"alkoholowy",
"pomarańcza",
"bajka",
"wprowadzać",
"żołądek",
"latać",
"niewolnik",
"rolnik",
"wspomnienie",
"zająć",
"nasienie",
"Belgia",
"wątpliwość",
"bezpośrednio",
"graniczyć",
"gorączka",
"bronić",
"rządzić",
"drapieżny",
"pojemnik",
"Piotr",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | pypa__setuptools | setuptools/config/_validate_pyproject/error_reporting.py | {
"start": 5375,
"end": 11813
} | class ____:
_IGNORE = frozenset(("description", "default", "title", "examples"))
def __init__(self, jargon: Optional[Dict[str, str]] = None):
self.jargon: Dict[str, str] = jargon or {}
# Clarify confusing terms
self._terms = {
"anyOf": "at least one of the following",
"oneOf": "exactly one of the following",
"allOf": "all of the following",
"not": "(*NOT* the following)",
"prefixItems": f"{self._jargon('items')} (in order)",
"items": "items",
"contains": "contains at least one of",
"propertyNames": (
f"non-predefined acceptable {self._jargon('property names')}"
),
"patternProperties": f"{self._jargon('properties')} named via pattern",
"const": "predefined value",
"enum": "one of",
}
# Attributes that indicate that the definition is easy and can be done
# inline (e.g. string and number)
self._guess_inline_defs = [
"enum",
"const",
"maxLength",
"minLength",
"pattern",
"format",
"minimum",
"maximum",
"exclusiveMinimum",
"exclusiveMaximum",
"multipleOf",
]
def _jargon(self, term: Union[str, List[str]]) -> Union[str, List[str]]:
if isinstance(term, list):
return [self.jargon.get(t, t) for t in term]
return self.jargon.get(term, term)
def __call__(
self,
schema: Union[dict, List[dict]],
prefix: str = "",
*,
_path: Sequence[str] = (),
) -> str:
if isinstance(schema, list):
return self._handle_list(schema, prefix, _path)
filtered = self._filter_unecessary(schema, _path)
simple = self._handle_simple_dict(filtered, _path)
if simple:
return f"{prefix}{simple}"
child_prefix = self._child_prefix(prefix, " ")
item_prefix = self._child_prefix(prefix, "- ")
indent = len(prefix) * " "
with io.StringIO() as buffer:
for i, (key, value) in enumerate(filtered.items()):
child_path = [*_path, key]
line_prefix = prefix if i == 0 else indent
buffer.write(f"{line_prefix}{self._label(child_path)}:")
# ^ just the first item should receive the complete prefix
if isinstance(value, dict):
filtered = self._filter_unecessary(value, child_path)
simple = self._handle_simple_dict(filtered, child_path)
buffer.write(
f" {simple}"
if simple
else f"\n{self(value, child_prefix, _path=child_path)}"
)
elif isinstance(value, list) and (
key != "type" or self._is_property(child_path)
):
children = self._handle_list(value, item_prefix, child_path)
sep = " " if children.startswith("[") else "\n"
buffer.write(f"{sep}{children}")
else:
buffer.write(f" {self._value(value, child_path)}\n")
return buffer.getvalue()
def _is_unecessary(self, path: Sequence[str]) -> bool:
if self._is_property(path) or not path: # empty path => instruction @ root
return False
key = path[-1]
return any(key.startswith(k) for k in "$_") or key in self._IGNORE
def _filter_unecessary(
self, schema: Dict[str, Any], path: Sequence[str]
) -> Dict[str, Any]:
return {
key: value
for key, value in schema.items()
if not self._is_unecessary([*path, key])
}
def _handle_simple_dict(self, value: dict, path: Sequence[str]) -> Optional[str]:
inline = any(p in value for p in self._guess_inline_defs)
simple = not any(isinstance(v, (list, dict)) for v in value.values())
if inline or simple:
return f"{{{', '.join(self._inline_attrs(value, path))}}}\n"
return None
def _handle_list(
self, schemas: list, prefix: str = "", path: Sequence[str] = ()
) -> str:
if self._is_unecessary(path):
return ""
repr_ = repr(schemas)
if all(not isinstance(e, (dict, list)) for e in schemas) and len(repr_) < 60:
return f"{repr_}\n"
item_prefix = self._child_prefix(prefix, "- ")
return "".join(
self(v, item_prefix, _path=[*path, f"[{i}]"]) for i, v in enumerate(schemas)
)
def _is_property(self, path: Sequence[str]) -> bool:
"""Check if the given path can correspond to an arbitrarily named property"""
counter = 0
for key in path[-2::-1]:
if key not in {"properties", "patternProperties"}:
break
counter += 1
# If the counter if even, the path correspond to a JSON Schema keyword
# otherwise it can be any arbitrary string naming a property
return counter % 2 == 1
def _label(self, path: Sequence[str]) -> str:
*parents, key = path
if not self._is_property(path):
norm_key = _separate_terms(key)
return self._terms.get(key) or " ".join(self._jargon(norm_key))
if parents[-1] == "patternProperties":
return f"(regex {key!r})"
return repr(key) # property name
def _value(self, value: Any, path: Sequence[str]) -> str:
if path[-1] == "type" and not self._is_property(path):
type_ = self._jargon(value)
return f"[{', '.join(type_)}]" if isinstance(type_, list) else type_
return repr(value)
def _inline_attrs(self, schema: dict, path: Sequence[str]) -> Iterator[str]:
for key, value in schema.items():
child_path = [*path, key]
yield f"{self._label(child_path)}: {self._value(value, child_path)}"
def _child_prefix(self, parent_prefix: str, child_prefix: str) -> str:
return len(parent_prefix) * " " + child_prefix
def _separate_terms(word: str) -> List[str]:
"""
>>> _separate_terms("FooBar-foo")
['foo', 'bar', 'foo']
"""
return [w.lower() for w in _CAMEL_CASE_SPLITTER.split(word) if w]
| _SummaryWriter |
python | mlflow__mlflow | mlflow/server/auth/permissions.py | {
"start": 148,
"end": 1267
} | class ____:
name: str
can_read: bool
can_update: bool
can_delete: bool
can_manage: bool
READ = Permission(
name="READ",
can_read=True,
can_update=False,
can_delete=False,
can_manage=False,
)
EDIT = Permission(
name="EDIT",
can_read=True,
can_update=True,
can_delete=False,
can_manage=False,
)
MANAGE = Permission(
name="MANAGE",
can_read=True,
can_update=True,
can_delete=True,
can_manage=True,
)
NO_PERMISSIONS = Permission(
name="NO_PERMISSIONS",
can_read=False,
can_update=False,
can_delete=False,
can_manage=False,
)
ALL_PERMISSIONS = {
READ.name: READ,
EDIT.name: EDIT,
MANAGE.name: MANAGE,
NO_PERMISSIONS.name: NO_PERMISSIONS,
}
def get_permission(permission: str) -> Permission:
return ALL_PERMISSIONS[permission]
def _validate_permission(permission: str):
if permission not in ALL_PERMISSIONS:
raise MlflowException(
f"Invalid permission '{permission}'. Valid permissions are: {tuple(ALL_PERMISSIONS)}",
INVALID_PARAMETER_VALUE,
)
| Permission |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 17048,
"end": 34954
} | class ____(BaseEstimator):
"""Make different predictions when using Numpy and the Array API"""
def fit(self, X, y):
return self
def predict(self, X):
enabled = get_config()["array_api_dispatch"]
xp, _ = _array_api.get_namespace(X)
if enabled:
return xp.asarray([1, 2, 3])
else:
return np.array([3, 2, 1])
def test_check_array_api_input():
try:
importlib.import_module("array_api_strict")
except ModuleNotFoundError: # pragma: nocover
raise SkipTest("array-api-strict is required to run this test")
with raises(AssertionError, match="Not equal to tolerance"):
check_array_api_input(
"BrokenArrayAPI",
BrokenArrayAPI(),
array_namespace="array_api_strict",
check_values=True,
)
def test_not_an_array_array_function():
not_array = _NotAnArray(np.ones(10))
msg = "Don't want to call array_function sum!"
with raises(TypeError, match=msg):
np.sum(not_array)
# always returns True
assert np.may_share_memory(not_array, None)
def test_check_fit_score_takes_y_works_on_deprecated_fit():
# Tests that check_fit_score_takes_y works on a class with
# a deprecated fit method
class TestEstimatorWithDeprecatedFitMethod(BaseEstimator):
@deprecated("Deprecated for the purpose of testing check_fit_score_takes_y")
def fit(self, X, y):
return self
check_fit_score_takes_y("test", TestEstimatorWithDeprecatedFitMethod())
def test_check_estimator_with_class_removed():
"""Test that passing a class instead of an instance fails."""
msg = "Passing a class was deprecated"
with raises(TypeError, match=msg):
check_estimator(LogisticRegression)
def test_mutable_default_params():
"""Test that constructor cannot have mutable default parameters."""
msg = (
"Parameter 'p' of estimator 'HasMutableParameters' is of type "
"object which is not allowed"
)
# check that the "default_constructible" test checks for mutable parameters
check_parameters_default_constructible(
"Immutable", HasImmutableParameters()
) # should pass
with raises(AssertionError, match=msg):
check_parameters_default_constructible("Mutable", HasMutableParameters())
@_mark_thread_unsafe_if_pytest_imported
def test_check_set_params():
"""Check set_params doesn't fail and sets the right values."""
# check that values returned by get_params match set_params
msg = "get_params result does not match what was passed to set_params"
with raises(AssertionError, match=msg):
check_set_params("test", ModifiesValueInsteadOfRaisingError())
with warnings.catch_warnings(record=True) as records:
check_set_params("test", RaisesErrorInSetParams())
assert UserWarning in [rec.category for rec in records]
with raises(AssertionError, match=msg):
check_set_params("test", ModifiesAnotherValue())
def test_check_estimators_nan_inf():
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator NoCheckinPredict doesn't check for NaN and inf in predict"
with raises(AssertionError, match=msg):
check_estimators_nan_inf("NoCheckinPredict", NoCheckinPredict())
def test_check_dict_unchanged():
# check that estimator state does not change
# at transform/predict/predict_proba time
msg = "Estimator changes __dict__ during predict"
with raises(AssertionError, match=msg):
check_dict_unchanged("test", ChangesDict())
def test_check_sample_weights_pandas_series():
# check that sample_weights in fit accepts pandas.Series type
try:
from pandas import Series # noqa: F401
msg = (
"Estimator NoSampleWeightPandasSeriesType raises error if "
"'sample_weight' parameter is of type pandas.Series"
)
with raises(ValueError, match=msg):
check_sample_weights_pandas_series(
"NoSampleWeightPandasSeriesType", NoSampleWeightPandasSeriesType()
)
except ImportError:
pass
def test_check_estimators_overwrite_params():
# check that `fit` only changes attributes that
# are private (start with an _ or end with a _).
msg = (
"Estimator ChangesWrongAttribute should not change or mutate "
"the parameter wrong_attribute from 0 to 1 during fit."
)
with raises(AssertionError, match=msg):
check_estimators_overwrite_params(
"ChangesWrongAttribute", ChangesWrongAttribute()
)
check_estimators_overwrite_params("test", ChangesUnderscoreAttribute())
def test_check_dont_overwrite_parameters():
# check that `fit` doesn't add any public attribute
msg = (
r"Estimator adds public attribute\(s\) during the fit method."
" Estimators are only allowed to add private attributes"
" either started with _ or ended"
" with _ but wrong_attribute added"
)
with raises(AssertionError, match=msg):
check_dont_overwrite_parameters("test", SetsWrongAttribute())
def test_check_methods_sample_order_invariance():
# check for sample order invariance
name = NotInvariantSampleOrder.__name__
method = "predict"
msg = (
"{method} of {name} is not invariant when applied to a dataset"
"with different sample order."
).format(method=method, name=name)
with raises(AssertionError, match=msg):
check_methods_sample_order_invariance(
"NotInvariantSampleOrder", NotInvariantSampleOrder()
)
def test_check_methods_subset_invariance():
# check for invariant method
name = NotInvariantPredict.__name__
method = "predict"
msg = ("{method} of {name} is not invariant when applied to a subset.").format(
method=method, name=name
)
with raises(AssertionError, match=msg):
check_methods_subset_invariance("NotInvariantPredict", NotInvariantPredict())
def test_check_estimator_sparse_data():
# check for sparse data input handling
name = NoSparseClassifier.__name__
msg = "Estimator %s doesn't seem to fail gracefully on sparse data" % name
with raises(AssertionError, match=msg):
check_estimator_sparse_matrix(name, NoSparseClassifier("sparse_matrix"))
if SPARRAY_PRESENT:
with raises(AssertionError, match=msg):
check_estimator_sparse_array(name, NoSparseClassifier("sparse_array"))
# Large indices test on bad estimator
msg = (
"Estimator LargeSparseNotSupportedClassifier doesn't seem to "
r"support \S{3}_64 matrix, and is not failing gracefully.*"
)
with raises(AssertionError, match=msg):
check_estimator_sparse_matrix(
"LargeSparseNotSupportedClassifier",
LargeSparseNotSupportedClassifier("sparse_matrix"),
)
if SPARRAY_PRESENT:
with raises(AssertionError, match=msg):
check_estimator_sparse_array(
"LargeSparseNotSupportedClassifier",
LargeSparseNotSupportedClassifier("sparse_array"),
)
def test_check_classifiers_one_label_sample_weights():
# check for classifiers reducing to less than two classes via sample weights
name = OneClassSampleErrorClassifier.__name__
msg = (
f"{name} failed when fitted on one label after sample_weight "
"trimming. Error message is not explicit, it should have "
"'class'."
)
with raises(AssertionError, match=msg):
check_classifiers_one_label_sample_weights(
"OneClassSampleErrorClassifier", OneClassSampleErrorClassifier()
)
def test_check_estimator_not_fail_fast():
"""Check the contents of the results returned with on_fail!="raise".
This results should contain details about the observed failures, expected
or not.
"""
check_results = check_estimator(BaseEstimator(), on_fail=None)
assert isinstance(check_results, list)
assert len(check_results) > 0
assert all(
isinstance(item, dict)
and set(item.keys())
== {
"estimator",
"check_name",
"exception",
"status",
"expected_to_fail",
"expected_to_fail_reason",
}
for item in check_results
)
# Some tests are expected to fail, some are expected to pass.
assert any(item["status"] == "failed" for item in check_results)
assert any(item["status"] == "passed" for item in check_results)
# Some estimator checks rely on warnings in deep functions calls. This is not
# automatically detected by pytest-run-parallel shallow AST inspection, so we
# need to mark the test function as thread-unsafe.
@_mark_thread_unsafe_if_pytest_imported
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a fit method
msg = "object has no attribute 'fit'"
with raises(AttributeError, match=msg):
check_estimator(BaseEstimator())
# does error on binary_only untagged estimator
msg = "Only 2 classes are supported"
with raises(ValueError, match=msg):
check_estimator(UntaggedBinaryClassifier())
for csr_container in CSR_CONTAINERS:
# non-regression test for estimators transforming to sparse data
check_estimator(SparseTransformer(sparse_container=csr_container))
# doesn't error on actual estimator
check_estimator(LogisticRegression())
check_estimator(LogisticRegression(C=0.01))
check_estimator(MultiTaskElasticNet())
# doesn't error on binary_only tagged estimator
check_estimator(TaggedBinaryClassifier())
check_estimator(RequiresPositiveXRegressor())
# Check regressor with requires_positive_y estimator tag
msg = "negative y values not supported!"
with raises(ValueError, match=msg):
check_estimator(RequiresPositiveYRegressor())
# Does not raise error on classifier with poor_score tag
check_estimator(PoorScoreLogisticRegression())
def test_check_outlier_corruption():
# should raise AssertionError
decision = np.array([0.0, 1.0, 1.5, 2.0])
with raises(AssertionError):
check_outlier_corruption(1, 2, decision)
# should pass
decision = np.array([0.0, 1.0, 1.0, 2.0])
check_outlier_corruption(1, 2, decision)
def test_check_estimator_sparse_tag():
"""Test that check_estimator_sparse_tag raises error when sparse tag is
misaligned."""
class EstimatorWithSparseConfig(BaseEstimator):
def __init__(self, tag_sparse, accept_sparse, fit_error=None):
self.tag_sparse = tag_sparse
self.accept_sparse = accept_sparse
self.fit_error = fit_error
def fit(self, X, y=None):
if self.fit_error:
raise self.fit_error
validate_data(self, X, y, accept_sparse=self.accept_sparse)
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = self.tag_sparse
return tags
test_cases = [
{"tag_sparse": True, "accept_sparse": True, "error_type": None},
{"tag_sparse": False, "accept_sparse": False, "error_type": None},
{"tag_sparse": False, "accept_sparse": True, "error_type": AssertionError},
{"tag_sparse": True, "accept_sparse": False, "error_type": AssertionError},
]
for test_case in test_cases:
estimator = EstimatorWithSparseConfig(
test_case["tag_sparse"],
test_case["accept_sparse"],
)
if test_case["error_type"] is None:
check_estimator_sparse_tag(estimator.__class__.__name__, estimator)
else:
with raises(test_case["error_type"]):
check_estimator_sparse_tag(estimator.__class__.__name__, estimator)
# estimator `tag_sparse=accept_sparse=False` fails on sparse data
# but does not raise the appropriate error
for fit_error in [TypeError("unexpected error"), KeyError("other error")]:
estimator = EstimatorWithSparseConfig(False, False, fit_error)
with raises(AssertionError):
check_estimator_sparse_tag(estimator.__class__.__name__, estimator)
def test_check_estimator_transformer_no_mixin():
# check that TransformerMixin is not required for transformer tests to run
# but it fails since the tag is not set
with raises(RuntimeError, "the `transformer_tags` tag is not set"):
check_estimator(BadTransformerWithoutMixin())
def test_check_estimator_clones():
# check that check_estimator doesn't modify the estimator it receives
iris = load_iris()
for Estimator in [
GaussianMixture,
LinearRegression,
SGDClassifier,
PCA,
MiniBatchKMeans,
]:
# without fitting
with ignore_warnings(category=ConvergenceWarning):
est = Estimator()
set_random_state(est)
old_hash = joblib.hash(est)
check_estimator(
est, expected_failed_checks=_get_expected_failed_checks(est)
)
assert old_hash == joblib.hash(est)
# with fitting
with ignore_warnings(category=ConvergenceWarning):
est = Estimator()
set_random_state(est)
est.fit(iris.data, iris.target)
old_hash = joblib.hash(est)
check_estimator(
est, expected_failed_checks=_get_expected_failed_checks(est)
)
assert old_hash == joblib.hash(est)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "Estimator should raise a NotFittedError when calling"
with raises(AssertionError, match=msg):
check_estimators_unfitted("estimator", NoSparseClassifier())
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier())
def test_check_no_attributes_set_in_init():
class NonConformantEstimatorPrivateSet(BaseEstimator):
def __init__(self):
self.you_should_not_set_this_ = None
class NonConformantEstimatorNoParamSet(BaseEstimator):
def __init__(self, you_should_set_this_=None):
pass
class ConformantEstimatorClassAttribute(BaseEstimator):
# making sure our __metadata_request__* class attributes are okay!
__metadata_request__fit = {"foo": True}
def fit(self, X, y=None):
return self # pragma: no cover
msg = (
"Estimator estimator_name should not set any"
" attribute apart from parameters during init."
r" Found attributes \['you_should_not_set_this_'\]."
)
with raises(AssertionError, match=msg):
check_no_attributes_set_in_init(
"estimator_name", NonConformantEstimatorPrivateSet()
)
msg = (
"Estimator estimator_name should store all parameters as an attribute"
" during init"
)
with raises(AttributeError, match=msg):
check_no_attributes_set_in_init(
"estimator_name", NonConformantEstimatorNoParamSet()
)
# a private class attribute is okay!
check_no_attributes_set_in_init(
"estimator_name", ConformantEstimatorClassAttribute()
)
# also check if cloning an estimator which has non-default set requests is
# fine. Setting a non-default value via `set_{method}_request` sets the
# private _metadata_request instance attribute which is copied in `clone`.
with config_context(enable_metadata_routing=True):
check_no_attributes_set_in_init(
"estimator_name",
ConformantEstimatorClassAttribute().set_fit_request(foo=True),
)
# Some estimator checks rely on warnings in deep functions calls. This is not
# automatically detected by pytest-run-parallel shallow AST inspection, so we
# need to mark the test function as thread-unsafe.
@_mark_thread_unsafe_if_pytest_imported
def test_check_estimator_pairwise():
# check that check_estimator() works on estimator with _pairwise
# kernel or metric
# test precomputed kernel
est = SVC(kernel="precomputed")
check_estimator(est)
# test precomputed metric
est = KNeighborsRegressor(metric="precomputed")
check_estimator(est, expected_failed_checks=_get_expected_failed_checks(est))
def test_check_classifier_data_not_an_array():
with raises(AssertionError, match="Not equal to tolerance"):
check_classifier_data_not_an_array(
"estimator_name", EstimatorInconsistentForPandas()
)
def test_check_regressor_data_not_an_array():
with raises(AssertionError, match="Not equal to tolerance"):
check_regressor_data_not_an_array(
"estimator_name", EstimatorInconsistentForPandas()
)
def test_check_dataframe_column_names_consistency():
err_msg = "Estimator does not have a feature_names_in_"
with raises(ValueError, match=err_msg):
check_dataframe_column_names_consistency("estimator_name", BaseBadClassifier())
check_dataframe_column_names_consistency("estimator_name", PartialFitChecksName())
lr = LogisticRegression()
check_dataframe_column_names_consistency(lr.__class__.__name__, lr)
lr.__doc__ = "Docstring that does not document the estimator's attributes"
err_msg = (
"Estimator LogisticRegression does not document its feature_names_in_ attribute"
)
with raises(ValueError, match=err_msg):
check_dataframe_column_names_consistency(lr.__class__.__name__, lr)
| BrokenArrayAPI |
python | zostera__django-bootstrap4 | example/app/views.py | {
"start": 943,
"end": 1043
} | class ____(FormView):
template_name = "app/form.html"
form_class = ContactForm
| DefaultFormView |
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py | {
"start": 22650,
"end": 23352
} | class ____(PreTrainedModel):
config_class = XLMRobertaXLConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": XLMRobertaXLLayer,
"attentions": XLMRobertaXLSelfAttention,
"cross_attentions": XLMRobertaXLCrossAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, XLMRobertaXLLMHead):
init.zeros_(module.bias)
| XLMRobertaXLPreTrainedModel |
python | google__flatbuffers | python/flatbuffers/number_types.py | {
"start": 1700,
"end": 1846
} | class ____(object):
bytewidth = 1
min_val = -(2**7)
max_val = (2**7) - 1
py_type = int
name = "int8"
packer_type = packer.int8
| Int8Flags |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/client.py | {
"start": 285,
"end": 5986
} | class ____:
def __init__(
self,
api_key: str,
api_base: Optional[str] = API_BASE,
timeout: Optional[float] = None,
max_retries: Optional[int] = 10,
):
self.api_key = api_key
self.api_base = api_base
self.timeout = timeout
self.max_retries = max_retries
def _get_headers(self) -> Dict[str, str]:
return {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
"User-Agent": USER_AGENT,
}
def request(self, endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
"""
Perform a synchronous request to the DeepInfra API.
Args:
endpoint (str): The API endpoint to send the request to.
payload (Dict[str, Any]): The request payload.
Returns:
Dict[str, Any]: The API response.
"""
def perform_request():
response = requests.post(
self.get_url(endpoint),
json={
**payload,
"stream": False,
},
headers=self._get_headers(),
timeout=self.timeout,
)
response.raise_for_status()
return response.json()
return retry_request(perform_request, max_retries=self.max_retries)
def request_stream(
self, endpoint: str, payload: Dict[str, Any]
) -> Generator[str, None, None]:
"""
Perform a synchronous streaming request to the DeepInfra API.
Args:
endpoint (str): The API endpoint to send the request to.
payload (Dict[str, Any]): The request payload.
Yields:
str: The streaming response from the API.
"""
def perform_request():
response = requests.post(
self.get_url(endpoint),
json={
**payload,
"stream": True,
},
headers=self._get_headers(),
stream=True,
timeout=self.timeout,
)
response.raise_for_status()
for line in response.iter_lines():
if resp := maybe_decode_sse_data(line):
yield resp
response = retry_request(perform_request, max_retries=self.max_retries)
yield from response
async def arequest(self, endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
"""
Perform an asynchronous request to the DeepInfra API.
Args:
endpoint (str): The API endpoint to send the request to.
payload (Dict[str, Any]): The request payload.
Returns:
Dict[str, Any]: The API response.
"""
async def perform_request():
async with aiohttp.ClientSession() as session:
async with session.post(
self.get_url(endpoint),
json={
**payload,
"stream": False,
},
headers=self._get_headers(),
timeout=self.timeout,
) as response:
response.raise_for_status()
return await response.json()
return await aretry_request(perform_request, max_retries=self.max_retries)
async def arequest_stream(
self, endpoint: str, payload: Dict[str, Any]
) -> AsyncGenerator[str, None]:
"""
Perform an asynchronous streaming request to the DeepInfra API.
Args:
endpoint (str): The API endpoint to send the request to.
payload (Dict[str, Any]): The request payload.
Yields:
str: The streaming response from the API.
"""
async def perform_request():
async with aiohttp.ClientSession() as session:
async with session.post(
self.get_url(endpoint),
json={
**payload,
"stream": True,
},
headers=self._get_headers(),
timeout=self.timeout,
) as response:
response.raise_for_status()
async for line in response.content:
if resp := maybe_decode_sse_data(line):
yield resp
response = await aretry_request(perform_request, max_retries=self.max_retries)
async for resp in response:
yield resp
def get_url(self, endpoint: str) -> str:
"""
Get DeepInfra API URL.
"""
return f"{self.api_base}/{endpoint}"
def get_model_details(self, model_name: str) -> requests.Response:
"""
Get model details from DeepInfra API.
If the model does not exist, a 404 response is returned.
Returns:
requests.Response: The API response.
"""
request_url = self.get_url(f"models/{model_name}")
return requests.get(request_url, headers=self._get_headers())
def is_function_calling_model(self, model_name: str) -> bool:
"""
Check if the model is a function calling model.
Returns:
bool: True if the model is a function calling model, False otherwise.
"""
response = self.get_model_details(model_name)
if response.status_code == 404:
return False
response_json = response.json()
tags = response_json.get("tags", [])
return "tools" in tags
| DeepInfraClient |
python | bokeh__bokeh | src/bokeh/util/options.py | {
"start": 1344,
"end": 3028
} | class ____(HasProps, Local):
''' Leverage the Bokeh properties type system for specifying and
validating configuration options.
Subclasses of ``Options`` specify a set of configuration options
using standard Bokeh properties:
.. code-block:: python
class ConnectOpts(Options):
host = String(default="127.0.0.1", help="a host value")
port = Int(default=5590, help="a port value")
Then a ``ConnectOpts`` can be created by passing a dictionary
containing keys and values corresponding to the configuration options,
as well as any additional keys and values. The items corresponding
to the properties on ``ConnectOpts`` will be **removed** from the
dictionary. This can be useful for functions that accept their own
set of config keyword arguments in addition to some set of Bokeh model
properties.
'''
def __init__(self, kw: dict[str, Any]) -> None:
# remove any items that match our declared properties
props: dict[str, Any] = {}
for k in self.properties():
if k in kw:
props[k] = kw.pop(k)
super().__init__(**props)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Options |
python | Lightning-AI__lightning | tests/tests_pytorch/plugins/test_checkpoint_io_plugin.py | {
"start": 1133,
"end": 6522
} | class ____(CheckpointIO):
def save_checkpoint(self, checkpoint: dict[str, Any], path: _PATH, storage_options: Optional[Any] = None) -> None:
torch.save(checkpoint, path)
def load_checkpoint(
self, path: _PATH, storage_options: Optional[Any] = None, weights_only: bool = True
) -> dict[str, Any]:
return torch.load(path, weights_only=True)
def remove_checkpoint(self, path: _PATH) -> None:
os.remove(path)
def test_checkpoint_plugin_called(tmp_path):
"""Ensure that the custom checkpoint IO plugin and torch checkpoint IO plugin is called when saving/loading."""
checkpoint_plugin = CustomCheckpointIO()
checkpoint_plugin = MagicMock(wraps=checkpoint_plugin, spec=CustomCheckpointIO)
ck = ModelCheckpoint(dirpath=tmp_path, save_last=True)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
accelerator="cpu",
strategy=SingleDeviceStrategy("cpu", checkpoint_io=checkpoint_plugin),
callbacks=ck,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=0,
limit_test_batches=1,
)
trainer.fit(model)
ckpt_files = {fn.name for fn in Path(tmp_path).glob("*.ckpt")}
assert ckpt_files == {"epoch=1-step=2.ckpt", "last.ckpt"}
assert trainer.checkpoint_callback.best_model_path == str(tmp_path / "epoch=1-step=2.ckpt")
assert trainer.checkpoint_callback.last_model_path == str(tmp_path / "last.ckpt")
assert checkpoint_plugin.save_checkpoint.call_count == 4
assert checkpoint_plugin.remove_checkpoint.call_count == 1
trainer.test(model, ckpt_path=ck.last_model_path)
checkpoint_plugin.load_checkpoint.assert_called_with(str(tmp_path / "last.ckpt"), weights_only=None)
checkpoint_plugin.reset_mock()
ck = ModelCheckpoint(dirpath=tmp_path, save_last=True)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
accelerator="cpu",
strategy=SingleDeviceStrategy("cpu"),
plugins=[checkpoint_plugin],
callbacks=ck,
max_epochs=2,
limit_train_batches=1,
limit_val_batches=0,
limit_test_batches=1,
)
trainer.fit(model)
ckpt_files = {fn.name for fn in Path(tmp_path).glob("*.ckpt")}
assert ckpt_files == {"epoch=1-step=2.ckpt", "last.ckpt", "epoch=1-step=2-v1.ckpt", "last-v1.ckpt"}
assert trainer.checkpoint_callback.best_model_path == str(tmp_path / "epoch=1-step=2-v1.ckpt")
assert trainer.checkpoint_callback.last_model_path == str(tmp_path / "last-v1.ckpt")
assert checkpoint_plugin.save_checkpoint.call_count == 4
assert checkpoint_plugin.remove_checkpoint.call_count == 1
trainer.test(model, ckpt_path=ck.last_model_path)
checkpoint_plugin.load_checkpoint.assert_called_once()
checkpoint_plugin.load_checkpoint.assert_called_with(str(tmp_path / "last-v1.ckpt"), weights_only=None)
@pytest.mark.flaky(reruns=3)
def test_async_checkpoint_plugin(tmp_path):
"""Ensure that the custom checkpoint IO plugin and torch checkpoint IO plugin is called when async saving and
loading."""
checkpoint_plugin = AsyncCheckpointIO()
checkpoint_plugin.save_checkpoint = Mock(wraps=checkpoint_plugin.save_checkpoint)
checkpoint_plugin.remove_checkpoint = Mock(wraps=checkpoint_plugin.remove_checkpoint)
class CustomBoringModel(BoringModel):
def on_fit_start(self):
base_ckpt_io = self.trainer.strategy.checkpoint_io.checkpoint_io
base_ckpt_io.save_checkpoint = Mock(wraps=base_ckpt_io.save_checkpoint)
base_ckpt_io.remove_checkpoint = Mock(wraps=base_ckpt_io.remove_checkpoint)
ck = ModelCheckpoint(dirpath=tmp_path, save_top_k=2, monitor="step", mode="max")
model = CustomBoringModel()
trainer = Trainer(
default_root_dir=tmp_path,
plugins=[checkpoint_plugin],
callbacks=ck,
max_epochs=3,
limit_train_batches=1,
limit_val_batches=0,
enable_progress_bar=False,
enable_model_summary=False,
)
# We add a validate step to test that async works when fit or validate is called multiple times.
trainer.validate(model)
trainer.fit(model)
assert checkpoint_plugin.save_checkpoint.call_count == 3
assert checkpoint_plugin.remove_checkpoint.call_count == 1
base_ckpt_io = trainer.strategy.checkpoint_io.checkpoint_io
assert base_ckpt_io.save_checkpoint.call_count == 3
assert base_ckpt_io.remove_checkpoint.call_count == 1
def test_multi_wrapped_checkpoint_io_initialization():
base_ckpt_io = TorchCheckpointIO()
wrap_ckpt = AsyncCheckpointIO(base_ckpt_io)
ckpt_io = AsyncCheckpointIO(wrap_ckpt)
assert ckpt_io.checkpoint_io is wrap_ckpt
assert ckpt_io.checkpoint_io.checkpoint_io is base_ckpt_io
assert ckpt_io._base_checkpoint_io_configured is True
assert ckpt_io.checkpoint_io._base_checkpoint_io_configured is True
wrap_ckpt = AsyncCheckpointIO()
ckpt_io = AsyncCheckpointIO(wrap_ckpt)
trainer = Trainer(accelerator="cpu", plugins=[ckpt_io])
trainer.strategy.checkpoint_io
assert ckpt_io.checkpoint_io is wrap_ckpt
assert isinstance(ckpt_io.checkpoint_io.checkpoint_io, TorchCheckpointIO)
assert ckpt_io._base_checkpoint_io_configured is True
assert ckpt_io.checkpoint_io._base_checkpoint_io_configured is True
| CustomCheckpointIO |
python | getsentry__sentry | src/sentry/pipeline/base.py | {
"start": 977,
"end": 1048
} | class ____(Protocol):
@property
def key(self) -> str: ...
| _HasKey |
python | redis__redis-py | redis/connection.py | {
"start": 71595,
"end": 85376
} | class ____:
"""
Abstract class for handling maintenance notifications logic.
This class is mixed into the ConnectionPool classes.
This class is not intended to be used directly!
All logic related to maintenance notifications and
connection pool handling is encapsulated in this class.
"""
def __init__(
self,
maint_notifications_config: Optional[MaintNotificationsConfig] = None,
**kwargs,
):
# Initialize maintenance notifications
is_protocol_supported = kwargs.get("protocol") in [3, "3"]
if maint_notifications_config is None and is_protocol_supported:
maint_notifications_config = MaintNotificationsConfig()
if maint_notifications_config and maint_notifications_config.enabled:
if not is_protocol_supported:
raise RedisError(
"Maintenance notifications handlers on connection are only supported with RESP version 3"
)
self._maint_notifications_pool_handler = MaintNotificationsPoolHandler(
self, maint_notifications_config
)
self._update_connection_kwargs_for_maint_notifications(
self._maint_notifications_pool_handler
)
else:
self._maint_notifications_pool_handler = None
@property
@abstractmethod
def connection_kwargs(self) -> Dict[str, Any]:
pass
@connection_kwargs.setter
@abstractmethod
def connection_kwargs(self, value: Dict[str, Any]):
pass
@abstractmethod
def _get_pool_lock(self) -> threading.RLock:
pass
@abstractmethod
def _get_free_connections(self) -> Iterable["MaintNotificationsAbstractConnection"]:
pass
@abstractmethod
def _get_in_use_connections(
self,
) -> Iterable["MaintNotificationsAbstractConnection"]:
pass
def maint_notifications_enabled(self):
"""
Returns:
True if the maintenance notifications are enabled, False otherwise.
The maintenance notifications config is stored in the pool handler.
If the pool handler is not set, the maintenance notifications are not enabled.
"""
maint_notifications_config = (
self._maint_notifications_pool_handler.config
if self._maint_notifications_pool_handler
else None
)
return maint_notifications_config and maint_notifications_config.enabled
def update_maint_notifications_config(
self, maint_notifications_config: MaintNotificationsConfig
):
"""
Updates the maintenance notifications configuration.
This method should be called only if the pool was created
without enabling the maintenance notifications and
in a later point in time maintenance notifications
are requested to be enabled.
"""
if (
self.maint_notifications_enabled()
and not maint_notifications_config.enabled
):
raise ValueError(
"Cannot disable maintenance notifications after enabling them"
)
# first update pool settings
if not self._maint_notifications_pool_handler:
self._maint_notifications_pool_handler = MaintNotificationsPoolHandler(
self, maint_notifications_config
)
else:
self._maint_notifications_pool_handler.config = maint_notifications_config
# then update connection kwargs and existing connections
self._update_connection_kwargs_for_maint_notifications(
self._maint_notifications_pool_handler
)
self._update_maint_notifications_configs_for_connections(
self._maint_notifications_pool_handler
)
def _update_connection_kwargs_for_maint_notifications(
self, maint_notifications_pool_handler: MaintNotificationsPoolHandler
):
"""
Update the connection kwargs for all future connections.
"""
if not self.maint_notifications_enabled():
return
self.connection_kwargs.update(
{
"maint_notifications_pool_handler": maint_notifications_pool_handler,
"maint_notifications_config": maint_notifications_pool_handler.config,
}
)
# Store original connection parameters for maintenance notifications.
if self.connection_kwargs.get("orig_host_address", None) is None:
# If orig_host_address is None it means we haven't
# configured the original values yet
self.connection_kwargs.update(
{
"orig_host_address": self.connection_kwargs.get("host"),
"orig_socket_timeout": self.connection_kwargs.get(
"socket_timeout", None
),
"orig_socket_connect_timeout": self.connection_kwargs.get(
"socket_connect_timeout", None
),
}
)
def _update_maint_notifications_configs_for_connections(
self, maint_notifications_pool_handler: MaintNotificationsPoolHandler
):
"""Update the maintenance notifications config for all connections in the pool."""
with self._get_pool_lock():
for conn in self._get_free_connections():
conn.set_maint_notifications_pool_handler_for_connection(
maint_notifications_pool_handler
)
conn.maint_notifications_config = (
maint_notifications_pool_handler.config
)
conn.disconnect()
for conn in self._get_in_use_connections():
conn.set_maint_notifications_pool_handler_for_connection(
maint_notifications_pool_handler
)
conn.maint_notifications_config = (
maint_notifications_pool_handler.config
)
conn.mark_for_reconnect()
def _should_update_connection(
self,
conn: "MaintNotificationsAbstractConnection",
matching_pattern: Literal[
"connected_address", "configured_address", "notification_hash"
] = "connected_address",
matching_address: Optional[str] = None,
matching_notification_hash: Optional[int] = None,
) -> bool:
"""
Check if the connection should be updated based on the matching criteria.
"""
if matching_pattern == "connected_address":
if matching_address and conn.getpeername() != matching_address:
return False
elif matching_pattern == "configured_address":
if matching_address and conn.host != matching_address:
return False
elif matching_pattern == "notification_hash":
if (
matching_notification_hash
and conn.maintenance_notification_hash != matching_notification_hash
):
return False
return True
def update_connection_settings(
self,
conn: "MaintNotificationsAbstractConnection",
state: Optional["MaintenanceState"] = None,
maintenance_notification_hash: Optional[int] = None,
host_address: Optional[str] = None,
relaxed_timeout: Optional[float] = None,
update_notification_hash: bool = False,
reset_host_address: bool = False,
reset_relaxed_timeout: bool = False,
):
"""
Update the settings for a single connection.
"""
if state:
conn.maintenance_state = state
if update_notification_hash:
# update the notification hash only if requested
conn.maintenance_notification_hash = maintenance_notification_hash
if host_address is not None:
conn.set_tmp_settings(tmp_host_address=host_address)
if relaxed_timeout is not None:
conn.set_tmp_settings(tmp_relaxed_timeout=relaxed_timeout)
if reset_relaxed_timeout or reset_host_address:
conn.reset_tmp_settings(
reset_host_address=reset_host_address,
reset_relaxed_timeout=reset_relaxed_timeout,
)
conn.update_current_socket_timeout(relaxed_timeout)
def update_connections_settings(
self,
state: Optional["MaintenanceState"] = None,
maintenance_notification_hash: Optional[int] = None,
host_address: Optional[str] = None,
relaxed_timeout: Optional[float] = None,
matching_address: Optional[str] = None,
matching_notification_hash: Optional[int] = None,
matching_pattern: Literal[
"connected_address", "configured_address", "notification_hash"
] = "connected_address",
update_notification_hash: bool = False,
reset_host_address: bool = False,
reset_relaxed_timeout: bool = False,
include_free_connections: bool = True,
):
"""
Update the settings for all matching connections in the pool.
This method does not create new connections.
This method does not affect the connection kwargs.
:param state: The maintenance state to set for the connection.
:param maintenance_notification_hash: The hash of the maintenance notification
to set for the connection.
:param host_address: The host address to set for the connection.
:param relaxed_timeout: The relaxed timeout to set for the connection.
:param matching_address: The address to match for the connection.
:param matching_notification_hash: The notification hash to match for the connection.
:param matching_pattern: The pattern to match for the connection.
:param update_notification_hash: Whether to update the notification hash for the connection.
:param reset_host_address: Whether to reset the host address to the original address.
:param reset_relaxed_timeout: Whether to reset the relaxed timeout to the original timeout.
:param include_free_connections: Whether to include free/available connections.
"""
with self._get_pool_lock():
for conn in self._get_in_use_connections():
if self._should_update_connection(
conn,
matching_pattern,
matching_address,
matching_notification_hash,
):
self.update_connection_settings(
conn,
state=state,
maintenance_notification_hash=maintenance_notification_hash,
host_address=host_address,
relaxed_timeout=relaxed_timeout,
update_notification_hash=update_notification_hash,
reset_host_address=reset_host_address,
reset_relaxed_timeout=reset_relaxed_timeout,
)
if include_free_connections:
for conn in self._get_free_connections():
if self._should_update_connection(
conn,
matching_pattern,
matching_address,
matching_notification_hash,
):
self.update_connection_settings(
conn,
state=state,
maintenance_notification_hash=maintenance_notification_hash,
host_address=host_address,
relaxed_timeout=relaxed_timeout,
update_notification_hash=update_notification_hash,
reset_host_address=reset_host_address,
reset_relaxed_timeout=reset_relaxed_timeout,
)
def update_connection_kwargs(
self,
**kwargs,
):
"""
Update the connection kwargs for all future connections.
This method updates the connection kwargs for all future connections created by the pool.
Existing connections are not affected.
"""
self.connection_kwargs.update(kwargs)
def update_active_connections_for_reconnect(
self,
moving_address_src: Optional[str] = None,
):
"""
Mark all active connections for reconnect.
This is used when a cluster node is migrated to a different address.
:param moving_address_src: The address of the node that is being moved.
"""
with self._get_pool_lock():
for conn in self._get_in_use_connections():
if self._should_update_connection(
conn, "connected_address", moving_address_src
):
conn.mark_for_reconnect()
def disconnect_free_connections(
self,
moving_address_src: Optional[str] = None,
):
"""
Disconnect all free/available connections.
This is used when a cluster node is migrated to a different address.
:param moving_address_src: The address of the node that is being moved.
"""
with self._get_pool_lock():
for conn in self._get_free_connections():
if self._should_update_connection(
conn, "connected_address", moving_address_src
):
conn.disconnect()
| MaintNotificationsAbstractConnectionPool |
python | django__django | tests/forms_tests/widget_tests/test_clearablefileinput.py | {
"start": 168,
"end": 401
} | class ____:
"""
Quacks like a FieldFile (has a .url and string representation), but
doesn't require us to care about storages etc.
"""
url = "something"
def __str__(self):
return self.url
| FakeFieldFile |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/remote_explorer.py | {
"start": 1256,
"end": 1345
} | class ____:
Context = "remote_context_menu"
New = "remote_new_menu"
| RemoteViewMenus |
python | spyder-ide__spyder | spyder/plugins/console/widgets/main_widget.py | {
"start": 1585,
"end": 2072
} | class ____:
# Triggers
Environment = 'environment_action'
ExternalEditor = 'external_editor_action'
MaxLineCount = 'max_line_count_action'
# The name of the action needs to match name of the shortcut
# so 'Quit' is used instead of something like 'quit_action'
Quit = 'Quit'
Run = 'run_action'
SysPath = 'sys_path_action'
# Toggles
ToggleCodeCompletion = 'toggle_code_completion_action'
ToggleWrap = 'toggle_wrap_action'
| ConsoleWidgetActions |
python | google__pytype | pytype/errors/error_types.py | {
"start": 5910,
"end": 6073
} | class ____:
protocol: ProtocolError | None = None
noniterable_str: NonIterableStrError | None = None
typed_dict: TypedDictError | None = None
| MatcherErrorDetails |
python | gevent__gevent | src/greentest/3.13/test_ssl.py | {
"start": 214513,
"end": 225556
} | class ____(unittest.TestCase):
"""Verify behavior of close sockets with received data before to the handshake.
"""
class SingleConnectionTestServerThread(threading.Thread):
def __init__(self, *, name, call_after_accept, timeout=None):
self.call_after_accept = call_after_accept
self.received_data = b'' # set by .run()
self.wrap_error = None # set by .run()
self.listener = None # set by .start()
self.port = None # set by .start()
if timeout is None:
self.timeout = support.SHORT_TIMEOUT
else:
self.timeout = timeout
super().__init__(name=name)
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
try:
if self.listener:
self.listener.close()
except OSError:
pass
self.join()
self.wrap_error = None # avoid dangling references
def start(self):
self.ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.ssl_ctx.verify_mode = ssl.CERT_REQUIRED
self.ssl_ctx.load_verify_locations(cafile=ONLYCERT)
self.ssl_ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
self.listener = socket.socket()
self.port = socket_helper.bind_port(self.listener)
self.listener.settimeout(self.timeout)
self.listener.listen(1)
super().start()
def run(self):
try:
conn, address = self.listener.accept()
except TimeoutError:
# on timeout, just close the listener
return
finally:
self.listener.close()
with conn:
if self.call_after_accept(conn):
return
try:
tls_socket = self.ssl_ctx.wrap_socket(conn, server_side=True)
except OSError as err: # ssl.SSLError inherits from OSError
self.wrap_error = err
else:
try:
self.received_data = tls_socket.recv(400)
except OSError:
pass # closed, protocol error, etc.
def non_linux_skip_if_other_okay_error(self, err):
if sys.platform in ("linux", "android"):
return # Expect the full test setup to always work on Linux.
if (isinstance(err, ConnectionResetError) or
(isinstance(err, OSError) and err.errno == errno.EINVAL) or
re.search('wrong.version.number', str(getattr(err, "reason", "")), re.I)):
# On Windows the TCP RST leads to a ConnectionResetError
# (ECONNRESET) which Linux doesn't appear to surface to userspace.
# If wrap_socket() winds up on the "if connected:" path and doing
# the actual wrapping... we get an SSLError from OpenSSL. Typically
# WRONG_VERSION_NUMBER. While appropriate, neither is the scenario
# we're specifically trying to test. The way this test is written
# is known to work on Linux. We'll skip it anywhere else that it
# does not present as doing so.
try:
self.skipTest(f"Could not recreate conditions on {sys.platform}:"
f" {err=}")
finally:
# gh-108342: Explicitly break the reference cycle
err = None
# If maintaining this conditional winds up being a problem.
# just turn this into an unconditional skip anything but Linux.
# The important thing is that our CI has the logic covered.
def test_preauth_data_to_tls_server(self):
server_accept_called = threading.Event()
ready_for_server_wrap_socket = threading.Event()
def call_after_accept(unused):
server_accept_called.set()
if not ready_for_server_wrap_socket.wait(support.SHORT_TIMEOUT):
raise RuntimeError("wrap_socket event never set, test may fail.")
return False # Tell the server thread to continue.
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="preauth_data_to_tls_server")
self.enterContext(server) # starts it & unittest.TestCase stops it.
with socket.socket() as client:
client.connect(server.listener.getsockname())
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(client)
client.setblocking(False)
server_accept_called.wait()
client.send(b"DELETE /data HTTP/1.0\r\n\r\n")
client.close() # RST
ready_for_server_wrap_socket.set()
server.join()
wrap_error = server.wrap_error
server.wrap_error = None
try:
self.assertEqual(b"", server.received_data)
self.assertIsInstance(wrap_error, OSError) # All platforms.
self.non_linux_skip_if_other_okay_error(wrap_error)
self.assertIsInstance(wrap_error, ssl.SSLError)
self.assertIn("before TLS handshake with data", wrap_error.args[1])
self.assertIn("before TLS handshake with data", wrap_error.reason)
self.assertNotEqual(0, wrap_error.args[0])
self.assertIsNone(wrap_error.library, msg="attr must exist")
finally:
# gh-108342: Explicitly break the reference cycle
wrap_error = None
server = None
def test_preauth_data_to_tls_client(self):
server_can_continue_with_wrap_socket = threading.Event()
client_can_continue_with_wrap_socket = threading.Event()
def call_after_accept(conn_to_client):
if not server_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
print("ERROR: test client took too long")
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
b"HTTP/1.0 307 Temporary Redirect\r\n"
b"Location: https://example.com/someone-elses-server\r\n"
b"\r\n")
conn_to_client.close() # RST
client_can_continue_with_wrap_socket.set()
return True # Tell the server to stop.
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="preauth_data_to_tls_client")
self.enterContext(server) # starts it & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
with socket.socket() as client:
client.connect(server.listener.getsockname())
server_can_continue_with_wrap_socket.set()
if not client_can_continue_with_wrap_socket.wait(support.SHORT_TIMEOUT):
self.fail("test server took too long")
ssl_ctx = ssl.create_default_context()
try:
tls_client = ssl_ctx.wrap_socket(
client, server_hostname="localhost")
except OSError as err: # SSLError inherits from OSError
wrap_error = err
received_data = b""
else:
wrap_error = None
received_data = tls_client.recv(400)
tls_client.close()
server.join()
try:
self.assertEqual(b"", received_data)
self.assertIsInstance(wrap_error, OSError) # All platforms.
self.non_linux_skip_if_other_okay_error(wrap_error)
self.assertIsInstance(wrap_error, ssl.SSLError)
self.assertIn("before TLS handshake with data", wrap_error.args[1])
self.assertIn("before TLS handshake with data", wrap_error.reason)
self.assertNotEqual(0, wrap_error.args[0])
self.assertIsNone(wrap_error.library, msg="attr must exist")
finally:
# gh-108342: Explicitly break the reference cycle
with warnings_helper.check_no_resource_warning(self):
wrap_error = None
server = None
def test_https_client_non_tls_response_ignored(self):
server_responding = threading.Event()
class SynchronizedHTTPSConnection(http.client.HTTPSConnection):
def connect(self):
# Call clear text HTTP connect(), not the encrypted HTTPS (TLS)
# connect(): wrap_socket() is called manually below.
http.client.HTTPConnection.connect(self)
# Wait for our fault injection server to have done its thing.
if not server_responding.wait(support.SHORT_TIMEOUT) and support.verbose:
sys.stdout.write("server_responding event never set.")
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
def call_after_accept(conn_to_client):
# This forces an immediate connection close via RST on .close().
set_socket_so_linger_on_with_zero_timeout(conn_to_client)
conn_to_client.send(
b"HTTP/1.0 402 Payment Required\r\n"
b"\r\n")
conn_to_client.close() # RST
server_responding.set()
return True # Tell the server to stop.
timeout = 2.0
server = self.SingleConnectionTestServerThread(
call_after_accept=call_after_accept,
name="non_tls_http_RST_responder",
timeout=timeout)
self.enterContext(server) # starts it & unittest.TestCase stops it.
# Redundant; call_after_accept sets SO_LINGER on the accepted conn.
set_socket_so_linger_on_with_zero_timeout(server.listener)
connection = SynchronizedHTTPSConnection(
server.listener.getsockname()[0],
port=server.port,
context=ssl.create_default_context(),
timeout=timeout,
)
# There are lots of reasons this raises as desired, long before this
# test was added. Sending the request requires a successful TLS wrapped
# socket; that fails if the connection is broken. It may seem pointless
# to test this. It serves as an illustration of something that we never
# want to happen... properly not happening.
with warnings_helper.check_no_resource_warning(self), \
self.assertRaises(OSError):
connection.request("HEAD", "/test", headers={"Host": "localhost"})
response = connection.getresponse()
server.join()
| TestPreHandshakeClose |
python | ray-project__ray | python/ray/llm/tests/common/cloud/test_cloud_filesystem.py | {
"start": 198,
"end": 3549
} | class ____:
"""Tests for the CloudFileSystem class."""
@patch("ray.llm._internal.common.utils.cloud_utils.GCSFileSystem")
def test_download_model(self, mock_gcs_filesystem):
"""Test downloading a model from cloud storage."""
# Mock GCSFileSystem.get_file to return hash content
mock_gcs_filesystem.get_file.return_value = "abcdef1234567890"
# Create temp directory for testing
with tempfile.TemporaryDirectory() as tempdir:
# Test downloading model
with patch.object(CloudFileSystem, "download_files") as mock_download:
CloudFileSystem.download_model(tempdir, "gs://bucket/model", False)
# Check that hash file was processed
assert os.path.exists(os.path.join(tempdir, "refs", "main"))
with open(os.path.join(tempdir, "refs", "main"), "r") as f:
assert f.read() == "abcdef1234567890"
# Verify get_file was called for hash file
mock_gcs_filesystem.get_file.assert_called_once_with(
"gs://bucket/model/hash", decode_as_utf_8=True
)
# Check that download_files was called correctly
mock_download.assert_called_once()
call_args = mock_download.call_args[1]
assert call_args["path"] == os.path.join(
tempdir, "snapshots", "abcdef1234567890"
)
assert call_args["bucket_uri"] == "gs://bucket/model"
assert call_args["substrings_to_include"] == []
assert call_args["suffixes_to_exclude"] is None
@patch("ray.llm._internal.common.utils.cloud_utils.GCSFileSystem")
def test_upload_model(self, mock_gcs_filesystem):
"""Test uploading a model to cloud storage."""
# Create temp directory for testing
with tempfile.TemporaryDirectory() as tempdir:
hash = "abcdef1234567890"
# Create refs/main file
os.makedirs(os.path.join(tempdir, "refs"), exist_ok=True)
model_rev_path = os.path.join(tempdir, "refs", "main")
with open(model_rev_path, "w") as f:
f.write(hash)
# Create snapshots/<hash> folder
model_asset_path = os.path.join(tempdir, "snapshots", hash)
os.makedirs(model_asset_path)
# Test uploading model
CloudFileSystem.upload_model(tempdir, "gs://bucket/model")
# Check that upload_files was called twice - once for model assets and once for hash file
assert mock_gcs_filesystem.upload_files.call_count == 2
# Verify the calls were made with correct arguments
calls = mock_gcs_filesystem.upload_files.call_args_list
call_paths = {
call[0][0] for call in calls
} # Extract local_path from each call
call_uris = {
call[0][1] for call in calls
} # Extract bucket_uri from each call
assert model_asset_path in call_paths
assert model_rev_path in call_paths
assert "gs://bucket/model" in call_uris
assert "gs://bucket/model/hash" in call_uris
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestCloudFileSystem |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/hooks/sql.py | {
"start": 5026,
"end": 43914
} | class ____(BaseHook):
"""
Abstract base class for sql hooks.
When subclassing, maintainers can override the `_make_common_data_structure` method:
This method transforms the result of the handler method (typically `cursor.fetchall()`) into
objects common across all Hooks derived from this class (tuples). Most of the time, the underlying SQL
library already returns tuples from its cursor, and the `_make_common_data_structure` method can be ignored.
:param schema: Optional DB schema that overrides the schema specified in the connection. Make sure that
if you change the schema parameter value in the constructor of the derived Hook, such change
should be done before calling the ``DBApiHook.__init__()``.
:param log_sql: Whether to log SQL query when it's executed. Defaults to *True*.
"""
# Override to provide the connection name.
conn_name_attr: str
# Override to have a default connection id for a particular dbHook
default_conn_name = "default_conn_id"
# Override if this db doesn't support semicolons in SQL queries
strip_semicolon = False
# Override if this db supports autocommit.
supports_autocommit = False
# Override if this db supports executemany.
supports_executemany = False
# Override with the object that exposes the connect method
connector: ConnectorProtocol | None = None
# Override with db-specific query to check connection
_test_connection_sql = "select 1"
# Default SQL placeholder
_placeholder: str = "%s"
_dialects: MutableMapping[str, MutableMapping] = resolve_dialects()
_resolve_target_fields = conf.getboolean("core", "dbapihook_resolve_target_fields", fallback=False)
def __init__(self, *args, schema: str | None = None, log_sql: bool = True, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
if len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
# We should not make schema available in deriving hooks for backwards compatibility
# If a hook deriving from DBApiHook has a need to access schema, then it should retrieve it
# from kwargs and store it on its own. We do not run "pop" here as we want to give the
# Hook deriving from the DBApiHook to still have access to the field in its constructor
self.__schema = schema
self.log_sql = log_sql
self.descriptions: list[Sequence[Sequence] | None] = []
self._insert_statement_format: str | None = kwargs.get("insert_statement_format")
self._replace_statement_format: str | None = kwargs.get("replace_statement_format")
self._escape_word_format: str | None = kwargs.get("escape_word_format")
self._escape_column_names: bool | None = kwargs.get("escape_column_names")
self._connection: Connection | None = kwargs.pop("connection", None)
def get_conn_id(self) -> str:
return getattr(self, self.conn_name_attr)
@cached_property
def placeholder(self) -> str:
"""Return SQL placeholder."""
placeholder = self.connection_extra.get("placeholder")
if placeholder:
if placeholder in SQL_PLACEHOLDERS:
return placeholder
self.log.warning(
"Placeholder '%s' defined in Connection '%s' is not listed in 'DEFAULT_SQL_PLACEHOLDERS' "
"and got ignored. Falling back to the default placeholder '%s'.",
placeholder,
self.get_conn_id(),
self._placeholder,
)
return self._placeholder
@property
def insert_statement_format(self) -> str:
"""Return the insert statement format."""
if self._insert_statement_format is None:
self._insert_statement_format = self.connection_extra.get(
"insert_statement_format", "INSERT INTO {} {} VALUES ({})"
)
return self._insert_statement_format
@property
def replace_statement_format(self) -> str:
"""Return the replacement statement format."""
if self._replace_statement_format is None:
self._replace_statement_format = self.connection_extra.get(
"replace_statement_format", "REPLACE INTO {} {} VALUES ({})"
)
return self._replace_statement_format
@property
def escape_word_format(self) -> str:
"""Return the escape word format."""
if self._escape_word_format is None:
self._escape_word_format = self.connection_extra.get("escape_word_format", '"{}"')
return self._escape_word_format
@property
def escape_column_names(self) -> bool:
"""Return the escape column names flag."""
if self._escape_column_names is None:
self._escape_column_names = self.connection_extra.get("escape_column_names", False)
return self._escape_column_names
@property
def connection(self) -> Connection:
if self._connection is None:
self._connection = self.get_connection(self.get_conn_id())
return self._connection
@connection.setter
def connection(self, value: Any) -> None:
if value != self.connection:
self.log.warning(
"This setter is for backward compatibility and should not be used.\n"
"Since the introduction of connection property, the providers listed below "
"breaks due to assigning value to self.connection in their __init__ method.\n"
"* apache-airflow-providers-mysql<5.7.1\n"
"* apache-airflow-providers-elasticsearch<5.5.1\n"
"* apache-airflow-providers-postgres<5.13.0"
)
@property
def connection_extra(self) -> dict:
return self.connection.extra_dejson
@cached_property
def connection_extra_lower(self) -> dict:
"""
``connection.extra_dejson`` but where keys are converted to lower case.
This is used internally for case-insensitive access of extra params.
"""
return {k.lower(): v for k, v in self.connection_extra.items()}
def get_conn(self) -> Any:
"""Return a connection object."""
db = self.connection
if self.connector is None:
raise RuntimeError(f"{type(self).__name__} didn't have `self.connector` set!")
host = db.host or ""
login = db.login or ""
schema = db.schema or ""
return self.connector.connect(host=host, port=cast("int", db.port), username=login, schema=schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.connection
if self.__schema:
conn.schema = self.__schema
return conn.get_uri()
@property
def sqlalchemy_url(self) -> URL:
"""
Return a Sqlalchemy.engine.URL object from the connection.
Needs to be implemented in the provider subclass to return the sqlalchemy.engine.URL object.
:return: the extracted sqlalchemy.engine.URL object.
"""
qualname = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
if qualname != "airflow.providers.common.sql.hooks.sql.DbApiHook":
msg = f"{qualname!r} does not implement/support built SQLAlchemy URL."
else:
msg = "`sqlalchemy_url` property should be implemented in the provider subclass."
raise NotImplementedError(msg)
def get_sqlalchemy_engine(self, engine_kwargs=None) -> Engine:
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
try:
url: URL | str = self.sqlalchemy_url
except NotImplementedError:
url = self.get_uri()
self.log.debug("url: %s", url)
self.log.debug("engine_kwargs: %s", engine_kwargs)
return create_engine(url=url, **engine_kwargs)
@property
def inspector(self) -> Inspector:
return inspect(self.get_sqlalchemy_engine())
@cached_property
def dialect_name(self) -> str:
try:
return make_url(self.get_uri()).get_dialect().name
except (ArgumentError, NoSuchModuleError, ValueError):
config = self.connection_extra
sqlalchemy_scheme = config.get("sqlalchemy_scheme")
if sqlalchemy_scheme:
return sqlalchemy_scheme.split("+")[0] if "+" in sqlalchemy_scheme else sqlalchemy_scheme
return config.get("dialect", "default")
@cached_property
def dialect(self) -> Dialect:
from airflow.utils.module_loading import import_string
dialect_info = self._dialects.get(self.dialect_name)
self.log.debug("dialect_info: %s", dialect_info)
if dialect_info:
try:
return import_string(dialect_info["dialect_class_name"])(self)
except ImportError:
raise AirflowOptionalProviderFeatureException(
f"{dialect_info['dialect_class_name']} not found, run: pip install "
f"'{dialect_info['provider_name']}'."
)
return Dialect(self)
@property
def reserved_words(self) -> set[str]:
return self.get_reserved_words(self.dialect_name)
@lru_cache(maxsize=None)
def get_reserved_words(self, dialect_name: str) -> set[str]:
result = set()
with suppress(ImportError, ModuleNotFoundError, NoSuchModuleError):
dialect_module = import_string(f"sqlalchemy.dialects.{dialect_name}.base")
if hasattr(dialect_module, "RESERVED_WORDS"):
result = set(dialect_module.RESERVED_WORDS)
else:
dialect_module = import_string(f"sqlalchemy.dialects.{dialect_name}.reserved_words")
reserved_words_attr = f"RESERVED_WORDS_{dialect_name.upper()}"
if hasattr(dialect_module, reserved_words_attr):
result = set(getattr(dialect_module, reserved_words_attr))
self.log.debug("reserved words for '%s': %s", dialect_name, result)
return result
@deprecated(
reason="Replaced by function `get_df`.",
category=AirflowProviderDeprecationWarning,
action="ignore",
)
def get_pandas_df(
self,
sql,
parameters: list | tuple | Mapping[str, Any] | None = None,
**kwargs,
) -> PandasDataFrame:
"""
Execute the sql and returns a pandas dataframe.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
"""
return self.get_df(sql, parameters, df_type="pandas", **kwargs)
@deprecated(
reason="Replaced by function `get_df_by_chunks`.",
category=AirflowProviderDeprecationWarning,
action="ignore",
)
def get_pandas_df_by_chunks(
self,
sql,
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
chunksize: int,
**kwargs,
) -> Generator[PandasDataFrame, None, None]:
return self.get_df_by_chunks(sql, parameters, chunksize=chunksize, df_type="pandas", **kwargs)
@overload
def get_df(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
df_type: Literal["pandas"] = "pandas",
**kwargs: Any,
) -> PandasDataFrame: ...
@overload
def get_df(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
df_type: Literal["polars"],
**kwargs: Any,
) -> PolarsDataFrame: ...
def get_df(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
df_type: Literal["pandas", "polars"] = "pandas",
**kwargs,
) -> PandasDataFrame | PolarsDataFrame:
"""
Execute the sql and returns a dataframe.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
:param df_type: Type of dataframe to return, either "pandas" or "polars"
:param kwargs: (optional) passed into `pandas.io.sql.read_sql` or `polars.read_database` method
"""
if df_type == "pandas":
return self._get_pandas_df(sql, parameters, **kwargs)
if df_type == "polars":
return self._get_polars_df(sql, parameters, **kwargs)
    def _get_pandas_df(
        self,
        sql,
        parameters: list | tuple | Mapping[str, Any] | None = None,
        **kwargs,
    ) -> PandasDataFrame:
        """
        Execute the sql and returns a pandas dataframe.

        :param sql: the sql statement to be executed (str) or a list of sql statements to execute
        :param parameters: The parameters to render the SQL query with.
        :param kwargs: (optional) passed into pandas.io.sql.read_sql method
        :raises AirflowOptionalProviderFeatureException: if pandas is not installed
        """
        try:
            # pandas is an optional dependency of this provider.
            from pandas.io import sql as psql
        except ImportError:
            raise AirflowOptionalProviderFeatureException(
                "pandas library not installed, run: pip install "
                "'apache-airflow-providers-common-sql[pandas]'."
            )
        with closing(self.get_conn()) as conn:
            # The connection is closed once the dataframe has been materialized.
            return psql.read_sql(sql, con=conn, params=parameters, **kwargs)
    def _get_polars_df(
        self,
        sql,
        parameters: list | tuple | Mapping[str, Any] | None = None,
        **kwargs,
    ) -> PolarsDataFrame:
        """
        Execute the sql and returns a polars dataframe.

        :param sql: the sql statement to be executed (str) or a list of sql statements to execute
        :param parameters: The parameters to render the SQL query with.
        :param kwargs: (optional) passed into polars.read_database method
        :raises AirflowOptionalProviderFeatureException: if polars is not installed
        """
        try:
            import polars as pl
        except ImportError:
            raise AirflowOptionalProviderFeatureException(
                "polars library not installed, run: pip install "
                "'apache-airflow-providers-common-sql[polars]'."
            )
        with closing(self.get_conn()) as conn:
            execute_options: dict[str, Any] | None = None
            if parameters is not None:
                if isinstance(parameters, Mapping):
                    execute_options = dict(parameters)
                else:
                    # Non-mapping parameters cannot be forwarded to polars as
                    # execute_options keywords; an empty dict is passed instead.
                    execute_options = {}
            return pl.read_database(sql, connection=conn, execute_options=execute_options, **kwargs)
@overload
def get_df_by_chunks(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
chunksize: int,
df_type: Literal["pandas"] = "pandas",
**kwargs,
) -> Generator[PandasDataFrame, None, None]: ...
@overload
def get_df_by_chunks(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
chunksize: int,
df_type: Literal["polars"],
**kwargs,
) -> Generator[PolarsDataFrame, None, None]: ...
def get_df_by_chunks(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
chunksize: int,
df_type: Literal["pandas", "polars"] = "pandas",
**kwargs,
) -> Generator[PandasDataFrame | PolarsDataFrame, None, None]:
"""
Execute the sql and return a generator.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with
:param chunksize: number of rows to include in each chunk
:param df_type: Type of dataframe to return, either "pandas" or "polars"
:param kwargs: (optional) passed into `pandas.io.sql.read_sql` or `polars.read_database` method
"""
if df_type == "pandas":
return self._get_pandas_df_by_chunks(sql, parameters, chunksize=chunksize, **kwargs)
if df_type == "polars":
return self._get_polars_df_by_chunks(sql, parameters, chunksize=chunksize, **kwargs)
    def _get_pandas_df_by_chunks(
        self,
        sql,
        parameters: list | tuple | Mapping[str, Any] | None = None,
        *,
        chunksize: int,
        **kwargs,
    ) -> Generator[PandasDataFrame, None, None]:
        """
        Execute the sql and return a generator of pandas dataframes, one per chunk.

        :param sql: the sql statement to be executed (str) or a list of sql statements to execute
        :param parameters: The parameters to render the SQL query with
        :param chunksize: number of rows to include in each chunk
        :param kwargs: (optional) passed into pandas.io.sql.read_sql method
        :raises AirflowOptionalProviderFeatureException: if pandas is not installed
        """
        try:
            from pandas.io import sql as psql
        except ImportError:
            raise AirflowOptionalProviderFeatureException(
                "pandas library not installed, run: pip install "
                "'apache-airflow-providers-common-sql[pandas]'."
            )
        with closing(self.get_conn()) as conn:
            # Passing chunksize makes read_sql return an iterator of frames;
            # the connection stays open until the generator is exhausted.
            yield from psql.read_sql(sql, con=conn, params=parameters, chunksize=chunksize, **kwargs)
def _get_polars_df_by_chunks(
self,
sql,
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
chunksize: int,
**kwargs,
) -> Generator[PolarsDataFrame, None, None]:
"""
Execute the sql and return a generator.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
:param chunksize: number of rows to include in each chunk
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
"""
try:
import polars as pl
except ImportError:
raise AirflowOptionalProviderFeatureException(
"polars library not installed, run: pip install "
"'apache-airflow-providers-common-sql[polars]'."
)
with closing(self.get_conn()) as conn:
execute_options = None
if parameters is not None:
if isinstance(parameters, Mapping):
execute_options = dict(parameters)
yield from pl.read_database(
sql, connection=conn, execute_options=execute_options, batch_size=chunksize, **kwargs
)
def get_records(
self,
sql: str | list[str],
parameters: Iterable | Mapping[str, Any] | None = None,
) -> Any:
"""
Execute the sql and return a set of records.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
return self.run(sql=sql, parameters=parameters, handler=handlers.fetch_all_handler)
def get_first(self, sql: str | list[str], parameters: Iterable | Mapping[str, Any] | None = None) -> Any:
"""
Execute the sql and return the first resulting row.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
return self.run(sql=sql, parameters=parameters, handler=handlers.fetch_one_handler)
@staticmethod
def strip_sql_string(sql: str) -> str:
return sql.strip().rstrip(";")
@staticmethod
def split_sql_string(sql: str, strip_semicolon: bool = False) -> list[str]:
"""
Split string into multiple SQL expressions.
:param sql: SQL string potentially consisting of multiple expressions
:param strip_semicolon: whether to strip semicolon from SQL string
:return: list of individual expressions
"""
splits = sqlparse.split(
sql=sqlparse.format(sql, strip_comments=True),
strip_semicolon=strip_semicolon,
)
return [s for s in splits if s]
@property
def last_description(self) -> Sequence[Sequence] | None:
if not self.descriptions:
return None
return self.descriptions[-1]
    @overload
    def run(
        self,
        sql: str | Iterable[str],
        autocommit: bool = ...,
        parameters: Iterable | Mapping[str, Any] | None = ...,
        handler: None = ...,
        split_statements: bool = ...,
        return_last: bool = ...,
    ) -> None: ...
    @overload
    def run(
        self,
        sql: str | Iterable[str],
        autocommit: bool = ...,
        parameters: Iterable | Mapping[str, Any] | None = ...,
        handler: Callable[[Any], T] = ...,
        split_statements: bool = ...,
        return_last: bool = ...,
    ) -> tuple | list | list[tuple] | list[list[tuple] | tuple] | None: ...
    def run(
        self,
        sql: str | Iterable[str],
        autocommit: bool = False,
        parameters: Iterable | Mapping[str, Any] | None = None,
        handler: Callable[[Any], T] | None = None,
        split_statements: bool = False,
        return_last: bool = True,
    ) -> tuple | list | list[tuple] | list[list[tuple] | tuple] | None:
        """
        Run a command or a list of commands.

        Pass a list of SQL statements to the sql parameter to get them to
        execute sequentially.
        The method will return either single query results (typically list of rows) or list of those results
        where each element in the list are results of one of the queries (typically list of list of rows :D)
        For compatibility reasons, the behaviour of the DBAPIHook is somewhat confusing.
        In some cases, when multiple queries are run, the return value will be an iterable (list) of results
        -- one for each query. However, in other cases, when single query is run, the return value will
        be the result of that single query without wrapping the results in a list.
        The cases when single query results are returned without wrapping them in a list are as follows:
        a) sql is string and ``return_last`` is True (regardless what ``split_statements`` value is)
        b) sql is string and ``split_statements`` is False
        In all other cases, the results are wrapped in a list, even if there is only one statement to process.
        In particular, the return value will be a list of query results in the following circumstances:
        a) when ``sql`` is an iterable of string statements (regardless what ``return_last`` value is)
        b) when ``sql`` is string, ``split_statements`` is True and ``return_last`` is False
        After ``run`` is called, you may access the following properties on the hook object:
        * ``descriptions``: an array of cursor descriptions. If ``return_last`` is True, this will be
          a one-element array containing the cursor ``description`` for the last statement.
          Otherwise, it will contain the cursor description for each statement executed.
        * ``last_description``: the description for the last statement executed
        Note that query result will ONLY be actually returned when a handler is provided; if
        ``handler`` is None, this method will return None.
        Handler is a way to process the rows from cursor (Iterator) into a value that is suitable to be
        returned to XCom and generally fit in memory.
        You can use pre-defined handles (``fetch_all_handler``, ``fetch_one_handler``) or implement your
        own handler.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :param parameters: The parameters to render the SQL query with.
        :param handler: The result handler which is called with the result of each statement.
        :param split_statements: Whether to split a single SQL string into statements and run separately
        :param return_last: Whether to return result for only last statement or for all after split
        :return: if handler provided, returns query results (may be list of results depending on params)
        """
        # Reset per call; repopulated below with one cursor description per
        # executed statement (or just the last one in single-result mode).
        self.descriptions = []
        if isinstance(sql, str):
            if split_statements:
                sql_list: Iterable[str] = self.split_sql_string(
                    sql=sql,
                    strip_semicolon=self.strip_semicolon,
                )
            else:
                sql_list = [sql] if sql.strip() else []
        else:
            sql_list = sql
        if sql_list:
            self.log.debug("Executing following statements against DB: %s", sql_list)
        else:
            raise ValueError("List of SQL statements is empty")
        # Holds the result/description of the last statement when
        # single-result mode applies (see docstring).
        _last_result = None
        with self._create_autocommit_connection(autocommit) as conn:
            with closing(conn.cursor()) as cur:
                results = []
                for sql_statement in sql_list:
                    self._run_command(cur, sql_statement, parameters)
                    if handler is not None:
                        result = self._make_common_data_structure(handler(cur))
                        if handlers.return_single_query_results(sql, return_last, split_statements):
                            _last_result = result
                            _last_description = cur.description
                        else:
                            results.append(result)
                            self.descriptions.append(cur.description)
            # If autocommit was set to False or db does not support autocommit, we do a manual commit.
            if not self.get_autocommit(conn):
                conn.commit()
            # Logs all database messages or errors sent to the client
            self.get_db_log_messages(conn)
        if handler is None:
            return None
        if handlers.return_single_query_results(sql, return_last, split_statements):
            self.descriptions = [_last_description]
            return _last_result
        return results
def _make_common_data_structure(self, result: T | Sequence[T]) -> tuple | list[tuple]:
"""
Ensure the data returned from an SQL command is a standard tuple or list[tuple].
This method is intended to be overridden by subclasses of the `DbApiHook`. Its purpose is to
transform the result of an SQL command (typically returned by cursor methods) into a common
data structure (a tuple or list[tuple]) across all DBApiHook derived Hooks, as defined in the
ADR-0002 of the sql provider.
If this method is not overridden, the result data is returned as-is. If the output of the cursor
is already a common data structure, this method should be ignored.
"""
if isinstance(result, Sequence):
return cast("list[tuple]", result)
return cast("tuple", result)
    def _run_command(self, cur, sql_statement, parameters):
        """Run a statement using an already open cursor.

        Optionally logs the statement and the number of affected rows.
        """
        if self.log_sql:
            self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
        if parameters:
            # If we're using psycopg3, we might need to handle parameters differently:
            # list parameters are converted to a tuple before execution.
            if hasattr(cur, "__module__") and "psycopg" in cur.__module__ and isinstance(parameters, list):
                parameters = tuple(parameters)
            cur.execute(sql_statement, parameters)
        else:
            cur.execute(sql_statement)
        # According to PEP 249, this is -1 when query result is not applicable.
        if cur.rowcount >= 0:
            self.log.info("Rows affected: %s", cur.rowcount)
def set_autocommit(self, conn, autocommit):
"""Set the autocommit flag on the connection."""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
self.get_conn_id(),
)
conn.autocommit = autocommit
def get_autocommit(self, conn) -> bool:
"""
Get autocommit setting for the provided connection.
:param conn: Connection to get autocommit setting from.
:return: connection autocommit setting. True if ``autocommit`` is set
to True on the connection. False if it is either not set, set to
False, or the connection does not support auto-commit.
"""
return getattr(conn, "autocommit", False) and self.supports_autocommit
def get_cursor(self) -> Any:
"""Return a cursor."""
return self.get_conn().cursor()
    def _generate_insert_sql(self, table, values, target_fields=None, replace: bool = False, **kwargs) -> str:
        """
        Generate the INSERT SQL statement.

        The REPLACE variant is specific to MySQL syntax, the UPSERT variant is specific to SAP Hana syntax

        :param table: Name of the target table
        :param values: The row to insert into the table
        :param target_fields: The names of the columns to fill in the table. If no target fields are
            specified, they will be determined dynamically from the table's metadata.
        :param replace: Whether to replace/upsert instead of insert
        :return: The generated INSERT or REPLACE/UPSERT SQL statement
        """
        if not target_fields and self._resolve_target_fields:
            # Best-effort metadata lookup; any failure falls back to no explicit
            # column list in the generated statement.
            with suppress(Exception):
                target_fields = self.dialect.get_target_fields(table)
        if replace:
            return self.dialect.generate_replace_sql(table, values, target_fields, **kwargs)
        return self.dialect.generate_insert_sql(table, values, target_fields, **kwargs)
    @contextmanager
    def _create_autocommit_connection(self, autocommit: bool = False):
        """Context manager that closes the connection after use and detects if autocommit is supported.

        The autocommit flag is only applied when the hook reports support for it.
        """
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, autocommit)
            yield conn
    def insert_rows(
        self,
        table,
        rows,
        target_fields=None,
        commit_every=1000,
        replace=False,
        *,
        executemany=False,
        fast_executemany=False,
        autocommit=False,
        **kwargs,
    ):
        """
        Insert a collection of tuples into a table.

        Rows are inserted in chunks, each chunk (of size ``commit_every``) is
        done in a new transaction.

        :param table: Name of the target table
        :param rows: The rows to insert into the table
        :param target_fields: The names of the columns to fill in the table
        :param commit_every: The maximum number of rows to insert in one
            transaction. Set to 0 to insert all rows in one transaction.
        :param replace: Whether to replace instead of insert
        :param executemany: If True, all rows are inserted at once in
            chunks defined by the commit_every parameter. This only works if all rows
            have same number of column names, but leads to better performance.
        :param fast_executemany: If True, the `fast_executemany` parameter will be set on the
            cursor used by `executemany` which leads to better performance, if supported by driver.
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        """
        nb_rows = 0
        with self._create_autocommit_connection(autocommit) as conn:
            conn.commit()
            with closing(conn.cursor()) as cur:
                if self.supports_executemany or executemany:
                    if fast_executemany:
                        with contextlib.suppress(AttributeError):
                            # Try to set the fast_executemany attribute
                            cur.fast_executemany = True
                            self.log.info(
                                "Fast_executemany is enabled for conn_id '%s'!",
                                self.get_conn_id(),
                            )
                    # NOTE(review): here commit_every doubles as the chunk size;
                    # the documented commit_every=0 ("all rows in one
                    # transaction") depends on chunked() accepting 0 -- confirm.
                    for chunked_rows in chunked(rows, commit_every):
                        values = list(
                            map(
                                lambda row: self._serialize_cells(row, conn),
                                chunked_rows,
                            )
                        )
                        sql = self._generate_insert_sql(table, values[0], target_fields, replace, **kwargs)
                        self.log.debug("Generated sql: %s", sql)
                        try:
                            cur.executemany(sql, values)
                        except Exception as e:
                            # Log statement and parameters to ease debugging before re-raising.
                            self.log.error("Generated sql: %s", sql)
                            self.log.error("Parameters: %s", values)
                            raise e
                        conn.commit()
                        nb_rows += len(chunked_rows)
                        self.log.info("Loaded %s rows into %s so far", nb_rows, table)
                else:
                    # Row-by-row fallback; a commit is issued every commit_every rows.
                    for i, row in enumerate(rows, 1):
                        values = self._serialize_cells(row, conn)
                        sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
                        self.log.debug("Generated sql: %s", sql)
                        try:
                            cur.execute(sql, values)
                        except Exception as e:
                            self.log.error("Generated sql: %s", sql)
                            self.log.error("Parameters: %s", values)
                            raise e
                        if commit_every and i % commit_every == 0:
                            conn.commit()
                            self.log.info("Loaded %s rows into %s so far", i, table)
                        nb_rows += 1
                conn.commit()
        self.log.info("Done loading. Loaded a total of %s rows into %s", nb_rows, table)
@classmethod
def _serialize_cells(cls, row, conn=None):
return tuple(cls._serialize_cell(cell, conn) for cell in row)
@staticmethod
def _serialize_cell(cell, conn=None) -> str | None:
"""
Return the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The serialized cell
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
    def bulk_dump(self, table, tmp_file):
        """
        Dump a database table into a tab-delimited file.

        Base implementation is abstract; database-specific hooks override it.

        :param table: The name of the source table
        :param tmp_file: The path of the target file
        """
        raise NotImplementedError()
    def bulk_load(self, table, tmp_file):
        """
        Load a tab-delimited file into a database table.

        Base implementation is abstract; database-specific hooks override it.

        :param table: The name of the target table
        :param tmp_file: The path of the file to load into the table
        """
        raise NotImplementedError()
def test_connection(self):
"""Tests the connection using db-specific query."""
status, message = False, ""
try:
if self.get_first(self._test_connection_sql):
status = True
message = "Connection successfully tested"
except Exception as e:
status = False
message = str(e)
return status, message
    def get_openlineage_database_info(self, connection) -> DatabaseInfo | None:
        """
        Return database specific information needed to generate and parse lineage metadata.

        This includes information helpful for constructing information schema query
        and creating correct namespace.

        Base implementation returns None; database-specific hooks override it.

        :param connection: Airflow connection to reduce calls of `get_connection` method
        """
def get_openlineage_database_dialect(self, connection) -> str:
"""
Return database dialect used for SQL parsing.
For a list of supported dialects check: https://openlineage.io/docs/development/sql#sql-dialects
"""
return "generic"
    def get_openlineage_default_schema(self) -> str | None:
        """
        Return default schema specific to database.

        Falls back to "public" when the hook has no schema configured.

        .. seealso::
            - :class:`airflow.providers.openlineage.sqlparser.SQLParser`
        """
        # Note: ``__schema`` is name-mangled, so this reads the private schema
        # attribute defined on this class.
        return self.__schema or "public"
    def get_openlineage_database_specific_lineage(self, task_instance) -> OperatorLineage | None:
        """
        Return additional database specific lineage, e.g. query execution information.

        This method is called only on completion of the task.
        Base implementation returns None; database-specific hooks override it.

        :param task_instance: this may be used to retrieve additional information
            that is collected during runtime of the task
        """
@staticmethod
def get_openlineage_authority_part(connection, default_port: int | None = None) -> str:
"""
Get authority part from Airflow Connection.
The authority represents the hostname and port of the connection
and conforms OpenLineage naming convention for a number of databases (e.g. MySQL, Postgres, Trino).
:param default_port: (optional) used if no port parsed from connection URI
"""
parsed = urlparse(connection.get_uri())
port = parsed.port or default_port
if port:
authority = f"{parsed.hostname}:{port}"
else:
authority = parsed.hostname
return authority
    def get_db_log_messages(self, conn) -> None:
        """
        Log all database messages sent to the client during the session.

        Base implementation is a no-op; database-specific hooks override it.

        :param conn: Connection object
        """
| DbApiHook |
python | skorch-dev__skorch | skorch/callbacks/lr_scheduler.py | {
"start": 1061,
"end": 8988
} | class ____(Callback):
"""Callback that sets the learning rate of each
parameter group according to some policy.
Parameters
----------
policy : str or _LRScheduler class (default='WarmRestartLR')
Learning rate policy name or scheduler to be used.
monitor : str or callable (default=None)
Value of the history to monitor or function/callable. In
the latter case, the callable receives the net instance as
argument and is expected to return the score (float) used to
determine the learning rate adjustment.
event_name: str, (default='event_lr')
Name of event to be placed in history when the scheduler takes a step.
Pass ``None`` to disable placing events in history.
**Note:** This feature works only for pytorch version >=1.4
step_every: str, (default='epoch')
Value for when to apply the learning scheduler step. Can be either 'batch'
or 'epoch'.
kwargs
Additional arguments passed to the lr scheduler.
"""
    def __init__(self,
                 policy='WarmRestartLR',
                 monitor='train_loss',
                 event_name="event_lr",
                 step_every='epoch',
                 **kwargs):
        self.policy = policy
        self.monitor = monitor
        self.event_name = event_name
        self.step_every = step_every
        # Remaining kwargs are stored as attributes; the ``kwargs`` property
        # later collects them to pass to the scheduler constructor.
        vars(self).update(kwargs)
    def simulate(self, steps, initial_lr, step_args=None):
        """
        Simulates the learning rate scheduler.

        :param steps: Number of steps to simulate
        :param initial_lr: Initial learning rate
        :param step_args: None or float or List[float] (default=None)
            Argument to the ``.step()`` function of the policy. If it is an
            indexable object the simulation will try to associate every step of
            the simulation with an entry in ``step_args``. Scalar values are
            passed at every step, unchanged. In the default setting (``None``)
            no additional arguments are passed to ``.step()``.
        :return: numpy ndarray of simulated learning rates
        """
        # A throwaway parameter/optimizer pair drives the scheduler without
        # touching the real net.
        test = torch.ones(1, requires_grad=True)
        opt = torch.optim.SGD([{'params': test, 'lr': initial_lr}])
        policy_cls = self._get_policy_cls()
        sch = policy_cls(opt, **self.kwargs)
        lrs = []
        for step_idx in range(steps):
            opt.step()  # suppress warning about .step call order
            lrs.append(opt.param_groups[0]['lr'])
            if step_args is None:
                sch.step()
            elif hasattr(step_args, '__getitem__'):
                sch.step(step_args[step_idx])
            else:
                sch.step(step_args)
        return np.array(lrs)
def initialize(self):
self.policy_ = self._get_policy_cls()
self.lr_scheduler_ = None
self.batch_idx_ = 0
return self
    def _get_policy_cls(self):
        # String policies are looked up by name in this module's namespace;
        # anything else is assumed to already be a scheduler class.
        if isinstance(self.policy, str):
            return getattr(sys.modules[__name__], self.policy)
        return self.policy
@property
def kwargs(self):
# These are the parameters that are passed to the
# scheduler. Parameters that don't belong there must be
# excluded.
excluded = ('policy', 'monitor', 'event_name', 'step_every')
kwargs = {key: val for key, val in vars(self).items()
if not (key in excluded or key.endswith('_'))}
return kwargs
    def on_train_begin(self, net, **kwargs):
        # Resume the batch counter from history when continuing training.
        if net.history:
            try:
                self.batch_idx_ = sum(net.history[:, 'train_batch_count'])
            except KeyError:
                # Older histories lack 'train_batch_count'; count batches directly.
                self.batch_idx_ = sum(len(b) for b in net.history[:, 'batches'])
        self.lr_scheduler_ = self._get_scheduler(
            net, self.policy_, **self.kwargs
        )
    def _step(self, net, lr_scheduler, score=None):
        """Helper method to step the lr scheduler.

        This takes care of two things:
        1. If the lr scheduler is ReduceLROnPlateau, we need to pass the score.
        2. If the net is uses AccelerateMixin, stepping has to be skipped in
           certain conditions.

        For more info on the latter, see:
        https://huggingface.co/docs/accelerate/quicktour#mixed-precision-training
        """
        accelerator_maybe = getattr(net, 'accelerator', None)
        accelerator_step_skipped = (
            accelerator_maybe and accelerator_maybe.optimizer_step_was_skipped
        )
        if accelerator_step_skipped:
            # Optimizer step was skipped (e.g. AMP overflow); keep the lr as is.
            return
        if score is None:
            lr_scheduler.step()
        else:
            lr_scheduler.step(score)
def _record_last_lr(self, net, kind):
# helper function to record the last learning rate if possible;
# only record the first lr returned if more than 1 param group
if kind not in ('epoch', 'batch'):
raise ValueError(f"Argument 'kind' should be 'batch' or 'epoch', get {kind}.")
if (
(self.event_name is None)
or not hasattr(self.lr_scheduler_, 'get_last_lr')
):
return
try:
last_lrs = self.lr_scheduler_.get_last_lr()
except AttributeError:
# get_last_lr fails for ReduceLROnPlateau with PyTorch <= 2.2 on 1st epoch.
# Take the initial lr instead.
last_lrs = [group['lr'] for group in net.optimizer_.param_groups]
if kind == 'epoch':
net.history.record(self.event_name, last_lrs[0])
else:
net.history.record_batch(self.event_name, last_lrs[0])
    def on_epoch_end(self, net, **kwargs):
        if self.step_every != 'epoch':
            return
        self._record_last_lr(net, kind='epoch')
        if isinstance(self.lr_scheduler_, ReduceLROnPlateau):
            # ReduceLROnPlateau needs a score; take it from a callable monitor
            # or look it up in the last history row.
            if callable(self.monitor):
                score = self.monitor(net)
            else:
                try:
                    score = net.history[-1, self.monitor]
                except KeyError as e:
                    raise ValueError(
                        f"'{self.monitor}' was not found in history. A "
                        f"Scoring callback with name='{self.monitor}' "
                        "should be placed before the LRScheduler callback"
                    ) from e
            self._step(net, self.lr_scheduler_, score=score)
        else:
            self._step(net, self.lr_scheduler_)
    def on_batch_end(self, net, training, **kwargs):
        # Only step on training batches, and only in per-batch mode.
        if not training or self.step_every != 'batch':
            return
        self._record_last_lr(net, kind='batch')
        if isinstance(self.lr_scheduler_, ReduceLROnPlateau):
            # Same monitor handling as on_epoch_end, but reading the score
            # from the last batch entry of the last epoch.
            if callable(self.monitor):
                score = self.monitor(net)
            else:
                try:
                    score = net.history[-1, 'batches', -1, self.monitor]
                except KeyError as e:
                    raise ValueError(
                        f"'{self.monitor}' was not found in history. A "
                        f"Scoring callback with name='{self.monitor}' "
                        "should be placed before the LRScheduler callback"
                    ) from e
            self._step(net, self.lr_scheduler_, score=score)
        else:
            self._step(net, self.lr_scheduler_)
        self.batch_idx_ += 1
    def _get_scheduler(self, net, policy, **scheduler_kwargs):
        """Return scheduler, based on indicated policy, with appropriate
        parameters.
        """
        # ReduceLROnPlateau does not accept last_epoch; for the others, resume
        # from the number of epochs already trained unless given explicitly.
        if (
                (policy not in [ReduceLROnPlateau])
                and ('last_epoch' not in scheduler_kwargs)
        ):
            last_epoch = len(net.history) - 1
            scheduler_kwargs['last_epoch'] = last_epoch
        return policy(net.optimizer_, **scheduler_kwargs)
| LRScheduler |
python | getsentry__sentry | tests/sentry/integrations/repository/issue_alert/test_issue_alert_notification_message_repository.py | {
"start": 509,
"end": 1741
} | class ____(TestCase):
    def setUp(self) -> None:
        """Create a rule, a fire-history row and a parent notification message
        shared by the repository tests."""
        self.action_uuid = str(uuid4())
        # A single "notify issue owners" action; its uuid ties the
        # NotificationMessage back to the rule action.
        self.notify_issue_owners_action = [
            {
                "targetType": "IssueOwners",
                "fallthroughType": "ActiveMembers",
                "id": "sentry.mail.actions.NotifyEmailAction",
                "targetIdentifier": "",
                "uuid": self.action_uuid,
            }
        ]
        self.rule = self.create_project_rule(
            project=self.project, action_data=self.notify_issue_owners_action
        )
        self.event_id = 456
        self.notification_uuid = str(uuid4())
        self.rule_fire_history = RuleFireHistory.objects.create(
            project=self.project,
            rule=self.rule,
            group=self.group,
            event_id=self.event_id,
            notification_uuid=self.notification_uuid,
        )
        self.parent_notification_message = NotificationMessage.objects.create(
            rule_fire_history=self.rule_fire_history,
            rule_action_uuid=self.action_uuid,
            message_identifier="123abc",
        )
        self.repository = IssueAlertNotificationMessageRepository.default()
| BaseIssueAlertNotificationMessageRepositoryTest |
python | scrapy__scrapy | tests/test_downloadermiddleware_httpauth.py | {
"start": 737,
"end": 1808
} | class ____:
    def setup_method(self):
        """Instantiate the middleware and simulate opening it for a DomainSpider."""
        self.mw = HttpAuthMiddleware()
        spider = DomainSpider("foo")
        self.mw.spider_opened(spider)
    def teardown_method(self):
        # Drop the middleware instance between tests.
        del self.mw
    def test_no_auth(self):
        # A URL outside the configured domain gets no Authorization header.
        req = Request("http://example-noauth.com/")
        assert self.mw.process_request(req) is None
        assert "Authorization" not in req.headers
    def test_auth_domain(self):
        # The configured domain receives basic-auth credentials.
        req = Request("http://example.com/")
        assert self.mw.process_request(req) is None
        assert req.headers["Authorization"] == basic_auth_header("foo", "bar")
    def test_auth_subdomain(self):
        # Subdomains of the configured domain also receive the credentials.
        req = Request("http://foo.example.com/")
        assert self.mw.process_request(req) is None
        assert req.headers["Authorization"] == basic_auth_header("foo", "bar")
    def test_auth_already_set(self):
        # A pre-existing Authorization header must not be overwritten.
        req = Request("http://example.com/", headers={"Authorization": "Digest 123"})
        assert self.mw.process_request(req) is None
        assert req.headers["Authorization"] == b"Digest 123"
| TestHttpAuthMiddleware |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/error.py | {
"start": 352,
"end": 673
} | class ____(Exception):
"""Base class for all errors thrown by the Dagster framework.
Users should not subclass this base class for their own exceptions.
"""
    @property
    def is_user_code_error(self):
        """Returns true if this error is attributable to user code."""
        # Framework-originated errors are never attributed to user code.
        return False
| DagsterError |
python | tensorflow__tensorflow | tensorflow/python/ops/script_ops.py | {
"start": 6706,
"end": 39207
} | class ____:
"""A helper class to keep track of registered py functions.
FuncRegistry keeps a map from unique tokens (string) to python
functions, which takes numpy arrays and outputs numpy arrays.
"""
  def __init__(self):
    """Initializes an empty registry with its own lock and id counter."""
    self._lock = threading.Lock()
    self._unique_id = 0  # GUARDED_BY(self._lock)
    # Only store weakrefs to the functions. The strong reference is stored in
    # the graph.
    self._funcs = weakref.WeakValueDictionary()
  @property
  def _ctx(self):
    """Handle of the (lazily initialized) eager context."""
    # N.B. This is needed to support calling py_func with GPU tensors,
    # which must be transferred to CPU if used in any of the NumPy APIs.
    context.ensure_initialized()
    return context.context()._handle  # pylint: disable=protected-access
  def insert(self, func):
    """Registers `func` and returns a unique token for this entry."""
    token = self._next_unique_token()
    # Store a weakref to the function; the caller keeps the strong reference.
    self._funcs[token] = func
    return token
  def remove(self, token):
    """Removes the registered function corresponding to `token`."""
    # pop with a default so unknown (or already collected) tokens are ignored.
    self._funcs.pop(token, None)
  def get(self, token, default=None):
    """Gets the registered function corresponding to `token`."""
    return self._funcs.get(token, default)
  @staticmethod
  def _convert(value, dtype=None):
    """Converts an arg to numpy, avoiding dangerous string and unicode dtypes.

    Numpy pads with zeros when using string and unicode dtypes if different
    components of a tensor have different lengths. This is bad: ignoring the
    padding is wrong for text data, and removing the padding is wrong for binary
    data. To avoid this bug, we redo the conversion using an object dtype.
    Additionally, we convert unicode strings to (byte-)strings for
    compatibility.

    Args:
      value: Value to convert to a numpy array.
      dtype: (Optional.) Desired NumPy type for the returned value.

    Returns:
      A numpy array.
    """
    result = numpy_compat.np_asarray(value, dtype=dtype, order="C")
    if result.dtype.char == "S" and result is not value:
      # Bytes dtype from a fresh conversion: redo with object dtype to avoid
      # zero-padding of unequal-length elements.
      return numpy_compat.np_asarray(value, order="C", dtype=object)
    elif result.dtype.char == "U" and result is not value:
      # Unicode from a fresh conversion: encode elements to bytes first.
      value = np.vectorize(lambda x: x.encode("utf8"))(value)
      return numpy_compat.np_asarray(value, order="C", dtype=object)
    elif result.dtype.char == "U":
      # value was already a unicode ndarray; cast to bytes dtype.
      return result.astype(np.bytes_)
    else:
      return result
def __call__(self, token, device, args):
  """Calls the registered function for `token` with args.

  Args:
    token: A key into this `FuncRegistry` identifying which function to call.
    device: Name of the device on which outputs of `token`'s corresponding
      operation should be placed. Used iff the function registered for `token`
      is an EagerPyFunc.
    args: The arguments to pass to the function registered for `token`.

  Returns:
    The output of the function registered for `token`.

  Raises:
    ValueError: if no function is registered for `token`.
  """
  func = self.get(token, None)
  if func is None:
    raise ValueError(f"Could not find callback with key={token} in the "
                     "registry.")
  if isinstance(func, EagerFunc):
    # EagerFunc instances handle device placement and gradient-tape bookkeeping
    # themselves, so they are invoked with the extra `device`/`token` context.
    #
    # NB: Different invocations of the same py_func will share the same
    # token, and the entries they stash in the tape_cache will collide.
    # In practice, when executing a graph, this should only happen if
    # the py_func is in a while_loop whose iterations are run in parallel
    # or if the graph is being driven by concurrent session.run() calls.
    #
    # TODO(akshayka): Key the tape cache in a thread-safe way.
    return func(device, token, args)
  else:
    ret = func(*args)
    # Strings seem to lead to a memory leak here if they're not wrapped in a
    # list.
    if isinstance(ret, bytes):
      ret = [ret]
    # Ensures that we return either a single numpy array or a list of numpy
    # arrays.
    if isinstance(ret, (tuple, list)):
      return [self._convert(x) for x in ret]
    else:
      return self._convert(ret)
def size(self):
  """Returns how many functions are currently registered."""
  # `_funcs` is a WeakValueDictionary, so entries whose functions have been
  # garbage collected drop out automatically.
  return len(self._funcs)
def _next_unique_token(self):
  """Returns a registry-unique token of the form ``pyfunc_<n>``."""
  # Take the counter under the lock, but format the token outside it to keep
  # the critical section minimal.
  with self._lock:
    uid = self._unique_id
    self._unique_id += 1
  return f"pyfunc_{uid}"
# Global registry for py functions.
_py_funcs = FuncRegistry()

# Hand the registry to the native trampoline so that PyFunc/EagerPyFunc
# kernels can look up and invoke registered Python callables by token.
_pywrap_py_func.initialize_py_trampoline(_py_funcs)
def _internal_py_func(func,
                      inp,
                      Tout,
                      stateful=None,
                      use_eager_py_func=False,
                      is_grad_func=False,
                      name=None):
  """See documentation for py_func and eager_py_func.

  Args:
    func: The Python callable to wrap.
    inp: Input tensors; variables are converted to tensors first.
    Tout: Output dtype(s) or TypeSpec(s); a single value or a list/tuple.
    stateful: Whether the graph-mode op is stateful. Only consulted when
      `use_eager_py_func` is False.
    use_eager_py_func: If True, emit an EagerPyFunc op (tensor-in/tensor-out);
      otherwise emit PyFunc/PyFuncStateless (numpy-in/numpy-out).
    is_grad_func: Whether `func` computes the gradient of an eager py_func.
    name: Optional name for the created op.

  Returns:
    The op output(s); a single tensor if `Tout` was not a list or tuple.
  """
  if not callable(func):
    raise ValueError(
        f"Expected func to be callable. Received func={func} of type "
        f"{type(func)}.")

  original_func = func
  func = autograph.do_not_convert(func)
  inp = variable_utils.convert_variables_to_tensors(list(inp))

  # Normalize Tout.
  is_list_or_tuple = isinstance(Tout, (list, tuple))
  Tout = Tout if is_list_or_tuple else [Tout]
  Tout = [_as_dtype_or_type_spec(t) for t in Tout]

  # Check if we need to handle CompositeTensor inputs or outputs.
  handle_composite_tensors = (
      use_eager_py_func and
      (any(isinstance(v, composite_tensor.CompositeTensor) for v in inp) or
       any(isinstance(t, type_spec.TypeSpec) for t in Tout)))
  if handle_composite_tensors:
    func, inp, Tout, out_structure = _wrap_for_composites(func, inp, Tout)

  if use_eager_py_func:
    func = EagerFunc(func, Tout, is_grad_func)

  # Tying the registered function's lifetime with the current default graph is
  # not reliable. For example, a binary may switch graphs in between model
  # training end evaluation, via saved_model. Those binaries work because the
  # original function is global, and break once the registered
  # function is an anonymous lambda, like the one produced by do_not_convert.
  # To avoid breaking those cases, we attach the wrapper to the original
  # function so that their lifetime is connected.
  # TODO(b/144286616): Remove this.
  if tf_inspect.isfunction(original_func):
    # Note: this check is needed because original_func may be a descriptor
    # (https://docs.python.org/3/howto/descriptor.html)
    # and we can't attach attributes to those.
    original_func.ag_dnc_wrapper__ = func

  token = _py_funcs.insert(func)
  # We tie the registered function's lifetime with the current default graph,
  # i.e., when the current graph is destroyed, we remove its py funcs.
  graph = ops.get_default_graph()

  # Walk out of any nested function graphs to the outermost graph, which owns
  # the lifetime of the registered function.
  while True:
    current_graph = graph
    if isinstance(graph, function._FuncGraph):  # pylint: disable=protected-access
      graph = graph._outer_graph  # pylint: disable=protected-access
    elif isinstance(graph, func_graph.FuncGraph):
      graph = graph.outer_graph
    if graph is current_graph:
      break

  # TODO(zhifengc): Consider adding a Graph method to collect
  # `cleanup` objects in one of its member.
  if not hasattr(graph, "_py_funcs_used_in_graph"):
    graph._py_funcs_used_in_graph = []  # pylint: disable=protected-access

  # Store a reference to the function in the graph to ensure it stays alive
  # as long as the graph lives. When the graph is destroyed, the function
  # is left to the garbage collector for destruction as well.
  graph._py_funcs_used_in_graph.append(func)  # pylint: disable=protected-access

  if use_eager_py_func:
    result = gen_script_ops.eager_py_func(
        input=inp,
        token=token,
        is_async=context.is_async(),
        Tout=Tout,
        name=name)
  else:
    if stateful:
      result = gen_script_ops.py_func(
          input=inp, token=token, Tout=Tout, name=name)
    else:
      result = gen_script_ops.py_func_stateless(
          input=inp, token=token, Tout=Tout, name=name)

  if handle_composite_tensors and Tout:
    # Repack flat op outputs into the composite structure declared by Tout.
    result = nest.pack_sequence_as(
        out_structure, result, expand_composites=True)

  return result if is_list_or_tuple else result[0]
# TODO(akshayka): Implement higher-order derivatives.
@ops.RegisterGradient("EagerPyFunc")
def _EagerPyFuncGrad(op, *dy):
  """Computes the gradient of an EagerPyFunc."""
  token = op.get_attr("token")

  def eagerly_executed_grad(*dy):
    # Replay the tape that the forward pass stashed in `tape_cache` under this
    # op's token; `pop` consumes the entry.
    tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
    return tape.gradient(eager_outputs, eager_inputs, output_gradients=dy)

  with ops.control_dependencies(op.outputs):
    # The gradient itself is an eager py_func so it can differentiate through
    # arbitrary eager TensorFlow code in the forward function.
    gradient_op = _internal_py_func(
        func=eagerly_executed_grad,
        inp=dy,
        Tout=[tensor.dtype for tensor in op.inputs],
        use_eager_py_func=True,
        is_grad_func=True)

  if not context.executing_eagerly():
    # In graph mode, we find the func object from its token and
    # notify the eager func object it needs to support the gradients.
    func = _py_funcs.get(token.decode())
    assert isinstance(func, EagerFunc), (
        f"EagerPyFuncGrad called on a non-EagerFunc object: {func}.")
    func.set_support_graph_mode_gradient()
  return gradient_op
def _check_args_and_maybe_make_decorator(
    script_op, script_op_name, func=None, inp=None, Tout=None, **kwargs
):
  """Checks the arguments and returns a decorator if func is None.

  Args:
    script_op: The op-building function (e.g. `eager_py_func`) that a returned
      decorator forwards to.
    script_op_name: User-facing name (e.g. "tf.py_function") used in error
      messages.
    func: The user function, or None when the op is being used as a decorator.
    inp: The input arguments; must be None in decorator mode.
    Tout: The declared output type(s); always required.
    **kwargs: Extra keyword arguments forwarded to `script_op`.

  Returns:
    A decorator when `func` is None, otherwise None (the caller then builds
    the op directly).

  Raises:
    TypeError: If `Tout` is missing, if `inp` is set in decorator mode, or if
      `inp` is missing in direct-call mode.
  """
  if Tout is None:
    raise TypeError(
        "Missing required argument: 'Tout'\n"
        f" If using {script_op_name} as a decorator, set `Tout`\n"
        " **by name** above the function:\n"
        f" `@{script_op_name}(Tout=tout)`"
    )

  if func is None:
    if inp is not None:
      raise TypeError(
          f"Don't set the `inp` argument when using {script_op_name} as a "
          "decorator (`func=None`)."
      )

    def py_function_decorator(fun):

      @functools.wraps(fun)
      def py_function_wrapper(*args):
        return script_op(fun, inp=args, Tout=Tout, **kwargs)

      return py_function_wrapper

    return py_function_decorator

  if inp is None:
    # Bug fix: the original concatenated "decorator" and "(`func=None`)."
    # without a space, rendering "as a decorator(`func=None`)." — inconsistent
    # with the identical phrase in the decorator-mode error above.
    raise TypeError(
        "Missing argument `inp`:\n"
        " You must set the `inp` argument (the list of arguments to the\n"
        f" function), unless you use `{script_op_name}` as a decorator "
        "(`func=None`)."
    )
  return None
@tf_export("py_function")
@dispatch.add_dispatch_support
def eager_py_func(func=None, inp=None, Tout=None, name=None):
  # TODO(b/338268835): Remove "pyformat: disable" and the "pyformat: enable"
  # line below if this feature request is implemented.
  # pyformat: disable
  """Wraps a python function into a TensorFlow op that executes it eagerly.

  Using `tf.py_function` inside a `tf.function` allows you to run a python
  function using eager execution, inside the `tf.function`'s graph.
  This has two main effects:

  1. This allows you to use non-TensorFlow code
  inside your `tf.function`.
  2. It allows you to run python control logic in a `tf.function` without
  relying on `tf.autograph` to convert the code to use tensorflow control logic
  (tf.cond, tf.while_loop).

  Both of these features can be useful for debugging.

  Since `tf.py_function` operates on `Tensor`s it is still
  differentiable (once).

  There are two ways to use this function:

  ### As a decorator

  Use `tf.py_function` as a decorator to ensure the function always runs
  eagerly.

  When using `tf.py_function` as a decorator:

  * you must set `Tout`
  * you may set `name`
  * you must not set `func` or `inp`

  For example, you might use `tf.py_function` to
  implement the log huber function.

  >>> @tf.py_function(Tout=tf.float32)
  ... def py_log_huber(x, m):
  ...   print('Running with eager execution.')
  ...   if tf.abs(x) <= m:
  ...     return x**2
  ...   else:
  ...     return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))

  Under eager execution the function operates normally:

  >>> x = tf.constant(1.0)
  >>> m = tf.constant(2.0)
  >>>
  >>> print(py_log_huber(x,m).numpy())
  Running with eager execution.
  1.0

  Inside a `tf.function` the `tf.py_function` is not converted to a `tf.Graph`.:

  >>> @tf.function
  ... def tf_wrapper(x):
  ...   print('Tracing.')
  ...   m = tf.constant(2.0)
  ...   return py_log_huber(x,m)

  The `tf.py_function` only executes eagerly, and only when the `tf.function`
  is called:

  >>> print(tf_wrapper(x).numpy())
  Tracing.
  Running with eager execution.
  1.0

  >>> print(tf_wrapper(x).numpy())
  Running with eager execution.
  1.0

  Gradients work as expected:

  >>> with tf.GradientTape() as t:
  ...   t.watch(x)
  ...   y = tf_wrapper(x)
  Running with eager execution.
  >>>
  >>> t.gradient(y, x).numpy()
  2.0

  ### Inplace

  You can also skip the decorator and use `tf.py_function` in-place.

  This form is a useful shortcut if you don't control the function's source,
  but it is harder to read.

  >>> # No decorator
  >>> def log_huber(x, m):
  ...   if tf.abs(x) <= m:
  ...     return x**2
  ...   else:
  ...     return m**2 * (1 - 2 * tf.math.log(m) + tf.math.log(x**2))
  >>>
  >>> x = tf.constant(1.0)
  >>> m = tf.constant(2.0)
  >>>
  >>> tf.py_function(func=log_huber, inp=[x, m], Tout=tf.float32).numpy()
  1.0

  ### More info

  You can also use `tf.py_function` to debug your models at runtime
  using Python tools, i.e., you can isolate portions of your code that
  you want to debug, wrap them in Python functions and insert `pdb` tracepoints
  or print statements as desired, and wrap those functions in
  `tf.py_function`.

  For more information on eager execution, see the
  [Eager guide](https://tensorflow.org/guide/eager).

  `tf.py_function` is similar in spirit to `tf.numpy_function`, but unlike
  the latter, the former lets you use TensorFlow operations in the wrapped
  Python function. In particular, while `tf.compat.v1.py_func` only runs on CPUs
  and wraps functions that take NumPy arrays as inputs and return NumPy arrays
  as outputs, `tf.py_function` can be placed on GPUs and wraps functions
  that take Tensors as inputs, execute TensorFlow operations in their bodies,
  and return Tensors as outputs.

  Note: We recommend to avoid using `tf.py_function` outside of prototyping
  and experimentation due to the following known limitations:

  * Calling `tf.py_function` will acquire the Python Global Interpreter Lock
    (GIL) that allows only one thread to run at any point in time. This will
    preclude efficient parallelization and distribution of the execution of the
    program.

  * The body of the function (i.e. `func`) will not be serialized in a
    `GraphDef`. Therefore, you should not use this function if you need to
    serialize your model and restore it in a different environment.

  * The operation must run in the same address space as the Python program
    that calls `tf.py_function()`. If you are using distributed
    TensorFlow, you must run a `tf.distribute.Server` in the same process as the
    program that calls `tf.py_function()` and you must pin the created
    operation to a device in that server (e.g. using `with tf.device():`).

  * Currently `tf.py_function` is not compatible with XLA. Calling
    `tf.py_function` inside `tf.function(jit_compile=True)` will raise an
    error.

  Args:
    func: A Python function that accepts `inp` as arguments, and returns a value
      (or list of values) whose type is described by `Tout`. Do not set `func`
      when using `tf.py_function` as a decorator.
    inp: Input arguments for `func`. A list whose elements are `Tensor`s or
      `CompositeTensors` (such as `tf.RaggedTensor`); or a single `Tensor` or
      `CompositeTensor`. Do not set `inp` when using `tf.py_function` as a
      decorator.
    Tout: The type(s) of the value(s) returned by `func`. One of the following:
      * If `func` returns a `Tensor` (or a value that can be converted to a
        Tensor): the `tf.DType` for that value.
      * If `func` returns a `CompositeTensor`: The `tf.TypeSpec` for that value.
      * If `func` returns `None`: the empty list (`[]`).
      * If `func` returns a list of `Tensor` and `CompositeTensor` values: a
        corresponding list of `tf.DType`s and `tf.TypeSpec`s for each value.
    name: A name for the operation (optional).

  Returns:
    * If `func` is `None` this returns a decorator that will ensure the
      decorated function will always run with eager execution even if called
      from a `tf.function`/`tf.Graph`.
    * If used `func` is not `None` this executes `func` with eager execution
      and returns the result: a `Tensor`, `CompositeTensor`, or list of
      `Tensor` and `CompositeTensor`; or an empty list if `func` returns `None`.
  """
  # pyformat: enable
  decorator = _check_args_and_maybe_make_decorator(
      eager_py_func, "tf.py_function", func=func, inp=inp, Tout=Tout, name=name
  )
  if decorator is not None:
    # Decorator form: `Tout` was given but `func` was not.
    return decorator

  if ops.executing_eagerly_outside_functions():
    # Place the op in the host address space of the current eager context.
    with ops.device(context.context().host_address_space()):
      return _internal_py_func(
          func=func, inp=inp, Tout=Tout, use_eager_py_func=True, name=name)

  return _internal_py_func(
      func=func, inp=inp, Tout=Tout, use_eager_py_func=True, name=name)
def py_func_common(func, inp, Tout, stateful=True, name=None):
  """Wraps a python function and uses it as a TensorFlow op.

  Given a python function `func`, which takes numpy arrays as its
  arguments and returns numpy arrays as its outputs, wrap this function as an
  operation in a TensorFlow graph. The following snippet constructs a simple
  TensorFlow graph that invokes the `np.sinh()` NumPy function as a operation
  in the graph:

  ```python
  def my_func(x):
    # x will be a numpy array with the contents of the placeholder below
    return np.sinh(x)
  input = tf.compat.v1.placeholder(tf.float32)
  y = tf.compat.v1.py_func(my_func, [input], tf.float32)
  ```

  **N.B.** The `tf.compat.v1.py_func()` operation has the following known
  limitations:

  * The body of the function (i.e. `func`) will not be serialized in a
    `GraphDef`. Therefore, you should not use this function if you need to
    serialize your model and restore it in a different environment.

  * The operation must run in the same address space as the Python program
    that calls `tf.compat.v1.py_func()`. If you are using distributed
    TensorFlow, you
    must run a `tf.distribute.Server` in the same process as the program that
    calls
    `tf.compat.v1.py_func()` and you must pin the created operation to a device
    in that
    server (e.g. using `with tf.device():`).

  Note: It produces tensors of unknown shape and rank as shape inference
    does not work on arbitrary Python code.
  If you need the shape, you need to set it based on statically
  available information.

  E.g.
  ```python
  import tensorflow as tf
  import numpy as np

  def make_synthetic_data(i):
      return np.cast[np.uint8](i) * np.ones([20,256,256,3],
              dtype=np.float32) / 10.

  def preprocess_fn(i):
      ones = tf.py_function(make_synthetic_data,[i],tf.float32)
      ones.set_shape(tf.TensorShape([None, None, None, None]))
      ones = tf.image.resize(ones, [224,224])
      return ones

  ds = tf.data.Dataset.range(10)
  ds = ds.map(preprocess_fn)
  ```

  Args:
    func: A Python function, which accepts `ndarray` objects as arguments and
      returns a list of `ndarray` objects (or a single `ndarray`). This function
      must accept as many arguments as there are tensors in `inp`, and these
      argument types will match the corresponding `tf.Tensor` objects in `inp`.
      The returns `ndarray`s must match the number and types defined `Tout`.
      Important Note: Input and output numpy `ndarray`s of `func` are not
      guaranteed to be copies. In some cases their underlying memory will be
      shared with the corresponding TensorFlow tensors. In-place modification or
      storing `func` input or return values in python datastructures without
      explicit (np.)copy can have non-deterministic consequences.
    inp: A list of `Tensor` objects.
    Tout: A list or tuple of tensorflow data types or a single tensorflow data
      type if there is only one, indicating what `func` returns.
    stateful: (Boolean.) If True, the function should be considered stateful. If
      a function is stateless, when given the same input it will return the same
      output and have no observable side effects. Optimizations such as common
      sub-expression elimination are only performed on stateless operations.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` or a single `Tensor` which `func` computes.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but `tf.numpy_function` is a
  near-exact replacement, just drop the `stateful` argument (all
  `tf.numpy_function` calls are considered stateful). It is compatible with
  eager execution and `tf.function`.

  `tf.py_function` is a close but not an exact replacement, passing TensorFlow
  tensors to the wrapped function instead of NumPy arrays, which provides
  gradients and can take advantage of accelerators.

  Before:

  >>> def fn_using_numpy(x):
  ...   x[0] = 0.
  ...   return x
  >>> tf.compat.v1.py_func(fn_using_numpy, inp=[tf.constant([1., 2.])],
  ...     Tout=tf.float32, stateful=False)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 2.], dtype=float32)>

  After:

  >>> tf.numpy_function(fn_using_numpy, inp=[tf.constant([1., 2.])],
  ...     Tout=tf.float32)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 2.], dtype=float32)>

  @end_compatibility
  """
  if context.executing_eagerly():
    # In eager mode there is no graph to insert an op into: call `func`
    # immediately on materialized numpy arrays and convert the results back.
    result = func(*[np.array(x) for x in inp])
    result = nest.flatten(result)
    result = [x if x is None else ops.convert_to_tensor(x) for x in result]
    if len(result) == 1:
      # Mimic the automatic unwrapping in graph-mode py_func
      result, = result
    return result

  if ops.executing_eagerly_outside_functions():
    with ops.device(context.context().host_address_space()):
      return _internal_py_func(
          func=func,
          inp=inp,
          Tout=Tout,
          stateful=stateful,
          use_eager_py_func=False,
          name=name)

  return _internal_py_func(
      func=func,
      inp=inp,
      Tout=Tout,
      stateful=stateful,
      use_eager_py_func=False,
      name=name)
@deprecation.deprecated(
    date=None,
    instructions="""tf.py_func is deprecated in TF V2. Instead, there are two
    options available in V2.
    - tf.py_function takes a python function which manipulates tf eager
    tensors instead of numpy arrays. It's easy to convert a tf eager tensor to
    an ndarray (just call tensor.numpy()) but having access to eager tensors
    means `tf.py_function`s can use accelerators such as GPUs as well as
    being differentiable using a gradient tape.
    - tf.numpy_function maintains the semantics of the deprecated tf.py_func
    (it is not differentiable, and manipulates numpy arrays). It drops the
    stateful argument making all functions stateful.
    """)
@tf_export(v1=["py_func"])
@dispatch.add_dispatch_support
def py_func(func, inp, Tout, stateful=True, name=None):
  # Thin deprecated v1 wrapper; see `py_func_common` for the full contract.
  return py_func_common(func, inp, Tout, stateful, name=name)
py_func.__doc__ = "%s" % py_func_common.__doc__
@tf_export("numpy_function")
@dispatch.add_dispatch_support
def numpy_function(func=None, inp=None, Tout=None, stateful=True, name=None):
  """Wraps a python function and uses it as a TensorFlow op.

  Given a python function `func` wrap this function as an operation in a
  `tf.function`. `func` must take numpy arrays as its arguments and
  return numpy arrays as its outputs.

  There are two ways to use `tf.numpy_function`.

  ### As a decorator

  When using `tf.numpy_function` as a decorator:

  * you must set `Tout`
  * you may set `name`
  * you must not set `func` or `inp`

  >>> @tf.numpy_function(Tout=tf.float32)
  ... def my_numpy_func(x):
  ...   # x will be a numpy array with the contents of the input to the
  ...   # tf.function
  ...   print(f'executing eagerly, {x=}')
  ...   return np.sinh(x)

  The function runs eagerly:

  >>> my_numpy_func(1.0).numpy()
  executing eagerly, x=1.0
  1.17520

  The behavior doesn't change inside a `tf.function`:

  >>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
  ... def tf_function(input):
  ...   y = tf.numpy_function(my_numpy_func, [input], tf.float32)
  ...   return y
  >>> tf_function(tf.constant(1.)).numpy()
  executing eagerly, x=array(1.)
  1.17520

  ### Inplace

  This form can be useful if you don't control the function's source,
  but it is harder to read.

  Here is the same function with no decorator:

  >>> def my_func(x):
  ...   # x will be a numpy array with the contents of the input to the
  ...   # tf.function
  ...   print(f'executing eagerly, {x=}')
  ...   return np.sinh(x)

  To run `tf.numpy_function` in-place, pass the function, its inputs, and the
  output type in a single call to `tf.numpy_function`:

  >>> tf.numpy_function(my_func, [tf.constant(1.0)], tf.float32)
  executing eagerly, x=array(1.)
  1.17520

  ### More info

  Comparison to `tf.py_function`:
  `tf.py_function` and `tf.numpy_function` are very similar, except that
  `tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. If you want the
  function to contain `tf.Tensors`, and have any TensorFlow operations executed
  in the function be differentiable, please use `tf.py_function`.

  Note: We recommend to avoid using `tf.numpy_function` outside of
  prototyping and experimentation due to the following known limitations:

  * Calling `tf.numpy_function` will acquire the Python Global Interpreter Lock
    (GIL) that allows only one thread to run at any point in time. This will
    preclude efficient parallelization and distribution of the execution of the
    program. Therefore, you are discouraged to use `tf.numpy_function` outside
    of prototyping and experimentation.

  * The body of the function (i.e. `func`) will not be serialized in a
    `tf.SavedModel`. Therefore, you should not use this function if you need to
    serialize your model and restore it in a different environment.

  * The operation must run in the same address space as the Python program
    that calls `tf.numpy_function()`. If you are using distributed
    TensorFlow, you must run a `tf.distribute.Server` in the same process as the
    program that calls `tf.numpy_function` you must pin the created
    operation to a device in that server (e.g. using `with tf.device():`).

  * Currently `tf.numpy_function` is not compatible with XLA. Calling
    `tf.numpy_function` inside `tf.function(jit_compile=True)` will raise an
    error.

  * Since the function takes numpy arrays, you cannot take gradients
    through a numpy_function. If you require something that is differentiable,
    please consider using tf.py_function.

  Args:
    func: A Python function, which accepts `numpy.ndarray` objects as arguments
      and returns a list of `numpy.ndarray` objects (or a single
      `numpy.ndarray`). This function must accept as many arguments as there are
      tensors in `inp`, and these argument types will match the corresponding
      `tf.Tensor` objects in `inp`. The returns `numpy.ndarray`s must match the
      number and types defined `Tout`. Important Note: Input and output
      `numpy.ndarray`s of `func` are not guaranteed to be copies. In some cases
      their underlying memory will be shared with the corresponding TensorFlow
      tensors. In-place modification or storing `func` input or return values in
      python datastructures without explicit (np.)copy can have
      non-deterministic consequences.
    inp: A list of `tf.Tensor` objects.
    Tout: A list or tuple of tensorflow data types or a single tensorflow data
      type if there is only one, indicating what `func` returns.
    stateful: (Boolean.) Setting this argument to False tells the runtime to
      treat the function as stateless, which enables certain optimizations. A
      function is stateless when given the same input it will return the same
      output and have no side effects; its only purpose is to have a return
      value. The behavior for a stateful function with the `stateful` argument
      False is undefined. In particular, caution should be taken when mutating
      the input arguments as this is a stateful operation.
    name: (Optional) A name for the operation.

  Returns:
    * If `func` is `None` this returns a decorator that will ensure the
      decorated function will always run with eager execution even if called
      from a `tf.function`/`tf.Graph`.
    * If used `func` is not `None` this executes `func` with eager execution
      and returns the result: A single or list of `tf.Tensor` which `func`
      computes.
  """
  decorator = _check_args_and_maybe_make_decorator(
      numpy_function,
      "tf.numpy_function",
      func=func,
      inp=inp,
      Tout=Tout,
      stateful=stateful,
      name=name,
  )
  if decorator is not None:
    # Decorator form: `Tout` was given but `func` was not.
    return decorator
  return py_func_common(func, inp, Tout, stateful=stateful, name=name)
def _as_dtype_or_type_spec(t):
  """Passes TypeSpecs through unchanged; converts anything else to a DType."""
  if isinstance(t, type_spec.TypeSpec):
    return t
  return dtypes.as_dtype(t)
def _wrap_for_composites(func, inp, Tout):
  """Wraps user inputs to support composite tensors for `py_function`.

  1. Flattens `inp` to a list of Tensors (by flattening any composite tensors).
  2. Creates a wrapper function for `func` that expects flat inputs and:
     - Packs the inputs into the input structure expected by `func`.
     - Calls `func` with the packed inputs.
     - Checks that `func`'s output matches `Tout`.
     - Flattens func`'s output to a list of Tensors (flattening any composite
       tensors).

  Args:
    func: The function to wrap (`func` argument to `py_function`).
    inp: The input arguments for func (`inp` argument to `py_function`).
    Tout: The expected output types for func (`Tout` argument to `py_function).

  Returns:
    A tuple `(func, inp, Tout, out_structure)`, where `func` is the wrapped
    function, `inp` is the flattened inputs, `Tout` is the list of expected
    dtypes for the flattened outputs, and `out_structure` is the expected
    output structure (which can be used to pack the output tensors).
  """
  # Non-composite inputs become the placeholder leaf `1`, so that `nest` only
  # expands the composite entries when flattening/packing.
  in_structure = [
      v if isinstance(v, composite_tensor.CompositeTensor) else 1 for v in inp
  ]
  inp = nest.flatten_up_to(in_structure, inp, expand_composites=True)
  out_structure = Tout
  Tout = [
      v.dtype if isinstance(v, tensor_spec.TensorSpec) else v
      for v in nest.flatten(Tout, expand_composites=True)
  ]

  def wrapped_func(*flat_inp):
    structured_inp = nest.pack_sequence_as(
        in_structure, flat_inp, expand_composites=True)
    out = func(*structured_inp)
    if not out_structure:
      return []  # Ignore return value if none is requested/expected.
    if not isinstance(out, (list, tuple)):
      out = [out]  # func may return a single value instead of a list.
    flat_out = []
    for elt, expected_type in zip(out, out_structure):
      if (isinstance(expected_type, type_spec.TypeSpec) and
          not isinstance(expected_type, tensor_spec.TensorSpec)):
        if not expected_type.is_compatible_with(elt):
          # pylint: disable=protected-access
          raise ValueError(
              f"py_function: func={func} returned {out!r}, "
              f"which did not match Tout={out_structure!r}.\nIn particular, "
              f"{elt!r} is not compatible with {expected_type!r}.")
        flat_out.extend(nest.flatten(elt, expand_composites=True))
      else:
        # Pro-actively check if the return value is a composite tensor when
        # we expect a Tensor.  We would catch this later (when we call
        # convert_to_tensor), but checking it here lets us give a better
        # error message.
        if isinstance(elt, composite_tensor.CompositeTensor):
          raise ValueError(
              f"py_function: func={func} returned {out!r}, "
              f"which did not match Tout={out_structure!r}.\nIn particular, "
              f"{elt!r} is not a Tensor.")
        flat_out.append(elt)
    return flat_out

  return wrapped_func, inp, Tout, out_structure
# Graph-mode py_func kernels operate on numpy arrays (see `py_func_common`
# docs), so TensorFlow cannot differentiate through them.
ops.NotDifferentiable("PyFunc")
ops.NotDifferentiable("PyFuncStateless")
| FuncRegistry |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 44743,
"end": 46085
} | class ____(Operator):
""" Increment a lower a index. """
# XXX This is essentially the same as MeijerUnShiftA.
# See comment at MeijerUnShiftC.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
ai = ap.pop(i) + 1
m = Poly(z, _x)
for a in an:
m *= Poly(1 - a + _x, _x)
for a in ap:
m *= Poly(a - 1 - _x, _x)
B = Dummy('B') # - this is the shift operator `D_I`
D = Poly(ai - 1 - B, B)
n = Poly(1, B)
for b in bm:
n *= (-D + b)
for b in bq:
n *= (D - b)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot increment lower a index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], B).as_expr().subs(
B, ai - 1 - _x), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
| MeijerUnShiftD |
python | OmkarPathak__pygorithm | tests/test_binary.py | {
"start": 97,
"end": 1548
} | class ____(unittest.TestCase):
def test_base2_to_ascii(self):
array = ['01010100', '01101000', '01100101', '00100000', '01010001', '01110101', '01101001',
'01100011',
'01101011', '00100000', '01000010', '01110010', '01101111', '01110111', '01101110',
'00100000',
'01000110', '01101111', '01111000', '00100000', '01001010', '01110101', '01101101',
'01110000',
'01110011', '00100000', '01001111', '01110110', '01100101', '01110010', '00100000',
'01110100',
'01101000', '01100101', '00100000', '01001100', '01100001', '01111010', '01111001',
'00100000',
'01000100', '01101111', '01100111']
self.assertEqual(base2.to_ascii(array), "The Quick Brown Fox Jumps Over the Lazy Dog")
def test_base2_to_base10(self):
self.assertEqual(base2.to_base10(1101001000101001), 53801)
self.assertEqual(base2.to_base10(101111101011110000011111111), 99999999)
self.assertEqual(base2.to_base10(10011110110001100001010001100101000000110001), 10910848929841)
def test_base2_to_base16(self):
self.assertEqual(base2.to_base16(1101001000101001), 'D229')
self.assertEqual(base2.to_base16(101111101011110000011111111), '5F5E0FF')
self.assertEqual(base2.to_base16(10011110110001100001010001100101000000110001), '9EC61465031')
| TestBase2 |
python | huggingface__transformers | tests/models/deit/test_modeling_deit.py | {
"start": 7008,
"end": 15344
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = DeiTModelTester(self)
self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
@unittest.skip(
"Since `torch==2.3+cu121`, although this test passes, many subsequent tests have `CUDA error: misaligned address`."
"If `nvidia-xxx-cu118` are also installed, no failure (even with `torch==2.3+cu121`)."
)
def test_multi_gpu_data_parallel_forward(self):
super().test_multi_gpu_data_parallel_forward()
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_image_modeling(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
# special case for DeiTForImageClassificationWithTeacher model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class.__name__ in MODEL_MAPPING_NAMES.values()
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
model = model_class(config)
model.gradient_checkpointing_enable()
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_problem_types(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
problem_types = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class.__name__
not in [
*MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values(),
*MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES.values(),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
config.problem_type = problem_type["title"]
config.num_labels = problem_type["num_labels"]
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
if problem_type["num_labels"] > 1:
inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=True) as warning_list:
loss = model(**inputs).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}"
)
loss.backward()
@slow
def test_model_from_pretrained(self):
model_name = "facebook/deit-base-distilled-patch16-224"
model = DeiTModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| DeiTModelTest |
python | scrapy__scrapy | scrapy/extensions/feedexport.py | {
"start": 1688,
"end": 2872
} | class ____:
"""
This will be used by FeedExporter to decide if an item should be allowed
to be exported to a particular feed.
:param feed_options: feed specific options passed from FeedExporter
:type feed_options: dict
"""
feed_options: dict[str, Any] | None
item_classes: tuple[type, ...]
def __init__(self, feed_options: dict[str, Any] | None) -> None:
self.feed_options = feed_options
if feed_options is not None:
self.item_classes = tuple(
load_object(item_class)
for item_class in feed_options.get("item_classes") or ()
)
else:
self.item_classes = ()
def accepts(self, item: Any) -> bool:
"""
Return ``True`` if `item` should be exported or ``False`` otherwise.
:param item: scraped item which user wants to check if is acceptable
:type item: :ref:`Scrapy items <topics-items>`
:return: `True` if accepted, `False` otherwise
:rtype: bool
"""
if self.item_classes:
return isinstance(item, self.item_classes)
return True # accept all items by default
| ItemFilter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 678147,
"end": 678599
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "message", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
message = sgqlc.types.Field(String, graphql_name="message")
repository = sgqlc.types.Field("Repository", graphql_name="repository")
| UpdateRepositoryWebCommitSignoffSettingPayload |
python | getsentry__sentry | src/sentry/api/endpoints/organization_trace_item_attributes.py | {
"start": 2966,
"end": 4182
} | class ____:
"""
This is a bit of a weird paginator.
The trace item attributes RPC returns a list of attribute names from the
database. But depending on the item type, it is possible that there are some
hard coded attribute names that gets appended to the end of the results.
Because of that, the number of results returned can exceed limit + 1.
To handle this nicely, here we choose to return the full set of results
even if it exceeds limit + 1.
"""
def __init__(self, data_fn):
self.data_fn = data_fn
def get_result(self, limit, cursor=None):
if limit <= 0:
raise ValueError(f"invalid limit for paginator, expected >0, got {limit}")
offset = cursor.offset if cursor is not None else 0
# Request 1 more than limit so we can tell if there is another page
data = self.data_fn(offset=offset, limit=limit + 1)
assert isinstance(data, list)
has_more = len(data) >= limit + 1
return CursorResult(
data,
prev=Cursor(0, max(0, offset - limit), True, offset > 0),
next=Cursor(0, max(0, offset + limit), False, has_more),
)
| TraceItemAttributesNamesPaginator |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4agg.py | {
"start": 204,
"end": 1186
} | class ____(backend_agg.FigureCanvasAgg,
backend_gtk4.FigureCanvasGTK4):
def on_draw_event(self, widget, ctx):
if self._idle_draw_id:
GLib.source_remove(self._idle_draw_id)
self._idle_draw_id = 0
self.draw()
scale = self.device_pixel_ratio
allocation = self.get_allocation()
Gtk.render_background(
self.get_style_context(), ctx,
allocation.x, allocation.y,
allocation.width, allocation.height)
buf = cbook._unmultiplied_rgba8888_to_premultiplied_argb32(
np.asarray(self.get_renderer().buffer_rgba()))
height, width, _ = buf.shape
image = cairo.ImageSurface.create_for_data(
buf.ravel().data, cairo.FORMAT_ARGB32, width, height)
image.set_device_scale(scale, scale)
ctx.set_source_surface(image, 0, 0)
ctx.paint()
return False
@_BackendGTK4.export
| FigureCanvasGTK4Agg |
python | Textualize__textual | tests/select/test_changed_message.py | {
"start": 138,
"end": 2082
} | class ____(App[None]):
def __init__(self):
self.changed_messages = []
super().__init__()
def compose(self):
yield Select[int]([(str(n), n) for n in range(3)])
@on(Select.Changed)
def add_message(self, event):
self.changed_messages.append(event)
async def test_message_control():
app = SelectApp()
async with app.run_test() as pilot:
await pilot.click(Select)
await pilot.click(SelectOverlay, offset=(2, 3))
await pilot.pause()
message = app.changed_messages[0]
assert message.control is app.query_one(Select)
async def test_selecting_posts_message():
app = SelectApp()
async with app.run_test() as pilot:
await pilot.click(Select)
# Click on the 1.
await pilot.click(SelectOverlay, offset=(2, 3))
await pilot.pause()
assert len(app.changed_messages) == 1
await pilot.click(Select)
# Click on the 2.
await pilot.click(SelectOverlay, offset=(2, 4))
await pilot.pause()
assert len(app.changed_messages) == 2
async def test_same_selection_does_not_post_message():
app = SelectApp()
async with app.run_test() as pilot:
await pilot.click(Select)
# Click on the 1.
await pilot.click(SelectOverlay, offset=(2, 3))
await pilot.pause()
assert len(app.changed_messages) == 1
await pilot.click(Select)
# Click on the 1 again...
await pilot.click(SelectOverlay, offset=(2, 3))
await pilot.pause()
assert len(app.changed_messages) == 1
async def test_setting_value_posts_message() -> None:
"""Setting the value of a Select should post a message."""
async with (app := SelectApp()).run_test() as pilot:
assert len(app.changed_messages) == 0
app.query_one(Select).value = 2
await pilot.pause()
assert len(app.changed_messages) == 1
| SelectApp |
python | getsentry__sentry | src/sentry/seer/similarity/types.py | {
"start": 537,
"end": 1083
} | class ____(TypedDict):
project_id: int
stacktrace: str
exception_type: str | None
hash: str
k: NotRequired[int] # how many neighbors to find
threshold: NotRequired[float]
read_only: NotRequired[bool]
event_id: NotRequired[str]
referrer: NotRequired[str]
use_reranking: NotRequired[bool]
model: NotRequired[GroupingVersion] # Model version, defaults to V1 for backward compatibility
training_mode: NotRequired[bool] # whether to just insert embedding without querying
| SimilarIssuesEmbeddingsRequest |
python | sqlalchemy__sqlalchemy | test/orm/test_mapper.py | {
"start": 98564,
"end": 103034
} | class ____(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
@testing.combinations((True,), (False,))
def test_no_mapper_configure_w_selects_etc(self, use_legacy_query):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
am = self.mapper(Address, addresses)
um = self.mapper(
User,
users,
properties={
"address_count": column_property(
select(Address)
.where(Address.id == users.c.id)
.correlate_except(Address)
.scalar_subquery()
)
},
)
is_false(am.configured)
is_false(um.configured)
if use_legacy_query:
stmt = Session().query(User).filter(User.name == "ed")
self.assert_compile(
stmt,
"SELECT (SELECT addresses.id, addresses.user_id, "
"addresses.email_address FROM addresses "
"WHERE addresses.id = users.id) AS anon_1, "
"users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
else:
stmt = select(User).where(User.name == "ed")
self.assert_compile(
stmt,
"SELECT (SELECT addresses.id, addresses.user_id, "
"addresses.email_address FROM addresses "
"WHERE addresses.id = users.id) AS anon_1, "
"users.id, users.name "
"FROM users WHERE users.name = :name_1",
)
is_true(am.configured)
is_true(um.configured)
@testing.combinations((True,), (False,))
def test_load_options(self, use_bound):
User = self.classes.User
users = self.tables.users
um = self.mapper_registry.map_imperatively(User, users)
if use_bound:
stmt = select(User).options(
Load(User).load_only(User.name),
)
is_true(um.configured)
else:
stmt = select(User).options(
load_only(User.name),
)
# all options are "bound" Load objects now,
# so this operation configures mappers
is_true(um.configured)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users",
)
is_true(um.configured)
@testing.combinations((True,), (False,))
def test_backrefs(self, use_legacy_query):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
am = self.mapper_registry.map_imperatively(Address, addresses)
if use_legacy_query:
s = Session()
# legacy, Query still forces configure
stmt = s.query(Address).join(Address.user)
is_true(am.configured)
self.assert_compile(
stmt,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses JOIN users ON users.id = addresses.user_id",
)
else:
# new queries, they can't, because they are used in mapper
# config also. backrefs that aren't explicit on the class
# are the only thing we can't do. we would need __getattr__
# to intercept this error.
with expect_raises_message(
AttributeError, "type object 'Address' has no attribute 'user'"
):
stmt = select(Address).join(Address.user)
is_false(am.configured)
configure_mappers()
is_true(am.configured)
stmt = select(Address).join(Address.user)
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, "
"addresses.email_address FROM addresses JOIN users "
"ON users.id = addresses.user_id",
)
| ConfigureOrNotConfigureTest |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/recurrent.py | {
"start": 107538,
"end": 124874
} | class ____(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
Note that this cell is not optimized for performance on GPU. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs..
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output.
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
implementation = kwargs.pop('implementation', 1)
if implementation == 0:
logging.warning('`implementation=0` has been deprecated, '
'and now defaults to `implementation=1`.'
'Please update your layer call.')
if 'enable_caching_device' in kwargs:
cell_kwargs = {'enable_caching_device':
kwargs.pop('enable_caching_device')}
else:
cell_kwargs = {}
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
dtype=kwargs.get('dtype'),
trainable=kwargs.get('trainable', True),
**cell_kwargs)
super(LSTM, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(LSTM, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
config.update(_config_for_enable_caching_device(self.cell))
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
def _generate_dropout_mask(ones, rate, training=None, count=1):
def dropped_inputs():
return backend.dropout(ones, rate)
if count > 1:
return [
backend.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return backend.in_train_phase(dropped_inputs, ones, training=training)
def _standardize_args(inputs, initial_state, constants, num_constants):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Args:
inputs: Tensor or list/tuple of tensors. which may include constants
and initial states. In that case `num_constant` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
part of the `inputs` list.
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
# There are several situations here:
# In the graph mode, __call__ will be only called once. The initial_state
# and constants could be in inputs (from file loading).
# In the eager mode, __call__ will be called twice, once during
# rnn_layer(inputs=input_t, constants=c_t, ...), and second time will be
# model.fit/train_on_batch/predict with real np data. In the second case,
# the inputs will contain initial_state and constants as eager tensor.
#
# For either case, the real input is the first item in the list, which
# could be a nested structure itself. Then followed by initial_states, which
# could be a list of items, or list of list if the initial_state is complex
# structure, and finally followed by constants which is a flat list.
assert initial_state is None and constants is None
if num_constants:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[:1]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
"""Check whether the state_size contains multiple states."""
return (hasattr(state_size, '__len__') and
not isinstance(state_size, tensor_shape.TensorShape))
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
if inputs is not None:
batch_size = array_ops.shape(inputs)[0]
dtype = inputs.dtype
return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
'batch_size and dtype cannot be None while constructing initial state: '
'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))
def create_zeros(unnested_state_size):
flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return array_ops.zeros(init_state_size, dtype=dtype)
if nest.is_nested(state_size):
return nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
def _caching_device(rnn_cell):
"""Returns the caching device for the RNN variable.
This is useful for distributed training, when variable is not located as same
device as the training worker. By enabling the device cache, this allows
worker to read the variable once and cache locally, rather than read it every
time step from remote when it is needed.
Note that this is assuming the variable that cell needs for each time step is
having the same value in the forward path, and only gets updated in the
backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the
cell body relies on any variable that gets updated every time step, then
caching device will cause it to read the stall value.
Args:
rnn_cell: the rnn cell instance.
"""
if context.executing_eagerly():
# caching_device is not supported in eager mode.
return None
if not getattr(rnn_cell, '_enable_caching_device', False):
return None
# Don't set a caching device when running in a loop, since it is possible that
# train steps could be wrapped in a tf.while_loop. In that scenario caching
# prevents forward computations in loop iterations from re-reading the
# updated weights.
if control_flow_util.IsInWhileLoop(ops.get_default_graph()):
logging.warning(
'Variable read device caching has been disabled because the '
'RNN is in tf.while_loop loop context, which will cause '
'reading stalled value in forward path. This could slow down '
'the training due to duplicated variable reads. Please '
'consider updating your code to remove tf.while_loop if possible.')
return None
if (rnn_cell._dtype_policy.compute_dtype !=
rnn_cell._dtype_policy.variable_dtype):
logging.warning(
'Variable read device caching has been disabled since it '
'doesn\'t work with the mixed precision API. This is '
'likely to cause a slowdown for RNN training due to '
'duplicated read of variable for each timestep, which '
'will be significant in a multi remote worker setting. '
'Please consider disabling mixed precision API if '
'the performance has been affected.')
return None
# Cache the value on the device that access the variable.
return lambda op: op.device
def _config_for_enable_caching_device(rnn_cell):
"""Return the dict config for RNN cell wrt to enable_caching_device field.
Since enable_caching_device is a internal implementation detail for speed up
the RNN variable read when running on the multi remote worker setting, we
don't want this config to be serialized constantly in the JSON. We will only
serialize this field when a none default value is used to create the cell.
Args:
rnn_cell: the RNN cell for serialize.
Returns:
A dict which contains the JSON config for enable_caching_device value or
empty dict if the enable_caching_device value is same as the default value.
"""
default_enable_caching_device = ops.executing_eagerly_outside_functions()
if rnn_cell._enable_caching_device != default_enable_caching_device:
return {'enable_caching_device': rnn_cell._enable_caching_device}
return {}
| LSTM |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 127093,
"end": 128178
} | class ____(Response):
"""
Response of tasks.archive endpoint.
:param archived: Indicates number of archived tasks
:type archived: int
"""
_service = "tasks"
_action = "archive"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"archived": {
"description": "Indicates number of archived tasks",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, archived=None, **kwargs):
super(ArchiveResponse, self).__init__(**kwargs)
self.archived = archived
@schema_property("archived")
def archived(self):
return self._property_archived
@archived.setter
def archived(self, value):
if value is None:
self._property_archived = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "archived", six.integer_types)
self._property_archived = value
| ArchiveResponse |
python | google__pytype | pytype/overlays/enum_overlay.py | {
"start": 7909,
"end": 10110
} | class ____(abstract.InterpreterClass):
"""A wrapper for classes that subclass enum.Enum."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# These are set by EnumMetaInit.setup_interpreterclass.
self.member_type = None
self.member_attrs = {}
self._instantiating = False
@contextlib.contextmanager
def _is_instantiating(self):
old_instantiating = self._instantiating
self._instantiating = True
try:
yield
finally:
self._instantiating = old_instantiating
def instantiate(self, node, container=None):
# Instantiate creates a canonical enum member. This intended for when no
# particular enum member is needed, e.g. during analysis. Real members have
# these fields set during class creation.
del container
instance = abstract.Instance(self, self.ctx)
instance.members["name"] = self.ctx.convert.build_nonatomic_string(node)
if self.member_type:
value = self.member_type.instantiate(node)
else:
# instantiate() should never be called before setup_interpreterclass sets
# self.member_type, because pytype will complain about recursive types.
# But there's no reason not to make sure this function is safe.
value = self.ctx.new_unsolvable(node)
instance.members["value"] = value
for attr_name, attr_type in self.member_attrs.items():
# attr_type might refer back to self, so track whether we are
# instantiating to avoid infinite recursion.
if self._instantiating:
instance.members[attr_name] = self.ctx.new_unsolvable(node)
else:
with self._is_instantiating():
instance.members[attr_name] = attr_type.instantiate(node)
return instance.to_variable(node)
def is_empty_enum(self):
for member in self.members.values():
for b in member.data:
if b.cls == self:
return False
return True
def get_enum_members(self, qualified=False):
ret = {
k: v
for k, v in self.members.items()
if all(d.cls == self for d in v.data)
}
if qualified:
return {f"{self.name}.{k}": v for k, v in ret.items()}
else:
return ret
| EnumInstance |
python | pydata__xarray | xarray/tests/test_combine.py | {
"start": 9795,
"end": 12246
} | class ____:
@pytest.mark.parametrize(
"concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})]
)
def test_concat_once(self, create_combined_ids, concat_dim, kwargs):
shape = (2,)
combined_ids = create_combined_ids(shape)
ds = create_test_data
result = _combine_all_along_first_dim(
combined_ids,
dim=concat_dim,
data_vars="all",
coords="different",
compat="no_conflicts",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
)
expected_ds = concat([ds(0), ds(1)], dim=concat_dim, **kwargs)
assert_combined_tile_ids_equal(result, {(): expected_ds})
def test_concat_only_first_dim(self, create_combined_ids):
shape = (2, 3)
combined_ids = create_combined_ids(shape)
result = _combine_all_along_first_dim(
combined_ids,
dim="dim1",
data_vars="all",
coords="different",
compat="no_conflicts",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
)
ds = create_test_data
partway1 = concat([ds(0), ds(3)], dim="dim1")
partway2 = concat([ds(1), ds(4)], dim="dim1")
partway3 = concat([ds(2), ds(5)], dim="dim1")
expected_datasets = [partway1, partway2, partway3]
expected = {(i,): ds for i, ds in enumerate(expected_datasets)}
assert_combined_tile_ids_equal(result, expected)
@pytest.mark.parametrize(
"concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})]
)
def test_concat_twice(self, create_combined_ids, concat_dim, kwargs):
shape = (2, 3)
combined_ids = create_combined_ids(shape)
result = _combine_nd(
combined_ids,
concat_dims=["dim1", concat_dim],
data_vars="all",
coords="different",
compat="no_conflicts",
fill_value=dtypes.NA,
join="outer",
combine_attrs="drop",
)
ds = create_test_data
partway1 = concat([ds(0), ds(3)], dim="dim1")
partway2 = concat([ds(1), ds(4)], dim="dim1")
partway3 = concat([ds(2), ds(5)], dim="dim1")
expected = concat([partway1, partway2, partway3], **kwargs, dim=concat_dim)
assert_equal(result, expected)
| TestCombineND |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 19380,
"end": 20594
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = YosoAttention(config)
self.add_cross_attention = config.add_cross_attention
self.intermediate = YosoIntermediate(config)
self.output = YosoOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| YosoLayer |
python | doocs__leetcode | solution/3400-3499/3492.Maximum Containers on a Ship/Solution.py | {
"start": 0,
"end": 130
} | class ____:
def maxContainers(self, n: int, w: int, maxWeight: int) -> int:
return min(n * n * w, maxWeight) // w
| Solution |
python | getlogbook__logbook | src/logbook/queues.py | {
"start": 18584,
"end": 18974
} | class ____(Handler):
"""Implements a handler that dispatches over a execnet channel
to a different process.
"""
def __init__(self, channel, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.channel = channel
def emit(self, record):
self.channel.send(record.to_dict(json_safe=True))
| ExecnetChannelHandler |
python | viewflow__viewflow | viewflow/views/search.py | {
"start": 3251,
"end": 3837
} | class ____(object):
"""
The mixin for LitView to enable search capabilities
"""
search_fields = None
def search_enabled(self):
return self.search_fields is not None
def get_search_term(self):
return self.request.GET.get("_search")
def get_queryset(self):
queryset = super().get_queryset()
search_term = self.get_search_term()
if self.search_enabled is not None and search_term is not None:
queryset = get_search_results(queryset, self.search_fields, search_term)
return queryset
| SearchableViewMixin |
python | mahmoud__glom | glom/matching.py | {
"start": 792,
"end": 1228
} | class ____(GlomError):
"""
Raised when a :class:`Match` or :data:`M` check fails.
>>> glom({123: 'a'}, Match({'id': int}))
Traceback (most recent call last):
...
MatchError: key 123 didn't match any of ['id']
"""
def __init__(self, fmt, *args):
super().__init__(fmt, *args)
def get_message(self):
fmt, args = self.args[0], self.args[1:]
return bbformat(fmt, *args)
| MatchError |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensors.py | {
"start": 11797,
"end": 12885
} | class ____(graphene.Mutation):
"""Set a cursor for a sensor to track state across evaluations."""
Output = graphene.NonNull(GrapheneSensorOrError)
class Arguments:
sensor_selector = graphene.NonNull(GrapheneSensorSelector)
cursor = graphene.String()
class Meta:
name = "SetSensorCursorMutation"
@capture_error
@require_permission_check(Permissions.UPDATE_SENSOR_CURSOR)
def mutate(self, graphene_info: ResolveInfo, sensor_selector, cursor=None):
selector = SensorSelector.from_graphql_input(sensor_selector)
assert_permission_for_sensor(graphene_info, Permissions.UPDATE_SENSOR_CURSOR, selector)
return set_sensor_cursor(graphene_info, selector, cursor)
types = [
GrapheneSensor,
GrapheneSensorOrError,
GrapheneSensors,
GrapheneSensorsOrError,
GrapheneStopSensorMutation,
GrapheneStopSensorMutationResult,
GrapheneStopSensorMutationResultOrError,
GrapheneStopSensorMutation,
GrapheneSetSensorCursorMutation,
GrapheneResetSensorMutation,
]
| GrapheneSetSensorCursorMutation |
python | great-expectations__great_expectations | tests/integration/data_sources_and_expectations/test_test_performance.py | {
"start": 1211,
"end": 1283
} | class ____:
setup_count = 0
teardown_count = 0
| SetupTeardownCounts |
python | pytorch__pytorch | torch/utils/benchmark/utils/common.py | {
"start": 755,
"end": 2226
} | class ____:
"""Container for information used to define a Timer. (except globals)"""
stmt: str
setup: str
global_setup: str = ""
label: str | None = None
sub_label: str | None = None
description: str | None = None
env: str | None = None
num_threads: int = 1
@property
def title(self) -> str:
"""Best effort attempt at a string label for the measurement."""
if self.label is not None:
return self.label + (f": {self.sub_label}" if self.sub_label else "")
elif "\n" not in self.stmt:
return self.stmt + (f": {self.sub_label}" if self.sub_label else "")
return (
f"stmt:{f' ({self.sub_label})' if self.sub_label else ''}\n"
f"{textwrap.indent(self.stmt, ' ')}"
)
def setup_str(self) -> str:
return (
"" if (self.setup == "pass" or not self.setup)
else f"setup:\n{textwrap.indent(self.setup, ' ')}" if "\n" in self.setup
else f"setup: {self.setup}"
)
def summarize(self) -> str:
"""Build TaskSpec portion of repr string for other containers."""
sections = [
self.title,
self.description or "",
self.setup_str(),
]
return "\n".join([f"{i}\n" if "\n" in i else i for i in sections if i])
_TASKSPEC_FIELDS = tuple(i.name for i in dataclasses.fields(TaskSpec))
@dataclasses.dataclass(init=True, repr=False)
| TaskSpec |
python | facebookresearch__faiss | contrib/rpc.py | {
"start": 2164,
"end": 5463
} | class ____:
"""
server protocol. Methods from classes that subclass Server can be called
transparently from a client
"""
def __init__(self, s, logf=sys.stderr, log_prefix=''):
self.logf = logf
self.log_prefix = log_prefix
# connection
self.conn = s
self.fs = FileSock(s)
def log(self, s):
self.logf.write("Server log %s: %s\n" % (self.log_prefix, s))
def one_function(self):
"""
Executes a single function with associated I/O.
Protocol:
- the arguments and results are serialized with the pickle protocol
- client sends : (fname,args)
fname = method name to call
args = tuple of arguments
- server sends result: (rid,st,ret)
rid = request id
st = None, or exception if there was during execution
ret = return value or None if st!=None
"""
try:
(fname, args) = RestrictedUnpickler(self.fs).load()
except EOFError:
raise ClientExit("read args")
self.log("executing method %s"%(fname))
st = None
ret = None
try:
f=getattr(self,fname)
except AttributeError:
st = AttributeError("unknown method "+fname)
self.log("unknown method")
try:
ret = f(*args)
except Exception as e:
# due to a bug (in mod_python?), ServerException cannot be
# unpickled, so send the string and make the exception on the client side
#st=ServerException(
# "".join(traceback.format_tb(sys.exc_info()[2]))+
# str(e))
st="".join(traceback.format_tb(sys.exc_info()[2]))+str(e)
self.log("exception in method")
traceback.print_exc(50,self.logf)
self.logf.flush()
LOG.info("return")
try:
pickle.dump((st ,ret), self.fs, protocol=4)
except EOFError:
raise ClientExit("function return")
def exec_loop(self):
""" main execution loop. Loops and handles exit states"""
self.log("in exec_loop")
try:
while True:
self.one_function()
except ClientExit as e:
self.log("ClientExit %s"%e)
except socket.error as e:
self.log("socket error %s"%e)
traceback.print_exc(50,self.logf)
except EOFError:
self.log("EOF during communication")
traceback.print_exc(50,self.logf)
except BaseException:
# unexpected
traceback.print_exc(50,sys.stderr)
sys.exit(1)
LOG.info("exit server")
def exec_loop_cleanup(self):
pass
###################################################################
# spying stuff
def get_ps_stats(self):
ret=''
f=os.popen("echo ============ `hostname` uptime:; uptime;"+
"echo ============ self:; "+
"ps -p %d -o pid,vsize,rss,%%cpu,nlwp,psr; "%os.getpid()+
"echo ============ run queue:;"+
"ps ar -o user,pid,%cpu,%mem,ni,nlwp,psr,vsz,rss,cputime,command")
for l in f:
ret+=l
return ret
| Server |
python | fsspec__filesystem_spec | fsspec/core.py | {
"start": 4510,
"end": 23238
} | class ____(list):
"""List of OpenFile instances
Can be used in a single context, which opens and closes all of the
contained files. Normal list access to get the elements works as
normal.
A special case is made for caching filesystems - the files will
be down/uploaded together at the start or end of the context, and
this may happen concurrently, if the target filesystem supports it.
"""
def __init__(self, *args, mode="rb", fs=None):
self.mode = mode
self.fs = fs
self.files = []
super().__init__(*args)
def __enter__(self):
if self.fs is None:
raise ValueError("Context has already been used")
fs = self.fs
while True:
if hasattr(fs, "open_many"):
# check for concurrent cache download; or set up for upload
self.files = fs.open_many(self)
return self.files
if hasattr(fs, "fs") and fs.fs is not None:
fs = fs.fs
else:
break
return [s.__enter__() for s in self]
def __exit__(self, *args):
fs = self.fs
[s.__exit__(*args) for s in self]
if "r" not in self.mode:
while True:
if hasattr(fs, "open_many"):
# check for concurrent cache upload
fs.commit_many(self.files)
return
if hasattr(fs, "fs") and fs.fs is not None:
fs = fs.fs
else:
break
def __getitem__(self, item):
out = super().__getitem__(item)
if isinstance(item, slice):
return OpenFiles(out, mode=self.mode, fs=self.fs)
return out
def __repr__(self):
return f"<List of {len(self)} OpenFile instances>"
def open_files(
urlpath,
mode="rb",
compression=None,
encoding="utf8",
errors=None,
name_function=None,
num=1,
protocol=None,
newline=None,
auto_mkdir=True,
expand=True,
**kwargs,
):
"""Given a path or paths, return a list of ``OpenFile`` objects.
For writing, a str path must contain the "*" character, which will be filled
in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
For either reading or writing, can instead provide explicit list of paths.
Parameters
----------
urlpath: string or list
Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
to read from alternative filesystems. To read from multiple files you
can pass a globstring or a list of paths, with the caveat that they
must all have the same protocol.
mode: 'rb', 'wt', etc.
compression: string or None
If given, open file using compression codec. Can either be a compression
name (a key in ``fsspec.compression.compr``) or "infer" to guess the
compression from the filename suffix.
encoding: str
For text mode only
errors: None or str
Passed to TextIOWrapper in text mode
name_function: function or None
if opening a set of files for writing, those files do not yet exist,
so we need to generate their names by formatting the urlpath for
each sequence number
num: int [1]
if writing mode, number of files we expect to create (passed to
name+function)
protocol: str or None
If given, overrides the protocol found in the URL.
newline: bytes or None
Used for line terminator in text mode. If None, uses system default;
if blank, uses no translation.
auto_mkdir: bool (True)
If in write mode, this will ensure the target directory exists before
writing, by calling ``fs.mkdirs(exist_ok=True)``.
expand: bool
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> files = open_files('2015-*-*.csv') # doctest: +SKIP
>>> files = open_files(
... 's3://bucket/2015-*-*.csv.gz', compression='gzip'
... ) # doctest: +SKIP
Returns
-------
An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
be used as a single context
Notes
-----
For a full list of the available protocols and the implementations that
they map across to see the latest online documentation:
- For implementations built into ``fsspec`` see
https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
- For implementations in separate packages see
https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
"""
fs, fs_token, paths = get_fs_token_paths(
urlpath,
mode,
num=num,
name_function=name_function,
storage_options=kwargs,
protocol=protocol,
expand=expand,
)
if fs.protocol == "file":
fs.auto_mkdir = auto_mkdir
elif "r" not in mode and auto_mkdir:
parents = {fs._parent(path) for path in paths}
for parent in parents:
try:
fs.makedirs(parent, exist_ok=True)
except PermissionError:
pass
return OpenFiles(
[
OpenFile(
fs,
path,
mode=mode,
compression=compression,
encoding=encoding,
errors=errors,
newline=newline,
)
for path in paths
],
mode=mode,
fs=fs,
)
def _un_chain(path, kwargs):
# Avoid a circular import
from fsspec.implementations.chained import ChainedFileSystem
if "::" in path:
x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word
bits = []
for p in path.split("::"):
if "://" in p or x.match(p):
bits.append(p)
else:
bits.append(p + "://")
else:
bits = [path]
# [[url, protocol, kwargs], ...]
out = []
previous_bit = None
kwargs = kwargs.copy()
for bit in reversed(bits):
protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
cls = get_filesystem_class(protocol)
extra_kwargs = cls._get_kwargs_from_urls(bit)
kws = kwargs.pop(protocol, {})
if bit is bits[0]:
kws.update(kwargs)
kw = dict(
**{k: v for k, v in extra_kwargs.items() if k not in kws or v != kws[k]},
**kws,
)
bit = cls._strip_protocol(bit)
if "target_protocol" not in kw and issubclass(cls, ChainedFileSystem):
bit = previous_bit
out.append((bit, protocol, kw))
previous_bit = bit
out.reverse()
return out
def url_to_fs(url, **kwargs):
"""
Turn fully-qualified and potentially chained URL into filesystem instance
Parameters
----------
url : str
The fsspec-compatible URL
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Returns
-------
filesystem : FileSystem
The new filesystem discovered from ``url`` and created with
``**kwargs``.
urlpath : str
The file-systems-specific URL for ``url``.
"""
url = stringify_path(url)
# non-FS arguments that appear in fsspec.open()
# inspect could keep this in sync with open()'s signature
known_kwargs = {
"compression",
"encoding",
"errors",
"expand",
"mode",
"name_function",
"newline",
"num",
}
kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
chain = _un_chain(url, kwargs)
inkwargs = {}
# Reverse iterate the chain, creating a nested target_* structure
for i, ch in enumerate(reversed(chain)):
urls, protocol, kw = ch
if i == len(chain) - 1:
inkwargs = dict(**kw, **inkwargs)
continue
inkwargs["target_options"] = dict(**kw, **inkwargs)
inkwargs["target_protocol"] = protocol
inkwargs["fo"] = urls
urlpath, protocol, _ = chain[0]
fs = filesystem(protocol, **inkwargs)
return fs, urlpath
DEFAULT_EXPAND = conf.get("open_expand", False)
def open(
urlpath,
mode="rb",
compression=None,
encoding="utf8",
errors=None,
protocol=None,
newline=None,
expand=None,
**kwargs,
):
"""Given a path or paths, return one ``OpenFile`` object.
Parameters
----------
urlpath: string or list
Absolute or relative filepath. Prefix with a protocol like ``s3://``
to read from alternative filesystems. Should not include glob
character(s).
mode: 'rb', 'wt', etc.
compression: string or None
If given, open file using compression codec. Can either be a compression
name (a key in ``fsspec.compression.compr``) or "infer" to guess the
compression from the filename suffix.
encoding: str
For text mode only
errors: None or str
Passed to TextIOWrapper in text mode
protocol: str or None
If given, overrides the protocol found in the URL.
newline: bytes or None
Used for line terminator in text mode. If None, uses system default;
if blank, uses no translation.
expand: bool or None
Whether to regard file paths containing special glob characters as needing
expansion (finding the first match) or absolute. Setting False allows using
paths which do embed such characters. If None (default), this argument
takes its value from the DEFAULT_EXPAND module variable, which takes
its initial value from the "open_expand" config value at startup, which will
be False if not set.
**kwargs: dict
Extra options that make sense to a particular storage connection, e.g.
host, port, username, password, etc.
Examples
--------
>>> openfile = open('2015-01-01.csv') # doctest: +SKIP
>>> openfile = open(
... 's3://bucket/2015-01-01.csv.gz', compression='gzip'
... ) # doctest: +SKIP
>>> with openfile as f:
... df = pd.read_csv(f) # doctest: +SKIP
...
Returns
-------
``OpenFile`` object.
Notes
-----
For a full list of the available protocols and the implementations that
they map across to see the latest online documentation:
- For implementations built into ``fsspec`` see
https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
- For implementations in separate packages see
https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
"""
expand = DEFAULT_EXPAND if expand is None else expand
out = open_files(
urlpath=[urlpath],
mode=mode,
compression=compression,
encoding=encoding,
errors=errors,
protocol=protocol,
newline=newline,
expand=expand,
**kwargs,
)
if not out:
raise FileNotFoundError(urlpath)
return out[0]
def open_local(
url: str | list[str] | Path | list[Path],
mode: str = "rb",
**storage_options: dict,
) -> str | list[str]:
"""Open file(s) which can be resolved to local
For files which either are local, or get downloaded upon open
(e.g., by file caching)
Parameters
----------
url: str or list(str)
mode: str
Must be read mode
storage_options:
passed on to FS for or used by open_files (e.g., compression)
"""
if "r" not in mode:
raise ValueError("Can only ensure local files when reading")
of = open_files(url, mode=mode, **storage_options)
if not getattr(of[0].fs, "local_file", False):
raise ValueError(
"open_local can only be used on a filesystem which"
" has attribute local_file=True"
)
with of as files:
paths = [f.name for f in files]
if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
return paths[0]
return paths
def get_compression(urlpath, compression):
if compression == "infer":
compression = infer_compression(urlpath)
if compression is not None and compression not in compr:
raise ValueError(f"Compression type {compression} not supported")
return compression
def split_protocol(urlpath):
"""Return protocol, path pair"""
urlpath = stringify_path(urlpath)
if "://" in urlpath:
protocol, path = urlpath.split("://", 1)
if len(protocol) > 1:
# excludes Windows paths
return protocol, path
if urlpath.startswith("data:"):
return urlpath.split(":", 1)
return None, urlpath
def strip_protocol(urlpath):
"""Return only path part of full URL, according to appropriate backend"""
protocol, _ = split_protocol(urlpath)
cls = get_filesystem_class(protocol)
return cls._strip_protocol(urlpath)
def expand_paths_if_needed(paths, mode, num, fs, name_function):
"""Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
in them (read mode).
:param paths: list of paths
mode: str
Mode in which to open files.
num: int
If opening in writing mode, number of files we expect to create.
fs: filesystem object
name_function: callable
If opening in writing mode, this callable is used to generate path
names. Names are generated for each partition by
``urlpath.replace('*', name_function(partition_index))``.
:return: list of paths
"""
expanded_paths = []
paths = list(paths)
if "w" in mode: # read mode
if sum(1 for p in paths if "*" in p) > 1:
raise ValueError(
"When writing data, only one filename mask can be specified."
)
num = max(num, len(paths))
for curr_path in paths:
if "*" in curr_path:
# expand using name_function
expanded_paths.extend(_expand_paths(curr_path, name_function, num))
else:
expanded_paths.append(curr_path)
# if we generated more paths that asked for, trim the list
if len(expanded_paths) > num:
expanded_paths = expanded_paths[:num]
else: # read mode
for curr_path in paths:
if has_magic(curr_path):
# expand using glob
expanded_paths.extend(fs.glob(curr_path))
else:
expanded_paths.append(curr_path)
return expanded_paths
def get_fs_token_paths(
urlpath,
mode="rb",
num=1,
name_function=None,
storage_options=None,
protocol=None,
expand=True,
):
"""Filesystem, deterministic token, and paths from a urlpath and options.
Parameters
----------
urlpath: string or iterable
Absolute or relative filepath, URL (may include protocols like
``s3://``), or globstring pointing to data.
mode: str, optional
Mode in which to open files.
num: int, optional
If opening in writing mode, number of files we expect to create.
name_function: callable, optional
If opening in writing mode, this callable is used to generate path
names. Names are generated for each partition by
``urlpath.replace('*', name_function(partition_index))``.
storage_options: dict, optional
Additional keywords to pass to the filesystem class.
protocol: str or None
To override the protocol specifier in the URL
expand: bool
Expand string paths for writing, assuming the path is a directory
"""
if isinstance(urlpath, (list, tuple, set)):
if not urlpath:
raise ValueError("empty urlpath sequence")
urlpath0 = stringify_path(next(iter(urlpath)))
else:
urlpath0 = stringify_path(urlpath)
storage_options = storage_options or {}
if protocol:
storage_options["protocol"] = protocol
chain = _un_chain(urlpath0, storage_options or {})
inkwargs = {}
# Reverse iterate the chain, creating a nested target_* structure
for i, ch in enumerate(reversed(chain)):
urls, nested_protocol, kw = ch
if i == len(chain) - 1:
inkwargs = dict(**kw, **inkwargs)
continue
inkwargs["target_options"] = dict(**kw, **inkwargs)
inkwargs["target_protocol"] = nested_protocol
inkwargs["fo"] = urls
paths, protocol, _ = chain[0]
fs = filesystem(protocol, **inkwargs)
if isinstance(urlpath, (list, tuple, set)):
pchains = [
_un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
]
if len({pc[1] for pc in pchains}) > 1:
raise ValueError("Protocol mismatch getting fs from %s", urlpath)
paths = [pc[0] for pc in pchains]
else:
paths = fs._strip_protocol(paths)
if isinstance(paths, (list, tuple, set)):
if expand:
paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
elif not isinstance(paths, list):
paths = list(paths)
else:
if ("w" in mode or "x" in mode) and expand:
paths = _expand_paths(paths, name_function, num)
elif "*" in paths:
paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
else:
paths = [paths]
return fs, fs._fs_token, paths
def _expand_paths(path, name_function, num):
if isinstance(path, str):
if path.count("*") > 1:
raise ValueError("Output path spec must contain exactly one '*'.")
elif "*" not in path:
path = os.path.join(path, "*.part")
if name_function is None:
name_function = build_name_function(num - 1)
paths = [path.replace("*", name_function(i)) for i in range(num)]
if paths != sorted(paths):
logger.warning(
"In order to preserve order between partitions"
" paths created with ``name_function`` should "
"sort to partition order"
)
elif isinstance(path, (tuple, list)):
assert len(path) == num
paths = list(path)
else:
raise ValueError(
"Path should be either\n"
"1. A list of paths: ['foo.json', 'bar.json', ...]\n"
"2. A directory: 'foo/\n"
"3. A path with a '*' in it: 'foo.*.json'"
)
return paths
| OpenFiles |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 54104,
"end": 55557
} | class ____(GenericFunction[_T]):
"""Define a function whose return type is bound to the type of its
arguments.
"""
inherit_cache = True
# set ColumnElement[_T] as a separate overload, to appease
# mypy which seems to not want to accept _T from
# _ColumnExpressionArgument. Seems somewhat related to the covariant
# _HasClauseElement as of mypy 1.15
@overload
def __init__(
self,
col: ColumnElement[_T],
*args: _ColumnExpressionOrLiteralArgument[Any],
**kwargs: Any,
) -> None: ...
@overload
def __init__(
self,
col: _ColumnExpressionArgument[_T],
*args: _ColumnExpressionOrLiteralArgument[Any],
**kwargs: Any,
) -> None: ...
@overload
def __init__(
self,
col: _T,
*args: _ColumnExpressionOrLiteralArgument[Any],
**kwargs: Any,
) -> None: ...
def __init__(
self, *args: _ColumnExpressionOrLiteralArgument[_T], **kwargs: Any
) -> None:
fn_args: Sequence[ColumnElement[Any]] = [
coercions.expect(
roles.ExpressionElementRole,
c,
name=self.name,
apply_propagate_attrs=self,
)
for c in args
]
kwargs.setdefault("type_", _type_from_args(fn_args))
kwargs["_parsed_args"] = fn_args
super().__init__(*fn_args, **kwargs)
| ReturnTypeFromArgs |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 35587,
"end": 40706
} | class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_recall = self.evaluate(recall)
for _ in range(10):
self.assertEqual(initial_recall, self.evaluate(recall))
@test_util.run_deprecated_v1
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertAlmostEqual(1.0, self.evaluate(recall), 6)
@test_util.run_deprecated_v1
def testSomeCorrect_multipleInputDtypes(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, self.evaluate(update_op))
self.assertAlmostEqual(0.5, self.evaluate(recall))
@test_util.run_deprecated_v1
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, self.evaluate(update_op))
self.assertAlmostEqual(expected_precision, self.evaluate(recall))
@test_util.run_deprecated_v1
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.recall(labels, predictions, weights=weights)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_precision = weighted_tp / weighted_t
self.assertAlmostEqual(expected_precision, self.evaluate(update_op))
self.assertAlmostEqual(expected_precision, self.evaluate(recall))
@test_util.run_deprecated_v1
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(0, self.evaluate(recall))
@test_util.run_deprecated_v1
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.recall(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertEqual(0, self.evaluate(recall))
| RecallTest |
python | coleifer__peewee | tests/regressions.py | {
"start": 42630,
"end": 43641
} | class ____(ModelTestCase):
@skip_if(sys.version_info[0] == 2)
@requires_models(UUIDReg)
def test_bulk_update_uuid_pk(self):
r1 = UUIDReg.create(key='k1')
r2 = UUIDReg.create(key='k2')
r1.key = 'k1-x'
r2.key = 'k2-x'
UUIDReg.bulk_update((r1, r2), (UUIDReg.key,))
r1_db, r2_db = UUIDReg.select().order_by(UUIDReg.key)
self.assertEqual(r1_db.key, 'k1-x')
self.assertEqual(r2_db.key, 'k2-x')
@requires_models(CharPKKV)
def test_bulk_update_non_integer_pk(self):
a, b, c = [CharPKKV.create(id=c, key='k%s' % c) for c in 'abc']
a.key = 'ka-x'
a.value = 1
b.value = 2
c.key = 'kc-x'
c.value = 3
CharPKKV.bulk_update((a, b, c), (CharPKKV.key, CharPKKV.value))
data = list(CharPKKV.select().order_by(CharPKKV.id).tuples())
self.assertEqual(data, [
('a', 'ka-x', 1),
('b', 'kb', 2),
('c', 'kc-x', 3)])
| TestBulkUpdateNonIntegerPK |
python | Netflix__metaflow | metaflow/decorators.py | {
"start": 1478,
"end": 1826
} | class ____(MetaflowException):
headline = "Syntax error"
def __init__(self, deconame):
msg = (
"Decorator '%s' can be applied only to FlowSpecs. Make sure "
"the decorator is above a class definition." % deconame
)
super(BadFlowDecoratorException, self).__init__(msg)
| BadFlowDecoratorException |
python | django__django | tests/template_tests/test_response.py | {
"start": 787,
"end": 8789
} | class ____(SimpleTestCase):
def _response(self, template="foo", *args, **kwargs):
template = engines["django"].from_string(template)
return SimpleTemplateResponse(template, *args, **kwargs)
def test_template_resolving(self):
response = SimpleTemplateResponse("first/test.html")
response.render()
self.assertEqual(response.content, b"First template\n")
templates = ["foo.html", "second/test.html", "first/test.html"]
response = SimpleTemplateResponse(templates)
response.render()
self.assertEqual(response.content, b"Second template\n")
response = self._response()
response.render()
self.assertEqual(response.content, b"foo")
def test_explicit_baking(self):
# explicit baking
response = self._response()
self.assertFalse(response.is_rendered)
response.render()
self.assertTrue(response.is_rendered)
def test_render(self):
# response is not re-rendered without the render call
response = self._response().render()
self.assertEqual(response.content, b"foo")
# rebaking doesn't change the rendered content
template = engines["django"].from_string("bar{{ baz }}")
response.template_name = template
response.render()
self.assertEqual(response.content, b"foo")
# but rendered content can be overridden by manually
# setting content
response.content = "bar"
self.assertEqual(response.content, b"bar")
def test_iteration_unrendered(self):
# unrendered response raises an exception on iteration
response = self._response()
self.assertFalse(response.is_rendered)
def iteration():
list(response)
msg = "The response content must be rendered before it can be iterated over."
with self.assertRaisesMessage(ContentNotRenderedError, msg):
iteration()
self.assertFalse(response.is_rendered)
def test_iteration_rendered(self):
# iteration works for rendered responses
response = self._response().render()
self.assertEqual(list(response), [b"foo"])
def test_content_access_unrendered(self):
# unrendered response raises an exception when content is accessed
response = self._response()
self.assertFalse(response.is_rendered)
with self.assertRaises(ContentNotRenderedError):
response.content
self.assertFalse(response.is_rendered)
def test_content_access_rendered(self):
# rendered response content can be accessed
response = self._response().render()
self.assertEqual(response.content, b"foo")
def test_set_content(self):
# content can be overridden
response = self._response()
self.assertFalse(response.is_rendered)
response.content = "spam"
self.assertTrue(response.is_rendered)
self.assertEqual(response.content, b"spam")
response.content = "baz"
self.assertEqual(response.content, b"baz")
def test_dict_context(self):
response = self._response("{{ foo }}{{ processors }}", {"foo": "bar"})
self.assertEqual(response.context_data, {"foo": "bar"})
response.render()
self.assertEqual(response.content, b"bar")
def test_kwargs(self):
response = self._response(
content_type="application/json", status=504, charset="ascii"
)
self.assertEqual(response.headers["content-type"], "application/json")
self.assertEqual(response.status_code, 504)
self.assertEqual(response.charset, "ascii")
def test_args(self):
response = SimpleTemplateResponse("", {}, "application/json", 504)
self.assertEqual(response.headers["content-type"], "application/json")
self.assertEqual(response.status_code, 504)
@require_jinja2
def test_using(self):
response = SimpleTemplateResponse("template_tests/using.html").render()
self.assertEqual(response.content, b"DTL\n")
response = SimpleTemplateResponse(
"template_tests/using.html", using="django"
).render()
self.assertEqual(response.content, b"DTL\n")
response = SimpleTemplateResponse(
"template_tests/using.html", using="jinja2"
).render()
self.assertEqual(response.content, b"Jinja2\n")
def test_post_callbacks(self):
"Rendering a template response triggers the post-render callbacks"
post = []
def post1(obj):
post.append("post1")
def post2(obj):
post.append("post2")
response = SimpleTemplateResponse("first/test.html", {})
response.add_post_render_callback(post1)
response.add_post_render_callback(post2)
# When the content is rendered, all the callbacks are invoked, too.
response.render()
self.assertEqual(response.content, b"First template\n")
self.assertEqual(post, ["post1", "post2"])
def test_pickling(self):
# Create a template response. The context is
# known to be unpicklable (e.g., a function).
response = SimpleTemplateResponse(
"first/test.html",
{
"value": 123,
"fn": datetime.now,
},
)
with self.assertRaises(ContentNotRenderedError):
pickle.dumps(response)
# But if we render the response, we can pickle it.
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.content, response.content)
self.assertEqual(
unpickled_response.headers["content-type"], response.headers["content-type"]
)
self.assertEqual(unpickled_response.status_code, response.status_code)
# ...and the unpickled response doesn't have the
# template-related attributes, so it can't be re-rendered
template_attrs = ("template_name", "context_data", "_post_render_callbacks")
for attr in template_attrs:
self.assertFalse(hasattr(unpickled_response, attr))
# ...and requesting any of those attributes raises an exception
for attr in template_attrs:
with self.assertRaises(AttributeError):
getattr(unpickled_response, attr)
def test_repickling(self):
response = SimpleTemplateResponse(
"first/test.html",
{
"value": 123,
"fn": datetime.now,
},
)
with self.assertRaises(ContentNotRenderedError):
pickle.dumps(response)
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
pickle.dumps(unpickled_response)
def test_pickling_cookie(self):
response = SimpleTemplateResponse(
"first/test.html",
{
"value": 123,
"fn": datetime.now,
},
)
response.cookies["key"] = "value"
response.render()
pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.cookies["key"].value, "value")
def test_headers(self):
response = SimpleTemplateResponse(
"first/test.html",
{"value": 123, "fn": datetime.now},
headers={"X-Foo": "foo"},
)
self.assertEqual(response.headers["X-Foo"], "foo")
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [TEMPLATE_DIR],
"OPTIONS": {
"context_processors": [test_processor_name],
},
}
]
)
| SimpleTemplateResponseTest |
python | getsentry__sentry | tests/sentry/api/serializers/test_fields.py | {
"start": 441,
"end": 618
} | class ____(serializers.Serializer):
a_field = ListField(child=ChildSerializer(), required=False, allow_null=False)
actor_field = ActorField(required=False)
| DummySerializer |
python | pdm-project__pdm | src/pdm/cli/commands/cache.py | {
"start": 4148,
"end": 4724
} | class ____(BaseCommand):
"""List the built wheels stored in the cache"""
arguments = (verbose_option,)
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument("pattern", nargs="?", default="*", help="The pattern to list")
def handle(self, project: Project, options: argparse.Namespace) -> None:
rows = [
(format_size(file_size(file)), file.name) for file in find_files(project.cache("wheels"), options.pattern)
]
project.core.ui.display_columns(rows, [">Size", "Filename"])
| ListCommand |
python | pytest-dev__pytest | testing/code/test_excinfo.py | {
"start": 2758,
"end": 14298
} | class ____:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = _pytest._code.ExceptionInfo.from_current()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ["f", "g", "h"]
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
s = str(tb[-1].getsource())
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
def test_traceback_entry_getsource_in_construct(self):
def xyz():
try:
raise ValueError
except somenoname: # type: ignore[name-defined] # noqa: F821
pass # pragma: no cover
try:
xyz()
except NameError:
excinfo = _pytest._code.ExceptionInfo.from_current()
else:
assert False, "did not raise NameError"
tb = excinfo.traceback
source = tb[-1].getsource()
assert source is not None
assert source.deindent().lines == [
"def xyz():",
" try:",
" raise ValueError",
" except somenoname: # type: ignore[name-defined] # noqa: F821",
]
def test_traceback_cut(self) -> None:
co = _pytest._code.Code.from_function(f)
path, firstlineno = co.path, co.firstlineno
assert isinstance(path, Path)
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno + 2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, pytester: Pytester) -> None:
p = pytester.makepyfile("def f(): raise ValueError")
with pytest.raises(ValueError) as excinfo:
import_path(p, root=pytester.path, consider_namespace_packages=False).f()
basedir = Path(pytest.__file__).parent
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
assert isinstance(x.path, Path)
assert basedir not in x.path.parents
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter(self.excinfo)
assert len(ntraceback) == len(traceback) - 1
@pytest.mark.parametrize(
"tracebackhide, matching",
[
(lambda info: True, True),
(lambda info: False, False),
(operator.methodcaller("errisinstance", ValueError), True),
(operator.methodcaller("errisinstance", IndexError), False),
],
)
def test_traceback_filter_selective(self, tracebackhide, matching):
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = tracebackhide
f()
#
def h():
#
g()
#
excinfo = pytest.raises(ValueError, h)
traceback = excinfo.traceback
ntraceback = traceback.filter(excinfo)
print(f"old: {traceback!r}")
print(f"new: {ntraceback!r}")
if matching:
assert len(ntraceback) == len(traceback) - 2
else:
# -1 because of the __tracebackhide__ in pytest.raises
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = pytest.raises(RecursionError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n - 1)
excinfo = pytest.raises(RuntimeError, f, 25)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self) -> None:
def do_stuff() -> None:
raise RuntimeError
def reraise_me() -> None:
import sys
_exc, val, tb = sys.exc_info()
assert val is not None
raise val.with_traceback(tb)
def f(n: int) -> None:
try:
do_stuff()
except BaseException:
reraise_me()
excinfo = pytest.raises(RuntimeError, f, 8)
assert excinfo is not None
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
# XXX: simplified locally testable version
decorator = pytest.importorskip("decorator").decorator
def log(f, *k, **kw):
print(f"{k} {kw}")
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError("")
fail = log(log(fail))
excinfo = pytest.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_getreprcrash(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = pytest.raises(ValueError, f)
reprcrash = excinfo._getreprcrash()
assert reprcrash is not None
co = _pytest._code.Code.from_function(h)
assert reprcrash.path == str(co.path)
assert reprcrash.lineno == co.firstlineno + 1 + 1
def test_getreprcrash_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = pytest.raises(ValueError, f)
assert excinfo._getreprcrash() is None
def test_excinfo_exconly():
excinfo = pytest.raises(ValueError, h)
assert excinfo.exconly().startswith("ValueError")
with pytest.raises(ValueError) as excinfo:
raise ValueError("hello\nworld")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith("ValueError")
assert msg.endswith("world")
def test_excinfo_repr_str() -> None:
excinfo1 = pytest.raises(ValueError, h)
assert repr(excinfo1) == "<ExceptionInfo ValueError() tblen=4>"
assert str(excinfo1) == "<ExceptionInfo ValueError() tblen=4>"
class CustomException(Exception):
def __repr__(self):
return "custom_repr"
def raises() -> None:
raise CustomException()
excinfo2 = pytest.raises(CustomException, raises)
assert repr(excinfo2) == "<ExceptionInfo custom_repr tblen=2>"
assert str(excinfo2) == "<ExceptionInfo custom_repr tblen=2>"
def test_excinfo_for_later() -> None:
e = ExceptionInfo[BaseException].for_later()
assert "for raises" in repr(e)
assert "for raises" in str(e)
def test_excinfo_errisinstance():
excinfo = pytest.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec("raise ValueError()")
except ValueError:
excinfo = _pytest._code.ExceptionInfo.from_current()
s = str(excinfo.traceback[-1])
# TODO: Since Python 3.13b1 under pytest-xdist, the * is `import
# sys;exec(eval(sys.stdin.readline()))` (execnet bootstrap code)
# instead of `???` like before. Is this OK?
fnmatch.fnmatch(s, " File '<string>':1 in <module>\n *\n")
def test_excinfo_no_python_sourcecode(tmp_path: Path) -> None:
# XXX: simplified locally testable version
tmp_path.joinpath("test.txt").write_text("{{ h()}}:", encoding="utf-8")
jinja2 = pytest.importorskip("jinja2")
loader = jinja2.FileSystemLoader(str(tmp_path))
env = jinja2.Environment(loader=loader)
template = env.get_template("test.txt")
excinfo = pytest.raises(ValueError, template.render, h=h)
for item in excinfo.traceback:
print(item) # XXX: for some reason jinja.Template.render is printed in full
_ = item.source # shouldn't fail
if isinstance(item.path, Path) and item.path.name == "test.txt":
assert str(item.source) == "{{ h()}}:"
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = _pytest._code.ExceptionInfo.from_current()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example() -> None:
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = _pytest._code.ExceptionInfo.from_current()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, Path)
assert path.name.lower() == "queue.py"
assert path.exists()
def test_match_succeeds():
with pytest.raises(ZeroDivisionError) as excinfo:
_ = 0 // 0
excinfo.match(r".*zero.*")
def test_match_raises_error(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
def test_division_zero():
with pytest.raises(ZeroDivisionError) as excinfo:
0 / 0
excinfo.match(r'[123]+')
"""
)
result = pytester.runpytest("--tb=short")
assert result.ret != 0
match = [
r"E\s+AssertionError: Regex pattern did not match.",
r"E\s+Expected regex: '\[123\]\+'",
r"E\s+Actual message: 'division by zero'",
]
result.stdout.re_match_lines(match)
result.stdout.no_fnmatch_line("*__tracebackhide__ = True*")
result = pytester.runpytest("--fulltrace")
assert result.ret != 0
result.stdout.re_match_lines([r".*__tracebackhide__ = True.*", *match])
def test_raises_accepts_generic_group() -> None:
with pytest.raises(ExceptionGroup[Exception]) as exc_info:
raise ExceptionGroup("", [RuntimeError()])
assert exc_info.group_contains(RuntimeError)
def test_raises_accepts_generic_base_group() -> None:
with pytest.raises(BaseExceptionGroup[BaseException]) as exc_info:
raise ExceptionGroup("", [RuntimeError()])
assert exc_info.group_contains(RuntimeError)
def test_raises_rejects_specific_generic_group() -> None:
with pytest.raises(ValueError):
pytest.raises(ExceptionGroup[RuntimeError])
def test_raises_accepts_generic_group_in_tuple() -> None:
with pytest.raises((ValueError, ExceptionGroup[Exception])) as exc_info:
raise ExceptionGroup("", [RuntimeError()])
assert exc_info.group_contains(RuntimeError)
def test_raises_exception_escapes_generic_group() -> None:
try:
with pytest.raises(ExceptionGroup[Exception]):
raise ValueError("my value error")
except ValueError as e:
assert str(e) == "my value error"
else:
pytest.fail("Expected ValueError to be raised")
| TestTraceback_f_g_h |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 204187,
"end": 207218
} | class ____(TestCase):
def test_inner_scalar_and_vector(self):
for dt in np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "?":
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "?":
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
@skip(reason="[::-1] not supported")
def test_inner_product_reversed_view(self):
for dt in np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "?":
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "?":
a = np.arange(24).reshape(2, 3, 4).astype(dt)
b = np.arange(24, 48).reshape(2, 3, 4).astype(dt)
desired = np.array(
[
[
[[158, 182, 206], [230, 254, 278]],
[[566, 654, 742], [830, 918, 1006]],
[[974, 1126, 1278], [1430, 1582, 1734]],
],
[
[[1382, 1598, 1814], [2030, 2246, 2462]],
[[1790, 2070, 2350], [2630, 2910, 3190]],
[[2198, 2542, 2886], [3230, 3574, 3918]],
],
]
).astype(dt)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired)
@instantiate_parametrized_tests
| TestInner |
python | django-import-export__django-import-export | import_export/widgets.py | {
"start": 6528,
"end": 7818
} | class ____(Widget):
"""
Widget for converting text fields.
:param allow_blank: If True, then :meth:`~import_export.widgets.Widget.clean`
will return null values as empty strings, otherwise as ``None``.
"""
def __init__(self, coerce_to_string=True, allow_blank=True):
""" """
self.allow_blank = allow_blank
super().__init__(coerce_to_string)
def clean(self, value, row=None, **kwargs):
"""
Converts the input value to a string, handling None values based on
allow_blank setting.
:param value: The value to be converted to string.
:param row: The current row being processed.
:param **kwargs: Optional keyword arguments.
:returns: A string representation of the value. Returns empty string if
value is None
and ``allow_blank`` is True, otherwise returns None.
"""
val = super().clean(value, row, **kwargs)
if val is None:
return "" if self.allow_blank is True else None
return force_str(val)
def render(self, value, obj=None, **kwargs):
self._obj_deprecation_warning(obj)
if self.coerce_to_string:
return "" if value is None else force_str(value)
return value
| CharWidget |
python | pandas-dev__pandas | pandas/tests/base/test_conversion.py | {
"start": 559,
"end": 18003
} | class ____:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable(self, index_or_series, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
if dtype == "float16" and issubclass(typ, pd.Index):
with pytest.raises(NotImplementedError, match="float16 indexes are not "):
typ([1], dtype=dtype)
return
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_series, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
typ = index_or_series
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = next(iter(s.items()))
assert isinstance(result, rdtype)
_, result = next(iter(s.items()))
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
def test_iterable_map(self, index_or_series, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
typ = index_or_series
if dtype == "float16" and issubclass(typ, pd.Index):
with pytest.raises(NotImplementedError, match="float16 indexes are not "):
typ([1], dtype=dtype)
return
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = (rdtype,)
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.tolist(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["tolist", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box_dt64(self, unit):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
ser = Series(vals).dt.as_unit(unit)
assert ser.dtype == f"datetime64[{unit}]"
for res, exp in zip(ser, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
assert res.unit == unit
def test_iter_box_dt64tz(self, unit):
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
ser = Series(vals).dt.as_unit(unit)
assert ser.dtype == f"datetime64[{unit}, US/Eastern]"
for res, exp in zip(ser, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
assert res.unit == unit
def test_iter_box_timedelta64(self, unit):
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
ser = Series(vals).dt.as_unit(unit)
assert ser.dtype == f"timedelta64[{unit}]"
for res, exp in zip(ser, vals):
assert isinstance(res, Timedelta)
assert res == exp
assert res.unit == unit
def test_iter_box_period(self):
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == "ME"
assert res == exp
@pytest.mark.parametrize(
"arr, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(pd.Categorical(["a", "b"]), pd.Categorical, "category"),
(
pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
pd.PeriodIndex([2018, 2019], freq="Y"),
PeriodArray,
pd.core.dtypes.dtypes.PeriodDtype("Y-DEC"),
),
(pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval"),
(
pd.DatetimeIndex(["2017", "2018"]),
DatetimeArray,
"datetime64[ns]",
),
(
pd.TimedeltaIndex([10**10]),
TimedeltaArray,
"m8[ns]",
),
],
)
def test_values_consistent(arr, expected_type, dtype, using_infer_string):
if using_infer_string and dtype == "object":
expected_type = ArrowStringArray if HAS_PYARROW else StringArray
l_values = Series(arr)._values
r_values = pd.Index(arr)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = Series(arr)
result = ser.array
expected = NumpyExtensionArray(arr)
tm.assert_extension_array_equal(result, expected)
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = Series(dtype=any_numpy_dtype)
result = ser.array
if np.dtype(any_numpy_dtype).kind == "M":
assert isinstance(result, DatetimeArray)
elif np.dtype(any_numpy_dtype).kind == "m":
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, NumpyExtensionArray)
@pytest.mark.parametrize(
"arr, attr",
[
(pd.Categorical(["a", "b"]), "_codes"),
(PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]"), "_ndarray"),
(pd.array([0, pd.NA], dtype="Int64"), "_data"),
(IntervalArray.from_breaks([0, 1]), "_left"),
(SparseArray([0, 1]), "_sparse_values"),
(
DatetimeArray._from_sequence(np.array([1, 2], dtype="datetime64[ns]")),
"_ndarray",
),
# tz-aware Datetime
(
DatetimeArray._from_sequence(
np.array(
["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
),
dtype=DatetimeTZDtype(tz="US/Central"),
),
"_ndarray",
),
],
)
def test_array(arr, attr, index_or_series):
box = index_or_series
result = box(arr, copy=False).array
if attr:
arr = getattr(arr, attr)
result = getattr(result, attr)
assert np.shares_memory(result, arr)
def test_array_multiindex_raises():
idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
msg = "MultiIndex has no single backing array"
with pytest.raises(ValueError, match=msg):
idx.array
@pytest.mark.parametrize(
"arr, expected, zero_copy",
[
(np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64), True),
(pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object), False),
(
pd.core.arrays.period_array(["2000", "2001"], freq="D"),
np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
False,
),
(pd.array([0, pd.NA], dtype="Int64"), np.array([0, np.nan]), False),
(
IntervalArray.from_breaks([0, 1, 2]),
np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
False,
),
(SparseArray([0, 1]), np.array([0, 1], dtype=np.int64), False),
# tz-naive datetime
(
DatetimeArray._from_sequence(np.array(["2000", "2001"], dtype="M8[ns]")),
np.array(["2000", "2001"], dtype="M8[ns]"),
True,
),
# tz-aware stays tz`-aware
(
DatetimeArray._from_sequence(
np.array(["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]")
)
.tz_localize("UTC")
.tz_convert("US/Central"),
np.array(
[
Timestamp("2000-01-01", tz="US/Central"),
Timestamp("2000-01-02", tz="US/Central"),
]
),
False,
),
# Timedelta
(
TimedeltaArray._from_sequence(
np.array([0, 3600000000000], dtype="i8").view("m8[ns]"),
dtype=np.dtype("m8[ns]"),
),
np.array([0, 3600000000000], dtype="m8[ns]"),
True,
),
# GH#26406 tz is preserved in Categorical[dt64tz]
(
pd.Categorical(date_range("2016-01-01", periods=2, tz="US/Pacific")),
np.array(
[
Timestamp("2016-01-01", tz="US/Pacific"),
Timestamp("2016-01-02", tz="US/Pacific"),
]
),
False,
),
],
)
def test_to_numpy(arr, expected, zero_copy, index_or_series_or_array, using_nan_is_na):
if not using_nan_is_na and arr[-1] is pd.NA:
expected = np.array([0, pd.NA], dtype=object)
box = index_or_series_or_array
with tm.assert_produces_warning(None):
thing = box(arr)
result = thing.to_numpy()
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(thing)
tm.assert_numpy_array_equal(result, expected)
# Additionally, we check the `copy=` semantics for array/asarray
# (these are implemented by us via `__array__`).
result_cp1 = np.array(thing, copy=True)
result_cp2 = np.array(thing, copy=True)
# When called with `copy=True` NumPy/we should ensure a copy was made
assert not np.may_share_memory(result_cp1, result_cp2)
if not np_version_gt2:
# copy=False semantics are only supported in NumPy>=2.
return
if not zero_copy:
with pytest.raises(ValueError, match="Unable to avoid copy while creating"):
# An error is always acceptable for `copy=False`
np.array(thing, copy=False)
else:
result_nocopy1 = np.array(thing, copy=False)
result_nocopy2 = np.array(thing, copy=False)
# If copy=False was given, these must share the same data
assert np.may_share_memory(result_nocopy1, result_nocopy2)
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
"arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)]
)
def test_to_numpy_copy(arr, as_series, using_infer_string):
obj = pd.Index(arr, copy=False)
if as_series:
obj = Series(obj.values, copy=False)
# no copy by default
result = obj.to_numpy()
if using_infer_string and arr.dtype == object and obj.dtype.storage == "pyarrow":
assert np.shares_memory(arr, result) is False
else:
assert np.shares_memory(arr, result) is True
result = obj.to_numpy(copy=False)
if using_infer_string and arr.dtype == object and obj.dtype.storage == "pyarrow":
assert np.shares_memory(arr, result) is False
else:
assert np.shares_memory(arr, result) is True
# copy=True
result = obj.to_numpy(copy=True)
assert np.shares_memory(arr, result) is False
@pytest.mark.parametrize("as_series", [True, False])
def test_to_numpy_dtype(as_series):
tz = "US/Eastern"
obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
if as_series:
obj = Series(obj)
# preserve tz by default
result = obj.to_numpy()
expected = np.array(
[Timestamp("2000", tz=tz), Timestamp("2001", tz=tz)], dtype=object
)
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = obj.to_numpy(dtype="M8[ns]")
expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"values, dtype, na_value, expected",
[
([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]),
(
[Timestamp("2000").as_unit("s"), Timestamp("2000").as_unit("s"), pd.NaT],
None,
Timestamp("2000").as_unit("s"),
[np.datetime64("2000-01-01T00:00:00", "s")] * 3,
),
],
)
def test_to_numpy_na_value_numpy_dtype(
index_or_series, values, dtype, na_value, expected
):
obj = index_or_series(values)
result = obj.to_numpy(dtype=dtype, na_value=na_value)
expected = np.array(expected)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"data, multiindex, dtype, na_value, expected",
[
(
[1, 2, None, 4],
[(0, "a"), (0, "b"), (1, "b"), (1, "c")],
float,
None,
[1.0, 2.0, np.nan, 4.0],
),
(
[1, 2, None, 4],
[(0, "a"), (0, "b"), (1, "b"), (1, "c")],
float,
np.nan,
[1.0, 2.0, np.nan, 4.0],
),
(
[1.0, 2.0, np.nan, 4.0],
[("a", 0), ("a", 1), ("a", 2), ("b", 0)],
int,
0,
[1, 2, 0, 4],
),
(
[Timestamp("2000").as_unit("s"), Timestamp("2000").as_unit("s"), pd.NaT],
[
(0, Timestamp("2021").as_unit("s")),
(0, Timestamp("2022").as_unit("s")),
(1, Timestamp("2000").as_unit("s")),
],
None,
Timestamp("2000").as_unit("s"),
[np.datetime64("2000-01-01T00:00:00", "s")] * 3,
),
],
)
def test_to_numpy_multiindex_series_na_value(
data, multiindex, dtype, na_value, expected
):
index = pd.MultiIndex.from_tuples(multiindex)
series = Series(data, index=index)
result = series.to_numpy(dtype=dtype, na_value=na_value)
expected = np.array(expected)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_kwargs_raises():
# numpy
s = Series([1, 2, 3])
msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
# extension
s = Series([1, 2, 3], dtype="Int64")
with pytest.raises(TypeError, match=msg):
s.to_numpy(foo=True)
@pytest.mark.parametrize(
"data",
[
{"a": [1, 2, 3], "b": [1, 2, None]},
{"a": np.array([1, 2, 3]), "b": np.array([1, 2, np.nan])},
{"a": pd.array([1, 2, 3]), "b": pd.array([1, 2, None])},
],
)
@pytest.mark.parametrize("dtype, na_value", [(float, np.nan), (object, None)])
def test_to_numpy_dataframe_na_value(data, dtype, na_value):
# https://github.com/pandas-dev/pandas/issues/33820
df = pd.DataFrame(data)
result = df.to_numpy(dtype=dtype, na_value=na_value)
expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"data, expected_data",
[
(
{"a": pd.array([1, 2, None])},
[[1.0], [2.0], [np.nan]],
),
(
{"a": [1, 2, 3], "b": [1, 2, 3]},
[[1, 1], [2, 2], [3, 3]],
),
],
)
def test_to_numpy_dataframe_single_block(data, expected_data):
# https://github.com/pandas-dev/pandas/issues/33820
df = pd.DataFrame(data)
result = df.to_numpy(dtype=float, na_value=np.nan)
expected = np.array(expected_data, dtype=float)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dataframe_single_block_no_mutate():
# https://github.com/pandas-dev/pandas/issues/33820
result = pd.DataFrame(np.array([1.0, 2.0, np.nan]))
expected = pd.DataFrame(np.array([1.0, 2.0, np.nan]))
result.to_numpy(na_value=0.0)
tm.assert_frame_equal(result, expected)
| TestToIterable |
python | google__pytype | pytype/tests/test_recursive_types.py | {
"start": 6346,
"end": 9237
} | class ____(test_base.BaseTest):
"""Tests inference of recursive types."""
def test_basic(self):
ty = self.Infer("""
from typing import List
Foo = List['Foo']
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
Foo = List[Foo]
""",
)
def test_mutual_recursion(self):
ty = self.Infer("""
from typing import List
X = List['Y']
Y = List['X']
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
X = List[Y]
Y = List[List[Y]]
""",
)
def test_parameterization(self):
ty = self.Infer("""
from typing import List, TypeVar, Union
T = TypeVar('T')
X = List['Y[int]']
Y = Union[T, List['Y']]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, TypeVar, Union
T = TypeVar('T')
X = List[_Y_LBAR_int_RBAR]
Y = Union[T, List[Y]]
_Y_LBAR_int_RBAR = Union[int, List[_Y_LBAR_int_RBAR]]
""",
)
def test_parameterization_with_inner_parameter(self):
ty = self.Infer("""
from typing import List, TypeVar, Union
T = TypeVar('T')
X = Union[T, List['X[T]']]
Y = List[X[int]]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, TypeVar, Union
T = TypeVar('T')
X = Union[T, List[_X_LBAR_T_RBAR]]
Y = List[Union[int, List[_X_LBAR_T_RBAR_LBAR_int_RBAR]]]
_X_LBAR_T_RBAR = Union[T, List[_X_LBAR_T_RBAR]]
_X_LBAR_T_RBAR_LBAR_int_RBAR = Union[int, List[
_X_LBAR_T_RBAR_LBAR_int_RBAR]]
""",
)
def test_branching(self):
ty = self.Infer("""
from typing import Mapping, TypeVar, Union
K = TypeVar('K')
V = TypeVar('V')
StructureKV = Union[Mapping[K, 'StructureKV[K, V]'], V]
try:
Structure = StructureKV[str, V]
except TypeError:
Structure = Union[Mapping[str, 'Structure[V]'], V]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Mapping, TypeVar, Union
K = TypeVar('K')
V = TypeVar('V')
StructureKV = Union[Mapping[K, _StructureKV_LBAR_K_COMMA_V_RBAR], V]
# The two Mapping values are redundant, but pytype isn't smart enough to
# deduplicate them.
Structure = Union[
Mapping[str, Union[
_StructureKV_LBAR_K_COMMA_V_RBAR_LBAR_str_COMMA_V_RBAR,
_Structure_LBAR_V_RBAR]],
V,
]
_StructureKV_LBAR_K_COMMA_V_RBAR = Union[Mapping[
K, _StructureKV_LBAR_K_COMMA_V_RBAR], V]
_StructureKV_LBAR_K_COMMA_V_RBAR_LBAR_str_COMMA_V_RBAR = Union[Mapping[
str, _StructureKV_LBAR_K_COMMA_V_RBAR_LBAR_str_COMMA_V_RBAR], V]
_Structure_LBAR_V_RBAR = Union[Mapping[str, _Structure_LBAR_V_RBAR], V]
""",
)
| InferenceTest |
python | nedbat__coveragepy | coverage/html.py | {
"start": 24617,
"end": 24794
} | class ____:
"""Summary of the information from last rendering, to avoid duplicate work."""
hash: str = ""
index: IndexItem = field(default_factory=IndexItem)
| FileInfo |
python | realpython__materials | python-range/float_range.py | {
"start": 85,
"end": 3036
} | class ____:
"""Range of numbers that allows floating point numbers."""
start: float | int
stop: float | int | None = None
step: float | int = 1.0
def __post_init__(self):
"""Validate parameters."""
# Only one argument is given
if self.stop is None:
self.stop = self.start
self.start = 0
# Validate that all arguments are ints or floats
if not isinstance(self.start, float | int):
raise ValueError("'start' must be a floating point number")
if not isinstance(self.stop, float | int):
raise ValueError("'stop' must be a floating point number")
if not isinstance(self.step, float | int) or isclose(self.step, 0):
raise ValueError("'step' must be a non-zero floating point number")
def __iter__(self):
"""Create an iterator based on the range."""
return _FloatRangeIterator(self.start, self.stop, self.step)
def __contains__(self, element):
"""Check if element is a member of the range.
Use isclose() to handle floats.
"""
offset = (element - self.start) % self.step
if self.step > 0:
return self.start <= element < self.stop and (
isclose(offset, 0) or isclose(offset, self.step)
)
else:
return self.stop < element <= self.start and (
isclose(offset, 0) or isclose(offset, self.step)
)
def __len__(self):
"""Calculate the number of elements in the range."""
if any(
[
self.step > 0 and self.stop <= self.start,
self.step < 0 and self.stop >= self.start,
]
):
return 0
return ceil((self.stop - self.start) / self.step)
def __getitem__(self, index):
"""Get an element in the range based on its index."""
if index < 0 or index >= len(self):
raise IndexError(f"range index out of range: {index}")
return self.start + index * self.step
def __reversed__(self):
"""Create a FloatRange with elements in the reverse order.
Any number 0 < x < self.step can be used as offset. Use 0.1 when
possible as an "esthetically nice" offset.
"""
cls = type(self)
offset = (1 if self.step > 0 else -1) * min(0.1, abs(self.step) / 2)
return cls(
(self.stop - self.step) + (self.start - self.stop) % self.step,
self.start - offset,
-self.step,
)
def count(self, element):
"""Count number of occurences of element in range."""
return 1 if element in self else 0
def index(self, element):
"""Calculate index of element in range."""
if element not in self:
raise ValueError(f"{element} is not in range")
return round((element - self.start) / self.step)
@dataclass
| FloatRange |
python | pytest-dev__pytest | testing/test_terminal.py | {
"start": 1126,
"end": 1253
} | class ____(NamedTuple):
project_name: str
version: int
TRANS_FNMATCH = str.maketrans({"[": "[[]", "]": "[]]"})
| DistInfo |
python | pytorch__pytorch | test/package/package_a/test_module.py | {
"start": 495,
"end": 744
} | class ____(torch.nn.Module):
def __init__(self, tensor, sub_mod):
super().__init__()
self.tensor = tensor
self.sub_mod = sub_mod
def forward(self, x):
return self.sub_mod(x) + self.tensor
| ModWithSubmodAndTensor |
python | apache__airflow | airflow-core/src/airflow/secrets/base_secrets.py | {
"start": 948,
"end": 3405
} | class ____(ABC):
"""Abstract base class to retrieve Connection object given a conn_id or Variable given a key."""
@staticmethod
def build_path(path_prefix: str, secret_id: str, sep: str = "/") -> str:
"""
Given conn_id, build path for Secrets Backend.
:param path_prefix: Prefix of the path to get secret
:param secret_id: Secret id
:param sep: separator used to concatenate connections_prefix and conn_id. Default: "/"
"""
return f"{path_prefix}{sep}{secret_id}"
def get_conn_value(self, conn_id: str) -> str | None:
"""
Retrieve from Secrets Backend a string value representing the Connection object.
If the client your secrets backend uses already returns a python dict, you should override
``get_connection`` instead.
:param conn_id: connection id
"""
raise NotImplementedError
def deserialize_connection(self, conn_id: str, value: str) -> Connection:
"""
Given a serialized representation of the airflow Connection, return an instance.
Looks at first character to determine how to deserialize.
:param conn_id: connection id
:param value: the serialized representation of the Connection object
:return: the deserialized Connection
"""
from airflow.models.connection import Connection
value = value.strip()
if value[0] == "{":
return Connection.from_json(conn_id=conn_id, value=value)
return Connection(conn_id=conn_id, uri=value)
def get_connection(self, conn_id: str) -> Connection | None:
"""
Return connection object with a given ``conn_id``.
Tries ``get_conn_value`` first and if not implemented, tries ``get_conn_uri``
:param conn_id: connection id
"""
value = self.get_conn_value(conn_id=conn_id)
if value:
return self.deserialize_connection(conn_id=conn_id, value=value)
return None
def get_variable(self, key: str) -> str | None:
"""
Return value for Airflow Variable.
:param key: Variable Key
:return: Variable Value
"""
raise NotImplementedError()
def get_config(self, key: str) -> str | None:
"""
Return value for Airflow Config Key.
:param key: Config Key
:return: Config Value
"""
return None
| BaseSecretsBackend |
python | getsentry__sentry | src/sentry/db/models/fields/jsonfield.py | {
"start": 5403,
"end": 5584
} | class ____(NoPrepareMixin, IExact):
def get_prep_lookup(self):
return self.lhs.output_field.to_python(self.lhs.output_field.get_prep_value(self.rhs))
| JSONFieldIExactLookup |
python | apache__airflow | providers/sftp/src/airflow/providers/sftp/operators/sftp.py | {
"start": 1159,
"end": 1285
} | class ____:
"""Operation that can be used with SFTP."""
PUT = "put"
GET = "get"
DELETE = "delete"
| SFTPOperation |
python | django__django | tests/custom_managers/tests.py | {
"start": 21561,
"end": 23321
} | class ____(TestCase):
def test_managers(self):
# Each model class gets a "_default_manager" attribute, which is a
# reference to the first manager defined in the class.
Car.cars.create(name="Corvette", mileage=21, top_speed=180)
Car.cars.create(name="Neon", mileage=31, top_speed=100)
self.assertQuerySetEqual(
Car._default_manager.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
self.assertQuerySetEqual(
Car.cars.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
# alternate manager
self.assertQuerySetEqual(
Car.fast_cars.all(),
[
"Corvette",
],
lambda c: c.name,
)
# explicit default manager
self.assertQuerySetEqual(
FastCarAsDefault.cars.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
self.assertQuerySetEqual(
FastCarAsDefault._default_manager.all(),
[
"Corvette",
],
lambda c: c.name,
)
# explicit base manager
self.assertQuerySetEqual(
FastCarAsBase.cars.order_by("name"),
[
"Corvette",
"Neon",
],
lambda c: c.name,
)
self.assertQuerySetEqual(
FastCarAsBase._base_manager.all(),
[
"Corvette",
],
lambda c: c.name,
)
| TestCars |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/expression.py | {
"start": 567,
"end": 4241
} | class ____(Generative, elements.BinaryExpression[Any]):
"""Produce a ``MATCH (X, Y) AGAINST ('TEXT')`` clause.
E.g.::
from sqlalchemy import desc
from sqlalchemy.dialects.mysql import match
match_expr = match(
users_table.c.firstname,
users_table.c.lastname,
against="Firstname Lastname",
)
stmt = (
select(users_table)
.where(match_expr.in_boolean_mode())
.order_by(desc(match_expr))
)
Would produce SQL resembling:
.. sourcecode:: sql
SELECT id, firstname, lastname
FROM user
WHERE MATCH(firstname, lastname) AGAINST (:param_1 IN BOOLEAN MODE)
ORDER BY MATCH(firstname, lastname) AGAINST (:param_2) DESC
The :func:`_mysql.match` function is a standalone version of the
:meth:`_sql.ColumnElement.match` method available on all
SQL expressions, as when :meth:`_expression.ColumnElement.match` is
used, but allows to pass multiple columns
:param cols: column expressions to match against
:param against: expression to be compared towards
:param in_boolean_mode: boolean, set "boolean mode" to true
:param in_natural_language_mode: boolean , set "natural language" to true
:param with_query_expansion: boolean, set "query expansion" to true
.. versionadded:: 1.4.19
.. seealso::
:meth:`_expression.ColumnElement.match`
"""
__visit_name__ = "mysql_match"
inherit_cache = True
modifiers: util.immutabledict[str, Any]
def __init__(self, *cols: elements.ColumnElement[Any], **kw: Any):
if not cols:
raise exc.ArgumentError("columns are required")
against = kw.pop("against", None)
if against is None:
raise exc.ArgumentError("against is required")
against = coercions.expect(
roles.ExpressionElementRole,
against,
)
left = elements.BooleanClauseList._construct_raw(
operators.comma_op,
clauses=cols,
)
left.group = False
flags = util.immutabledict(
{
"mysql_boolean_mode": kw.pop("in_boolean_mode", False),
"mysql_natural_language": kw.pop(
"in_natural_language_mode", False
),
"mysql_query_expansion": kw.pop("with_query_expansion", False),
}
)
if kw:
raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw)))
super().__init__(left, against, operators.match_op, modifiers=flags)
@_generative
def in_boolean_mode(self) -> Self:
"""Apply the "IN BOOLEAN MODE" modifier to the MATCH expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_boolean_mode": True})
return self
@_generative
def in_natural_language_mode(self) -> Self:
"""Apply the "IN NATURAL LANGUAGE MODE" modifier to the MATCH
expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_natural_language": True})
return self
@_generative
def with_query_expansion(self) -> Self:
"""Apply the "WITH QUERY EXPANSION" modifier to the MATCH expression.
:return: a new :class:`_mysql.match` instance with modifications
applied.
"""
self.modifiers = self.modifiers.union({"mysql_query_expansion": True})
return self
| match |
python | kamyu104__LeetCode-Solutions | Python/count-paths-with-the-given-xor-value.py | {
"start": 46,
"end": 664
} | class ____(object):
def countPathsWithXorValue(self, grid, k):
"""
:type grid: List[List[int]]
:type k: int
:rtype: int
"""
MOD = 10**9+7
MAX_R = 16
dp = [[0]*MAX_R for _ in xrange(len(grid[0]))]
dp[0][0] = 1
for i in xrange(len(grid)):
new_dp = [[0]*MAX_R for _ in xrange(len(grid[0]))]
for j in xrange(len(grid[0])):
for v in range(MAX_R):
new_dp[j][grid[i][j]^v] = (dp[j][v]+(new_dp[j-1][v] if j-1 >= 0 else 0)) % MOD
dp = new_dp
return dp[-1][k]
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.