language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ipython__ipython | tests/test_guarded_eval.py | {
"start": 8059,
"end": 8134
} | class ____:
def test_method(self) -> bool:
return True
| TypedClass |
python | apache__airflow | providers/elasticsearch/tests/unit/elasticsearch/log/elasticmock/fake_elasticsearch.py | {
"start": 2190,
"end": 19267
} | class ____(Elasticsearch):
__documents_dict = None
def __init__(self):
super().__init__("http://localhost:9200")
self.__documents_dict = {}
@query_params()
def ping(self, params=None):
return True
@query_params()
def info(self, params=None):
return {
"status": 200,
"cluster_name": "elasticmock",
"version": {
"lucene_version": "4.10.4",
"build_hash": "00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4",
"number": "1.7.5",
"build_timestamp": "2016-02-02T09:55:30Z",
"build_snapshot": False,
},
"name": "Nightwatch",
"tagline": "You Know, for Search",
}
@query_params()
def sample_log_response(self, headers=None, params=None):
return {
"_shards": {"failed": 0, "skipped": 0, "successful": 7, "total": 7},
"hits": {
"hits": [
{
"_id": "jdeZT4kBjAZqZnexVUxk",
"_index": ".ds-filebeat-8.8.2-2023.07.09-000001",
"_score": 2.482621,
"_source": {
"@timestamp": "2023-07-13T14:13:15.140Z",
"asctime": "2023-07-09T07:47:43.907+0000",
"container": {"id": "airflow"},
"dag_id": "example_bash_operator",
"ecs": {"version": "8.0.0"},
"execution_date": "2023_07_09T07_47_32_000000",
"filename": "taskinstance.py",
"input": {"type": "log"},
"levelname": "INFO",
"lineno": 1144,
"log": {
"file": {
"path": "/opt/airflow/Documents/GitHub/airflow/logs/"
"dag_id=example_bash_operator'"
"/run_id=owen_run_run/task_id=run_after_loop/attempt=1.log"
},
"offset": 0,
},
"log.offset": 1688888863907337472,
"log_id": "example_bash_operator-run_after_loop-owen_run_run--1-1",
"message": "Dependencies all met for "
"dep_context=non-requeueable deps "
"ti=<TaskInstance: "
"example_bash_operator.run_after_loop "
"owen_run_run [queued]>",
"task_id": "run_after_loop",
"try_number": "1",
},
"_type": "_doc",
},
{
"_id": "qteZT4kBjAZqZnexVUxl",
"_index": ".ds-filebeat-8.8.2-2023.07.09-000001",
"_score": 2.482621,
"_source": {
"@timestamp": "2023-07-13T14:13:15.141Z",
"asctime": "2023-07-09T07:47:43.917+0000",
"container": {"id": "airflow"},
"dag_id": "example_bash_operator",
"ecs": {"version": "8.0.0"},
"execution_date": "2023_07_09T07_47_32_000000",
"filename": "taskinstance.py",
"input": {"type": "log"},
"levelname": "INFO",
"lineno": 1347,
"log": {
"file": {
"path": "/opt/airflow/Documents/GitHub/airflow/logs/"
"dag_id=example_bash_operator"
"/run_id=owen_run_run/task_id=run_after_loop/attempt=1.log"
},
"offset": 988,
},
"log.offset": 1688888863917961216,
"log_id": "example_bash_operator-run_after_loop-owen_run_run--1-1",
"message": "Starting attempt 1 of 1",
"task_id": "run_after_loop",
"try_number": "1",
},
"_type": "_doc",
},
{
"_id": "v9eZT4kBjAZqZnexVUx2",
"_index": ".ds-filebeat-8.8.2-2023.07.09-000001",
"_score": 2.482621,
"_source": {
"@timestamp": "2023-07-13T14:13:15.143Z",
"asctime": "2023-07-09T07:47:43.928+0000",
"container": {"id": "airflow"},
"dag_id": "example_bash_operator",
"ecs": {"version": "8.0.0"},
"execution_date": "2023_07_09T07_47_32_000000",
"filename": "taskinstance.py",
"input": {"type": "log"},
"levelname": "INFO",
"lineno": 1368,
"log": {
"file": {
"path": "/opt/airflow/Documents/GitHub/airflow/logs/"
"dag_id=example_bash_operator"
"/run_id=owen_run_run/task_id=run_after_loop/attempt=1.log"
},
"offset": 1372,
},
"log.offset": 1688888863928218880,
"log_id": "example_bash_operator-run_after_loop-owen_run_run--1-1",
"message": "Executing <Task(BashOperator): "
"run_after_loop> on 2023-07-09 "
"07:47:32+00:00",
"task_id": "run_after_loop",
"try_number": "1",
},
"_type": "_doc",
},
],
"max_score": 2.482621,
"total": {"relation": "eq", "value": 36},
},
"timed_out": False,
"took": 7,
}
@query_params(
"consistency",
"op_type",
"parent",
"refresh",
"replication",
"routing",
"timeout",
"timestamp",
"ttl",
"version",
"version_type",
)
def index(self, index, doc_type, body, id=None, params=None, headers=None):
if index not in self.__documents_dict:
self.__documents_dict[index] = []
if id is None:
id = get_random_id()
version = 1
self.__documents_dict[index].append(
{
"_type": doc_type,
"_id": id,
"_source": body,
"_index": index,
"_version": version,
"_headers": headers,
}
)
return {
"_type": doc_type,
"_id": id,
"created": True,
"_version": version,
"_index": index,
"_headers": headers,
}
@query_params("parent", "preference", "realtime", "refresh", "routing")
def exists(self, index, doc_type, id, params=None):
result = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get("_id") == id and document.get("_type") == doc_type:
result = True
break
return result
@query_params(
"_source",
"_source_exclude",
"_source_include",
"fields",
"parent",
"preference",
"realtime",
"refresh",
"routing",
"version",
"version_type",
)
def get(self, index, id, doc_type="_all", params=None):
result = None
if index in self.__documents_dict:
result = self.find_document(doc_type, id, index, result)
if result:
result["found"] = True
else:
error_data = {"_index": index, "_type": doc_type, "_id": id, "found": False}
raise NotFoundError(404, json.dumps(error_data))
return result
def find_document(self, doc_type, id, index, result):
for document in self.__documents_dict[index]:
if document.get("_id") == id:
if doc_type == "_all" or document.get("_type") == doc_type:
result = document
break
return result
@query_params(
"_source",
"_source_exclude",
"_source_include",
"parent",
"preference",
"realtime",
"refresh",
"routing",
"version",
"version_type",
)
def get_source(self, index, doc_type, id, params=None):
document = self.get(index=index, doc_type=doc_type, id=id, params=params)
return document.get("_source")
@query_params(
"_source",
"_source_exclude",
"_source_include",
"allow_no_indices",
"analyze_wildcard",
"analyzer",
"default_operator",
"df",
"expand_wildcards",
"explain",
"fielddata_fields",
"fields",
"from_",
"ignore_unavailable",
"lenient",
"lowercase_expanded_terms",
"preference",
"q",
"request_cache",
"routing",
"scroll",
"search_type",
"size",
"sort",
"stats",
"suggest_field",
"suggest_mode",
"suggest_size",
"suggest_text",
"terminate_after",
"timeout",
"track_scores",
"version",
)
def count(self, index=None, doc_type=None, query=None, params=None, headers=None):
searchable_indexes = self._normalize_index_to_list(index, query=query)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
i = 0
for searchable_index in searchable_indexes:
for document in self.__documents_dict[searchable_index]:
if not searchable_doc_types or document.get("_type") in searchable_doc_types:
i += 1
result = {"count": i, "_shards": {"successful": 1, "failed": 0, "total": 1}}
return result
@query_params(
"_source",
"_source_exclude",
"_source_include",
"allow_no_indices",
"analyze_wildcard",
"analyzer",
"default_operator",
"df",
"expand_wildcards",
"explain",
"fielddata_fields",
"fields",
"from_",
"ignore_unavailable",
"lenient",
"lowercase_expanded_terms",
"preference",
"q",
"request_cache",
"routing",
"scroll",
"search_type",
"size",
"sort",
"stats",
"suggest_field",
"suggest_mode",
"suggest_size",
"suggest_text",
"terminate_after",
"timeout",
"track_scores",
"version",
)
def search(self, index=None, doc_type=None, query=None, params=None, headers=None):
searchable_indexes = self._normalize_index_to_list(index, query=query)
matches = self._find_match(index, doc_type, query=query)
result = {
"hits": {"total": len(matches), "max_score": 1.0},
"_shards": {
# Simulate indexes with 1 shard each
"successful": len(searchable_indexes),
"failed": 0,
"total": len(searchable_indexes),
},
"took": 1,
"timed_out": False,
}
hits = []
for match in matches:
match["_score"] = 1.0
hits.append(match)
result["hits"]["hits"] = hits
return result
@query_params(
"consistency", "parent", "refresh", "replication", "routing", "timeout", "version", "version_type"
)
def delete(self, index, doc_type, id, params=None, headers=None):
found = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get("_type") == doc_type and document.get("_id") == id:
found = True
self.__documents_dict[index].remove(document)
break
result_dict = {
"found": found,
"_index": index,
"_type": doc_type,
"_id": id,
"_version": 1,
}
if found:
return result_dict
raise NotFoundError(404, json.dumps(result_dict))
@query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "preference", "routing")
def suggest(self, body, index=None):
if index is not None and index not in self.__documents_dict:
raise NotFoundError(404, f"IndexMissingException[[{index}] missing]")
result_dict = {}
for key, value in body.items():
text = value.get("text")
suggestion = int(text) + 1 if isinstance(text, int) else f"{text}_suggestion"
result_dict[key] = [
{
"text": text,
"length": 1,
"options": [{"text": suggestion, "freq": 1, "score": 1.0}],
"offset": 0,
}
]
return result_dict
def _find_match(self, index, doc_type, query):
searchable_indexes = self._normalize_index_to_list(index, query=query)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
must = query["bool"]["must"][0] # only support one must
matches = []
for searchable_index in searchable_indexes:
self.find_document_in_searchable_index(matches, must, searchable_doc_types, searchable_index)
return matches
def find_document_in_searchable_index(self, matches, must, searchable_doc_types, searchable_index):
for document in self.__documents_dict[searchable_index]:
if not searchable_doc_types or document.get("_type") in searchable_doc_types:
if "match_phrase" in must:
self.match_must_phrase(document, matches, must)
else:
matches.append(document)
@staticmethod
def match_must_phrase(document, matches, must):
for query_id in must["match_phrase"]:
query_val = must["match_phrase"][query_id]
if query_id in document["_source"]:
if query_val in document["_source"][query_id]:
# use in as a proxy for match_phrase
matches.append(document)
# Check index(es) exists.
def _validate_search_targets(self, targets, query):
# TODO: support allow_no_indices query parameter
matches = set()
for target in targets:
print(f"Loop over:::target = {target}")
if target in ("_all", ""):
matches.update(self.__documents_dict)
elif "*" in target:
matches.update(fnmatch.filter(self.__documents_dict, target))
elif target not in self.__documents_dict:
raise MissingIndexException(msg=f"IndexMissingException[[{target}] missing]", query=query)
return matches
def _normalize_index_to_list(self, index, query):
# Ensure to have a list of index
if index is None:
searchable_indexes = self.__documents_dict.keys()
elif isinstance(index, str):
searchable_indexes = [index]
elif isinstance(index, list):
searchable_indexes = index
else:
# Is it the correct exception to use ?
raise ValueError("Invalid param 'index'")
generator = (target for index in searchable_indexes for target in index.split(","))
return list(self._validate_search_targets(generator, query=query))
@staticmethod
def _normalize_doc_type_to_list(doc_type):
# Ensure to have a list of index
if doc_type is None:
searchable_doc_types = []
elif isinstance(doc_type, str):
searchable_doc_types = [doc_type]
elif isinstance(doc_type, list):
searchable_doc_types = doc_type
else:
# Is it the correct exception to use ?
raise ValueError("Invalid param 'index'")
return searchable_doc_types
| FakeElasticsearch |
python | django-extensions__django-extensions | django_extensions/management/commands/sqldiff.py | {
"start": 2255,
"end": 39301
} | class ____:
DATA_TYPES_REVERSE_OVERRIDE = {} # type: Dict[int, Union[str, Callable]]
IGNORE_MISSING_TABLES = [
"django_migrations",
]
DIFF_TYPES = [
"error",
"comment",
"table-missing-in-db",
"table-missing-in-model",
"field-missing-in-db",
"field-missing-in-model",
"fkey-missing-in-db",
"fkey-missing-in-model",
"index-missing-in-db",
"index-missing-in-model",
"unique-missing-in-db",
"unique-missing-in-model",
"field-type-differ",
"field-parameter-differ",
"notnull-differ",
]
DIFF_TEXTS = {
"error": "error: %(0)s",
"comment": "comment: %(0)s",
"table-missing-in-db": "table '%(0)s' missing in database",
"table-missing-in-model": "table '%(0)s' missing in models",
"field-missing-in-db": "field '%(1)s' defined in model but missing in database",
"field-missing-in-model": "field '%(1)s' defined in database but missing in model", # noqa: E501
"fkey-missing-in-db": "field '%(1)s' FOREIGN KEY defined in model but missing in database", # noqa: E501
"fkey-missing-in-model": "field '%(1)s' FOREIGN KEY defined in database but missing in model", # noqa: E501
"index-missing-in-db": "field '%(1)s' INDEX named '%(2)s' defined in model but missing in database", # noqa: E501
"index-missing-in-model": "field '%(1)s' INDEX defined in database schema but missing in model", # noqa: E501
"unique-missing-in-db": "field '%(1)s' UNIQUE named '%(2)s' defined in model but missing in database", # noqa: E501
"unique-missing-in-model": "field '%(1)s' UNIQUE defined in database schema but missing in model", # noqa: E501
"field-type-differ": "field '%(1)s' not of same type: db='%(3)s', model='%(2)s'", # noqa: E501
"field-parameter-differ": "field '%(1)s' parameters differ: db='%(3)s', model='%(2)s'", # noqa: E501
"notnull-differ": "field '%(1)s' null constraint should be '%(2)s' in the database", # noqa: E501
}
SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("ADD COLUMN"),
style.SQL_FIELD(qn(args[1])),
" ".join(
style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a)
for i, a in enumerate(args[2:])
),
)
SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("DROP COLUMN"),
style.SQL_FIELD(qn(args[1])),
)
SQL_FKEY_MISSING_IN_DB = (
lambda self, style, qn, args: "%s %s\n\t%s %s %s %s %s (%s)%s;"
% (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("ADD COLUMN"),
style.SQL_FIELD(qn(args[1])),
" ".join(
style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a)
for i, a in enumerate(args[4:])
),
style.SQL_KEYWORD("REFERENCES"),
style.SQL_TABLE(qn(args[2])),
style.SQL_FIELD(qn(args[3])),
connection.ops.deferrable_sql(),
)
)
SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s%s);" % (
style.SQL_KEYWORD("CREATE INDEX"),
style.SQL_TABLE(qn(args[2])),
# style.SQL_TABLE(qn("%s" % '_'.join('_'.join(a) if isinstance(a, (list, tuple)) else a for a in args[0:3] if a))), # noqa: E501
style.SQL_KEYWORD("ON"),
style.SQL_TABLE(qn(args[0])),
style.SQL_FIELD(", ".join(qn(e) for e in args[1])),
style.SQL_KEYWORD(args[3]),
)
SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (
style.SQL_KEYWORD("DROP INDEX"),
style.SQL_TABLE(qn(args[1])),
)
SQL_UNIQUE_MISSING_IN_DB = (
lambda self, style, qn, args: "%s %s\n\t%s %s %s (%s);"
% (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("ADD CONSTRAINT"),
style.SQL_TABLE(qn(args[2])),
style.SQL_KEYWORD("UNIQUE"),
style.SQL_FIELD(", ".join(qn(e) for e in args[1])),
)
)
SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("DROP"),
style.SQL_KEYWORD("CONSTRAINT"),
style.SQL_TABLE(qn(args[1])),
)
SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("MODIFY"),
style.SQL_FIELD(qn(args[1])),
style.SQL_COLTYPE(args[2]),
)
SQL_FIELD_PARAMETER_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("MODIFY"),
style.SQL_FIELD(qn(args[1])),
style.SQL_COLTYPE(args[2]),
)
SQL_NOTNULL_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s;" % (
style.SQL_KEYWORD("ALTER TABLE"),
style.SQL_TABLE(qn(args[0])),
style.SQL_KEYWORD("MODIFY"),
style.SQL_FIELD(qn(args[1])),
style.SQL_KEYWORD(args[2]),
style.SQL_KEYWORD("NOT NULL"),
)
SQL_ERROR = lambda self, style, qn, args: style.NOTICE(
"-- Error: %s" % style.ERROR(args[0])
)
SQL_COMMENT = lambda self, style, qn, args: style.NOTICE(
"-- Comment: %s" % style.SQL_TABLE(args[0])
)
SQL_TABLE_MISSING_IN_DB = lambda self, style, qn, args: style.NOTICE(
"-- Table missing: %s" % args[0]
)
SQL_TABLE_MISSING_IN_MODEL = lambda self, style, qn, args: style.NOTICE(
"-- Model missing for table: %s" % args[0]
)
can_detect_notnull_differ = False
can_detect_unsigned_differ = False
unsigned_suffix = None # type: Optional[str]
def __init__(self, app_models, options, stdout, stderr):
self.has_differences = None
self.app_models = app_models
self.options = options
self.dense = options["dense_output"]
self.stdout = stdout
self.stderr = stderr
self.introspection = connection.introspection
self.differences = []
self.unknown_db_fields = {}
self.new_db_fields = set()
self.null = {}
self.unsigned = set()
self.DIFF_SQL = {
"error": self.SQL_ERROR,
"comment": self.SQL_COMMENT,
"table-missing-in-db": self.SQL_TABLE_MISSING_IN_DB,
"table-missing-in-model": self.SQL_TABLE_MISSING_IN_MODEL,
"field-missing-in-db": self.SQL_FIELD_MISSING_IN_DB,
"field-missing-in-model": self.SQL_FIELD_MISSING_IN_MODEL,
"fkey-missing-in-db": self.SQL_FKEY_MISSING_IN_DB,
"fkey-missing-in-model": self.SQL_FIELD_MISSING_IN_MODEL,
"index-missing-in-db": self.SQL_INDEX_MISSING_IN_DB,
"index-missing-in-model": self.SQL_INDEX_MISSING_IN_MODEL,
"unique-missing-in-db": self.SQL_UNIQUE_MISSING_IN_DB,
"unique-missing-in-model": self.SQL_UNIQUE_MISSING_IN_MODEL,
"field-type-differ": self.SQL_FIELD_TYPE_DIFFER,
"field-parameter-differ": self.SQL_FIELD_PARAMETER_DIFFER,
"notnull-differ": self.SQL_NOTNULL_DIFFER,
}
def load(self):
self.cursor = connection.cursor()
self.django_tables = self.introspection.django_table_names(
only_existing=self.options["only_existing"]
)
# TODO: We are losing information about tables which are views here
self.db_tables = [
table_info.name
for table_info in self.introspection.get_table_list(self.cursor)
]
if self.can_detect_notnull_differ:
self.load_null()
if self.can_detect_unsigned_differ:
self.load_unsigned()
def load_null(self):
raise NotImplementedError(
(
"load_null functions must be implemented if diff backend has "
"'can_detect_notnull_differ' set to True"
)
)
def load_unsigned(self):
raise NotImplementedError(
(
"load_unsigned function must be implemented if diff backend has "
"'can_detect_unsigned_differ' set to True"
)
)
def add_app_model_marker(self, app_label, model_name):
self.differences.append((app_label, model_name, []))
def add_difference(self, diff_type, *args):
assert diff_type in self.DIFF_TYPES, "Unknown difference type"
self.differences[-1][-1].append((diff_type, args))
def get_data_types_reverse_override(self):
# type: () -> Dict[int, Union[str, Callable]]
return self.DATA_TYPES_REVERSE_OVERRIDE
def format_field_names(self, field_names):
return field_names
def sql_to_dict(self, query, param):
"""
Execute query and return a dict
sql_to_dict(query, param) -> list of dicts
code from snippet at https://www.djangosnippets.org/snippets/1383/
"""
cursor = connection.cursor()
cursor.execute(query, param)
fieldnames = [name[0] for name in cursor.description]
fieldnames = self.format_field_names(fieldnames)
result = []
for row in cursor.fetchall():
rowset = []
for field in zip(fieldnames, row):
rowset.append(field)
result.append(dict(rowset))
return result
def get_field_model_type(self, field):
return field.db_type(connection=connection)
def get_field_db_type_kwargs(
self,
current_kwargs,
description,
field=None,
table_name=None,
reverse_type=None,
):
return {}
def get_field_db_type(self, description, field=None, table_name=None):
# DB-API cursor.description
# (name, type_code, display_size, internal_size, precision, scale, null_ok)
type_code = description[1]
DATA_TYPES_REVERSE_OVERRIDE = self.get_data_types_reverse_override()
if type_code in DATA_TYPES_REVERSE_OVERRIDE:
reverse_type = DATA_TYPES_REVERSE_OVERRIDE[type_code]
else:
try:
reverse_type = self.introspection.get_field_type(type_code, description)
except KeyError:
reverse_type = self.get_field_db_type_lookup(type_code)
if not reverse_type:
# type_code not found in data_types_reverse map
key = (self.differences[-1][:2], description[:2])
if key not in self.unknown_db_fields:
self.unknown_db_fields[key] = 1
self.add_difference(
"comment",
"Unknown database type for field '%s' (%s)"
% (description[0], type_code),
)
return None
if callable(reverse_type):
reverse_type = reverse_type()
kwargs = {}
if isinstance(reverse_type, dict):
kwargs.update(reverse_type["kwargs"])
reverse_type = reverse_type["name"]
if (
type_code == 16946
and field
and getattr(field, "geom_type", None) == "POINT"
):
reverse_type = "django.contrib.gis.db.models.fields.PointField"
if isinstance(reverse_type, tuple):
kwargs.update(reverse_type[1])
reverse_type = reverse_type[0]
if reverse_type == "CharField" and description[3]:
kwargs["max_length"] = description[3]
if reverse_type == "DecimalField":
kwargs["max_digits"] = description[4]
kwargs["decimal_places"] = (
description[5] and abs(description[5]) or description[5]
)
if description[6]:
kwargs["blank"] = True
if reverse_type not in ("TextField", "CharField"):
kwargs["null"] = True
if field and getattr(field, "geography", False):
kwargs["geography"] = True
if reverse_type == "GeometryField":
geo_col = description[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
reverse_type, geo_params = self.introspection.get_geometry_type(
table_name, geo_col
)
if geo_params:
kwargs.update(geo_params)
reverse_type = "django.contrib.gis.db.models.fields.%s" % reverse_type
extra_kwargs = self.get_field_db_type_kwargs(
kwargs, description, field, table_name, reverse_type
)
kwargs.update(extra_kwargs)
field_class = self.get_field_class(reverse_type)
field_db_type = field_class(**kwargs).db_type(connection=connection)
tablespace = field.db_tablespace
if not tablespace:
tablespace = "public"
if (
tablespace,
table_name,
field.column,
) in self.unsigned and self.unsigned_suffix not in field_db_type:
field_db_type = "%s %s" % (field_db_type, self.unsigned_suffix)
return field_db_type
def get_field_db_type_lookup(self, type_code):
return None
def get_field_class(self, class_path):
if "." in class_path:
module_path, package_name = class_path.rsplit(".", 1)
module = importlib.import_module(module_path)
return getattr(module, package_name)
return getattr(models, class_path)
def get_field_db_nullable(self, field, table_name):
tablespace = field.db_tablespace
if tablespace == "":
tablespace = "public"
attname = field.db_column or field.attname
return self.null.get((tablespace, table_name, attname), "fixme")
def strip_parameters(self, field_type):
if field_type and field_type != "double precision":
return field_type.split(" ")[0].split("(")[0].lower()
return field_type
def get_index_together(self, meta):
indexes_normalized = []
if hasattr(meta, "index_together"):
# Django 4.2 deprecated index_together
indexes_normalized += list(normalize_together(meta.index_together))
for idx in meta.indexes:
indexes_normalized.append(idx.fields)
return self.expand_together(indexes_normalized, meta)
def get_unique_together(self, meta):
unique_normalized = list(normalize_together(meta.unique_together))
for constraint in meta.constraints:
if isinstance(constraint, UniqueConstraint):
unique_normalized.append(constraint.fields)
return self.expand_together(unique_normalized, meta)
def expand_together(self, together, meta):
new_together = []
for fields in normalize_together(together):
new_together.append(
tuple(meta.get_field(field).attname for field in fields)
)
return new_together
def find_unique_missing_in_db(
self, meta, table_indexes, table_constraints, table_name, skip_list=None
):
schema_editor = connection.SchemaEditorClass(connection)
for field in all_local_fields(meta):
if skip_list and field.attname in skip_list:
continue
if field.unique and meta.managed:
attname = field.db_column or field.attname
db_field_unique = table_indexes.get(attname, {}).get("unique")
if not db_field_unique and table_constraints:
db_field_unique = any(
constraint["unique"]
for contraint_name, constraint in table_constraints.items()
if [attname] == constraint["columns"]
)
if attname in table_indexes and db_field_unique:
continue
index_name = schema_editor._create_index_name(table_name, [attname])
self.add_difference(
"unique-missing-in-db", table_name, [attname], index_name + "_uniq"
)
db_type = field.db_type(connection=connection)
if db_type.startswith("varchar"):
self.add_difference(
"index-missing-in-db",
table_name,
[attname],
index_name + "_like",
" varchar_pattern_ops",
)
if db_type.startswith("text"):
self.add_difference(
"index-missing-in-db",
table_name,
[attname],
index_name + "_like",
" text_pattern_ops",
)
unique_together = self.get_unique_together(meta)
db_unique_columns = normalize_together(
[
v["columns"]
for v in table_constraints.values()
if v["unique"] and not v["index"]
]
)
for unique_columns in unique_together:
if unique_columns in db_unique_columns:
continue
if skip_list and unique_columns in skip_list:
continue
index_name = schema_editor._create_index_name(table_name, unique_columns)
self.add_difference(
"unique-missing-in-db", table_name, unique_columns, index_name + "_uniq"
)
def find_unique_missing_in_model(
self, meta, table_indexes, table_constraints, table_name
):
fields = dict([(field.column, field) for field in all_local_fields(meta)])
unique_together = self.get_unique_together(meta)
for constraint_name, constraint in table_constraints.items():
if not constraint["unique"]:
continue
if constraint["index"]:
# unique indexes are handled by find_index_missing_in_model
continue
columns = constraint["columns"]
if len(columns) == 1:
field = fields.get(columns[0])
if field is None:
pass
elif field.unique:
continue
else:
if tuple(columns) in unique_together:
continue
self.add_difference("unique-missing-in-model", table_name, constraint_name)
def find_index_missing_in_db(
self, meta, table_indexes, table_constraints, table_name
):
schema_editor = connection.SchemaEditorClass(connection)
for field in all_local_fields(meta):
if field.db_index:
attname = field.db_column or field.attname
if attname not in table_indexes:
index_name = schema_editor._create_index_name(table_name, [attname])
self.add_difference(
"index-missing-in-db", table_name, [attname], index_name, ""
)
db_type = field.db_type(connection=connection)
if db_type.startswith("varchar"):
self.add_difference(
"index-missing-in-db",
table_name,
[attname],
index_name + "_like",
" varchar_pattern_ops",
)
if db_type.startswith("text"):
self.add_difference(
"index-missing-in-db",
table_name,
[attname],
index_name + "_like",
" text_pattern_ops",
)
index_together = self.get_index_together(meta)
db_index_together = normalize_together(
[
v["columns"]
for v in table_constraints.values()
if v["index"] and not v["unique"]
]
)
for columns in index_together:
if columns in db_index_together:
continue
index_name = schema_editor._create_index_name(table_name, columns)
self.add_difference(
"index-missing-in-db", table_name, columns, index_name + "_idx", ""
)
for index in meta.indexes:
if index.name not in table_constraints:
self.add_difference(
"index-missing-in-db", table_name, index.fields, index.name, ""
)
def find_index_missing_in_model(
self, meta, table_indexes, table_constraints, table_name
):
fields = dict([(field.column, field) for field in all_local_fields(meta)])
meta_index_names = [idx.name for idx in meta.indexes]
index_together = self.get_index_together(meta)
for constraint_name, constraint in table_constraints.items():
if constraint_name in meta_index_names:
continue
if constraint["unique"] and not constraint["index"]:
# unique constraints are handled by find_unique_missing_in_model
continue
columns = constraint["columns"]
field = fields.get(columns[0])
if (constraint["unique"] and constraint["index"]) or field is None:
# unique indexes do not exist in django ? only unique constraints
pass
elif len(columns) == 1:
if constraint["primary_key"] and field.primary_key:
continue
if (
constraint["foreign_key"]
and isinstance(field, models.ForeignKey)
and field.db_constraint
):
continue
if constraint["unique"] and field.unique:
continue
if (
constraint["index"]
and constraint["type"] == "idx"
and constraint.get("orders")
and field.unique
):
# django automatically creates a _like varchar_pattern_ops
# / text_pattern_ops index see https://code.djangoproject.com/ticket/12234
# note: mysql does not have and/or introspect and fill the 'orders'
# attribute of constraint information
continue
if constraint["index"] and field.db_index:
continue
if constraint["check"] and field.db_check(connection=connection):
continue
if getattr(field, "spatial_index", False):
continue
else:
if constraint["index"] and tuple(columns) in index_together:
continue
self.add_difference("index-missing-in-model", table_name, constraint_name)
def find_field_missing_in_model(self, fieldmap, table_description, table_name):
for row in table_description:
if row[0] not in fieldmap:
self.add_difference("field-missing-in-model", table_name, row[0])
def find_field_missing_in_db(self, fieldmap, table_description, table_name):
db_fields = [row[0] for row in table_description]
for field_name, field in fieldmap.items():
if field_name not in db_fields:
field_output = []
if field.remote_field:
field_output.extend(
[
field.remote_field.model._meta.db_table,
field.remote_field.model._meta.get_field(
field.remote_field.field_name
).column,
]
)
op = "fkey-missing-in-db"
else:
op = "field-missing-in-db"
field_output.append(field.db_type(connection=connection))
if self.options["include_defaults"] and field.has_default():
field_output.append(
"DEFAULT %s" % field.get_prep_value(field.get_default())
)
if not field.null:
field_output.append("NOT NULL")
self.add_difference(op, table_name, field_name, *field_output)
self.new_db_fields.add((table_name, field_name))
def find_field_type_differ(self, meta, table_description, table_name, func=None):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
if not self.strip_parameters(db_type) == self.strip_parameters(
model_type
) and (db_type, model_type) not in {
("serial", "integer"),
("bigserial", "bigint"),
}:
self.add_difference(
"field-type-differ", table_name, field.name, model_type, db_type
)
def find_field_parameter_differ(
self, meta, table_description, table_name, func=None
):
db_fields = dict([(row[0], row) for row in table_description])
for field in all_local_fields(meta):
if field.name not in db_fields:
continue
description = db_fields[field.name]
model_type = self.get_field_model_type(field)
db_type = self.get_field_db_type(description, field, table_name)
if not self.strip_parameters(model_type) == self.strip_parameters(db_type):
continue
# use callback function if defined
if func:
model_type, db_type = func(field, description, model_type, db_type)
model_check = field.db_parameters(connection=connection)["check"]
if " CHECK" in db_type:
db_type, db_check = db_type.split(" CHECK", 1)
db_check = db_check.strip().lstrip("(").rstrip(")")
else:
db_check = None
if not model_type == db_type or not model_check == db_check:
self.add_difference(
"field-parameter-differ",
table_name,
field.name,
model_type,
db_type,
)
def find_field_notnull_differ(self, meta, table_description, table_name):
if not self.can_detect_notnull_differ:
return
for field in all_local_fields(meta):
attname = field.db_column or field.attname
if (table_name, attname) in self.new_db_fields:
continue
null = self.get_field_db_nullable(field, table_name)
if field.null != null:
action = field.null and "DROP" or "SET"
self.add_difference("notnull-differ", table_name, attname, action)
def get_constraints(self, cursor, table_name, introspection):
return {}
def find_differences(self):
if self.options["all_applications"]:
self.add_app_model_marker(None, None)
for table in self.db_tables:
if (
table not in self.django_tables
and table not in self.IGNORE_MISSING_TABLES
):
self.add_difference("table-missing-in-model", table)
cur_app_label = None
for app_model in self.app_models:
meta = app_model._meta
table_name = meta.db_table
app_label = meta.app_label
if not self.options["include_proxy_models"] and meta.proxy:
continue
if cur_app_label != app_label:
# Marker indicating start of difference scan for this table_name
self.add_app_model_marker(app_label, app_model.__name__)
if table_name not in self.db_tables:
# Table is missing from database
self.add_difference("table-missing-in-db", table_name)
continue
if hasattr(self.introspection, "get_constraints"):
table_constraints = self.introspection.get_constraints(
self.cursor, table_name
)
else:
table_constraints = self.get_constraints(
self.cursor, table_name, self.introspection
)
fieldmap = dict(
[
(field.db_column or field.get_attname(), field)
for field in all_local_fields(meta)
]
)
# add ordering field if model uses order_with_respect_to
if meta.order_with_respect_to:
fieldmap["_order"] = ORDERING_FIELD
try:
table_description = self.introspection.get_table_description(
self.cursor, table_name
)
except Exception as e:
self.add_difference(
"error", "unable to introspect table: %s" % str(e).strip()
)
transaction.rollback() # reset transaction
continue
# map table_constraints into table_indexes
table_indexes = {}
for contraint_name, dct in table_constraints.items():
columns = dct["columns"]
if len(columns) == 1:
table_indexes[columns[0]] = {
"primary_key": dct["primary_key"],
"unique": dct["unique"],
"type": dct.get("type"),
"contraint_name": contraint_name,
}
# Fields which are defined in database but not in model
# 1) find: 'unique-missing-in-model'
self.find_unique_missing_in_model(
meta, table_indexes, table_constraints, table_name
)
# 2) find: 'index-missing-in-model'
self.find_index_missing_in_model(
meta, table_indexes, table_constraints, table_name
)
# 3) find: 'field-missing-in-model'
self.find_field_missing_in_model(fieldmap, table_description, table_name)
# Fields which are defined in models but not in database
# 4) find: 'field-missing-in-db'
self.find_field_missing_in_db(fieldmap, table_description, table_name)
# 5) find: 'unique-missing-in-db'
self.find_unique_missing_in_db(
meta, table_indexes, table_constraints, table_name
)
# 6) find: 'index-missing-in-db'
self.find_index_missing_in_db(
meta, table_indexes, table_constraints, table_name
)
# Fields which have a different type or parameters
# 7) find: 'type-differs'
self.find_field_type_differ(meta, table_description, table_name)
# 8) find: 'type-parameter-differs'
self.find_field_parameter_differ(meta, table_description, table_name)
# 9) find: 'field-notnull'
self.find_field_notnull_differ(meta, table_description, table_name)
self.has_differences = max(
[len(diffs) for _app_label, _model_name, diffs in self.differences]
)
def print_diff(self, style=no_style()):
"""Print differences to stdout"""
if self.options["sql"]:
self.print_diff_sql(style)
else:
self.print_diff_text(style)
def print_diff_text(self, style):
if not self.can_detect_notnull_differ:
self.stdout.write(
style.NOTICE(
"# Detecting notnull changes not implemented for this "
"database backend"
)
)
self.stdout.write("")
if not self.can_detect_unsigned_differ:
self.stdout.write(
style.NOTICE(
"# Detecting unsigned changes not implemented for this "
"database backend"
)
)
self.stdout.write("")
cur_app_label = None
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and app_label and cur_app_label != app_label:
self.stdout.write(
"%s %s"
% (style.NOTICE("+ Application:"), style.SQL_TABLE(app_label))
)
cur_app_label = app_label
if not self.dense and model_name:
self.stdout.write(
"%s %s"
% (
style.NOTICE("|-+ Differences for model:"),
style.SQL_TABLE(model_name),
)
)
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_TEXTS[diff_type] % dict(
(
str(i),
style.SQL_TABLE(
", ".join(e) if isinstance(e, (list, tuple)) else e
),
)
for i, e in enumerate(diff_args)
)
text = "'".join(
i % 2 == 0 and style.ERROR(e) or e
for i, e in enumerate(text.split("'"))
)
if not self.dense:
self.stdout.write("%s %s" % (style.NOTICE("|--+"), text))
else:
if app_label:
self.stdout.write(
"%s %s %s %s %s"
% (
style.NOTICE("App"),
style.SQL_TABLE(app_label),
style.NOTICE("Model"),
style.SQL_TABLE(model_name),
text,
)
)
else:
self.stdout.write(text)
def print_diff_sql(self, style):
if not self.can_detect_notnull_differ:
self.stdout.write(
style.NOTICE(
"-- Detecting notnull changes not implemented for this "
"database backend"
)
)
self.stdout.write("")
cur_app_label = None
qn = connection.ops.quote_name
if not self.has_differences:
if not self.dense:
self.stdout.write(style.SQL_KEYWORD("-- No differences"))
else:
self.stdout.write(style.SQL_KEYWORD("BEGIN;"))
for app_label, model_name, diffs in self.differences:
if not diffs:
continue
if not self.dense and cur_app_label != app_label:
self.stdout.write(
style.NOTICE("-- Application: %s" % style.SQL_TABLE(app_label))
)
cur_app_label = app_label
if not self.dense and model_name:
self.stdout.write(
style.NOTICE("-- Model: %s" % style.SQL_TABLE(model_name))
)
for diff in diffs:
diff_type, diff_args = diff
text = self.DIFF_SQL[diff_type](style, qn, diff_args)
if self.dense:
text = text.replace("\n\t", " ")
self.stdout.write(text)
self.stdout.write(style.SQL_KEYWORD("COMMIT;"))
| SQLDiff |
python | crytic__slither | slither/core/expressions/index_access.py | {
"start": 205,
"end": 894
} | class ____(Expression):
def __init__(
self,
left_expression: Union["IndexAccess", Identifier],
right_expression: Union[Literal, Identifier],
) -> None:
super().__init__()
self._expressions = [left_expression, right_expression]
@property
def expressions(self) -> List["Expression"]:
return self._expressions
@property
def expression_left(self) -> "Expression":
return self._expressions[0]
@property
def expression_right(self) -> "Expression":
return self._expressions[1]
def __str__(self) -> str:
return str(self.expression_left) + "[" + str(self.expression_right) + "]"
| IndexAccess |
python | keras-team__keras | keras/src/random/random_test.py | {
"start": 462,
"end": 12894
} | class ____(testing.TestCase):
@parameterized.parameters(
{"seed": 10, "shape": (5,), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3, 4), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 3},
)
def test_normal(self, seed, shape, mean, stddev):
np.random.seed(seed)
np_res = np.random.normal(loc=mean, scale=stddev, size=shape)
res = random.normal(shape, mean=mean, stddev=stddev, seed=seed)
self.assertEqual(res.shape, shape)
self.assertEqual(res.shape, np_res.shape)
@parameterized.parameters(
{"seed": 10, "shape": (5,), "minval": 0, "maxval": 1},
{"seed": 10, "shape": (2, 3), "minval": 0, "maxval": 1},
{"seed": 10, "shape": (2, 3, 4), "minval": 0, "maxval": 2},
{"seed": 10, "shape": (2, 3), "minval": -1, "maxval": 1},
{"seed": 10, "shape": (2, 3), "minval": 1, "maxval": 3},
)
def test_uniform(self, seed, shape, minval, maxval):
np.random.seed(seed)
np_res = np.random.uniform(low=minval, high=maxval, size=shape)
res = random.uniform(shape, minval=minval, maxval=maxval, seed=seed)
self.assertEqual(res.shape, shape)
self.assertEqual(res.shape, np_res.shape)
self.assertLessEqual(ops.max(res), maxval)
self.assertGreaterEqual(ops.max(res), minval)
@parameterized.parameters(
{"seed": 10, "num_samples": 1, "batch_size": 1},
{"seed": 10, "num_samples": 5, "batch_size": 2},
{"seed": 10, "num_samples": 10, "batch_size": 4},
{"seed": 10, "num_samples": 15, "batch_size": 8},
)
def test_categorical(self, seed, num_samples, batch_size):
np.random.seed(seed)
# Create logits that definitely favors the batch index after a softmax
# is applied. Without a softmax, this would be close to random.
logits = np.eye(batch_size) * 1e5 + 1e6
res = random.categorical(logits, num_samples, seed=seed)
# Outputs should have shape `(batch_size, num_samples)`, where each
# output index matches the batch index.
self.assertEqual(res.shape, (batch_size, num_samples))
expected = np.tile(np.arange(batch_size)[:, None], (1, num_samples))
self.assertAllClose(res, expected)
@parameterized.parameters(
{"seed": 10, "shape": (5,), "min": 0, "max": 10, "dtype": "uint16"},
{"seed": 10, "shape": (2, 3), "min": 0, "max": 10, "dtype": "uint32"},
{"seed": 10, "shape": (2, 3, 4), "min": 0, "max": 2, "dtype": "int8"},
{"seed": 10, "shape": (2, 3), "min": -1, "max": 1, "dtype": "int16"},
{"seed": 10, "shape": (2, 3), "min": 1, "max": 3, "dtype": "int32"},
)
def test_randint(self, seed, shape, min, max, dtype):
np.random.seed(seed)
np_res = np.random.randint(low=min, high=max, size=shape)
res = random.randint(
shape, minval=min, maxval=max, seed=seed, dtype=dtype
)
self.assertEqual(res.shape, shape)
self.assertEqual(res.shape, np_res.shape)
self.assertLessEqual(ops.max(res), max)
self.assertGreaterEqual(ops.max(res), min)
# Torch has incomplete dtype support for uints; will remap some dtypes.
if keras.backend.backend() != "torch":
self.assertEqual(backend.standardize_dtype(res.dtype), dtype)
@parameterized.parameters(
{"seed": 10, "shape": (5,), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3, 4), "mean": 0, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 1},
{"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 3},
# Test list shapes.
{"seed": 10, "shape": [2, 3], "mean": 10, "stddev": 3},
)
def test_truncated_normal(self, seed, shape, mean, stddev):
np.random.seed(seed)
np_res = np.random.normal(loc=mean, scale=stddev, size=shape)
res = random.truncated_normal(
shape, mean=mean, stddev=stddev, seed=seed
)
self.assertEqual(res.shape, tuple(shape))
self.assertEqual(res.shape, np_res.shape)
self.assertLessEqual(ops.max(res), mean + 2 * stddev)
self.assertGreaterEqual(ops.max(res), mean - 2 * stddev)
def test_dropout(self):
x = ops.ones((3, 5))
self.assertAllClose(random.dropout(x, rate=0, seed=0), x)
x_res = random.dropout(x, rate=0.8, seed=0)
self.assertGreater(ops.max(x_res), ops.max(x))
self.assertGreater(ops.sum(x_res == 0), 2)
def test_dropout_noise_shape(self):
inputs = ops.ones((2, 3, 5, 7))
x = random.dropout(
inputs, rate=0.3, noise_shape=[None, 3, 5, None], seed=0
)
self.assertEqual(x.shape, (2, 3, 5, 7))
def test_global_seed_generator(self):
# Check that unseeded RNG calls use and update global_rng_state()
def random_numbers(seed):
rng_state = seed_generator.global_seed_generator().state
rng_state.assign(seed)
x = random.normal((), seed=None)
y = random.normal((), seed=None)
return x, y, rng_state.value
if backend.backend() == "tensorflow":
import tensorflow as tf
random_numbers = tf.function(jit_compile=True)(random_numbers)
seed = ops.zeros((2,))
seed0 = ops.convert_to_numpy(seed)
x1, y1, seed = random_numbers(seed)
x1 = ops.convert_to_numpy(x1)
y1 = ops.convert_to_numpy(y1)
seed1 = ops.convert_to_numpy(seed)
x2, y2, seed = random_numbers(seed)
x2 = ops.convert_to_numpy(x2)
y2 = ops.convert_to_numpy(y2)
seed2 = ops.convert_to_numpy(seed)
x3, y3, seed = random_numbers(seed)
x3 = ops.convert_to_numpy(x3)
y3 = ops.convert_to_numpy(y3)
seed3 = ops.convert_to_numpy(seed)
self.assertNotEqual(seed0[1], seed1[1])
self.assertNotEqual(seed1[1], seed2[1])
self.assertNotEqual(seed2[1], seed3[1])
self.assertGreater(np.abs(x1 - y1), 1e-4)
self.assertGreater(np.abs(x1 - y1), 1e-4)
self.assertGreater(np.abs(x2 - y2), 1e-4)
self.assertGreater(np.abs(x3 - y3), 1e-4)
self.assertGreater(np.abs(x1 - x2), 1e-4)
self.assertGreater(np.abs(x1 - x3), 1e-4)
self.assertGreater(np.abs(x2 - x3), 1e-4)
self.assertGreater(np.abs(y1 - y2), 1e-4)
self.assertGreater(np.abs(y1 - y3), 1e-4)
self.assertGreater(np.abs(y2 - y3), 1e-4)
seed_generator.global_seed_generator().state.assign(seed)
def test_shuffle(self):
x = np.arange(100).reshape(10, 10)
# Test axis=0
y = random.shuffle(x, seed=0)
self.assertFalse(np.all(x == ops.convert_to_numpy(y)))
self.assertAllClose(np.sum(x, axis=0), ops.sum(y, axis=0))
self.assertNotAllClose(np.sum(x, axis=1), ops.sum(y, axis=1))
# Test axis=1
y = random.shuffle(x, axis=1, seed=0)
self.assertFalse(np.all(x == ops.convert_to_numpy(y)))
self.assertAllClose(np.sum(x, axis=1), ops.sum(y, axis=1))
self.assertNotAllClose(np.sum(x, axis=0), ops.sum(y, axis=0))
@parameterized.parameters(
{"seed": 10, "shape": (5, 2), "alpha": 2.0, "dtype": "float16"},
{"seed": 10, "shape": (2,), "alpha": 1.5, "dtype": "float32"},
{"seed": 10, "shape": (2, 3), "alpha": 0.5, "dtype": "float32"},
)
def test_gamma(self, seed, shape, alpha, dtype):
values = random.gamma(shape, alpha=alpha, seed=seed, dtype=dtype)
self.assertEqual(ops.shape(values), shape)
self.assertEqual(backend.standardize_dtype(values.dtype), dtype)
self.assertGreater(np.min(ops.convert_to_numpy(values)), 0.0)
@parameterized.parameters(
{
"seed": 10,
"shape": (5, 2),
"counts": 5e4,
"probabilities": 0.5,
"dtype": "float16",
},
{
"seed": 10,
"shape": (2,),
"counts": 1e5,
"probabilities": 0.5,
"dtype": "float32",
},
{
"seed": 10,
"shape": (2, 3),
"counts": [[1e5, 2e5, 3e5], [4e5, 5e5, 6e5]],
"probabilities": [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
"dtype": "float32",
},
)
def test_binomial(self, seed, shape, counts, probabilities, dtype):
set_random_seed(1337)
values = random.binomial(
shape=shape,
counts=counts,
probabilities=probabilities,
seed=seed,
dtype=dtype,
)
self.assertEqual(ops.shape(values), shape)
self.assertEqual(backend.standardize_dtype(values.dtype), dtype)
# The following test that ensures that the number of time
# each event occurs doesn't exceed the total input count specified
# by the user for that event.
# Hence, we do an element wise comparison between `counts` array
# and the (generated) `values` array.
values_np = ops.convert_to_numpy(values)
assert np.greater_equal(np.array(counts), values_np).all()
# Following test computes the probabilities of each event
# by dividing number of times an event occurs (which is the generated
# value) by the corresponding value in the (total) counts array.
# and then makes sure that the computed probabilities approximate
# the input probabilities
generated_probabilities = values_np / np.array(counts)
probabilities = np.ones(shape) * np.array(probabilities)
self.assertAllClose(
probabilities, generated_probabilities, rtol=0.005, atol=0.005
)
@parameterized.parameters(
{
"seed": 10,
"shape": (10000,),
"alpha": 3.0,
"beta": 2.0,
"dtype": "float16",
},
{
"seed": 10,
"shape": (10000, 3),
"alpha": [[7.0, 0.5, 1.5]],
"beta": [[15.0, 0.9, 4.5]],
"dtype": "float32",
},
{
"seed": 10,
"shape": (10000, 30),
"alpha": 1.0,
"beta": 1.0,
"dtype": "float32",
},
)
def test_beta(self, seed, shape, alpha, beta, dtype):
set_random_seed(1337)
values = random.beta(
shape=shape, alpha=alpha, beta=beta, seed=seed, dtype=dtype
)
self.assertEqual(ops.shape(values), shape)
self.assertEqual(backend.standardize_dtype(values.dtype), dtype)
values_np = ops.convert_to_numpy(values)
self.assertGreaterEqual(np.min(values_np), b=0.0)
self.assertLessEqual(np.max(values_np), b=1.0)
_alpha_is_an_array = False
if isinstance(alpha, list):
alpha = np.array(alpha)
beta = np.array(beta)
_alpha_is_an_array = True
# Mean check:
# For a beta distributed random variable,
# mean = alpha / (alpha + beta)
expected_mean = alpha / (alpha + beta)
if _alpha_is_an_array:
actual_mean = np.mean(values_np, axis=0)
self.assertAllClose(
expected_mean.flatten(), actual_mean, atol=0.005, rtol=0.005
)
else:
actual_mean = np.mean(values_np.flatten())
self.assertAlmostEqual(expected_mean, actual_mean, decimal=2)
# Variance check:
# For a beta distributed random variable,
# variance = (alpha * beta) / ((alpha + beta)^2)(alpha + beta + 1)
expected_variance = (alpha * beta) / (
np.square(alpha + beta) * (alpha + beta + 1)
)
if _alpha_is_an_array:
actual_variance = np.var(values_np, axis=0)
self.assertAllClose(
expected_variance.flatten(),
actual_variance,
atol=0.005,
rtol=0.005,
)
else:
actual_variance = np.var(values_np.flatten())
self.assertAlmostEqual(
expected_variance, actual_variance, decimal=2
)
| RandomCorrectnessTest |
python | pypa__warehouse | warehouse/email/services.py | {
"start": 644,
"end": 1759
} | class ____:
def __init__(
self,
subject: str,
body_text: str,
body_html: str | None = None,
sender: str | None = None,
):
self.subject = subject
self.body_text = body_text
self.body_html = body_html
self.sender = sender
@classmethod
def from_template(cls, email_name, context, *, request):
subject = render(
f"email/{email_name}/subject.txt", context, request=request
).replace("\n", "")
body_text = render(f"email/{email_name}/body.txt", context, request=request)
try:
body_html = render(
f"email/{email_name}/body.html", context, request=request
)
body_html = premailer.Premailer(body_html, remove_classes=True).transform()
# Catching TemplateNotFound here is a bit of a leaky abstraction, but there's
# not much we can do about it.
except TemplateNotFound:
body_html = None
return cls(subject=subject, body_text=body_text, body_html=body_html)
@implementer(IEmailSender)
| EmailMessage |
python | pola-rs__polars | py-polars/src/polars/exceptions.py | {
"start": 4300,
"end": 4420
} | class ____(PolarsError):
"""Exception raised when the number of returned rows does not match expectation."""
| RowsError |
python | pytorch__pytorch | torch/_export/db/examples/pytree_flatten.py | {
"start": 84,
"end": 376
} | class ____(torch.nn.Module):
"""
Pytree from PyTorch can be captured by TorchDynamo.
"""
def forward(self, x):
y, _spec = pytree.tree_flatten(x)
return y[0] + 1
example_args = ({1: torch.randn(3, 2), 2: torch.randn(3, 2)},),
model = PytreeFlatten()
| PytreeFlatten |
python | kamyu104__LeetCode-Solutions | Python/find-the-shortest-superstring-ii.py | {
"start": 53,
"end": 1114
} | class ____(object):
def shortestSuperstring(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: str
"""
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j+1 > 0 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
def KMP(text, pattern):
prefix = getPrefix(pattern)
j = -1
for i in xrange(len(text)):
while j+1 > 0 and pattern[j+1] != text[i]:
j = prefix[j]
if pattern[j+1] == text[i]:
j += 1
if j+1 == len(pattern):
break
return text+pattern[j+1:] # modified
result1 = KMP(s1, s2)
result2 = KMP(s2, s1)
return result1 if len(result1) < len(result2) else result2
| Solution |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/retrievers/test_time_weighted_retriever.py | {
"start": 788,
"end": 6397
} | class ____(VectorStore):
"""Mock invalid vector store."""
@override
def add_texts(
self,
texts: Iterable[str],
metadatas: list[dict] | None = None,
**kwargs: Any,
) -> list[str]:
return list(texts)
@override
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[Document]:
return []
@classmethod
@override
def from_texts(
cls: type["MockVectorStore"],
texts: list[str],
embedding: Embeddings,
metadatas: list[dict] | None = None,
**kwargs: Any,
) -> "MockVectorStore":
return cls()
@override
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[tuple[Document, float]]:
return [(doc, 0.5) for doc in _get_example_memories()]
async def _asimilarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[tuple[Document, float]]:
return self._similarity_search_with_relevance_scores(query, k, **kwargs)
@pytest.fixture
def time_weighted_retriever() -> TimeWeightedVectorStoreRetriever:
vectorstore = MockVectorStore()
return TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore,
memory_stream=_get_example_memories(),
)
def test__get_hours_passed() -> None:
time1 = datetime(2023, 4, 14, 14, 30)
time2 = datetime(2023, 4, 14, 12, 0)
expected_hours_passed = 2.5
hours_passed = _get_hours_passed(time1, time2)
assert hours_passed == expected_hours_passed
def test_get_combined_score(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
document = Document(
page_content="Test document",
metadata={"last_accessed_at": datetime(2023, 4, 14, 12, 0)},
)
vector_salience = 0.7
expected_hours_passed = 2.5
current_time = datetime(2023, 4, 14, 14, 30)
combined_score = time_weighted_retriever._get_combined_score(
document,
vector_salience,
current_time,
)
expected_score = (
1.0 - time_weighted_retriever.decay_rate
) ** expected_hours_passed + vector_salience
assert combined_score == pytest.approx(expected_score)
def test_get_salient_docs(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
query = "Test query"
docs_and_scores = time_weighted_retriever.get_salient_docs(query)
want = [(doc, 0.5) for doc in _get_example_memories()]
assert isinstance(docs_and_scores, dict)
assert len(docs_and_scores) == len(want)
for doc in docs_and_scores.values():
assert doc in want
async def test_aget_salient_docs(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
query = "Test query"
docs_and_scores = await time_weighted_retriever.aget_salient_docs(query)
want = [(doc, 0.5) for doc in _get_example_memories()]
assert isinstance(docs_and_scores, dict)
assert len(docs_and_scores) == len(want)
for doc in docs_and_scores.values():
assert doc in want
def test_invoke(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
query = "Test query"
relevant_documents = time_weighted_retriever.invoke(query)
want = [(doc, 0.5) for doc in _get_example_memories()]
assert isinstance(relevant_documents, list)
assert len(relevant_documents) == len(want)
now = datetime.now()
for doc in relevant_documents:
# assert that the last_accessed_at is close to now.
assert now - timedelta(hours=1) < doc.metadata["last_accessed_at"] <= now
# assert that the last_accessed_at in the memory stream is updated.
for d in time_weighted_retriever.memory_stream:
assert now - timedelta(hours=1) < d.metadata["last_accessed_at"] <= now
async def test_ainvoke(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
query = "Test query"
relevant_documents = await time_weighted_retriever.ainvoke(query)
want = [(doc, 0.5) for doc in _get_example_memories()]
assert isinstance(relevant_documents, list)
assert len(relevant_documents) == len(want)
now = datetime.now()
for doc in relevant_documents:
# assert that the last_accessed_at is close to now.
assert now - timedelta(hours=1) < doc.metadata["last_accessed_at"] <= now
# assert that the last_accessed_at in the memory stream is updated.
for d in time_weighted_retriever.memory_stream:
assert now - timedelta(hours=1) < d.metadata["last_accessed_at"] <= now
def test_add_documents(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
documents = [Document(page_content="test_add_documents document")]
added_documents = time_weighted_retriever.add_documents(documents)
assert isinstance(added_documents, list)
assert len(added_documents) == 1
assert (
time_weighted_retriever.memory_stream[-1].page_content
== documents[0].page_content
)
async def test_aadd_documents(
time_weighted_retriever: TimeWeightedVectorStoreRetriever,
) -> None:
documents = [Document(page_content="test_add_documents document")]
added_documents = await time_weighted_retriever.aadd_documents(documents)
assert isinstance(added_documents, list)
assert len(added_documents) == 1
assert (
time_weighted_retriever.memory_stream[-1].page_content
== documents[0].page_content
)
| MockVectorStore |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_query.py | {
"start": 20867,
"end": 23122
} | class ____(fixtures.TestBase):
__backend__ = True
__only_on__ = "mssql"
@testing.fixture
def scalar_strings(self, connection):
connection.exec_driver_sql(
"""
CREATE FUNCTION scalar_strings (
)
RETURNS TABLE
AS
RETURN
SELECT
my_string
FROM (
VALUES ('some string'), ('some string'), ('some string')
) AS my_tab(my_string)
"""
)
yield
connection.exec_driver_sql("DROP FUNCTION scalar_strings")
@testing.fixture
def two_strings(self, connection):
connection.exec_driver_sql(
"""
CREATE FUNCTION three_pairs (
)
RETURNS TABLE
AS
RETURN
SELECT
s1 AS string1, s2 AS string2
FROM (
VALUES ('a', 'b'), ('c', 'd'), ('e', 'f')
) AS my_tab(s1, s2)
"""
)
yield
connection.exec_driver_sql("DROP FUNCTION three_pairs")
def test_scalar_strings_control(self, scalar_strings, connection):
result = (
connection.exec_driver_sql(
"SELECT my_string FROM scalar_strings()"
)
.scalars()
.all()
)
eq_(result, ["some string"] * 3)
def test_scalar_strings_named_control(self, scalar_strings, connection):
result = (
connection.exec_driver_sql(
"SELECT anon_1.my_string FROM scalar_strings() AS anon_1"
)
.scalars()
.all()
)
eq_(result, ["some string"] * 3)
def test_scalar_strings(self, scalar_strings, connection):
fn = func.scalar_strings().table_valued("my_string")
result = connection.execute(select(fn.c.my_string)).scalars().all()
eq_(result, ["some string"] * 3)
def test_two_strings_control(self, two_strings, connection):
result = connection.exec_driver_sql(
"SELECT string1, string2 FROM three_pairs ()"
).all()
eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
def test_two_strings(self, two_strings, connection):
fn = func.three_pairs().table_valued("string1", "string2")
result = connection.execute(select(fn.c.string1, fn.c.string2)).all()
eq_(result, [("a", "b"), ("c", "d"), ("e", "f")])
| TableValuedTest |
python | scikit-image__scikit-image | src/skimage/filters/ridges.py | {
"start": 163,
"end": 14609
} | class ____ ridge filters relies on the eigenvalues of the Hessian matrix of
image intensities to detect tube-like structures where the intensity changes
perpendicular but not along the structure.
"""
from warnings import warn
import numpy as np
from scipy import linalg
from .._shared.utils import _supported_float_type, check_nD
from ..feature.corner import hessian_matrix, hessian_matrix_eigvals
def meijering(
image, sigmas=range(1, 10, 2), alpha=None, black_ridges=True, mode='reflect', cval=0
):
"""
Filter an image with the Meijering neuriteness filter.
This filter can be used to detect continuous ridges, e.g. neurites,
wrinkles, rivers. It can be used to calculate the fraction of the
whole image containing such objects.
Calculates the eigenvalues of the Hessian to compute the similarity of
an image region to neurites, according to the method described in [1]_.
Parameters
----------
image : (M, N[, ...]) ndarray
Array with input image data.
sigmas : iterable of floats, optional
Sigmas used as scales of filter
alpha : float, optional
Shaping filter constant, that selects maximally flat elongated
features. The default, None, selects the optimal value -1/(ndim+1).
black_ridges : bool, optional
When True (the default), the filter detects black ridges; when
False, it detects white ridges.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
out : (M, N[, ...]) ndarray
Filtered image (maximum of pixels across all scales).
See also
--------
sato
frangi
hessian
References
----------
.. [1] Meijering, E., Jacob, M., Sarria, J. C., Steiner, P., Hirling, H.,
Unser, M. (2004). Design and validation of a tool for neurite tracing
and analysis in fluorescence microscopy images. Cytometry Part A,
58(2), 167-176.
:DOI:`10.1002/cyto.a.20022`
"""
image = image.astype(_supported_float_type(image.dtype), copy=False)
if not black_ridges: # Normalize to black ridges.
image = -image
if alpha is None:
alpha = 1 / (image.ndim + 1)
mtx = linalg.circulant([1, *[alpha] * (image.ndim - 1)]).astype(image.dtype)
# Generate empty array for storing maximum value
# from different (sigma) scales
filtered_max = np.zeros_like(image)
for sigma in sigmas: # Filter for all sigmas.
eigvals = hessian_matrix_eigvals(
hessian_matrix(
image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True
)
)
# Compute normalized eigenvalues l_i = e_i + sum_{j!=i} alpha * e_j.
vals = np.tensordot(mtx, eigvals, 1)
# Get largest normalized eigenvalue (by magnitude) at each pixel.
vals = np.take_along_axis(vals, abs(vals).argmax(0)[None], 0).squeeze(0)
# Remove negative values.
vals = np.maximum(vals, 0)
# Normalize to max = 1 (unless everything is already zero).
max_val = vals.max()
if max_val > 0:
vals /= max_val
filtered_max = np.maximum(filtered_max, vals)
return filtered_max # Return pixel-wise max over all sigmas.
def sato(image, sigmas=range(1, 10, 2), black_ridges=True, mode='reflect', cval=0):
"""
Filter an image with the Sato tubeness filter.
This filter can be used to detect continuous ridges, e.g. tubes,
wrinkles, rivers. It can be used to calculate the fraction of the
whole image containing such objects.
Defined only for 2-D and 3-D images. Calculates the eigenvalues of the
Hessian to compute the similarity of an image region to tubes, according to
the method described in [1]_.
Parameters
----------
image : (M, N[, P]) ndarray
Array with input image data.
sigmas : iterable of floats, optional
Sigmas used as scales of filter.
black_ridges : bool, optional
When True (the default), the filter detects black ridges; when
False, it detects white ridges.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
out : (M, N[, P]) ndarray
Filtered image (maximum of pixels across all scales).
See also
--------
meijering
frangi
hessian
References
----------
.. [1] Sato, Y., Nakajima, S., Shiraga, N., Atsumi, H., Yoshida, S.,
Koller, T., ..., Kikinis, R. (1998). Three-dimensional multi-scale line
filter for segmentation and visualization of curvilinear structures in
medical images. Medical image analysis, 2(2), 143-168.
:DOI:`10.1016/S1361-8415(98)80009-1`
"""
check_nD(image, [2, 3]) # Check image dimensions.
image = image.astype(_supported_float_type(image.dtype), copy=False)
if not black_ridges: # Normalize to black ridges.
image = -image
# Generate empty array for storing maximum value
# from different (sigma) scales
filtered_max = np.zeros_like(image)
for sigma in sigmas: # Filter for all sigmas.
eigvals = hessian_matrix_eigvals(
hessian_matrix(
image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True
)
)
# Compute normalized tubeness (eqs. (9) and (22), ref. [1]_) as the
# geometric mean of eigvals other than the lowest one
# (hessian_matrix_eigvals returns eigvals in decreasing order), clipped
# to 0, multiplied by sigma^2.
eigvals = eigvals[:-1]
vals = sigma**2 * np.prod(np.maximum(eigvals, 0), 0) ** (1 / len(eigvals))
filtered_max = np.maximum(filtered_max, vals)
return filtered_max # Return pixel-wise max over all sigmas.
def frangi(
    image,
    sigmas=range(1, 10, 2),
    scale_range=None,
    scale_step=None,
    alpha=0.5,
    beta=0.5,
    gamma=None,
    black_ridges=True,
    mode='reflect',
    cval=0,
):
    """
    Filter an image with the Frangi vesselness filter.

    This filter can be used to detect continuous ridges, e.g. vessels,
    wrinkles, rivers. It can be used to calculate the fraction of the
    whole image containing such objects.

    Defined only for 2-D and 3-D images. Calculates the eigenvalues of the
    Hessian to compute the similarity of an image region to vessels, according
    to the method described in [1]_.

    Parameters
    ----------
    image : (M, N[, P]) ndarray
        Array with input image data.
    sigmas : iterable of floats, optional
        Sigmas used as scales of filter, i.e.,
        np.arange(scale_range[0], scale_range[1], scale_step)
    scale_range : 2-tuple of floats, optional
        The range of sigmas used.
    scale_step : float, optional
        Step size between sigmas.
    alpha : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to deviation from a plate-like structure.
    beta : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to deviation from a blob-like structure.
    gamma : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to areas of high variance/texture/structure.

        .. versionchanged:: 0.20
            The default, None, uses half of the maximum Hessian norm.
    black_ridges : bool, optional
        When True (the default), the filter detects black ridges; when
        False, it detects white ridges.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    out : (M, N[, P]) ndarray
        Filtered image (maximum of pixels across all scales).

        .. versionchanged:: 0.20
            The implementation got rewritten and gives different output values
            wrt the previous implementation (backwards incompatible change).
            The filter is now set to zero whenever one of the Hessian
            eigenvalues has a sign which is incompatible with a ridge of the
            desired polarity.

    Notes
    -----
    Earlier versions of this filter were implemented by Marc Schrijver,
    (November 2001), D. J. Kroon, University of Twente (May 2009) [2]_, and
    D. G. Ellis (January 2017) [3]_.

    See also
    --------
    meijering
    sato
    hessian

    References
    ----------
    .. [1] Frangi, A. F., Niessen, W. J., Vincken, K. L., & Viergever, M. A.
        (1998,). Multiscale vessel enhancement filtering. In International
        Conference on Medical Image Computing and Computer-Assisted
        Intervention (pp. 130-137). Springer Berlin Heidelberg.
        :DOI:`10.1007/BFb0056195`
    .. [2] Kroon, D. J.: Hessian based Frangi vesselness filter.
    .. [3] Ellis, D. G.: https://github.com/ellisdg/frangi3d/tree/master/frangi
    """
    if scale_range is not None and scale_step is not None:
        # BUGFIX: the message previously named `scale_range` twice; the second
        # deprecated parameter is `scale_step`.
        warn(
            'Use keyword parameter `sigmas` instead of `scale_range` and '
            '`scale_step` which will be removed in version 0.17.',
            stacklevel=2,
        )
        sigmas = np.arange(scale_range[0], scale_range[1], scale_step)

    check_nD(image, [2, 3])  # Check image dimensions.
    image = image.astype(_supported_float_type(image.dtype), copy=False)
    if not black_ridges:  # Normalize to black ridges.
        image = -image

    # Generate empty array for storing maximum value
    # from different (sigma) scales
    filtered_max = np.zeros_like(image)
    for sigma in sigmas:  # Filter for all sigmas.
        eigvals = hessian_matrix_eigvals(
            hessian_matrix(
                image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True
            )
        )
        # Sort eigenvalues by magnitude.
        eigvals = np.take_along_axis(eigvals, abs(eigvals).argsort(0), 0)
        lambda1 = eigvals[0]
        if image.ndim == 2:
            (lambda2,) = np.maximum(eigvals[1:], 1e-10)
            r_a = np.inf  # implied by eq. (15).
            r_b = abs(lambda1) / lambda2  # eq. (15).
        else:  # ndim == 3
            lambda2, lambda3 = np.maximum(eigvals[1:], 1e-10)
            r_a = lambda2 / lambda3  # eq. (11).
            r_b = abs(lambda1) / np.sqrt(lambda2 * lambda3)  # eq. (10).
        s = np.sqrt((eigvals**2).sum(0))  # eq. (12).
        if gamma is None:
            # NOTE: gamma is derived from the first scale only; subsequent
            # iterations reuse this value (gamma is rebound here).
            gamma = s.max() / 2
            if gamma == 0:
                gamma = 1  # If s == 0 everywhere, gamma doesn't matter.
        # Filtered image, eq. (13) and (15).  Our implementation relies on the
        # blobness exponential factor underflowing to zero whenever the second
        # or third eigenvalues are negative (we clip them to 1e-10, to make r_b
        # very large).
        vals = 1.0 - np.exp(
            -(r_a**2) / (2 * alpha**2), dtype=image.dtype
        )  # plate sensitivity
        vals *= np.exp(-(r_b**2) / (2 * beta**2), dtype=image.dtype)  # blobness
        vals *= 1.0 - np.exp(
            -(s**2) / (2 * gamma**2), dtype=image.dtype
        )  # structuredness
        filtered_max = np.maximum(filtered_max, vals)
    return filtered_max  # Return pixel-wise max over all sigmas.
def hessian(
    image,
    sigmas=range(1, 10, 2),
    scale_range=None,
    scale_step=None,
    alpha=0.5,
    beta=0.5,
    gamma=15,
    black_ridges=True,
    mode='reflect',
    cval=0,
):
    """Filter an image with the Hybrid Hessian filter.

    This filter can be used to detect continuous edges, e.g. vessels,
    wrinkles, rivers. It can be used to calculate the fraction of the whole
    image containing such objects.

    Defined only for 2-D and 3-D images. Almost equal to Frangi filter, but
    uses alternative method of smoothing. Refer to [1]_ to find the
    differences between Frangi and Hessian filters.

    Parameters
    ----------
    image : (M, N[, P]) ndarray
        Array with input image data.
    sigmas : iterable of floats, optional
        Sigmas used as scales of filter, i.e.,
        np.arange(scale_range[0], scale_range[1], scale_step)
    scale_range : 2-tuple of floats, optional
        The range of sigmas used.
    scale_step : float, optional
        Step size between sigmas.
    alpha : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to deviation from a plate-like structure.
    beta : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to deviation from a blob-like structure.
    gamma : float, optional
        Frangi correction constant that adjusts the filter's
        sensitivity to areas of high variance/texture/structure.
    black_ridges : bool, optional
        When True (the default), the filter detects black ridges; when
        False, it detects white ridges.
    mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
        How to handle values outside the image borders.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside
        the image boundaries.

    Returns
    -------
    out : (M, N[, P]) ndarray
        Filtered image (maximum of pixels across all scales).

    Notes
    -----
    Written by Marc Schrijver (November 2001)
    Re-Written by D. J. Kroon University of Twente (May 2009) [2]_

    See also
    --------
    meijering
    sato
    frangi

    References
    ----------
    .. [1] Ng, C. C., Yap, M. H., Costen, N., & Li, B. (2014,). Automatic
        wrinkle detection using hybrid Hessian filter. In Asian Conference on
        Computer Vision (pp. 609-622). Springer International Publishing.
        :DOI:`10.1007/978-3-319-16811-1_40`
    .. [2] Kroon, D. J.: Hessian based Frangi vesselness filter.
    """
    # The hybrid filter is the Frangi vesselness response with every
    # non-positive pixel clamped to 1 afterwards; all tuning parameters are
    # forwarded unchanged.
    frangi_kwargs = dict(
        sigmas=sigmas,
        scale_range=scale_range,
        scale_step=scale_step,
        alpha=alpha,
        beta=beta,
        gamma=gamma,
        black_ridges=black_ridges,
        mode=mode,
        cval=cval,
    )
    response = frangi(image, **frangi_kwargs)
    response[response <= 0] = 1
    return response
| of |
python | has2k1__plotnine | plotnine/scales/limits.py | {
"start": 3739,
"end": 3830
} | class ____(_lim):
"""
Linetype limits
"""
aesthetic = "linetype"
| linetypelim |
python | django__django | tests/model_fields/models.py | {
"start": 12308,
"end": 12605
} | class ____(models.Model):
value = models.JSONField(blank=True, null=True)
value_custom = models.JSONField(
encoder=DjangoJSONEncoder,
decoder=CustomJSONDecoder,
null=True,
)
class Meta:
required_db_features = {"supports_json_field"}
| NullableJSONModel |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 240649,
"end": 240932
} | class ____(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
| SendfileUsingSendfileTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/fuzzy_completer.py | {
"start": 453,
"end": 6580
} | class ____(Completer):
"""
Fuzzy completion.
This wraps any other completer and turns it into a fuzzy completer.
If the list of words is: ["leopard" , "gorilla", "dinosaur", "cat", "bee"]
Then trying to complete "oar" would yield "leopard" and "dinosaur", but not
the others, because they match the regular expression 'o.*a.*r'.
Similar, in another application "djm" could expand to "django_migrations".
The results are sorted by relevance, which is defined as the start position
and the length of the match.
Notice that this is not really a tool to work around spelling mistakes,
like what would be possible with difflib. The purpose is rather to have a
quicker or more intuitive way to filter the given completions, especially
when many completions have a common prefix.
Fuzzy algorithm is based on this post:
https://blog.amjith.com/fuzzyfinder-in-10-lines-of-python
:param completer: A :class:`~.Completer` instance.
:param WORD: When True, use WORD characters.
:param pattern: Regex pattern which selects the characters before the
cursor that are considered for the fuzzy matching.
:param enable_fuzzy: (bool or `Filter`) Enabled the fuzzy behavior. For
easily turning fuzzyness on or off according to a certain condition.
"""
def __init__(
self,
completer: Completer,
WORD: bool = False,
pattern: str | None = None,
enable_fuzzy: FilterOrBool = True,
) -> None:
assert pattern is None or pattern.startswith("^")
self.completer = completer
self.pattern = pattern
self.WORD = WORD
self.pattern = pattern
self.enable_fuzzy = to_filter(enable_fuzzy)
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
if self.enable_fuzzy():
return self._get_fuzzy_completions(document, complete_event)
else:
return self.completer.get_completions(document, complete_event)
def _get_pattern(self) -> str:
if self.pattern:
return self.pattern
if self.WORD:
return r"[^\s]+"
return "^[a-zA-Z0-9_]*"
def _get_fuzzy_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
word_before_cursor = document.get_word_before_cursor(
pattern=re.compile(self._get_pattern())
)
# Get completions
document2 = Document(
text=document.text[: document.cursor_position - len(word_before_cursor)],
cursor_position=document.cursor_position - len(word_before_cursor),
)
inner_completions = list(
self.completer.get_completions(document2, complete_event)
)
fuzzy_matches: list[_FuzzyMatch] = []
if word_before_cursor == "":
# If word before the cursor is an empty string, consider all
# completions, without filtering everything with an empty regex
# pattern.
fuzzy_matches = [_FuzzyMatch(0, 0, compl) for compl in inner_completions]
else:
pat = ".*?".join(map(re.escape, word_before_cursor))
pat = f"(?=({pat}))" # lookahead regex to manage overlapping matches
regex = re.compile(pat, re.IGNORECASE)
for compl in inner_completions:
matches = list(regex.finditer(compl.text))
if matches:
# Prefer the match, closest to the left, then shortest.
best = min(matches, key=lambda m: (m.start(), len(m.group(1))))
fuzzy_matches.append(
_FuzzyMatch(len(best.group(1)), best.start(), compl)
)
def sort_key(fuzzy_match: _FuzzyMatch) -> tuple[int, int]:
"Sort by start position, then by the length of the match."
return fuzzy_match.start_pos, fuzzy_match.match_length
fuzzy_matches = sorted(fuzzy_matches, key=sort_key)
for match in fuzzy_matches:
# Include these completions, but set the correct `display`
# attribute and `start_position`.
yield Completion(
text=match.completion.text,
start_position=match.completion.start_position
- len(word_before_cursor),
# We access to private `_display_meta` attribute, because that one is lazy.
display_meta=match.completion._display_meta,
display=self._get_display(match, word_before_cursor),
style=match.completion.style,
)
def _get_display(
self, fuzzy_match: _FuzzyMatch, word_before_cursor: str
) -> AnyFormattedText:
"""
Generate formatted text for the display label.
"""
def get_display() -> AnyFormattedText:
m = fuzzy_match
word = m.completion.text
if m.match_length == 0:
# No highlighting when we have zero length matches (no input text).
# In this case, use the original display text (which can include
# additional styling or characters).
return m.completion.display
result: StyleAndTextTuples = []
# Text before match.
result.append(("class:fuzzymatch.outside", word[: m.start_pos]))
# The match itself.
characters = list(word_before_cursor)
for c in word[m.start_pos : m.start_pos + m.match_length]:
classname = "class:fuzzymatch.inside"
if characters and c.lower() == characters[0].lower():
classname += ".character"
del characters[0]
result.append((classname, c))
# Text after match.
result.append(
("class:fuzzymatch.outside", word[m.start_pos + m.match_length :])
)
return result
return get_display()
| FuzzyCompleter |
python | walkccc__LeetCode | solutions/230. Kth Smallest Element in a BST/230.py | {
"start": 0,
"end": 461
} | class ____:
def kthSmallest(self, root: TreeNode | None, k: int) -> int:
def countNodes(root: TreeNode | None) -> int:
if not root:
return 0
return 1 + countNodes(root.left) + countNodes(root.right)
leftCount = countNodes(root.left)
if leftCount == k - 1:
return root.val
if leftCount >= k:
return self.kthSmallest(root.left, k)
return self.kthSmallest(root.right, k - 1 - leftCount) # leftCount < k
| Solution |
python | aimacode__aima-python | utils4e.py | {
"start": 12877,
"end": 16107
} | class ____:
"""Dependency injection of temporary values for global functions/classes/etc.
E.g., `with injection(DataBase=MockDataBase): ...`"""
def __init__(self, **kwds):
self.new = kwds
def __enter__(self):
self.old = {v: globals()[v] for v in self.new}
globals().update(self.new)
def __exit__(self, type, value, traceback):
globals().update(self.old)
def memoize(fn, slot=None, maxsize=32):
"""Memoize fn: make it remember the computed value for any argument list.
If slot is specified, store result in that slot of first argument.
If slot is false, use lru_cache for caching the values."""
if slot:
def memoized_fn(obj, *args):
if hasattr(obj, slot):
return getattr(obj, slot)
else:
val = fn(obj, *args)
setattr(obj, slot, val)
return val
else:
@functools.lru_cache(maxsize=maxsize)
def memoized_fn(*args):
return fn(*args)
return memoized_fn
def name(obj):
"""Try to find some reasonable name for the object."""
return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0) or
getattr(getattr(obj, '__class__', 0), '__name__', 0) or
str(obj))
def isnumber(x):
"""Is x a number?"""
return hasattr(x, '__int__')
def issequence(x):
"""Is x a sequence?"""
return isinstance(x, collections.abc.Sequence)
def print_table(table, header=None, sep=' ', numfmt='{}'):
"""Print a list of lists as a table, so that columns line up nicely.
header, if specified, will be printed as the first row.
numfmt is the format for all numbers; you might want e.g. '{:.2f}'.
(If you want different formats in different columns,
don't use print_table.) sep is the separator between columns."""
justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
if header:
table.insert(0, header)
table = [[numfmt.format(x) if isnumber(x) else x for x in row]
for row in table]
sizes = list(
map(lambda seq: max(map(len, seq)),
list(zip(*[map(str, row) for row in table]))))
for row in table:
print(sep.join(getattr(
str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
def open_data(name, mode='r'):
aima_root = os.path.dirname(__file__)
aima_file = os.path.join(aima_root, *['aima-data', name])
return open(aima_file, mode=mode)
def failure_test(algorithm, tests):
"""Grades the given algorithm based on how many tests it passes.
Most algorithms have arbitrary output on correct execution, which is difficult
to check for correctness. On the other hand, a lot of algorithms output something
particular on fail (for example, False, or None).
tests is a list with each element in the form: (values, failure_output)."""
return mean(int(algorithm(x) != y) for x, y in tests)
# ______________________________________________________________________________
# Expressions
# See https://docs.python.org/3/reference/expressions.html#operator-precedence
# See https://docs.python.org/3/reference/datamodel.html#special-method-names
| injection |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/default.py | {
"start": 43375,
"end": 85931
} | class ____(ExecutionContext):
isinsert = False
isupdate = False
isdelete = False
is_crud = False
is_text = False
isddl = False
execute_style: ExecuteStyle = ExecuteStyle.EXECUTE
compiled: Optional[Compiled] = None
result_column_struct: Optional[
Tuple[List[ResultColumnsEntry], bool, bool, bool, bool]
] = None
returned_default_rows: Optional[Sequence[Row[Unpack[TupleAny]]]] = None
execution_options: _ExecuteOptions = util.EMPTY_DICT
cursor_fetch_strategy = _cursor._DEFAULT_FETCH
invoked_statement: Optional[Executable] = None
_is_implicit_returning = False
_is_explicit_returning = False
_is_supplemental_returning = False
_is_server_side = False
_soft_closed = False
_rowcount: Optional[int] = None
# a hook for SQLite's translation of
# result column names
# NOTE: pyhive is using this hook, can't remove it :(
_translate_colname: Optional[
Callable[[str], Tuple[str, Optional[str]]]
] = None
_expanded_parameters: Mapping[str, List[str]] = util.immutabledict()
"""used by set_input_sizes().
This collection comes from ``ExpandedState.parameter_expansion``.
"""
cache_hit = NO_CACHE_KEY
root_connection: Connection
_dbapi_connection: PoolProxiedConnection
dialect: Dialect
unicode_statement: str
cursor: DBAPICursor
compiled_parameters: List[_MutableCoreSingleExecuteParams]
parameters: _DBAPIMultiExecuteParams
extracted_parameters: Optional[Sequence[BindParameter[Any]]]
_empty_dict_params = cast("Mapping[str, Any]", util.EMPTY_DICT)
_insertmanyvalues_rows: Optional[List[Tuple[Any, ...]]] = None
_num_sentinel_cols: int = 0
@classmethod
def _init_ddl(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
compiled_ddl: DDLCompiler,
) -> ExecutionContext:
"""Initialize execution context for an ExecutableDDLElement
construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.compiled = compiled = compiled_ddl
self.isddl = True
self.execution_options = execution_options
self.unicode_statement = str(compiled)
if compiled.schema_translate_map:
schema_translate_map = self.execution_options.get(
"schema_translate_map", {}
)
rst = compiled.preparer._render_schema_translates
self.unicode_statement = rst(
self.unicode_statement, schema_translate_map
)
self.statement = self.unicode_statement
self.cursor = self.create_cursor()
self.compiled_parameters = []
if dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [self._empty_dict_params]
return self
@classmethod
def _init_compiled(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
compiled: SQLCompiler,
parameters: _CoreMultiExecuteParams,
invoked_statement: Executable,
extracted_parameters: Optional[Sequence[BindParameter[Any]]],
cache_hit: CacheStats = CacheStats.CACHING_DISABLED,
param_dict: _CoreSingleExecuteParams | None = None,
) -> ExecutionContext:
"""Initialize execution context for a Compiled construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.extracted_parameters = extracted_parameters
self.invoked_statement = invoked_statement
self.compiled = compiled
self.cache_hit = cache_hit
self.execution_options = execution_options
self.result_column_struct = (
compiled._result_columns,
compiled._ordered_columns,
compiled._textual_ordered_columns,
compiled._ad_hoc_textual,
compiled._loose_column_name_matching,
)
self.isinsert = ii = compiled.isinsert
self.isupdate = iu = compiled.isupdate
self.isdelete = id_ = compiled.isdelete
self.is_text = compiled.isplaintext
if ii or iu or id_:
dml_statement = compiled.compile_state.statement # type: ignore
if TYPE_CHECKING:
assert isinstance(dml_statement, UpdateBase)
self.is_crud = True
self._is_explicit_returning = ier = bool(dml_statement._returning)
self._is_implicit_returning = iir = bool(
compiled.implicit_returning
)
if iir and dml_statement._supplemental_returning:
self._is_supplemental_returning = True
# dont mix implicit and explicit returning
assert not (iir and ier)
if (ier or iir) and compiled.for_executemany:
if ii and not self.dialect.insert_executemany_returning:
raise exc.InvalidRequestError(
f"Dialect {self.dialect.dialect_description} with "
f"current server capabilities does not support "
"INSERT..RETURNING when executemany is used"
)
elif (
ii
and dml_statement._sort_by_parameter_order
and not self.dialect.insert_executemany_returning_sort_by_parameter_order # noqa: E501
):
raise exc.InvalidRequestError(
f"Dialect {self.dialect.dialect_description} with "
f"current server capabilities does not support "
"INSERT..RETURNING with deterministic row ordering "
"when executemany is used"
)
elif (
ii
and self.dialect.use_insertmanyvalues
and not compiled._insertmanyvalues
):
raise exc.InvalidRequestError(
'Statement does not have "insertmanyvalues" '
"enabled, can't use INSERT..RETURNING with "
"executemany in this case."
)
elif iu and not self.dialect.update_executemany_returning:
raise exc.InvalidRequestError(
f"Dialect {self.dialect.dialect_description} with "
f"current server capabilities does not support "
"UPDATE..RETURNING when executemany is used"
)
elif id_ and not self.dialect.delete_executemany_returning:
raise exc.InvalidRequestError(
f"Dialect {self.dialect.dialect_description} with "
f"current server capabilities does not support "
"DELETE..RETURNING when executemany is used"
)
if not parameters:
self.compiled_parameters = [
compiled.construct_params(
extracted_parameters=extracted_parameters,
escape_names=False,
_collected_params=param_dict,
)
]
else:
self.compiled_parameters = [
compiled.construct_params(
m,
escape_names=False,
_group_number=grp,
extracted_parameters=extracted_parameters,
_collected_params=param_dict,
)
for grp, m in enumerate(parameters)
]
if len(parameters) > 1:
if self.isinsert and compiled._insertmanyvalues:
self.execute_style = ExecuteStyle.INSERTMANYVALUES
imv = compiled._insertmanyvalues
if imv.sentinel_columns is not None:
self._num_sentinel_cols = imv.num_sentinel_columns
else:
self.execute_style = ExecuteStyle.EXECUTEMANY
self.unicode_statement = compiled.string
self.cursor = self.create_cursor()
if self.compiled.insert_prefetch or self.compiled.update_prefetch:
self._process_execute_defaults()
processors = compiled._bind_processors
flattened_processors: Mapping[
str, _BindProcessorType[Any]
] = processors # type: ignore[assignment]
if compiled.literal_execute_params or compiled.post_compile_params:
if self.executemany:
raise exc.InvalidRequestError(
"'literal_execute' or 'expanding' parameters can't be "
"used with executemany()"
)
expanded_state = compiled._process_parameters_for_postcompile(
self.compiled_parameters[0]
)
# re-assign self.unicode_statement
self.unicode_statement = expanded_state.statement
self._expanded_parameters = expanded_state.parameter_expansion
flattened_processors = dict(processors) # type: ignore
flattened_processors.update(expanded_state.processors)
positiontup = expanded_state.positiontup
elif compiled.positional:
positiontup = self.compiled.positiontup
else:
positiontup = None
if compiled.schema_translate_map:
schema_translate_map = self.execution_options.get(
"schema_translate_map", {}
)
rst = compiled.preparer._render_schema_translates
self.unicode_statement = rst(
self.unicode_statement, schema_translate_map
)
# final self.unicode_statement is now assigned, encode if needed
# by dialect
self.statement = self.unicode_statement
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
if compiled.positional:
core_positional_parameters: MutableSequence[Sequence[Any]] = []
assert positiontup is not None
for compiled_params in self.compiled_parameters:
l_param: List[Any] = [
(
flattened_processors[key](compiled_params[key])
if key in flattened_processors
else compiled_params[key]
)
for key in positiontup
]
core_positional_parameters.append(
dialect.execute_sequence_format(l_param)
)
self.parameters = core_positional_parameters
else:
core_dict_parameters: MutableSequence[Dict[str, Any]] = []
escaped_names = compiled.escaped_bind_names
# note that currently, "expanded" parameters will be present
# in self.compiled_parameters in their quoted form. This is
# slightly inconsistent with the approach taken as of
# #8056 where self.compiled_parameters is meant to contain unquoted
# param names.
d_param: Dict[str, Any]
for compiled_params in self.compiled_parameters:
if escaped_names:
d_param = {
escaped_names.get(key, key): (
flattened_processors[key](compiled_params[key])
if key in flattened_processors
else compiled_params[key]
)
for key in compiled_params
}
else:
d_param = {
key: (
flattened_processors[key](compiled_params[key])
if key in flattened_processors
else compiled_params[key]
)
for key in compiled_params
}
core_dict_parameters.append(d_param)
self.parameters = core_dict_parameters
return self
@classmethod
def _init_statement(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
statement: str,
parameters: _DBAPIMultiExecuteParams,
) -> ExecutionContext:
"""Initialize execution context for a string SQL statement."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.is_text = True
self.execution_options = execution_options
if not parameters:
if self.dialect.positional:
self.parameters = [dialect.execute_sequence_format()]
else:
self.parameters = [self._empty_dict_params]
elif isinstance(parameters[0], dialect.execute_sequence_format):
self.parameters = parameters
elif isinstance(parameters[0], dict):
self.parameters = parameters
else:
self.parameters = [
dialect.execute_sequence_format(p) for p in parameters
]
if len(parameters) > 1:
self.execute_style = ExecuteStyle.EXECUTEMANY
self.statement = self.unicode_statement = statement
self.cursor = self.create_cursor()
return self
@classmethod
def _init_default(
cls,
dialect: Dialect,
connection: Connection,
dbapi_connection: PoolProxiedConnection,
execution_options: _ExecuteOptions,
) -> ExecutionContext:
"""Initialize execution context for a ColumnDefault construct."""
self = cls.__new__(cls)
self.root_connection = connection
self._dbapi_connection = dbapi_connection
self.dialect = connection.dialect
self.execution_options = execution_options
self.cursor = self.create_cursor()
return self
def _get_cache_stats(self) -> str:
if self.compiled is None:
return "raw sql"
now = perf_counter()
ch = self.cache_hit
gen_time = self.compiled._gen_time
assert gen_time is not None
if ch is NO_CACHE_KEY:
return "no key %.5fs" % (now - gen_time,)
elif ch is CACHE_HIT:
return "cached since %.4gs ago" % (now - gen_time,)
elif ch is CACHE_MISS:
return "generated in %.5fs" % (now - gen_time,)
elif ch is CACHING_DISABLED:
if "_cache_disable_reason" in self.execution_options:
return "caching disabled (%s) %.5fs " % (
self.execution_options["_cache_disable_reason"],
now - gen_time,
)
else:
return "caching disabled %.5fs" % (now - gen_time,)
elif ch is NO_DIALECT_SUPPORT:
return "dialect %s+%s does not support caching %.5fs" % (
self.dialect.name,
self.dialect.driver,
now - gen_time,
)
else:
return "unknown"
@property
def executemany(self): # type: ignore[override]
return self.execute_style in (
ExecuteStyle.EXECUTEMANY,
ExecuteStyle.INSERTMANYVALUES,
)
@util.memoized_property
def identifier_preparer(self):
if self.compiled:
return self.compiled.preparer
elif "schema_translate_map" in self.execution_options:
return self.dialect.identifier_preparer._with_schema_translate(
self.execution_options["schema_translate_map"]
)
else:
return self.dialect.identifier_preparer
@util.memoized_property
def engine(self):
return self.root_connection.engine
@util.memoized_property
def postfetch_cols(self) -> Optional[Sequence[Column[Any]]]:
if TYPE_CHECKING:
assert isinstance(self.compiled, SQLCompiler)
return self.compiled.postfetch
@util.memoized_property
def prefetch_cols(self) -> Optional[Sequence[Column[Any]]]:
if TYPE_CHECKING:
assert isinstance(self.compiled, SQLCompiler)
if self.isinsert:
return self.compiled.insert_prefetch
elif self.isupdate:
return self.compiled.update_prefetch
else:
return ()
@util.memoized_property
def no_parameters(self):
return self.execution_options.get("no_parameters", False)
def _execute_scalar(
self,
stmt: str,
type_: Optional[TypeEngine[Any]],
parameters: Optional[_DBAPISingleExecuteParams] = None,
) -> Any:
"""Execute a string statement on the current cursor, returning a
scalar result.
Used to fire off sequences, default phrases, and "select lastrowid"
types of statements individually or in the context of a parent INSERT
or UPDATE statement.
"""
conn = self.root_connection
if "schema_translate_map" in self.execution_options:
schema_translate_map = self.execution_options.get(
"schema_translate_map", {}
)
rst = self.identifier_preparer._render_schema_translates
stmt = rst(stmt, schema_translate_map)
if not parameters:
if self.dialect.positional:
parameters = self.dialect.execute_sequence_format()
else:
parameters = {}
conn._cursor_execute(self.cursor, stmt, parameters, context=self)
row = self.cursor.fetchone()
if row is not None:
r = row[0]
else:
r = None
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect, self.cursor.description[0][1]
)
if proc:
return proc(r)
return r
@util.memoized_property
def connection(self):
return self.root_connection
def _use_server_side_cursor(self):
if not self.dialect.supports_server_side_cursors:
return False
if self.dialect.server_side_cursors:
# this is deprecated
use_server_side = self.execution_options.get(
"stream_results", True
) and (
self.compiled
and isinstance(self.compiled.statement, expression.Selectable)
or (
(
not self.compiled
or isinstance(
self.compiled.statement, expression.TextClause
)
)
and self.unicode_statement
and SERVER_SIDE_CURSOR_RE.match(self.unicode_statement)
)
)
else:
use_server_side = self.execution_options.get(
"stream_results", False
)
return use_server_side
def create_cursor(self) -> DBAPICursor:
if (
# inlining initial preference checks for SS cursors
self.dialect.supports_server_side_cursors
and (
self.execution_options.get("stream_results", False)
or (
self.dialect.server_side_cursors
and self._use_server_side_cursor()
)
)
):
self._is_server_side = True
return self.create_server_side_cursor()
else:
self._is_server_side = False
return self.create_default_cursor()
def fetchall_for_returning(self, cursor):
return cursor.fetchall()
def create_default_cursor(self) -> DBAPICursor:
return self._dbapi_connection.cursor()
def create_server_side_cursor(self) -> DBAPICursor:
raise NotImplementedError()
def pre_exec(self):
pass
def get_out_parameter_values(self, names):
raise NotImplementedError(
"This dialect does not support OUT parameters"
)
def post_exec(self):
pass
def get_result_processor(
self, type_: TypeEngine[Any], colname: str, coltype: DBAPIType
) -> Optional[_ResultProcessorType[Any]]:
"""Return a 'result processor' for a given type as present in
cursor.description.
This has a default implementation that dialects can override
for context-sensitive result type handling.
"""
return type_._cached_result_processor(self.dialect, coltype)
def get_lastrowid(self) -> int:
    """Return the last inserted row id after an INSERT.

    The default returns ``cursor.lastrowid``; dialects may instead issue
    cursor-specific calls, run a follow-up SELECT, or return a value
    computed during post_exec().  Only invoked for dialects relying on
    the "lastrowid" concept for implicit primary key generation, and
    only when no explicit id value was bound to the statement; in that
    case it runs directly after :meth:`.ExecutionContext.post_exec`.
    """
    return self.cursor.lastrowid
def handle_dbapi_exception(self, e):
    """Dialect hook invoked with a DBAPI exception; default does nothing."""
    pass
@util.non_memoized_property
def rowcount(self) -> int:
    """Row count for the statement.

    Prefers a value captured earlier in the execution (``_rowcount``)
    over the live DBAPI cursor attribute.
    """
    captured = self._rowcount
    return captured if captured is not None else self.cursor.rowcount
@property
def _has_rowcount(self):
    """True when a rowcount has been explicitly captured for this execution."""
    return self._rowcount is not None
def supports_sane_rowcount(self):
    """Proxy the dialect's ``supports_sane_rowcount`` capability flag."""
    return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
    """Proxy the dialect's ``supports_sane_multi_rowcount`` capability flag."""
    return self.dialect.supports_sane_multi_rowcount
def _setup_result_proxy(self):
    """Construct the CursorResult for this execution.

    DML/textual statements are dispatched to
    ``_setup_dml_or_text_result()``; otherwise a fetch strategy is
    chosen (buffered rows for server-side / ``stream_results``
    executions), OUT-parameter post-processing is applied when the
    compiled statement declares OUT params, and ``yield_per`` is applied
    when requested.
    """
    exec_opt = self.execution_options
    # capture rowcount up front if requested, before any fetching
    # can disturb the cursor state
    if self._rowcount is None and exec_opt.get("preserve_rowcount", False):
        self._rowcount = self.cursor.rowcount
    yp: Optional[Union[int, bool]]
    if self.is_crud or self.is_text:
        result = self._setup_dml_or_text_result()
        # yield_per is not applied on the DML/text path
        yp = False
    else:
        yp = exec_opt.get("yield_per", None)
        sr = self._is_server_side or exec_opt.get("stream_results", False)
        strategy = self.cursor_fetch_strategy
        if sr and strategy is _cursor._DEFAULT_FETCH:
            strategy = _cursor.BufferedRowCursorFetchStrategy(
                self.cursor, self.execution_options
            )
        cursor_description: _DBAPICursorDescription = (
            strategy.alternate_cursor_description
            or self.cursor.description
        )
        if cursor_description is None:
            # no column metadata: statement returns no rows
            strategy = _cursor._NO_CURSOR_DQL
        result = _cursor.CursorResult(self, strategy, cursor_description)
    compiled = self.compiled
    if (
        compiled
        and not self.isddl
        and cast(SQLCompiler, compiled).has_out_parameters
    ):
        self._setup_out_parameters(result)
    self._soft_closed = result._soft_closed
    if yp:
        result = result.yield_per(yp)
    return result
def _setup_out_parameters(self, result):
    """Collect OUT-parameter values and attach them to *result*.

    Gathers the bind parameters flagged ``isoutparam`` from the compiled
    statement, fetches their raw values via
    ``get_out_parameter_values()``, runs each through its type's result
    processor when one exists, and stores the mapping on
    ``result.out_parameters`` keyed by bind parameter key.
    """
    compiled = cast(SQLCompiler, self.compiled)
    out_bindparams = [
        (param, name)
        for param, name in compiled.bind_names.items()
        if param.isoutparam
    ]
    out_parameters = {}
    # values come back in the same order the names were passed
    for bindparam, raw_value in zip(
        [param for param, name in out_bindparams],
        self.get_out_parameter_values(
            [name for param, name in out_bindparams]
        ),
    ):
        type_ = bindparam.type
        impl_type = type_.dialect_impl(self.dialect)
        dbapi_type = impl_type.get_dbapi_type(self.dialect.loaded_dbapi)
        result_processor = impl_type.result_processor(
            self.dialect, dbapi_type
        )
        if result_processor is not None:
            raw_value = result_processor(raw_value)
        out_parameters[bindparam.key] = raw_value
    result.out_parameters = out_parameters
def _setup_dml_or_text_result(self):
    """Build the CursorResult for an INSERT/UPDATE/DELETE or textual statement.

    Handles the interplay of insertmanyvalues row buffering, lastrowid-
    vs RETURNING-based inserted-primary-key capture, server-side cursor
    buffering, and rowcount capture for statements that return no rows.
    """
    compiled = cast(SQLCompiler, self.compiled)
    strategy: ResultFetchStrategy = self.cursor_fetch_strategy
    if self.isinsert:
        if (
            self.execute_style is ExecuteStyle.INSERTMANYVALUES
            and compiled.effective_returning
        ):
            # rows were accumulated across the insertmanyvalues batches;
            # serve them from a fully buffered strategy
            strategy = _cursor.FullyBufferedCursorFetchStrategy(
                self.cursor,
                initial_buffer=self._insertmanyvalues_rows,
                # maintain alt cursor description if set by the
                # dialect, e.g. mssql preserves it
                alternate_description=(
                    strategy.alternate_cursor_description
                ),
            )
        if compiled.postfetch_lastrowid:
            self.inserted_primary_key_rows = (
                self._setup_ins_pk_from_lastrowid()
            )
        # else if not self._is_implicit_returning,
        # the default inserted_primary_key_rows accessor will
        # return an "empty" primary key collection when accessed.
    if self._is_server_side and strategy is _cursor._DEFAULT_FETCH:
        strategy = _cursor.BufferedRowCursorFetchStrategy(
            self.cursor, self.execution_options
        )
    if strategy is _cursor._NO_CURSOR_DML:
        cursor_description = None
    else:
        cursor_description = (
            strategy.alternate_cursor_description
            or self.cursor.description
        )
    if cursor_description is None:
        strategy = _cursor._NO_CURSOR_DML
    elif self._num_sentinel_cols:
        assert self.execute_style is ExecuteStyle.INSERTMANYVALUES
        # the sentinel columns are handled in CursorResult._init_metadata
        # using essentially _reduce
    result: _cursor.CursorResult[Any] = _cursor.CursorResult(
        self, strategy, cursor_description
    )
    if self.isinsert:
        if self._is_implicit_returning:
            rows = result.all()
            self.returned_default_rows = rows
            self.inserted_primary_key_rows = (
                self._setup_ins_pk_from_implicit_returning(result, rows)
            )
            # test that it has a cursor metadata that is accurate. the
            # first row will have been fetched and current assumptions
            # are that the result has only one row, until executemany()
            # support is added here.
            assert result._metadata.returns_rows
            # Insert statement has both return_defaults() and
            # returning(). rewind the result on the list of rows
            # we just used.
            if self._is_supplemental_returning:
                result._rewind(rows)
            else:
                result._soft_close()
        elif not self._is_explicit_returning:
            result._soft_close()
            # we assume here the result does not return any rows.
            # *usually*, this will be true. However, some dialects
            # such as that of MSSQL/pyodbc need to SELECT a post fetch
            # function so this is not necessarily true.
            # assert not result.returns_rows
    elif self._is_implicit_returning:
        rows = result.all()
        if rows:
            self.returned_default_rows = rows
        self._rowcount = len(rows)
        if self._is_supplemental_returning:
            result._rewind(rows)
        else:
            result._soft_close()
        # test that it has a cursor metadata that is accurate.
        # the rows have all been fetched however.
        assert result._metadata.returns_rows
    elif not result._metadata.returns_rows:
        # no results, get rowcount
        # (which requires open cursor on some drivers)
        if self._rowcount is None:
            self._rowcount = self.cursor.rowcount
        result._soft_close()
    elif self.isupdate or self.isdelete:
        if self._rowcount is None:
            self._rowcount = self.cursor.rowcount
    return result
@util.memoized_property
def inserted_primary_key_rows(self):
    """Default inserted-primary-key rows, derived solely from the
    compiled parameters.

    Used when no lastrowid- or RETURNING-based strategy populated this
    attribute during statement execution.
    """
    return self._setup_ins_pk_from_empty()
def _setup_ins_pk_from_lastrowid(self):
    """Build the single-row inserted-primary-key collection using
    ``cursor.lastrowid`` plus the first set of compiled parameters."""
    compiled = cast(SQLCompiler, self.compiled)
    getter = compiled._inserted_primary_key_from_lastrowid_getter
    return [getter(self.get_lastrowid(), self.compiled_parameters[0])]
def _setup_ins_pk_from_empty(self):
    """Build inserted-primary-key rows purely from the compiled
    parameters, with no lastrowid available (``None`` is passed)."""
    compiled = cast(SQLCompiler, self.compiled)
    getter = compiled._inserted_primary_key_from_lastrowid_getter
    return [getter(None, params) for params in self.compiled_parameters]
def _setup_ins_pk_from_implicit_returning(self, result, rows):
    """Build inserted-primary-key rows from RETURNING *rows*, pairing
    each row with its corresponding compiled parameter set."""
    if not rows:
        return []
    compiled = cast(SQLCompiler, self.compiled)
    getter = compiled._inserted_primary_key_from_returning_getter
    return [
        getter(row, params)
        for row, params in zip(rows, self.compiled_parameters)
    ]
def lastrow_has_defaults(self) -> bool:
    """True when the just-executed INSERT/UPDATE has server-generated
    columns pending post-fetch."""
    if not (self.isinsert or self.isupdate):
        return False
    return bool(cast(SQLCompiler, self.compiled).postfetch)
def _prepare_set_input_sizes(
    self,
) -> Optional[List[Tuple[str, Any, TypeEngine[Any]]]]:
    """Given a cursor and ClauseParameters, prepare arguments
    in order to call the appropriate
    style of ``setinputsizes()`` on the cursor, using DB-API types
    from the bind parameter's ``TypeEngine`` objects.

    This method only called by those dialects which set the
    :attr:`.Dialect.bind_typing` attribute to
    :attr:`.BindTyping.SETINPUTSIZES`. Python-oracledb and cx_Oracle are
    the only DBAPIs that requires setinputsizes(); pyodbc offers it as an
    option.

    Prior to SQLAlchemy 2.0, the setinputsizes() approach was also used
    for pg8000 and asyncpg, which has been changed to inline rendering
    of casts.

    Returns a list of ``(param_name, dbapi_type, type_engine)`` tuples,
    or None when setinputsizes does not apply to this statement.
    """
    if self.isddl or self.is_text:
        return None
    compiled = cast(SQLCompiler, self.compiled)
    inputsizes = compiled._get_set_input_sizes_lookup()
    if inputsizes is None:
        return None
    dialect = self.dialect
    # all of the rest of this... cython?
    if dialect._has_events:
        # copy before dispatching so event handlers may mutate freely
        inputsizes = dict(inputsizes)
        dialect.dispatch.do_setinputsizes(
            inputsizes, self.cursor, self.statement, self.parameters, self
        )
    if compiled.escaped_bind_names:
        escaped_bind_names = compiled.escaped_bind_names
    else:
        escaped_bind_names = None
    if dialect.positional:
        items = [
            (key, compiled.binds[key])
            for key in compiled.positiontup or ()
        ]
    else:
        items = [
            (key, bindparam)
            for bindparam, key in compiled.bind_names.items()
        ]
    generic_inputsizes: List[Tuple[str, Any, TypeEngine[Any]]] = []
    for key, bindparam in items:
        if bindparam in compiled.literal_execute_params:
            # rendered inline; no bound parameter to size
            continue
        if key in self._expanded_parameters:
            # "expanding" IN parameter: one entry per expanded name
            if is_tuple_type(bindparam.type):
                # tuple IN: cycle through the tuple's member types
                num = len(bindparam.type.types)
                dbtypes = inputsizes[bindparam]
                generic_inputsizes.extend(
                    (
                        (
                            escaped_bind_names.get(paramname, paramname)
                            if escaped_bind_names is not None
                            else paramname
                        ),
                        dbtypes[idx % num],
                        bindparam.type.types[idx % num],
                    )
                    for idx, paramname in enumerate(
                        self._expanded_parameters[key]
                    )
                )
            else:
                dbtype = inputsizes.get(bindparam, None)
                generic_inputsizes.extend(
                    (
                        (
                            escaped_bind_names.get(paramname, paramname)
                            if escaped_bind_names is not None
                            else paramname
                        ),
                        dbtype,
                        bindparam.type,
                    )
                    for paramname in self._expanded_parameters[key]
                )
        else:
            dbtype = inputsizes.get(bindparam, None)
            escaped_name = (
                escaped_bind_names.get(key, key)
                if escaped_bind_names is not None
                else key
            )
            generic_inputsizes.append(
                (escaped_name, dbtype, bindparam.type)
            )
    return generic_inputsizes
def _exec_default(self, column, default, type_):
    """Evaluate a column default object and return its value.

    Dispatches on the default's kind: sequence, Python callable, clause
    element, or plain scalar.
    """
    if default.is_sequence:
        return self.fire_sequence(default, type_)
    if default.is_callable:
        # this codepath is not normally used as it's inlined
        # into _process_execute_defaults
        self.current_column = column
        return default.arg(self)
    if default.is_clause_element:
        return self._exec_default_clause_element(column, default, type_)
    # plain scalar default; also normally inlined into
    # _process_execute_defaults
    return default.arg
def _exec_default_clause_element(self, column, default, type_):
    """Execute a clause-element default (e.g. a SQL expression default)
    and return its scalar value."""
    # execute a default that's a complete clause element. Here, we have
    # to re-implement a miniature version of the compile->parameters->
    # cursor.execute() sequence, since we don't want to modify the state
    # of the connection / result in progress or create new connection/
    # result objects etc.
    # .. versionchanged:: 1.4
    if not default._arg_is_typed:
        # coerce untyped defaults to the column's type so the correct
        # bind/result processing applies
        default_arg = expression.type_coerce(default.arg, type_)
    else:
        default_arg = default.arg
    compiled = expression.select(default_arg).compile(dialect=self.dialect)
    compiled_params = compiled.construct_params()
    processors = compiled._bind_processors
    if compiled.positional:
        parameters = self.dialect.execute_sequence_format(
            [
                (
                    processors[key](compiled_params[key])  # type: ignore
                    if key in processors
                    else compiled_params[key]
                )
                for key in compiled.positiontup or ()
            ]
        )
    else:
        parameters = {
            key: (
                processors[key](compiled_params[key])  # type: ignore
                if key in processors
                else compiled_params[key]
            )
            for key in compiled_params
        }
    return self._execute_scalar(
        str(compiled), type_, parameters=parameters
    )
current_parameters: Optional[_CoreSingleExecuteParams] = None
"""A dictionary of parameters applied to the current row.
This attribute is only available in the context of a user-defined default
generation function, e.g. as described at :ref:`context_default_functions`.
It consists of a dictionary which includes entries for each column/value
pair that is to be part of the INSERT or UPDATE statement. The keys of the
dictionary will be the key value of each :class:`_schema.Column`,
which is usually
synonymous with the name.
Note that the :attr:`.DefaultExecutionContext.current_parameters` attribute
does not accommodate for the "multi-values" feature of the
:meth:`_expression.Insert.values` method. The
:meth:`.DefaultExecutionContext.get_current_parameters` method should be
preferred.
.. seealso::
:meth:`.DefaultExecutionContext.get_current_parameters`
:ref:`context_default_functions`
"""
def get_current_parameters(self, isolate_multiinsert_groups=True):
    """Return a dictionary of parameters applied to the current row.

    This method can only be used in the context of a user-defined default
    generation function, e.g. as described at
    :ref:`context_default_functions`. When invoked, a dictionary is
    returned which includes entries for each column/value pair that is part
    of the INSERT or UPDATE statement. The keys of the dictionary will be
    the key value of each :class:`_schema.Column`,
    which is usually synonymous
    with the name.

    :param isolate_multiinsert_groups=True: indicates that multi-valued
     INSERT constructs created using :meth:`_expression.Insert.values`
     should be
     handled by returning only the subset of parameters that are local
     to the current column default invocation. When ``False``, the
     raw parameters of the statement are returned including the
     naming convention used in the case of multi-valued INSERT.

    .. seealso::

        :attr:`.DefaultExecutionContext.current_parameters`

        :ref:`context_default_functions`

    """
    try:
        # these two attributes are only present while
        # _process_execute_defaults() is invoking default functions
        parameters = self.current_parameters
        column = self.current_column
    except AttributeError:
        raise exc.InvalidRequestError(
            "get_current_parameters() can only be invoked in the "
            "context of a Python side column default function"
        )
    else:
        assert column is not None
        assert parameters is not None
        compile_state = cast(
            "DMLState", cast(SQLCompiler, self.compiled).compile_state
        )
        assert compile_state is not None
        if (
            isolate_multiinsert_groups
            and dml.isinsert(compile_state)
            and compile_state._has_multi_parameters
        ):
            # multi-values INSERT: raw parameter keys are suffixed
            # "_m<N>" per values() group; strip down to the group that
            # contains the current column
            if column._is_multiparam_column:
                index = column.index + 1
                d = {column.original.key: parameters[column.key]}
            else:
                d = {column.key: parameters[column.key]}
                index = 0
            assert compile_state._dict_parameters is not None
            keys = compile_state._dict_parameters.keys()
            d.update(
                (key, parameters["%s_m%d" % (key, index)]) for key in keys
            )
            return d
        else:
            return parameters
def get_insert_default(self, column):
    """Produce the INSERT-time default value for *column*.

    Returns None when the column declares no default.
    """
    default = column.default
    if default is None:
        return None
    return self._exec_default(column, default, column.type)
def get_update_default(self, column):
    """Produce the UPDATE-time (onupdate) default value for *column*.

    Returns None when the column declares no onupdate default.
    """
    onupdate = column.onupdate
    if onupdate is None:
        return None
    return self._exec_default(column, onupdate, column.type)
def _process_execute_defaults(self):
    """Apply Python-side column defaults to every compiled parameter set,
    mutating each parameter dictionary in place.

    Builds a list of prefetch records from either insert_prefetch or
    update_prefetch (mutually exclusive here), then for each row fills
    in sentinel counters, scalar defaults, callable defaults (invoked
    with this context), or values from the fallback getter.  Exposes
    ``current_parameters`` / ``current_column`` to user default
    functions for the duration of each invocation.
    """
    compiled = cast(SQLCompiler, self.compiled)
    key_getter = compiled._within_exec_param_key_getter
    # monotonically increasing value used for insertmanyvalues sentinel
    # columns, shared across all rows
    sentinel_counter = 0
    if compiled.insert_prefetch:
        prefetch_recs = [
            (
                c,
                key_getter(c),
                c._default_description_tuple,
                self.get_insert_default,
            )
            for c in compiled.insert_prefetch
        ]
    elif compiled.update_prefetch:
        prefetch_recs = [
            (
                c,
                key_getter(c),
                c._onupdate_description_tuple,
                self.get_update_default,
            )
            for c in compiled.update_prefetch
        ]
    else:
        prefetch_recs = []
    for param in self.compiled_parameters:
        # visible to user default functions via get_current_parameters()
        self.current_parameters = param
        for (
            c,
            param_key,
            (arg, is_scalar, is_callable, is_sentinel),
            fallback,
        ) in prefetch_recs:
            if is_sentinel:
                param[param_key] = sentinel_counter
                sentinel_counter += 1
            elif is_scalar:
                param[param_key] = arg
            elif is_callable:
                self.current_column = c
                param[param_key] = arg(self)
            else:
                val = fallback(c)
                if val is not None:
                    param[param_key] = val
    del self.current_parameters
# Install this class as the ExecutionContext implementation used by
# DefaultDialect (set here since the class is defined after the dialect).
DefaultDialect.execution_ctx_cls = DefaultExecutionContext
| DefaultExecutionContext |
python | astropy__astropy | astropy/modeling/tests/test_fitters.py | {
"start": 26635,
"end": 27914
} | class ____:
def setup_class(self):
self.x = np.linspace(-5.0, 5.0, 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0] * np.exp(-0.5 * (x - p[1]) ** 2 / p[2] ** 2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings("ignore:The fit may be unsuccessful")
@pytest.mark.filterwarnings(
r"ignore:Values in x were outside bounds during a minimize step, "
r"clipping to bounds"
)
@pytest.mark.parametrize("fitter", non_linear_fitters_bounds + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (
np.random.normal(0.0, 0.2, self.x.shape)
+ c * np.random.normal(3.0, 5.0, self.x.shape)
)
g_init = models.Gaussian1D(amplitude=1.0, mean=0, stddev=1.0)
fit = FittingWithOutlierRemoval(fitter, sigma_clip, niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
| Test1DFittingWithOutlierRemoval |
python | keon__algorithms | algorithms/queues/priority_queue.py | {
"start": 332,
"end": 1790
} | class ____:
def __init__(self, items=None, priorities=None):
"""Create a priority queue with items (list or iterable).
If items is not passed, create empty priority queue."""
self.priority_queue_list = []
if items is None:
return
if priorities is None:
priorities = itertools.repeat(None)
for item, priority in zip(items, priorities):
self.push(item, priority=priority)
def __repr__(self):
return "PriorityQueue({!r})".format(self.priority_queue_list)
def size(self):
"""Return size of the priority queue.
"""
return len(self.priority_queue_list)
def push(self, item, priority=None):
"""Push the item in the priority queue.
if priority is not given, priority is set to the value of item.
"""
priority = item if priority is None else priority
node = PriorityQueueNode(item, priority)
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
self.priority_queue_list.insert(index, node)
return
# when traversed complete queue
self.priority_queue_list.append(node)
def pop(self):
"""Remove and return the item with the lowest priority.
"""
# remove and return the first node from the queue
return self.priority_queue_list.pop().data
| PriorityQueue |
python | numba__numba | numba/core/errors.py | {
"start": 20808,
"end": 21237
} | class ____(InternalError):
"""For signalling a target mismatch error occurred internally within the
compiler.
"""
def __init__(self, kind, target_hw, hw_clazz):
msg = (f"{kind.title()} being resolved on a target from which it does "
f"not inherit. Local target is {target_hw}, declared "
f"target class is {hw_clazz}.")
super().__init__(msg)
| InternalTargetMismatchError |
python | keras-team__keras | keras/src/layers/merging/average.py | {
"start": 166,
"end": 2214
} | class ____(Merge):
"""Averages a list of inputs element-wise..
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Average()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.average([x1, x2])`
>>> y = keras.layers.Average()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.add(output, inputs[i])
return output / len(inputs)
@keras_export("keras.layers.average")
def average(inputs, **kwargs):
"""Functional interface to the `keras.layers.Average` layer.
Args:
inputs: A list of input tensors , all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the element-wise product of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.average([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.average([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Average(**kwargs)(inputs)
| Average |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ001.py | {
"start": 543,
"end": 959
} | class ____(DjangoModel):
charfield = DjangoModel.CharField(max_length=255, null=True)
textfield = SmthCharField(max_length=255, null=True)
slugfield = models.SlugField(max_length=255, null=True)
emailfield = models.EmailField(max_length=255, null=True)
filepathfield = models.FilePathField(max_length=255, null=True)
urlfield = models.URLField(max_length=255, null=True)
| IncorrectModelWithAlias |
python | pytorch__pytorch | tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py | {
"start": 1290,
"end": 3711
} | class ____(CoveragePlugin): # type: ignore[misc, no-any-unimported]
"""
dynamic_context is an overridden function that gives us access to every frame run during the coverage process. We
look for when the function being run is `should_drop`, as all functions that get passed into `should_drop` will be
compiled and thus should be marked as covered.
"""
def dynamic_context(self, frame: Any) -> None:
if frame.f_code.co_name == "should_drop":
obj = frame.f_locals["fn"]
# The many conditions in the if statement below are based on the accepted arguments to getsourcefile. Based
# on its documentation (https://docs.python.org/3/library/inspect.html#inspect.getsourcefile), the argument
# must be a module, class, method, function, traceback, frame, or code object AND it cannot be a built-in
# module, class, or function.
# Currently, we DO NOT include tracebacks or frames as they should not be JIT'd, and we have not checked for
# built-in modules or functions as those do not seem to be JIT'd either.
if (
is_not_builtin_class(obj)
or ismodule(obj)
or ismethod(obj)
or isfunction(obj)
or iscode(obj)
):
filename = getsourcefile(obj)
# We don't want to report for filename = None
if filename:
# TODO: Because torch.jit._IgnoreContextManager relies on Python's `exec` method
# which doesn't generate source codelines, getsourcelines(obj) fails. For now,
# we just ignore the exception until we figure out a better way to
# implement torch.jit._IgnoreContextManager.
try:
sourcelines, starting_lineno = getsourcelines(obj)
except OSError:
pass
else:
line_data = {
filename: range(
starting_lineno, starting_lineno + len(sourcelines)
)
}
cov_data.add_lines(line_data)
super().dynamic_context(frame)
def coverage_init(reg: Any, options: Any) -> None:
reg.add_dynamic_context(JitPlugin())
| JitPlugin |
python | pyca__cryptography | src/cryptography/hazmat/primitives/ciphers/algorithms.py | {
"start": 1605,
"end": 2697
} | class ____(BlockCipherAlgorithm):
name = "AES"
block_size = 128
key_sizes = frozenset([256])
key_size = 256
def __init__(self, key: utils.Buffer):
self.key = _verify_key_size(self, key)
utils.deprecated(
Camellia,
__name__,
"Camellia has been moved to "
"cryptography.hazmat.decrepit.ciphers.algorithms.Camellia and "
"will be removed from "
"cryptography.hazmat.primitives.ciphers.algorithms in 49.0.0.",
utils.DeprecatedIn43,
name="Camellia",
)
utils.deprecated(
ARC4,
__name__,
"ARC4 has been moved to "
"cryptography.hazmat.decrepit.ciphers.algorithms.ARC4 and "
"will be removed from "
"cryptography.hazmat.primitives.ciphers.algorithms in 48.0.0.",
utils.DeprecatedIn43,
name="ARC4",
)
utils.deprecated(
TripleDES,
__name__,
"TripleDES has been moved to "
"cryptography.hazmat.decrepit.ciphers.algorithms.TripleDES and "
"will be removed from "
"cryptography.hazmat.primitives.ciphers.algorithms in 48.0.0.",
utils.DeprecatedIn43,
name="TripleDES",
)
| AES256 |
python | rapidsai__cudf | python/cudf/cudf/core/dtypes.py | {
"start": 30467,
"end": 30682
} | class ____(DecimalDtype):
name = "decimal64"
MAX_PRECISION = np.floor(np.log10(np.iinfo("int64").max))
ITEMSIZE = 8
@doc_apply(
decimal_dtype_template.format(
size="128",
)
)
| Decimal64Dtype |
python | TheAlgorithms__Python | data_structures/linked_list/deque_doubly.py | {
"start": 214,
"end": 1649
} | class ____:
"""A Private class (to be inherited)"""
class _Node:
__slots__ = "_data", "_next", "_prev"
def __init__(self, link_p, element, link_n):
self._prev = link_p
self._data = element
self._next = link_n
def has_next_and_prev(self):
return (
f" Prev -> {self._prev is not None}, Next -> {self._next is not None}"
)
def __init__(self):
self._header = self._Node(None, None, None)
self._trailer = self._Node(None, None, None)
self._header._next = self._trailer
self._trailer._prev = self._header
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self.__len__() == 0
def _insert(self, predecessor, e, successor):
# Create new_node by setting it's prev.link -> header
# setting it's next.link -> trailer
new_node = self._Node(predecessor, e, successor)
predecessor._next = new_node
successor._prev = new_node
self._size += 1
return self
def _delete(self, node):
predecessor = node._prev
successor = node._next
predecessor._next = successor
successor._prev = predecessor
self._size -= 1
temp = node._data
node._prev = node._next = node._data = None
del node
return temp
| _DoublyLinkedBase |
python | modin-project__modin | modin/pandas/window.py | {
"start": 8574,
"end": 11442
} | class ____(Rolling):
def __init__(self, groupby_obj, *args, **kwargs):
self._as_index = groupby_obj._kwargs.get("as_index", True)
self._groupby_obj = (
groupby_obj if self._as_index else groupby_obj._override(as_index=True)
)
super().__init__(self._groupby_obj._df, *args, **kwargs)
def sem(self, *args, **kwargs):
ErrorMessage.mismatch_with_pandas(
operation="RollingGroupby.sem() when 'as_index=False'",
message=(
"The group columns won't be involved in the aggregation.\n"
+ "See this gh-issue for more information: https://github.com/modin-project/modin/issues/6291"
),
)
return super().sem(*args, **kwargs)
def corr(self, other=None, pairwise=None, *args, **kwargs):
# pandas behavior is that it always assumes that 'as_index=True' for the '.corr()' method
return super().corr(
*args, as_index=True, other=other, pairwise=pairwise, **kwargs
)
def cov(self, other=None, pairwise=None, ddof: Optional[int] = 1, **kwargs):
# pandas behavior is that it always assumes that 'as_index=True' for the '.cov()' method
return super().cov(as_index=True, other=other, pairwise=pairwise, **kwargs)
def _aggregate(self, method_name, *args, as_index=None, **kwargs):
"""
Run the specified rolling aggregation.
Parameters
----------
method_name : str
Name of the aggregation.
*args : tuple
Positional arguments to pass to the aggregation.
as_index : bool, optional
Whether the result should have the group labels as index levels or as columns.
If not specified the parameter value will be taken from groupby kwargs.
**kwargs : dict
Keyword arguments to pass to the aggregation.
Returns
-------
DataFrame or Series
Result of the aggregation.
"""
res = self._groupby_obj._wrap_aggregation(
qc_method=type(self._query_compiler).groupby_rolling,
numeric_only=False,
agg_args=args,
agg_kwargs=kwargs,
agg_func=method_name,
rolling_kwargs=self.rolling_kwargs,
)
if as_index is None:
as_index = self._as_index
if not as_index:
res = res.reset_index(
level=[i for i in range(len(self._groupby_obj._internal_by))],
drop=False,
)
return res
def _call_qc_method(self, method_name, *args, **kwargs):
return self._aggregate(method_name, *args, **kwargs)._query_compiler
@_inherit_docstrings(
pandas.core.window.expanding.Expanding,
excluded=[pandas.core.window.expanding.Expanding.__init__],
)
| RollingGroupby |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol22.py | {
"start": 737,
"end": 942
} | class ____(Protocol[_T1_contra, _T2_contra]):
def m1(self, a: _T1_contra | _T2_contra) -> None: ...
# This is right, as `_T1` and `_T2` are both covariant with the
# argument type and the return type.
| P2 |
python | keras-team__keras | keras/src/wrappers/sklearn_wrapper.py | {
"start": 10743,
"end": 13986
} | class ____(RegressorMixin, SKLBase):
"""scikit-learn compatible regressor wrapper for Keras models.
Note that there are sources of randomness in model initialization and
training. Refer to [Reproducibility in Keras Models](
https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
control randomness.
Args:
model: `Model`.
An instance of `Model`, or a callable returning such an object.
Note that if input is a `Model`, it will be cloned using
`keras.models.clone_model` before being fitted, unless
`warm_start=True`.
The `Model` instance needs to be passed as already compiled.
If callable, it must accept at least `X` and `y` as keyword
arguments. Other arguments must be accepted if passed as
`model_kwargs` by the user.
warm_start: bool, defaults to `False`.
Whether to reuse the model weights from the previous fit. If `True`,
the given model won't be cloned and the weights from the previous
fit will be reused.
model_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model`, if `model` is callable.
fit_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model.fit`. These can also be passed
directly to the `fit` method of the scikit-learn wrapper. The
values passed directly to the `fit` method take precedence over
these.
Attributes:
model_ : `Model`
The fitted model.
Example:
Here we use a function which creates a basic MLP model dynamically
choosing the input and output shapes. We will use this to create our
scikit-learn model.
``` python
from keras.layers import Dense, Input
from keras.models import Model
def dynamic_model(X, y, loss, layers=[10]):
# Creates a basic MLP model dynamically choosing the input and
# output shapes.
n_features_in = X.shape[1]
inp = Input(shape=(n_features_in,))
hidden = inp
for layer_size in layers:
hidden = Dense(layer_size, activation="relu")(hidden)
n_outputs = y.shape[1] if len(y.shape) > 1 else 1
out = Dense(n_outputs)(hidden)
model = Model(inp, out)
model.compile(loss=loss, optimizer="rmsprop")
return model
```
You can then use this function to create a scikit-learn compatible model
and fit it on some data.
``` python
from sklearn.datasets import make_regression
from keras.wrappers import SKLearnRegressor
X, y = make_regression(n_samples=1000, n_features=10)
est = SKLearnRegressor(
model=dynamic_model,
model_kwargs={
"loss": "mse",
"layers": [20, 20, 20],
},
)
est.fit(X, y, epochs=5)
```
"""
def _more_tags(self):
# required to be compatible with scikit-learn<1.6
return {"poor_score": True}
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.regressor_tags.poor_score = True
return tags
@keras_export("keras.wrappers.SKLearnTransformer")
| SKLearnRegressor |
python | dateutil__dateutil | src/dateutil/tz/tz.py | {
"start": 10266,
"end": 10691
} | class ____(object):
"""
Lightweight class for holding the relevant transition and time zone
information read from binary tzfiles.
"""
attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
def __init__(self, **kwargs):
for attr in self.attrs:
setattr(self, attr, kwargs.get(attr, None))
| _tzfile |
python | pypa__warehouse | tests/unit/organizations/test_models.py | {
"start": 20373,
"end": 24802
} | class ____:
def test_is_active_future_expiration(self, db_session):
# Freeze time to a known date
with freeze_time("2024-01-15"):
# Create activation that expires in the future
activation = DBOrganizationManualActivationFactory.create(
expires=datetime.date(2024, 12, 31)
)
assert activation.is_active
def test_is_active_past_expiration(self, db_session):
# Freeze time to a known date
with freeze_time("2024-01-15"):
# Create activation that already expired
activation = DBOrganizationManualActivationFactory.create(
expires=datetime.date(2023, 12, 31)
)
assert not activation.is_active
def test_current_member_count(self, db_session):
organization = DBOrganizationFactory.create()
activation = DBOrganizationManualActivationFactory.create(
organization=organization, seat_limit=10
)
# Create some organization roles (members)
for _ in range(3):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
assert activation.current_member_count == 3
def test_has_available_seats_with_space(self, db_session):
organization = DBOrganizationFactory.create()
activation = DBOrganizationManualActivationFactory.create(
organization=organization, seat_limit=10
)
# Create some organization roles (members)
for _ in range(5):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
assert activation.has_available_seats
def test_has_available_seats_at_limit(self, db_session):
organization = DBOrganizationFactory.create()
activation = DBOrganizationManualActivationFactory.create(
organization=organization, seat_limit=5
)
# Create organization roles up to the limit
for _ in range(5):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
assert not activation.has_available_seats
def test_has_available_seats_over_limit(self, db_session):
organization = DBOrganizationFactory.create()
activation = DBOrganizationManualActivationFactory.create(
organization=organization, seat_limit=3
)
# Create more organization roles than the limit allows
for _ in range(5):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
assert not activation.has_available_seats
def test_available_seats(self, db_session):
organization = DBOrganizationFactory.create()
activation = DBOrganizationManualActivationFactory.create(
organization=organization, seat_limit=10
)
# Create some organization roles (members)
for _ in range(3):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
assert activation.available_seats == 7 # 10 - 3
def test_available_seats_negative(self, db_session):
organization = DBOrganizationFactory.create()
activation = DBOrganizationManualActivationFactory.create(
organization=organization, seat_limit=3
)
# Create more organization roles than the limit
for _ in range(5):
user = DBUserFactory.create()
DBOrganizationRoleFactory.create(
organization=organization,
user=user,
role_name=OrganizationRoleType.Member,
)
assert activation.available_seats == 0 # Should never be negative
| TestOrganizationManualActivation |
python | bokeh__bokeh | tests/unit/bokeh/command/subcommands/test_serve.py | {
"start": 2880,
"end": 20419
} | class ____:
def test_create(self) -> None:
from bokeh.command.subcommand import Subcommand
obj = bcss.Serve(parser=argparse.ArgumentParser())
assert isinstance(obj, Subcommand)
def test_default_customize_applications_is_identity(self):
obj = bcss.Serve(parser=argparse.ArgumentParser())
apps = {}
result = obj.customize_applications(argparse.Namespace(), apps)
assert result == apps
assert result is not apps
def test_default_customize_kwargs_is_identity(self):
obj = bcss.Serve(parser=argparse.ArgumentParser())
kws = {}
result = obj.customize_kwargs(argparse.Namespace(), kws)
assert result == kws
assert result is not kws
def test_loglevels() -> None:
assert bcss.LOGLEVELS == ('trace', 'debug', 'info', 'warning', 'error', 'critical')
def test_name() -> None:
assert bcss.Serve.name == "serve"
def test_help() -> None:
assert bcss.Serve.help == "Run a Bokeh server hosting one or more applications"
def test_args() -> None:
from bokeh.util.strings import nice_join
assert bcss.Serve.args == (
('--port', Argument(
metavar = 'PORT',
type = int,
help = "Port to listen on",
default = DEFAULT_SERVER_PORT,
)),
('--address', Argument(
metavar = 'ADDRESS',
type = str,
help = "Address to listen on",
default = None,
)),
('--unix-socket', Argument(
metavar = 'UNIX-SOCKET',
type = str,
help = "Unix socket to bind. Network options such as port, address, ssl options are incompatible with unix socket",
default = None,
)),
('--log-level', Argument(
metavar = 'LOG-LEVEL',
action = 'store',
default = None,
choices = (*bcss.LOGLEVELS, 'None'),
help = f"One of: {nice_join(bcss.LOGLEVELS)}",
)),
('--log-format', Argument(
metavar ='LOG-FORMAT',
action = 'store',
default = bcss.DEFAULT_LOG_FORMAT,
help = f"A standard Python logging format string (default: {bcss.DEFAULT_LOG_FORMAT!r})".replace("%", "%%"),
)),
('--log-file', Argument(
metavar ='LOG-FILE',
action = 'store',
default = None,
help = "A filename to write logs to, or None to write to the standard stream (default: None)",
)),
('--use-config', Argument(
metavar = 'CONFIG',
type = str,
help = "Use a YAML config file for settings",
default = None,
)),
('files', Argument(
metavar = 'DIRECTORY-OR-SCRIPT',
nargs = '*',
help = "The app directories or scripts to serve (serve empty document if not specified)",
default = None,
)),
('--args', Argument(
metavar = 'COMMAND-LINE-ARGS',
nargs = argparse.REMAINDER,
help = "Command line arguments remaining to passed on to the application handler. "
"NOTE: if this argument precedes DIRECTORY-OR-SCRIPT then some other argument, e.g. "
"--show, must be placed before the directory or script. ",
)),
('--dev', Argument(
metavar ='FILES-TO-WATCH',
action ='store',
default = None,
type = str,
nargs = '*',
help = "Enable live reloading during app development. "
"By default it watches all *.py *.html *.css *.yaml files "
"in the app directory tree. Additional files can be passed "
"as arguments. "
"NOTE: if this argument precedes DIRECTORY-OR-SCRIPT then some other argument, e.g "
"--show, must be placed before the directory or script. "
"NOTE: This setting only works with a single app. "
"It also restricts the number of processes to 1. "
"NOTE FOR WINDOWS USERS : this option must be invoked using "
"'python -m bokeh'. If not Tornado will fail to restart the "
"server",
)),
('--show', Argument(
action = 'store_true',
help = "Open server app(s) in a browser",
)),
('--allow-websocket-origin', Argument(
metavar = 'HOST[:PORT]',
action = 'append',
type = str,
help = "Public hostnames which may connect to the Bokeh websocket "
"With unix socket, the websocket origin restrictions should be enforced by the proxy.",
)),
('--prefix', Argument(
metavar = 'PREFIX',
type = str,
help = "URL prefix for Bokeh server URLs",
default = None,
)),
('--ico-path', Argument(
metavar = "ICO_PATH",
type = str,
help = "Path to a .ico file to use as the favicon.ico, or 'none' to "
"disable favicon.ico support. If unset, a default Bokeh .ico "
"file will be used",
default = None,
)),
('--keep-alive', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to send a keep-alive ping to clients, 0 to disable.",
default = None,
)),
('--check-unused-sessions', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to check for unused sessions",
default = None,
)),
('--unused-session-lifetime', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How long unused sessions last",
default = None,
)),
('--stats-log-frequency', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to log stats",
default = None,
)),
('--mem-log-frequency', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to log memory usage information",
default = None,
)),
('--use-xheaders', Argument(
action = 'store_true',
help = "Prefer X-headers for IP/protocol information",
)),
('--ssl-certfile', Argument(
metavar = 'CERTFILE',
action = 'store',
default = None,
help = 'Absolute path to a certificate file for SSL termination',
)),
('--ssl-keyfile', Argument(
metavar = 'KEYFILE',
action = 'store',
default = None,
help = 'Absolute path to a private key file for SSL termination',
)),
('--session-ids', Argument(
metavar = 'MODE',
action = 'store',
default = None,
choices = bcss.SESSION_ID_MODES,
help = f"One of: {nice_join(bcss.SESSION_ID_MODES)}",
)),
('--auth-module', Argument(
metavar = 'AUTH_MODULE',
action = 'store',
default = None,
help = 'Absolute path to a Python module that implements auth hooks',
)),
('--enable-xsrf-cookies', Argument(
action = 'store_true',
default = False,
help = 'Whether to enable Tornado support for XSRF cookies. All '
'PUT, POST, or DELETE handlers must be properly instrumented '
'when this setting is enabled.',
)),
('--exclude-headers', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request headers to exclude from the session '
'context (by default all headers are included).',
)),
('--exclude-cookies', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request cookies to exclude from the session '
'context (by default all cookies are included).',
)),
('--include-headers', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request headers to make available in the session '
'context (by default all headers are included).',
)),
('--include-cookies', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request cookies to make available in the session '
'context (by default all cookies are included).',
)),
('--cookie-secret', Argument(
metavar = 'COOKIE_SECRET',
action = 'store',
default = None,
help = 'Configure to enable getting/setting secure cookies',
)),
('--index', Argument(
metavar = 'INDEX',
action = 'store',
default = None,
help = 'Path to a template to use for the site index',
)),
('--disable-index', Argument(
action = 'store_true',
help = 'Do not use the default index on the root path',
)),
('--disable-index-redirect', Argument(
action = 'store_true',
help = 'Do not redirect to running app from root path',
)),
('--num-procs', Argument(
metavar = 'N',
action = 'store',
help = "Number of worker processes for an app. Using "
"0 will autodetect number of cores (defaults to 1)",
default = 1,
type =int,
)),
('--session-token-expiration', Argument(
metavar = 'N',
action = 'store',
help = "Duration in seconds that a new session token "
"is valid for session creation. After the expiry "
"time has elapsed, the token will not be able "
"create a new session (defaults to seconds).",
default = 300,
type = int,
)),
('--websocket-max-message-size', Argument(
metavar = 'BYTES',
action = 'store',
help = "Set the Tornado websocket_max_message_size value "
"(default: 20MB)",
default = 20*1024*1024,
type = int,
)),
('--websocket-compression-level', Argument(
metavar = 'LEVEL',
action = 'store',
help = "Set the Tornado WebSocket compression_level",
default = None,
type = int,
)),
('--websocket-compression-mem-level', Argument(
metavar = 'LEVEL',
action = 'store',
help = "Set the Tornado WebSocket compression mem_level",
default = None,
type = int,
)),
('--glob', Argument(
action='store_true',
help='Process all filename arguments as globs',
)),
)
@contextlib.contextmanager
def run_bokeh_serve(args):
cmd = [sys.executable, '-m', 'bokeh', 'serve', *args]
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False) as p:
nbsr = NBSR(p.stdout)
try:
yield p, nbsr
except Exception as e:
p.terminate()
p.wait()
print("An error occurred: %s", e)
try:
out = p.stdout.read().decode()
print("\n---- subprocess stdout follows ----\n")
print(out)
except Exception:
pass
raise
else:
p.terminate()
p.wait()
def assert_pattern(nbsr, pat):
m = None
for i in range(20):
o = nbsr.readline(0.5)
if not o:
continue
m = pat.search(o.decode())
if m is not None:
break
if m is None:
pytest.fail("Did not find pattern in process output")
def check_port(nbsr):
m = None
for i in range(20):
o = nbsr.readline(0.5)
if not o:
continue
m = PORT_PAT.search(o.decode())
if m is not None:
break
if m is None:
pytest.fail("Did not find port in process output")
return int(m.group(1))
def check_error(args):
cmd = [sys.executable, '-m', 'bokeh', 'serve', *args]
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
assert e.returncode == 1
out = e.output.decode()
else:
pytest.fail(f"command {cmd} unexpected successful")
return out
@pytest.mark.skipif(sys.platform != "win32", reason="Unix sockets not available on windows")
def test_unix_socket_on_windows() -> None:
unix_socket = "test.sock"
out = check_error(["--unix-socket", unix_socket]).strip()
expected = "ERROR: Unix sockets are not supported on windows."
assert expected in out
def test_unix_socket_with_port() -> None:
unix_socket = "test.sock"
out = check_error(["--unix-socket", unix_socket, "--port", "5000"]).strip()
expected = "ERROR: --port arg is not supported with a unix socket"
assert expected == out
def test_unix_socket_with_invalid_args() -> None:
invalid_args = ['address', 'ssl-certfile', 'ssl-keyfile']
for arg in invalid_args:
unix_socket = "test.sock"
out = check_error(["--unix-socket", unix_socket, f"--{arg}", "value"]).strip()
expected = "ERROR: ['address', 'ssl_certfile', 'ssl_keyfile', 'port'] args are not supported with a unix socket"
assert expected == out
def test_dev_with_no_app() -> None:
out = check_error(["--dev"]).strip()
expected = "ERROR: Bokeh server --dev option requires an app script or directory be provided"
assert expected == out
def test_dev_with_multiple_apps() -> None:
out = check_error(["--glob", APPS, "--dev"]).strip()
expected = "ERROR: Bokeh server --dev option can only support a single app"
assert expected == out
@pytest.mark.skipif(sys.platform == "win32", reason="Unix sockets not available on windows")
def test_unix_socket() -> None:
requests = pytest.importorskip("requests")
requests_unixsocket = pytest.importorskip("requests_unixsocket")
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
file_name = "test.socket"
if os.path.exists(file_name):
os.remove(file_name)
sock.bind(file_name)
with run_bokeh_serve(["--unix-socket", file_name, "--glob", APPS]):
# The server is not ready is binds to the unix socket
# very quickly, having some sleep helps
with requests_unixsocket.monkeypatch():
for t in range(1, 11):
time.sleep(1)
try:
r = requests.get(f"http+unix://{file_name.replace('/', '%2F')}/line_on_off")
assert r.status_code == 200
break
except Exception:
if t == 10:
assert False
pass
os.remove(file_name)
def test_host_not_available() -> None:
host = "8.8.8.8"
out = check_error(["--address", host])
expected = f"Cannot start Bokeh server, address {host!r} not available"
assert expected in out
def test_port_not_available() -> None:
sock = socket.socket()
try:
sock.bind(('0.0.0.0', 0))
port = sock.getsockname()[1]
out = check_error(["--port", str(port)])
expected = f"Cannot start Bokeh server, port {port} is already in use"
assert expected in out
finally:
sock.close()
def test_no_glob_by_default_on_filename_if_wildcard_in_quotes() -> None:
out = check_error([APPS])
expected = "ERROR: Path for Bokeh server application does not exist:"
assert expected in out
assert '*' in out
def test_glob_flag_on_filename_if_wildcard_in_quotes() -> None:
requests = pytest.importorskip("requests")
pat = re.compile(r'Bokeh app running at: http://localhost:(\d+)/line_on_off')
with run_bokeh_serve(["--port", "0", "--glob", APPS]) as (_, nbsr):
port = check_port(nbsr)
assert port > 0
assert_pattern(nbsr, pat)
r = requests.get(f"http://localhost:{port}/apply_theme")
assert r.status_code == 200
def test_actual_port_printed_out() -> None:
requests = pytest.importorskip("requests")
with run_bokeh_serve(["--port", "0"]) as (_, nbsr):
port = check_port(nbsr)
assert port > 0
r = requests.get(f"http://localhost:{port}/")
assert r.status_code == 200
def test_websocket_max_message_size_printed_out() -> None:
pat = re.compile(r'Torndado websocket_max_message_size set to 12345')
with run_bokeh_serve(["--websocket-max-message-size", "12345"]) as (_, nbsr):
assert_pattern(nbsr, pat)
| TestServe |
python | doocs__leetcode | solution/0800-0899/0855.Exam Room/Solution.py | {
"start": 0,
"end": 1091
} | class ____:
def __init__(self, n: int):
def dist(x):
l, r = x
return r - l - 1 if l == -1 or r == n else (r - l) >> 1
self.n = n
self.ts = SortedList(key=lambda x: (-dist(x), x[0]))
self.left = {}
self.right = {}
self.add((-1, n))
def seat(self) -> int:
s = self.ts[0]
p = (s[0] + s[1]) >> 1
if s[0] == -1:
p = 0
elif s[1] == self.n:
p = self.n - 1
self.delete(s)
self.add((s[0], p))
self.add((p, s[1]))
return p
def leave(self, p: int) -> None:
l, r = self.left[p], self.right[p]
self.delete((l, p))
self.delete((p, r))
self.add((l, r))
def add(self, s):
self.ts.add(s)
self.left[s[1]] = s[0]
self.right[s[0]] = s[1]
def delete(self, s):
self.ts.remove(s)
self.left.pop(s[1])
self.right.pop(s[0])
# Your ExamRoom object will be instantiated and called as such:
# obj = ExamRoom(n)
# param_1 = obj.seat()
# obj.leave(p)
| ExamRoom |
python | scrapy__scrapy | tests/test_pipeline_media.py | {
"start": 893,
"end": 1341
} | class ____(MediaPipeline):
def media_to_download(self, request, info, *, item=None):
pass
def get_media_requests(self, item, info):
pass
def media_downloaded(self, response, request, info, *, item=None):
return {}
def media_failed(self, failure, request, info):
failure.raiseException()
def file_path(self, request, response=None, info=None, *, item=None):
return ""
| UserDefinedPipeline |
python | spyder-ide__spyder | spyder/plugins/layout/layouts.py | {
"start": 3514,
"end": 4689
} | class ____(BaseGridLayoutType):
ID = DefaultLayouts.RLayout
def __init__(self, parent_plugin):
super().__init__(parent_plugin)
self.add_area(
[Plugins.Editor],
row=0,
column=0,
)
self.add_area(
[Plugins.IPythonConsole, Plugins.Console],
row=1,
column=0,
hidden_plugin_ids=[Plugins.Console]
)
self.add_area(
[
Plugins.VariableExplorer,
Plugins.Debugger,
Plugins.Profiler,
Plugins.Plots,
Plugins.History,
Plugins.OutlineExplorer,
Plugins.Find,
],
row=0,
column=1,
default=True,
hidden_plugin_ids=[Plugins.OutlineExplorer, Plugins.Find]
)
self.add_area(
[Plugins.Explorer, Plugins.Projects, Plugins.Help,
Plugins.OnlineHelp],
row=1,
column=1,
hidden_plugin_ids=[Plugins.Projects, Plugins.OnlineHelp]
)
def get_name(self):
return _("Rstudio layout")
| RLayout |
python | wandb__wandb | wandb/sdk/data_types/trace_tree.py | {
"start": 4620,
"end": 5036
} | class ____:
"""Descriptor for accessing and setting attributes of the `Trace` class."""
def __set_name__(self, owner: type, name: str) -> None:
self.name = name
def __get__(self, instance: "Trace", owner: type) -> Any:
return getattr(instance._span, self.name)
def __set__(self, instance: "Trace", value: Any) -> None:
setattr(instance._span, self.name, value)
| TraceAttribute |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/middleware/anthropic_tools.py | {
"start": 1485,
"end": 2487
} | class ____(TypedDict):
"""Data structure for storing file contents."""
content: list[str]
"""Lines of the file."""
created_at: str
"""ISO 8601 timestamp of file creation."""
modified_at: str
"""ISO 8601 timestamp of last modification."""
def files_reducer(
left: dict[str, FileData] | None, right: dict[str, FileData | None]
) -> dict[str, FileData]:
"""Custom reducer that merges file updates.
Args:
left: Existing files dict.
right: New files dict to merge (`None` values delete files).
Returns:
Merged `dict` where right overwrites left for matching keys.
"""
if left is None:
# Filter out None values when initializing
return {k: v for k, v in right.items() if v is not None}
# Merge, filtering out None values (deletions)
result = {**left}
for k, v in right.items():
if v is None:
result.pop(k, None)
else:
result[k] = v
return result
| FileData |
python | wireservice__csvkit | csvkit/utilities/in2csv.py | {
"start": 451,
"end": 9517
} | class ____(CSVKitUtility):
description = 'Convert common, but less awesome, tabular data formats to CSV.'
epilog = 'Some command-line flags only pertain to specific input formats.'
# The utility handles the input file.
override_flags = ['f']
def add_arguments(self):
self.argparser.add_argument(
metavar='FILE', nargs='?', dest='input_path',
help='The CSV file to operate on. If omitted, will accept input as piped data via STDIN.')
self.argparser.add_argument(
'-f', '--format', dest='filetype', choices=SUPPORTED_FORMATS,
help='The format of the input file. If not specified will be inferred from the file type.')
self.argparser.add_argument(
'-s', '--schema', dest='schema',
help='Specify a CSV-formatted schema file for converting fixed-width files. See web documentation.')
self.argparser.add_argument(
'-k', '--key', dest='key',
help='Specify a top-level key to look within for a list of objects to be converted when processing JSON.')
self.argparser.add_argument(
'-n', '--names', dest='names_only', action='store_true',
help='Display sheet names from the input Excel file.')
self.argparser.add_argument(
'--sheet', dest='sheet',
help='The name of the Excel sheet to operate on.')
self.argparser.add_argument(
'--write-sheets', dest='write_sheets',
help='The names of the Excel sheets to write to files, or "-" to write all sheets.')
self.argparser.add_argument(
'--use-sheet-names', dest='use_sheet_names', action='store_true',
help='Use the sheet names as file names when --write-sheets is set.')
self.argparser.add_argument(
'--reset-dimensions', dest='reset_dimensions', action='store_true', default=None,
help='Ignore the sheet dimensions provided by the XLSX file.')
self.argparser.add_argument(
'--encoding-xls', dest='encoding_xls',
help='Specify the encoding of the input XLS file.')
self.argparser.add_argument(
'-y', '--snifflimit', dest='sniff_limit', type=int, default=1024,
help='Limit CSV dialect sniffing to the specified number of bytes. '
'Specify "0" to disable sniffing entirely, or "-1" to sniff the entire file.')
self.argparser.add_argument(
'-I', '--no-inference', dest='no_inference', action='store_true',
help='Disable type inference (and --locale, --date-format, --datetime-format, --no-leading-zeroes) '
'when parsing CSV input.')
# This is called only from open_excel_input_file(), but is a separate method to use caching.
@functools.lru_cache
def stdin(self):
return sys.stdin.buffer.read()
def open_excel_input_file(self, path):
if not path or path == '-':
return BytesIO(self.stdin())
return open(path, 'rb')
def sheet_names(self, path, filetype):
input_file = self.open_excel_input_file(path)
if filetype == 'xls':
sheet_names = xlrd.open_workbook(file_contents=input_file.read()).sheet_names()
else: # 'xlsx'
sheet_names = openpyxl.load_workbook(input_file, read_only=True, data_only=True).sheetnames
input_file.close()
return sheet_names
def main(self):
path = self.args.input_path
# Determine the file type.
if self.args.filetype:
filetype = self.args.filetype
elif self.args.schema:
filetype = 'fixed'
elif self.args.key:
filetype = 'json'
else:
if not path or path == '-':
self.argparser.error('You must specify a format when providing input as piped data via STDIN.')
filetype = convert.guess_format(path)
if not filetype:
self.argparser.error('Unable to automatically determine the format of the input file. Try specifying '
'a format with --format.')
if self.args.names_only:
if filetype in ('xls', 'xlsx'):
sheets = self.sheet_names(path, filetype)
for sheet in sheets:
self.output_file.write(f'{sheet}\n')
else:
self.argparser.error('You cannot use the -n or --names options with non-Excel files.')
return
# Set the input file.
if filetype in ('xls', 'xlsx'):
self.input_file = self.open_excel_input_file(path)
else:
self.input_file = self._open_input_file(path)
# Set the reader's arguments.
kwargs = {}
sniff_limit = self.args.sniff_limit if self.args.sniff_limit != -1 else None
if self.args.schema:
schema = self._open_input_file(self.args.schema)
elif filetype == 'fixed':
raise ValueError('schema must not be null when format is "fixed"')
if filetype == 'csv':
kwargs.update(self.reader_kwargs)
kwargs['sniff_limit'] = sniff_limit
if filetype in ('xls', 'xlsx'):
kwargs['header'] = not self.args.no_header_row
if filetype not in ('dbf', 'geojson', 'json', 'ndjson'): # csv, fixed, xls, xlsx
kwargs['skip_lines'] = self.args.skip_lines
if filetype != 'dbf':
kwargs['column_types'] = self.get_column_types()
# Convert the file.
if (
filetype == 'csv'
and self.args.no_inference
and not self.args.no_header_row
and not self.args.skip_lines
and sniff_limit == 0
):
reader = agate.csv.reader(self.input_file, **self.reader_kwargs)
writer = agate.csv.writer(self.output_file, **self.writer_kwargs)
writer.writerows(reader)
elif filetype == 'fixed':
self.output_file.write(fixed2csv(self.input_file, schema, output=self.output_file, **kwargs))
elif filetype == 'geojson':
self.output_file.write(geojson2csv(self.input_file, **kwargs))
elif filetype in ('csv', 'dbf', 'json', 'ndjson', 'xls', 'xlsx'):
if filetype == 'csv':
table = agate.Table.from_csv(self.input_file, **kwargs)
elif filetype == 'json':
table = agate.Table.from_json(self.input_file, key=self.args.key, **kwargs)
elif filetype == 'ndjson':
table = agate.Table.from_json(self.input_file, key=self.args.key, newline=True, **kwargs)
elif filetype == 'xls':
table = agate.Table.from_xls(self.input_file, sheet=self.args.sheet,
encoding_override=self.args.encoding_xls, **kwargs)
elif filetype == 'xlsx':
table = agate.Table.from_xlsx(
self.input_file, sheet=self.args.sheet, reset_dimensions=self.args.reset_dimensions, **kwargs
)
elif filetype == 'dbf':
if not hasattr(self.input_file, 'name'):
raise ValueError('DBF files can not be converted from stdin. You must pass a filename.')
table = agate.Table.from_dbf(self.input_file.name, **kwargs)
table.to_csv(self.output_file, **self.writer_kwargs)
if self.args.write_sheets:
# Close and re-open the file, as the file object has been mutated or closed.
self.input_file.close()
self.input_file = self.open_excel_input_file(path)
if self.args.write_sheets == '-':
sheets = self.sheet_names(path, filetype)
else:
sheets = [int(sheet) if sheet.isdigit() else sheet for sheet in self.args.write_sheets.split(',')]
if filetype == 'xls':
tables = agate.Table.from_xls(self.input_file, sheet=sheets,
encoding_override=self.args.encoding_xls, **kwargs)
elif filetype == 'xlsx':
tables = agate.Table.from_xlsx(
self.input_file, sheet=sheets, reset_dimensions=self.args.reset_dimensions, **kwargs
)
if not path or path == '-':
base = 'stdin'
else:
base = splitext(self.input_file.name)[0]
for i, (sheet_name, table) in enumerate(tables.items()):
if self.args.use_sheet_names:
filename = '%s_%s.csv' % (base, sheet_name)
else:
filename = '%s_%d.csv' % (base, i)
with open(filename, 'w') as f:
table.to_csv(f, **self.writer_kwargs)
self.input_file.close()
if self.args.schema:
schema.close()
def launch_new_instance():
utility = In2CSV()
utility.run()
if __name__ == '__main__':
launch_new_instance()
| In2CSV |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_bigquery.py | {
"start": 22121,
"end": 26493
} | class ____:
def test_interval_check_trigger_serialization(self, interval_check_trigger):
"""
Asserts that the BigQueryIntervalCheckTrigger correctly serializes its arguments and classpath.
"""
classpath, kwargs = interval_check_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.bigquery.BigQueryIntervalCheckTrigger"
assert kwargs == {
"conn_id": TEST_CONN_ID,
"impersonation_chain": TEST_IMPERSONATION_CHAIN,
"first_job_id": TEST_FIRST_JOB_ID,
"second_job_id": TEST_SECOND_JOB_ID,
"project_id": TEST_GCP_PROJECT_ID,
"table": TEST_TABLE_ID,
"location": None,
"metrics_thresholds": TEST_METRIC_THRESHOLDS,
"date_filter_column": TEST_DATE_FILTER_COLUMN,
"days_back": TEST_DAYS_BACK,
"ratio_formula": TEST_RATIO_FORMULA,
"ignore_zero": TEST_IGNORE_ZERO,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_output")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_records")
async def test_interval_check_trigger_success(
self, mock_get_records, mock_get_job_output, mock_job_status, interval_check_trigger
):
"""
Tests the BigQueryIntervalCheckTrigger only fires once the query execution reaches a successful state.
"""
mock_get_records.return_value = {}
mock_job_status.return_value = {"status": "success", "message": "Job completed"}
mock_get_job_output.return_value = ["0"]
generator = interval_check_trigger.run()
actual = await generator.asend(None)
assert actual == TriggerEvent({"status": "error", "message": "The second SQL query returned None"})
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_interval_check_trigger_pending(self, mock_job_status, caplog, interval_check_trigger):
"""
Tests that the BigQueryIntervalCheckTrigger do not fire while a query is still running.
"""
mock_job_status.return_value = {"status": "pending", "message": "Job pending"}
caplog.set_level(logging.INFO)
task = asyncio.create_task(interval_check_trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
assert "Query is still running..." in caplog.text
assert f"Sleeping for {POLLING_PERIOD_SECONDS} seconds." in caplog.text
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_interval_check_trigger_terminated(self, mock_job_status, interval_check_trigger):
"""Tests the BigQueryIntervalCheckTrigger fires the correct event in case of an error."""
# Set the status to a value other than success or pending
mock_job_status.return_value = {
"status": "error",
"message": "The conn_id `bq_default` isn't defined",
}
generator = interval_check_trigger.run()
actual = await generator.asend(None)
assert (
TriggerEvent(
{"status": "error", "message": "The conn_id `bq_default` isn't defined", "data": None}
)
== actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryAsyncHook.get_job_status")
async def test_interval_check_trigger_exception(self, mock_job_status, caplog, interval_check_trigger):
"""Tests that the BigQueryIntervalCheckTrigger fires the correct event in case of an error."""
mock_job_status.side_effect = Exception("Test exception")
caplog.set_level(logging.DEBUG)
generator = interval_check_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "error", "message": "Test exception"}) == actual
| TestBigQueryIntervalCheckTrigger |
python | numba__llvmlite | llvmlite/tests/test_refprune.py | {
"start": 1254,
"end": 5679
} | class ____(TestCase, PassManagerMixin):
"""
Test that the C++ implementation matches the expected behavior as for
the prototype.
This generates a LLVM module for each test case, runs the pruner and checks
that the expected results are achieved.
"""
def make_incref(self, m):
fnty = ir.FunctionType(ir.VoidType(), [ptr_ty])
return ir.Function(m, fnty, name='NRT_incref')
def make_decref(self, m):
fnty = ir.FunctionType(ir.VoidType(), [ptr_ty])
return ir.Function(m, fnty, name='NRT_decref')
def make_switcher(self, m):
fnty = ir.FunctionType(ir.IntType(32), ())
return ir.Function(m, fnty, name='switcher')
def make_brancher(self, m):
fnty = ir.FunctionType(ir.IntType(1), ())
return ir.Function(m, fnty, name='brancher')
def generate_ir(self, nodes, edges):
# Build LLVM module for the CFG
m = ir.Module()
incref_fn = self.make_incref(m)
decref_fn = self.make_decref(m)
switcher_fn = self.make_switcher(m)
brancher_fn = self.make_brancher(m)
fnty = ir.FunctionType(ir.VoidType(), [ptr_ty])
fn = ir.Function(m, fnty, name='main')
[ptr] = fn.args
ptr.name = 'mem'
# populate the BB nodes
bbmap = {}
for bb in edges:
bbmap[bb] = fn.append_basic_block(bb)
# populate the BB
builder = ir.IRBuilder()
for bb, jump_targets in edges.items():
builder.position_at_end(bbmap[bb])
# Insert increfs and decrefs
for action in nodes[bb]:
if action == 'incref':
builder.call(incref_fn, [ptr])
elif action == 'decref':
builder.call(decref_fn, [ptr])
else:
raise AssertionError('unreachable')
# Insert the terminator.
# Switch base on the number of jump targets.
n_targets = len(jump_targets)
if n_targets == 0:
builder.ret_void()
elif n_targets == 1:
[dst] = jump_targets
builder.branch(bbmap[dst])
elif n_targets == 2:
[left, right] = jump_targets
sel = builder.call(brancher_fn, ())
builder.cbranch(sel, bbmap[left], bbmap[right])
elif n_targets > 2:
sel = builder.call(switcher_fn, ())
[head, *tail] = jump_targets
sw = builder.switch(sel, default=bbmap[head])
for i, dst in enumerate(tail):
sw.add_case(sel.type(i), bbmap[dst])
else:
raise AssertionError('unreachable')
return m
def apply_refprune(self, irmod):
mod = llvm.parse_assembly(str(irmod))
pb = self.pb()
pm = pb.getModulePassManager()
pm.add_refprune_pass()
pm.run(mod, pb)
return mod
def check(self, mod, expected, nodes):
# preprocess incref/decref locations
# LLVM >= 18 adds an extra empty block "LoopExit" which causes
# regular dict to throw KeyError
d = defaultdict(lambda: defaultdict(int))
for k, vs in nodes.items():
n_incref = vs.count('incref')
n_decref = vs.count('decref')
d[k] = {'incref': n_incref, 'decref': n_decref}
for k, stats in d.items():
if expected.get(k):
stats['incref'] -= 1
for dec_bb in expected[k]:
d[dec_bb]['decref'] -= 1
# find the main function
for f in mod.functions:
if f.name == 'main':
break
# check each BB
for bb in f.blocks:
stats = d[bb.name]
text = str(bb)
n_incref = text.count('NRT_incref')
n_decref = text.count('NRT_decref')
self.assertEqual(stats['incref'], n_incref, msg=f'BB {bb}')
self.assertEqual(stats['decref'], n_decref, msg=f'BB {bb}')
def generate_test(self, case_gen):
nodes, edges, expected = case_gen()
irmod = self.generate_ir(nodes, edges)
outmod = self.apply_refprune(irmod)
self.check(outmod, expected, nodes)
# Generate tests
for name, case in _iterate_cases(generate_test):
locals()[name] = case
| TestRefPrunePass |
python | Pylons__pyramid | tests/test_scripts/dummy.py | {
"start": 962,
"end": 1121
} | class ____:
def __init__(self, *routes):
self.routes = routes
def get_routes(self, include_static=False):
return self.routes
| DummyMapper |
python | doocs__leetcode | solution/3500-3599/3568.Minimum Moves to Clean the Classroom/Solution.py | {
"start": 0,
"end": 1680
} | class ____:
def minMoves(self, classroom: List[str], energy: int) -> int:
m, n = len(classroom), len(classroom[0])
d = [[0] * n for _ in range(m)]
x = y = cnt = 0
for i, row in enumerate(classroom):
for j, c in enumerate(row):
if c == "S":
x, y = i, j
elif c == "L":
d[i][j] = cnt
cnt += 1
if cnt == 0:
return 0
vis = [
[[[False] * (1 << cnt) for _ in range(energy + 1)] for _ in range(n)]
for _ in range(m)
]
q = [(x, y, energy, (1 << cnt) - 1)]
vis[x][y][energy][(1 << cnt) - 1] = True
dirs = (-1, 0, 1, 0, -1)
ans = 0
while q:
t = q
q = []
for i, j, cur_energy, mask in t:
if mask == 0:
return ans
if cur_energy <= 0:
continue
for k in range(4):
x, y = i + dirs[k], j + dirs[k + 1]
if 0 <= x < m and 0 <= y < n and classroom[x][y] != "X":
nxt_energy = (
energy if classroom[x][y] == "R" else cur_energy - 1
)
nxt_mask = mask
if classroom[x][y] == "L":
nxt_mask &= ~(1 << d[x][y])
if not vis[x][y][nxt_energy][nxt_mask]:
vis[x][y][nxt_energy][nxt_mask] = True
q.append((x, y, nxt_energy, nxt_mask))
ans += 1
return -1
| Solution |
python | nedbat__coveragepy | coverage/files.py | {
"start": 6480,
"end": 7688
} | class ____:
"""A matcher for files in a tree.
Construct with a list of paths, either files or directories. Paths match
with the `match` method if they are one of the files, or if they are
somewhere in a subtree rooted at one of the directories.
"""
def __init__(self, paths: Iterable[str], name: str = "unknown") -> None:
self.original_paths: list[str] = human_sorted(paths)
self.paths = [os.path.normcase(p) for p in paths]
self.name = name
def __repr__(self) -> str:
return f"<TreeMatcher {self.name} {self.original_paths!r}>"
def info(self) -> list[str]:
"""A list of strings for displaying when dumping state."""
return self.original_paths
def match(self, fpath: str) -> bool:
"""Does `fpath` indicate a file in one of our trees?"""
fpath = os.path.normcase(fpath)
for p in self.paths:
if fpath.startswith(p):
if fpath == p:
# This is the same file!
return True
if fpath[len(p)] == os.sep:
# This is a file in the directory
return True
return False
| TreeMatcher |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_v2_test.py | {
"start": 191345,
"end": 193554
} | class ____(lite_v2_test_util.ModelTest):
def _run(self, experimental_preserve_all_tensors):
@tf.function
def f(x):
y = tf.add(x, x, name='y')
z = tf.add(y, y, name='z')
w = tf.add(z, z, name='w')
return w
# NOTE this is exactly representable as a float as are the intermediates of
# f. So direct comparison is ok below.
input_data = np.array(2.0, np.float32)
concrete_func = f.get_concrete_function(input_data)
converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func], f
)
tflite_model = converter.convert()
interp = interpreter.Interpreter(
model_content=tflite_model,
experimental_preserve_all_tensors=experimental_preserve_all_tensors,
)
interp.allocate_tensors()
interp.set_tensor(interp.get_input_details()[0]['index'], input_data)
interp.invoke()
out = interp.get_tensor(interp.get_output_details()[0]['index'])
tensors = {}
for t in interp.get_tensor_details():
# With Tensorflow Lite default delegate applied to the model graph, the
# access to original tensors of a delegated op could cause a ValueError
# (i.e. 'Tensor data is null. Run allocate_tensors() first') to be thrown
# out because the tensor memory isn't allocated at all.
val = None
try:
val = interp.get_tensor(t['index'])
except ValueError:
pass
tensors.update({t['name']: val})
return (tensors, out)
def testPreserve(self):
tensors, result = self._run(experimental_preserve_all_tensors=True)
# All intermediates should be true and result be true.
self.assertAllClose(tensors['x'], 2.0)
self.assertAllClose(tensors['y'], 4.0)
self.assertAllClose(tensors['z'], 8.0)
self.assertAllClose(result, 16.0)
def testNoPreserve(self):
tensors, result = self._run(experimental_preserve_all_tensors=False)
# One of them should be wrong if preserve is not true, but result should be
# ok. Input should still be ok for repeated invocation.
self.assertAllClose(tensors['x'], 2.0)
self.assertTrue(tensors['y'] != 4.0 or tensors['z'] != 8.0)
self.assertAllClose(result, 16.0)
| IntermediatesTest |
python | kamyu104__LeetCode-Solutions | Python/remove-invalid-parentheses.py | {
"start": 151,
"end": 2306
} | class ____(object):
def removeInvalidParentheses(self, s):
"""
:type s: str
:rtype: List[str]
"""
# Calculate the minimum left and right parantheses to remove
def findMinRemove(s):
left_removed, right_removed = 0, 0
for c in s:
if c == '(':
left_removed += 1
elif c == ')':
if not left_removed:
right_removed += 1
else:
left_removed -= 1
return (left_removed, right_removed)
# Check whether s is valid or not.
def isValid(s):
sum = 0
for c in s:
if c == '(':
sum += 1
elif c == ')':
sum -= 1
if sum < 0:
return False
return sum == 0
def removeInvalidParenthesesHelper(start, left_removed, right_removed):
if left_removed == 0 and right_removed == 0:
tmp = ""
for i, c in enumerate(s):
if i not in removed:
tmp += c
if isValid(tmp):
res.append(tmp)
return
for i in xrange(start, len(s)):
if right_removed == 0 and left_removed > 0 and s[i] == '(':
if i == start or s[i] != s[i - 1]: # Skip duplicated.
removed[i] = True
removeInvalidParenthesesHelper(i + 1, left_removed - 1, right_removed)
del removed[i]
elif right_removed > 0 and s[i] == ')':
if i == start or s[i] != s[i - 1]: # Skip duplicated.
removed[i] = True
removeInvalidParenthesesHelper(i + 1, left_removed, right_removed - 1)
del removed[i]
res, removed = [], {}
(left_removed, right_removed) = findMinRemove(s)
removeInvalidParenthesesHelper(0, left_removed, right_removed)
return res
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/schedules/schedules.py | {
"start": 9911,
"end": 10096
} | class ____(graphene.Union):
class Meta:
types = (GrapheneSchedule, GrapheneScheduleNotFoundError, GraphenePythonError)
name = "ScheduleOrError"
| GrapheneScheduleOrError |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-weaviate/unit_tests/indexer_test.py | {
"start": 506,
"end": 12731
} | class ____(unittest.TestCase):
def setUp(self):
self.config = WeaviateIndexingConfigModel(
host="https://test-host:12345", auth=TokenAuth(mode="token", token="abc")
) # Setup your config here
self.indexer = WeaviateIndexer(self.config)
mock_catalog = Mock()
mock_stream = Mock()
mock_stream.stream.name = "test"
mock_stream.destination_sync_mode = DestinationSyncMode.append
self.mock_stream = mock_stream
mock_catalog.streams = [mock_stream]
self.mock_catalog = mock_catalog
@patch("destination_weaviate.indexer.weaviate.Client")
def test_successful_check(self, MockClient):
self.assertIsNone(self.indexer.check())
@patch("destination_weaviate.indexer.weaviate.Client")
def test_failed_check_due_to_exception(self, MockClient):
MockClient.side_effect = Exception("Random exception")
self.assertIsNotNone(self.indexer.check())
@patch("destination_weaviate.indexer.os.environ")
def test_failed_check_due_to_cloud_env_and_no_https_host(self, mock_os_environ):
mock_os_environ.get.return_value = "cloud"
self.indexer.config.host = "http://example.com"
self.assertEqual(self.indexer.check(), "Host must start with https:// and authentication must be enabled on cloud deployment.")
@patch("destination_weaviate.indexer.os.environ")
def test_failed_check_due_to_cloud_env_and_no_auth(self, mock_os_environ):
mock_os_environ.get.return_value = "cloud"
self.indexer.config.host = "http://example.com"
self.indexer.config.auth = NoAuth(mode="no_auth")
self.assertEqual(self.indexer.check(), "Host must start with https:// and authentication must be enabled on cloud deployment.")
@patch("destination_weaviate.indexer.weaviate.Client")
def test_pre_sync_that_creates_class(self, MockClient):
mock_client = Mock()
mock_client.schema.get.return_value = {"classes": []}
MockClient.return_value = mock_client
self.indexer.pre_sync(self.mock_catalog)
mock_client.schema.create_class.assert_called_with(
{
"class": "Test",
"vectorizer": "none",
"properties": [
{
"name": "_ab_record_id",
"dataType": ["text"],
"description": "Record ID, used for bookkeeping.",
"indexFilterable": True,
"indexSearchable": False,
"tokenization": "field",
}
],
}
)
@patch("destination_weaviate.indexer.weaviate.Client")
def test_pre_sync_that_creates_class_with_multi_tenancy_enabled(self, MockClient):
mock_client = Mock()
self.config.tenant_id = "test_tenant"
mock_client.schema.get_class_tenants.return_value = []
mock_client.schema.get.return_value = {"classes": []}
MockClient.return_value = mock_client
self.indexer.pre_sync(self.mock_catalog)
mock_client.schema.create_class.assert_called_with(
{
"class": "Test",
"multiTenancyConfig": {"enabled": True},
"vectorizer": "none",
"properties": [
{
"name": "_ab_record_id",
"dataType": ["text"],
"description": "Record ID, used for bookkeeping.",
"indexFilterable": True,
"indexSearchable": False,
"tokenization": "field",
}
],
}
)
@patch("destination_weaviate.indexer.weaviate.Client")
def test_pre_sync_that_deletes(self, MockClient):
mock_client = Mock()
mock_client.schema.get.return_value = {
"classes": [{"class": "Test", "properties": [{"name": "_ab_stream"}, {"name": "_ab_record_id"}]}]
}
MockClient.return_value = mock_client
self.mock_stream.destination_sync_mode = DestinationSyncMode.overwrite
self.indexer.pre_sync(self.mock_catalog)
mock_client.schema.delete_class.assert_called_with(class_name="Test")
mock_client.schema.create_class.assert_called_with(mock_client.schema.get.return_value["classes"][0])
@patch("destination_weaviate.indexer.weaviate.Client")
def test_pre_sync_no_delete_no_overwrite_mode(self, MockClient):
mock_client = Mock()
mock_client.schema.get.return_value = {
"classes": [{"class": "Test", "properties": [{"name": "_ab_stream"}, {"name": "_ab_record_id"}]}]
}
MockClient.return_value = mock_client
self.indexer.pre_sync(self.mock_catalog)
mock_client.schema.delete_class.assert_not_called()
def test_index_deletes_by_record_id(self):
mock_client = Mock()
self.indexer.client = mock_client
self.indexer.has_record_id_metadata = defaultdict(None)
self.indexer.has_record_id_metadata["Test"] = True
self.indexer.delete(["some_id", "some_other_id"], None, "test")
mock_client.batch.delete_objects.assert_called_with(
class_name="Test",
where={"path": ["_ab_record_id"], "operator": "ContainsAny", "valueStringArray": ["some_id", "some_other_id"]},
)
def test_index_deletes_by_record_id_with_tenant_id(self):
mock_client = Mock()
self.config.tenant_id = "test_tenant"
self.indexer.client = mock_client
self.indexer.has_record_id_metadata = defaultdict(None)
self.indexer.has_record_id_metadata["Test"] = True
self.indexer.delete(["some_id", "some_other_id"], None, "test")
mock_client.batch.delete_objects.assert_called_with(
class_name="Test",
tenant="test_tenant",
where={"path": ["_ab_record_id"], "operator": "ContainsAny", "valueStringArray": ["some_id", "some_other_id"]},
)
@patch("destination_weaviate.indexer.weaviate.Client")
def test_index_not_delete_no_metadata_field(self, MockClient):
mock_client = Mock()
MockClient.return_value = mock_client
self.indexer.has_record_id_metadata = defaultdict(None)
self.indexer.has_record_id_metadata["Test"] = False
self.indexer.delete(["some_id"], None, "test")
mock_client.batch.delete_objects.assert_not_called()
def test_index_flushes_batch(self):
mock_client = Mock()
self.indexer.client = mock_client
mock_client.batch.create_objects.return_value = []
mock_chunk1 = Chunk(
page_content="some_content",
embedding=[1, 2, 3],
metadata={"someField": "some_value"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value"}, emitted_at=0),
)
mock_chunk2 = Chunk(
page_content="some_other_content",
embedding=[4, 5, 6],
metadata={"someField": "some_value2"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value"}, emitted_at=0),
)
self.indexer.index([mock_chunk1, mock_chunk2], None, "test")
mock_client.batch.create_objects.assert_called()
chunk1_call = call({"someField": "some_value", "text": "some_content"}, "Test", ANY, vector=[1, 2, 3])
chunk2_call = call({"someField": "some_value2", "text": "some_other_content"}, "Test", ANY, vector=[4, 5, 6])
mock_client.batch.add_data_object.assert_has_calls([chunk1_call, chunk2_call], any_order=False)
def test_index_splits_batch(self):
mock_client = Mock()
self.indexer.client = mock_client
mock_client.batch.create_objects.return_value = []
self.indexer.config.batch_size = 2
mock_chunk1 = Chunk(
page_content="some_content",
embedding=[1, 2, 3],
metadata={"someField": "some_value"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value"}, emitted_at=0),
)
mock_chunk2 = Chunk(
page_content="some_other_content",
embedding=[4, 5, 6],
metadata={"someField": "some_value2"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value2"}, emitted_at=0),
)
mock_chunk3 = Chunk(
page_content="third",
embedding=[7, 8, 9],
metadata={"someField": "some_value3"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value3"}, emitted_at=0),
)
self.indexer.index([mock_chunk1, mock_chunk2, mock_chunk3], None, "test")
assert mock_client.batch.create_objects.call_count == 2
def test_index_on_empty_batch(self):
mock_client = Mock()
self.indexer.client = mock_client
self.indexer.index([], None, "test")
assert mock_client.batch.create_objects.call_count == 0
@patch("destination_weaviate.indexer.uuid.uuid4")
@patch("time.sleep", return_value=None)
def test_index_flushes_batch_and_propagates_error(self, MockTime, MockUUID):
mock_client = Mock()
self.indexer.client = mock_client
mock_client.batch.create_objects.return_value = [{"result": {"errors": ["some_error"]}, "id": "some_id"}]
MockUUID.side_effect = ["some_id", "some_id2"]
mock_chunk1 = Chunk(
page_content="some_content",
embedding=[1, 2, 3],
metadata={"someField": "some_value"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value"}, emitted_at=0),
)
mock_chunk2 = Chunk(
page_content="some_other_content",
embedding=[4, 5, 6],
metadata={"someField": "some_value2"},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value"}, emitted_at=0),
)
with self.assertRaises(WeaviatePartialBatchError):
self.indexer.index([mock_chunk1, mock_chunk2], None, "test")
chunk1_call = call({"someField": "some_value", "text": "some_content"}, "Test", "some_id", vector=[1, 2, 3])
self.assertEqual(mock_client.batch.create_objects.call_count, 1)
mock_client.batch.add_data_object.assert_has_calls([chunk1_call], any_order=False)
def test_index_flushes_batch_and_normalizes(self):
mock_client = Mock()
self.indexer.client = mock_client
mock_client.batch.create_objects.return_value = []
mock_chunk = Chunk(
page_content="some_content",
embedding=[1, 2, 3],
metadata={
"someField": "some_value",
"complex": {"a": [1, 2, 3]},
"UPPERCASE_NAME": "abc",
"id": 12,
"empty_list": [],
"referral Agency Name": "test1",
"123StartsWithNumber": "test2",
"special&*chars": "test3",
"with spaces": "test4",
"": "test5",
"_startsWithUnderscore": "test6",
"multiple spaces": "test7",
"SpecialCharacters!@#": "test8",
},
record=AirbyteRecordMessage(stream="test", data={"someField": "some_value"}, emitted_at=0),
)
self.indexer.index([mock_chunk], None, "test")
mock_client.batch.add_data_object.assert_called_with(
{
"someField": "some_value",
"complex": '{"a": [1, 2, 3]}',
"uPPERCASE_NAME": "abc",
"text": "some_content",
"raw_id": 12,
"referral_Agency_Name": "test1",
"_123StartsWithNumber": "test2",
"specialchars": "test3",
"with_spaces": "test4",
"_": "test5",
"_startsWithUnderscore": "test6",
"multiple__spaces": "test7",
"specialCharacters": "test8",
},
"Test",
ANY,
vector=[1, 2, 3],
)
| TestWeaviateIndexer |
python | keras-team__keras | keras/src/layers/normalization/unit_normalization_test.py | {
"start": 211,
"end": 2164
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_un_basics(self):
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": -1},
input_shape=(2, 3),
expected_output_shape=(2, 3),
supports_masking=True,
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.UnitNormalization,
init_kwargs={"axis": (1, 2)},
input_shape=(1, 3, 3),
expected_output_shape=(1, 3, 3),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_invalid_axis(self):
with self.assertRaisesRegex(
TypeError,
(
"Invalid value for `axis` argument: expected an int or a "
"list/tuple of ints."
),
):
layers.UnitNormalization(axis={"axis": -1})
def test_correctness(self):
layer = layers.UnitNormalization(axis=-1)
inputs = np.random.normal(size=(2, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)
layer = layers.UnitNormalization(axis=(1, 2))
inputs = np.random.normal(size=(2, 3, 3))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)
layer = layers.UnitNormalization(axis=1)
inputs = np.random.normal(size=(2, 3, 2))
outputs = layer(inputs)
self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
| UnitNormalizationTest |
python | Textualize__textual | src/textual/document/_edit.py | {
"start": 255,
"end": 5707
} | class ____:
"""Implements the Undoable protocol to replace text at some range within a document."""
text: str
"""The text to insert. An empty string is equivalent to deletion."""
from_location: Location
"""The start location of the insert."""
to_location: Location
"""The end location of the insert"""
maintain_selection_offset: bool
"""If True, the selection will maintain its offset to the replacement range."""
_original_selection: Selection | None = field(init=False, default=None)
"""The Selection when the edit was originally performed, to be restored on undo."""
_updated_selection: Selection | None = field(init=False, default=None)
"""Where the selection should move to after the replace happens."""
_edit_result: EditResult | None = field(init=False, default=None)
"""The result of doing the edit."""
def do(self, text_area: TextArea, record_selection: bool = True) -> EditResult:
"""Perform the edit operation.
Args:
text_area: The `TextArea` to perform the edit on.
record_selection: If True, record the current selection in the TextArea
so that it may be restored if this Edit is undone in the future.
Returns:
An `EditResult` containing information about the replace operation.
"""
if record_selection:
self._original_selection = text_area.selection
text = self.text
# This code is mostly handling how we adjust TextArea.selection
# when an edit is made to the document programmatically.
# We want a user who is typing away to maintain their relative
# position in the document even if an insert happens before
# their cursor position.
edit_bottom_row, edit_bottom_column = self.bottom
selection_start, selection_end = text_area.selection
selection_start_row, selection_start_column = selection_start
selection_end_row, selection_end_column = selection_end
edit_result = text_area.document.replace_range(self.top, self.bottom, text)
new_edit_to_row, new_edit_to_column = edit_result.end_location
column_offset = new_edit_to_column - edit_bottom_column
target_selection_start_column = (
selection_start_column + column_offset
if edit_bottom_row == selection_start_row
and edit_bottom_column <= selection_start_column
else selection_start_column
)
target_selection_end_column = (
selection_end_column + column_offset
if edit_bottom_row == selection_end_row
and edit_bottom_column <= selection_end_column
else selection_end_column
)
row_offset = new_edit_to_row - edit_bottom_row
target_selection_start_row = (
selection_start_row + row_offset
if edit_bottom_row <= selection_start_row
else selection_start_row
)
target_selection_end_row = (
selection_end_row + row_offset
if edit_bottom_row <= selection_end_row
else selection_end_row
)
if self.maintain_selection_offset:
self._updated_selection = Selection(
start=(target_selection_start_row, target_selection_start_column),
end=(target_selection_end_row, target_selection_end_column),
)
else:
self._updated_selection = Selection.cursor(edit_result.end_location)
self._edit_result = edit_result
return edit_result
def undo(self, text_area: TextArea) -> EditResult:
"""Undo the edit operation.
Looks at the data stored in the edit, and performs the inverse operation of `Edit.do`.
Args:
text_area: The `TextArea` to undo the insert operation on.
Returns:
An `EditResult` containing information about the replace operation.
"""
replaced_text = self._edit_result.replaced_text
edit_end = self._edit_result.end_location
# Replace the span of the edit with the text that was originally there.
undo_edit_result = text_area.document.replace_range(
self.top, edit_end, replaced_text
)
self._updated_selection = self._original_selection
return undo_edit_result
def after(self, text_area: TextArea) -> None:
"""Hook for running code after an Edit has been performed via `Edit.do` *and*
side effects such as re-wrapping the document and refreshing the display
have completed.
For example, we can't record cursor visual offset until we know where the cursor will
land *after* wrapping has been performed, so we must wait until here to do it.
Args:
text_area: The `TextArea` this operation was performed on.
"""
if self._updated_selection is not None:
text_area.selection = self._updated_selection
text_area.record_cursor_width()
@property
def top(self) -> Location:
"""The Location impacted by this edit that is nearest the start of the document."""
return min([self.from_location, self.to_location])
@property
def bottom(self) -> Location:
"""The Location impacted by this edit that is nearest the end of the document."""
return max([self.from_location, self.to_location])
| Edit |
python | huggingface__transformers | src/transformers/models/unispeech/modular_unispeech.py | {
"start": 17397,
"end": 17649
} | class ____(Wav2Vec2ForSequenceClassification):
pass
__all__ = [
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
| UniSpeechForSequenceClassification |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/isinstance5.py | {
"start": 392,
"end": 492
} | class ____(DataProtocol, Protocol):
def method2(self) -> int: ...
@runtime_checkable
| DataProtocol2 |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py | {
"start": 1131,
"end": 1339
} | class ____(StrEnum):
ANOMALY_DETECTION = "anomaly_detection"
ISSUE_PRIORITY_GREATER_OR_EQUAL = "issue_priority_greater_or_equal"
ISSUE_PRIORITY_DEESCALATING = "issue_priority_deescalating"
| Condition |
python | openai__openai-python | src/openai/types/completion_create_params.py | {
"start": 6479,
"end": 7023
} | class ____(CompletionCreateParamsBase, total=False):
stream: Optional[Literal[False]]
"""Whether to stream back partial progress.
If set, tokens will be sent as data-only
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
as they become available, with the stream terminated by a `data: [DONE]`
message.
[Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
"""
| CompletionCreateParamsNonStreaming |
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 42343,
"end": 45444
} | class ____(_LocalFixture):
__sparse_driver_backend__ = True
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = fixture_session()
u2 = User(name="newuser")
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
def test_trans_pending_cleared_on_commit(self):
User = self.classes.User
sess = fixture_session()
u2 = User(name="newuser")
sess.add(u2)
assert u2 in sess
sess.commit()
assert u2 in sess
u3 = User(name="anotheruser")
sess.add(u3)
sess.rollback()
assert u3 not in sess
assert u2 in sess
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = fixture_session()
u1 = User(name="ed")
s.add(u1)
s.commit()
# this actually tests that the delete() operation,
# when cascaded to the "addresses" collection, does not
# trigger a flush (via lazyload) before the cascade is complete.
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
def test_trans_deleted_cleared_on_rollback(self):
User = self.classes.User
s = fixture_session()
u1 = User(name="ed")
s.add(u1)
s.commit()
s.delete(u1)
s.commit()
assert u1 not in s
s.rollback()
assert u1 not in s
def test_update_deleted_on_rollback_cascade(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
s.delete(u1)
assert u1 in s.deleted
assert u1.addresses[0] in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
assert u1.addresses[0] not in s.deleted
def test_update_deleted_on_rollback_orphan(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
u1 = User(name="ed", addresses=[Address(email_address="foo")])
s.add(u1)
s.commit()
a1 = u1.addresses[0]
u1.addresses.remove(a1)
s.flush()
eq_(s.query(Address).filter(Address.email_address == "foo").all(), [])
s.rollback()
assert a1 not in s.deleted
assert u1.addresses == [a1]
def test_commit_pending(self):
User = self.classes.User
sess = fixture_session()
u1 = User(name="newuser")
sess.add(u1)
sess.flush()
sess.commit()
eq_(u1.name, "newuser")
def test_concurrent_commit_pending(self):
User = self.classes.User
s1 = fixture_session()
u1 = User(name="edward")
s1.add(u1)
s1.commit()
s2 = fixture_session()
u2 = s2.query(User).filter(User.name == "edward").one()
u2.name = "will"
s2.commit()
assert u1.name == "will"
| AutoExpireTest |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 6454,
"end": 6544
} | class ____(_GeneratorDoNothingMiddleware):
pass
| GeneratorDoNothingAfterFailureMiddleware |
python | pytorch__pytorch | test/inductor/test_native_matmul.py | {
"start": 585,
"end": 4796
} | class ____(TestCase):
def _check_equal(
self,
f: Callable,
example_inputs: tuple[torch.Tensor],
):
compiled = torch.compile(f)
actual = compiled(*example_inputs)
expect = f(*example_inputs)
self.assertTrue(same(expect, actual))
def _check_code(
self,
f: Callable,
example_inputs: tuple[torch.Tensor],
kernel_count: int,
dot_count: int,
):
f = torch.compile(f)
code = run_and_get_triton_code(f, *example_inputs)
FileCheck().check_regex(r"triton.*mm.*\.run\(").run(code)
FileCheck().check_count(
"@triton.jit",
kernel_count,
).check_count(
"tl.dot",
dot_count,
).run(code)
def test_matmul(self):
def f(x, y):
z = x @ y
return z
M, K, N = 128, 128, 128
x = rand_strided((M, K), (K, 1), device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y))
self._check_code(f, (x, y), 1, 1)
def test_mm_1d_expand(self):
def f(x, y, M, K):
z = x[:, None].expand(M, K) @ y
return z
M, K, N = 128, 128, 128
x = rand_strided((M,), (1,), device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y, M, K))
self._check_code(f, (x, y, M, K), 1, 1)
def test_mm_2_expand(self):
def f(x, y, M, K):
z = x[:, None].expand(M, K) @ y
return z
M, K, N = 128, 128, 128
x = rand_strided((1,), (0,), device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y, M, K))
self._check_code(f, (x, y, M, K), 1, 1)
def test_matmul_fp16(self):
def f(x, y):
z = x @ y.to(x.dtype)
return z
M, K, N = 128, 128, 128
x = rand_strided((M, K), (K, 1), dtype=torch.float16, device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), dtype=torch.float32, device=GPU_TYPE)
self._check_equal(f, (x, y))
self._check_code(f, (x, y), 1, 1)
def test_reduction_mask_zeroout(self):
def f(x, y):
return (x + 1) @ (y - 2)
M, K, N = 62, 62, 62
x = rand_strided((M, K), (K, 1), device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y))
self._check_code(f, (x, y), 1, 1)
@skipIfXpu(
msg="Intel triton issue: https://github.com/intel/intel-xpu-backend-for-triton/issues/5394"
)
def test_3mm_add(self):
def f(x, y, z, w, r, t):
return x @ y + z @ w + r @ t
M, K, N = 128, 128, 128
x = rand_strided((M, K), (K, 1), device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), device=GPU_TYPE)
w = rand_strided((M, K), (K, 1), device=GPU_TYPE)
z = rand_strided((K, N), (N, 1), device=GPU_TYPE)
r = rand_strided((M, K), (K, 1), device=GPU_TYPE)
t = rand_strided((K, N), (N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y, z, w, r, t))
self._check_code(f, (x, y, z, w, r, t), 1, 3)
def test_mm_complex(self):
def f(x, y, z, w):
return x[z] @ y + w + 3
M, K, N = 128, 128, 128
x = rand_strided((M, K), (K, 1), device=GPU_TYPE)
y = rand_strided((K, N), (N, 1), device=GPU_TYPE)
z = torch.randint(M, (M, K), dtype=torch.long, device=GPU_TYPE)
w = rand_strided((M, N), (N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y, z, w))
self._check_code(f, (x, y, z, w), 1, 1)
def test_batchmatmul(self):
def f(x, y):
z = torch.bmm(x, y)
return z
B, M, K, N = 256, 128, 128, 128
x = rand_strided((B, M, K), (M * K, K, 1), device=GPU_TYPE)
y = rand_strided((B, K, N), (K * N, N, 1), device=GPU_TYPE)
self._check_equal(f, (x, y))
self._check_code(f, (x, y), 1, 1)
if HAS_GPU:
torch.set_default_device(GPU_TYPE)
if __name__ == "__main__":
if HAS_GPU:
run_tests()
| TestTritonDotReduction |
python | wandb__wandb | wandb/vendor/pygments/formatters/img.py | {
"start": 19131,
"end": 19455
} | class ____(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
.. versionadded:: 1.0
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
| JpgImageFormatter |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 435246,
"end": 449366
} | class ____(ParallelStatNode):
"""
This node represents a 'for i in cython.parallel.prange():' construct.
target NameNode the target iteration variable
else_clause Node or None the else clause of this loop
"""
child_attrs = ['body', 'target', 'else_clause', 'args', 'num_threads',
'chunksize', 'threading_condition']
body = target = else_clause = args = None
start = stop = step = None
is_prange = True
nogil = None
schedule = None
valid_keyword_arguments = ['schedule', 'nogil', 'num_threads', 'chunksize', 'use_threads_if']
class DummyIteratorNode(Node):
child_attrs = ["args"]
def __init__(self, pos, **kwds):
super().__init__(pos, **kwds)
# Pretend to be a ForInStatNode for control flow analysis,
# ensuring that the args get visited when the iterator would be.
self.iterator = self.DummyIteratorNode(pos, args=self.args)
def analyse_declarations(self, env):
super().analyse_declarations(env)
self.target.analyse_target_declaration(env)
if self.else_clause is not None:
self.else_clause.analyse_declarations(env)
if not self.args or len(self.args) > 3:
error(self.pos, "Invalid number of positional arguments to prange")
return
if len(self.args) == 1:
self.stop, = self.args
elif len(self.args) == 2:
self.start, self.stop = self.args
else:
self.start, self.stop, self.step = self.args
if self.schedule not in (None, 'static', 'dynamic', 'guided', 'runtime'):
error(self.pos, "Invalid schedule argument to prange: %s" % (self.schedule,))
def analyse_expressions(self, env):
was_nogil = env.nogil
if self.nogil:
env.nogil = True
if self.target is None:
error(self.pos, "prange() can only be used as part of a for loop")
return self
self.target = self.target.analyse_target_types(env)
if not self.target.type.is_numeric:
# Not a valid type, assume one for now anyway
if not self.target.type.is_pyobject:
# nogil_check will catch the is_pyobject case
error(self.target.pos,
"Must be of numeric type, not %s" % self.target.type)
self.index_type = PyrexTypes.c_py_ssize_t_type
else:
self.index_type = self.target.type
# Setup start, stop and step, allocating temps if needed
self.names = 'start', 'stop', 'step'
start_stop_step = self.start, self.stop, self.step
for node, name in zip(start_stop_step, self.names):
if node is not None:
node.analyse_types(env)
if not node.type.is_numeric:
error(node.pos, "%s argument must be numeric" % name)
continue
if not node.is_literal:
node = node.coerce_to_temp(env)
setattr(self, name, node)
# As we range from 0 to nsteps, computing the index along the
# way, we need a fitting type for 'i' and 'nsteps'
self.index_type = PyrexTypes.widest_numeric_type(
self.index_type, node.type)
if self.else_clause is not None:
self.else_clause = self.else_clause.analyse_expressions(env)
# Although not actually an assignment in this scope, it should be
# treated as such to ensure it is unpacked if a closure temp, and to
# ensure lastprivate behaviour and propagation. If the target index is
# not a NameNode, it won't have an entry, and an error was issued by
# ParallelRangeTransform
target_entry = getattr(self.target, 'entry', None)
if target_entry:
self.assignments[self.target.entry] = self.target.pos, None
node = super().analyse_expressions(env)
if node.chunksize:
if not node.schedule:
error(node.chunksize.pos,
"Must provide schedule with chunksize")
elif node.schedule == 'runtime':
error(node.chunksize.pos,
"Chunksize not valid for the schedule runtime")
elif (node.chunksize.type.is_int and
node.chunksize.is_literal and
node.chunksize.compile_time_value(env) <= 0):
error(node.chunksize.pos, "Chunksize must not be negative")
node.chunksize = node.chunksize.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
if node.nogil:
env.nogil = was_nogil
node.is_nested_prange = node.parent and node.parent.is_prange
if node.is_nested_prange:
parent = node
while parent.parent and parent.parent.is_prange:
parent = parent.parent
parent.assignments.update(node.assignments)
parent.privates.update(node.privates)
parent.assigned_nodes.extend(node.assigned_nodes)
return node
def nogil_check(self, env):
names = 'start', 'stop', 'step', 'target', 'use_threads_if'
nodes = self.start, self.stop, self.step, self.target, self.threading_condition
self._parameters_nogil_check(env, names, nodes)
def generate_execution_code(self, code):
"""
Generate code in the following steps
1) copy any closure variables determined thread-private
into temporaries
2) allocate temps for start, stop and step
3) generate a loop that calculates the total number of steps,
which then computes the target iteration variable for every step:
for i in prange(start, stop, step):
...
becomes
nsteps = (stop - start) / step;
i = start;
#pragma omp parallel for lastprivate(i)
for (temp = 0; temp < nsteps; temp++) {
i = start + step * temp;
...
}
Note that accumulation of 'i' would have a data dependency
between iterations.
Also, you can't do this
for (i = start; i < stop; i += step)
...
as the '<' operator should become '>' for descending loops.
'for i from x < i < y:' does not suffer from this problem
as the relational operator is known at compile time!
4) release our temps and write back any private closure variables
"""
self.declare_closure_privates(code)
# This can only be a NameNode
target_index_cname = self.target.entry.cname
# This will be used as the dict to format our code strings, holding
# the start, stop , step, temps and target cnames
fmt_dict = {
'target': target_index_cname,
'target_type': self.target.type.empty_declaration_code()
}
# Setup start, stop and step, allocating temps if needed
start_stop_step = self.start, self.stop, self.step
defaults = '0', '0', '1'
for node, name, default in zip(start_stop_step, self.names, defaults):
if node is None:
result = default
elif node.is_literal:
result = node.get_constant_c_result_code()
else:
node.generate_evaluation_code(code)
result = node.result()
fmt_dict[name] = result
if self.threading_condition is not None:
self.threading_condition.generate_evaluation_code(code)
fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
# TODO: check if the step is 0 and if so, raise an exception in a
# 'with gil' block. For now, just abort
if self.step is not None and self.step.has_constant_result() and self.step.constant_result == 0:
error(node.pos, "Iteration with step 0 is invalid.")
elif not fmt_dict['step'].isdigit() or int(fmt_dict['step']) == 0:
code.putln("if (((%(step)s) == 0)) abort();" % fmt_dict)
self.setup_parallel_control_flow_block(code) # parallel control flow block
# Note: nsteps is private in an outer scope if present
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStdlibH", "ModuleSetupCode.c"))
code.putln("%(nsteps)s = (%(stop)s - %(start)s + %(step)s - %(step)s/abs(%(step)s)) / %(step)s;" % fmt_dict)
# The target iteration variable might not be initialized, do it only if
# we are executing at least 1 iteration, otherwise we should leave the
# target unaffected. The target iteration variable is firstprivate to
# shut up compiler warnings caused by lastprivate, as the compiler
# erroneously believes that nsteps may be <= 0, leaving the private
# target index uninitialized
code.putln("if (%(nsteps)s > 0)" % fmt_dict)
code.begin_block() # if block
self.generate_loop(code, fmt_dict)
code.end_block() # end if block
self.restore_labels(code)
if self.else_clause:
if self.breaking_label_used:
code.put("if (%s < 2)" % Naming.parallel_why)
code.begin_block() # else block
code.putln("/* else */")
self.else_clause.generate_execution_code(code)
code.end_block() # end else block
# ------ cleanup ------
self.end_parallel_control_flow_block(code) # end parallel control flow block
# And finally, release our privates and write back any closure
# variables
for temp in start_stop_step + (self.chunksize, self.threading_condition):
if temp is not None:
temp.generate_disposal_code(code)
temp.free_temps(code)
code.funcstate.release_temp(fmt_dict['i'])
code.funcstate.release_temp(fmt_dict['nsteps'])
self.release_closure_privates(code)
def generate_loop(self, code, fmt_dict):
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
if not self.is_parallel:
code.put("#pragma omp for")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.parent.privatization_insertion_point
else:
code.put("#pragma omp parallel")
if self.threading_condition is not None:
code.put(" if(%s)" % self.threading_condition.result())
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.privatization_insertion_point
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # pragma omp parallel begin block
# Initialize the GIL if needed for this thread
self.begin_parallel_block(code)
if self.is_nested_prange:
code.putln("#if 0")
else:
code.putln("#ifdef _OPENMP")
code.put("#pragma omp for")
for entry, op in sorted(self.privates.items()):
# Don't declare the index variable as a reduction
if op and op in "+*-&^|" and entry != self.target.entry:
if entry.type.is_pyobject:
error(self.pos, "Python objects cannot be reductions")
else:
#code.put(" reduction(%s:%s)" % (op, entry.cname))
# This is the only way reductions + nesting works in gcc4.5
reduction_codepoint.put(
" reduction(%s:%s)" % (op, entry.cname))
else:
if not entry.type.is_pyobject:
code.put(" firstprivate(%s)" % entry.cname)
code.put(" lastprivate(%s)" % entry.cname)
if self.schedule:
if self.chunksize:
chunksize = ", %s" % self.evaluate_before_block(code, self.chunksize)
else:
chunksize = ""
code.put(" schedule(%s%s)" % (self.schedule, chunksize))
self.put_num_threads(reduction_codepoint)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
code.begin_block() # for loop block
guard_around_body_codepoint = code.insertion_point()
# Start if guard block around the body. This may be unnecessary, but
# at least it doesn't spoil indentation
code.begin_block()
code.putln("%(target)s = (%(target_type)s)(%(start)s + %(step)s * %(i)s);" % fmt_dict)
if self.is_parallel and not self.is_nested_prange:
# nested pranges are not omp'ified, temps go to outer loops
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code, should_flush=True)
if self.is_parallel and not self.is_nested_prange:
# nested pranges are not omp'ified, temps go to outer loops
self.privatize_temps(code)
if self.breaking_label_used:
# Put a guard around the loop body in case return, break or
# exceptions might be used
guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)
code.end_block() # end guard around loop body
code.end_block() # end for loop block
if self.is_parallel:
# Release the GIL and deallocate the thread state
self.end_parallel_block(code)
code.end_block() # pragma omp parallel end block
| ParallelRangeNode |
python | pytorch__pytorch | torch/distributed/argparse_util.py | {
"start": 2221,
"end": 3903
} | class ____(Action):
"""
Check whether the env var ``PET_{dest}`` exists before defaulting to the given ``default`` value.
Equivalent to
``store_true`` argparse built-in action except that the argument can
be omitted from the commandline if the env var is present and has a
non-zero value.
.. note:: it is redundant to pass ``default=True`` for arguments
that use this action because a flag should be ``True``
when present and ``False`` otherwise.
Example:
::
parser.add_argument("--verbose", action=check_env)
./program -> args.verbose=False
./program --verbose -> args.verbose=True
PET_VERBOSE=1 ./program -> args.verbose=True
PET_VERBOSE=0 ./program -> args.verbose=False
PET_VERBOSE=0 ./program --verbose -> args.verbose=True
Anti-pattern (don't do this):
::
parser.add_argument("--verbose", action=check_env, default=True)
./program -> args.verbose=True
./program --verbose -> args.verbose=True
PET_VERBOSE=1 ./program -> args.verbose=True
PET_VERBOSE=0 ./program -> args.verbose=False
"""
def __init__(self, dest, default=False, **kwargs) -> None:
env_name = f"PET_{dest.upper()}"
default = bool(int(os.environ.get(env_name, "1" if default else "0")))
super().__init__(dest=dest, const=True, default=default, nargs=0, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
| check_env |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-metal/llama_index/readers/metal/base.py | {
"start": 146,
"end": 2317
} | class ____(BaseReader):
"""
Metal reader.
Args:
api_key (str): Metal API key.
client_id (str): Metal client ID.
index_id (str): Metal index ID.
"""
def __init__(self, api_key: str, client_id: str, index_id: str):
import_err_msg = (
"`metal_sdk` package not found, please run `pip install metal_sdk`"
)
try:
import metal_sdk # noqa
except ImportError:
raise ImportError(import_err_msg)
from metal_sdk.metal import Metal
"""Initialize with parameters."""
self._api_key = api_key
self._client_id = client_id
self._index_id = index_id
self.metal_client = Metal(api_key, client_id, index_id)
def load_data(
self,
limit: int,
query_embedding: Optional[List[float]] = None,
filters: Optional[Dict[str, Any]] = None,
separate_documents: bool = True,
**query_kwargs: Any,
) -> List[Document]:
"""
Load data from Metal.
Args:
query_embedding (Optional[List[float]]): Query embedding for search.
limit (int): Number of results to return.
filters (Optional[Dict[str, Any]]): Filters to apply to the search.
separate_documents (Optional[bool]): Whether to return separate
documents per retrieved entry. Defaults to True.
**query_kwargs: Keyword arguments to pass to the search.
Returns:
List[Document]: A list of documents.
"""
payload = {
"embedding": query_embedding,
"filters": filters,
}
response = self.metal_client.search(payload, limit=limit, **query_kwargs)
documents = []
for item in response["data"]:
text = item["text"] or (item["metadata"] and item["metadata"]["text"])
documents.append(Document(text=text))
if not separate_documents:
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
| MetalReader |
python | kamyu104__LeetCode-Solutions | Python/number-of-beautiful-integers-in-the-range.py | {
"start": 3176,
"end": 4528
} | class ____(object):
def numberOfBeautifulIntegers(self, low, high, k):
"""
:type low: int
:type high: int
:type k: int
:rtype: int
"""
def f(x):
digits = map(int, str(x))
lookup = [[[[[-1]*k for _ in xrange(2*len(digits)+1)] for _ in xrange(2)] for _ in xrange(2)] for _ in xrange(len(digits))]
def memoization(i, zero, tight, diff, total):
if i == len(digits):
return int(zero == diff == total == 0)
if lookup[i][zero][tight][diff][total] == -1:
result = 0
for d in xrange((digits[i] if tight else 9)+1):
new_zero = int(zero and d == 0)
new_tight = int(tight and d == digits[i])
new_diff = diff+((1 if d%2 == 0 else -1) if new_zero == 0 else 0)
new_total = (total*10+d)%k
result += memoization(i+1, new_zero, new_tight, new_diff, new_total)
lookup[i][zero][tight][diff][total] = result
return lookup[i][zero][tight][diff][total]
return memoization(0, 1, 1, 0, 0)
return f(high)-f(low-1)
# Time: O(n^2 * k), n = len(str(high))
# Space: O(n * k)
# dp (slower but less space)
| Solution3 |
python | getsentry__sentry | src/sentry/interfaces/debug_meta.py | {
"start": 120,
"end": 1228
} | class ____(Interface):
"""
Holds debug meta information for processing stacktraces
and similar things. This information is deleted after event processing.
Currently two attributes exist:
``sdk_info``:
sets the SDK that is used for the system. This affects the lookup
for system symbols. If not defined, system symbols are not looked up.
``images``:
a list of debug images and their mappings.
"""
ephemeral = False
path = "debug_meta"
external_type = "debugmeta"
@classmethod
def to_python(cls, data, **kwargs):
return super().to_python(
{
"images": data.get("images", None) or [],
"sdk_info": data.get("sdk_info"),
"is_debug_build": data.get("is_debug_build"),
},
**kwargs,
)
def to_json(self):
return prune_empty_keys(
{
"images": self.images or None,
"sdk_info": self.sdk_info or None,
"is_debug_build": self.is_debug_build,
}
)
| DebugMeta |
python | spulec__freezegun | freezegun/api.py | {
"start": 15967,
"end": 17013
} | class ____:
def __init__(self, time_to_freeze: datetime.datetime, start: datetime.datetime):
self.time_to_freeze = time_to_freeze
self.start = start
def __call__(self) -> datetime.datetime:
return self.time_to_freeze + (real_datetime.now() - self.start)
def tick(self, delta: Union[datetime.timedelta, float]=datetime.timedelta(seconds=1)) -> datetime.datetime:
if isinstance(delta, numbers.Integral):
self.move_to(self.time_to_freeze + datetime.timedelta(seconds=int(delta)))
elif isinstance(delta, numbers.Real):
self.move_to(self.time_to_freeze + datetime.timedelta(seconds=float(delta)))
else:
self.move_to(self.time_to_freeze + delta) # type: ignore
return self.time_to_freeze
def move_to(self, target_datetime: _Freezable) -> None:
"""Moves frozen date to the given ``target_datetime``"""
self.start = real_datetime.now()
self.time_to_freeze = _parse_time_to_freeze(target_datetime)
| TickingDateTimeFactory |
python | tensorflow__tensorflow | tensorflow/dtensor/python/input_util.py | {
"start": 6659,
"end": 15142
} | class ____(iterator_ops.IteratorSpec):
"""Type specification for `_DTensorIterator`."""
__slots__ = ['_global_element_spec', '_layouts_str']
def __init__(
self, global_element_spec: tensor_spec.TensorSpec, layouts_str: Any):
super().__init__(global_element_spec)
self._global_element_spec = global_element_spec
self._layouts_str = layouts_str
@property
def value_type(self):
return _DTensorIterator
def _serialize(self):
return (self._global_element_spec, self._layouts_str)
@property
def _component_specs(self):
return (tensor_spec.TensorSpec([], dtypes.resource),)
def _to_components(self, value):
return (value._iterator_resource_dtensor,) # pylint: disable=protected-access
def _from_components(self, components):
layouts = nest.map_structure(
layout_lib.Layout.from_string, self._layouts_str)
return _DTensorIterator(
dtensor_components=components,
global_element_spec=self._global_element_spec,
layouts=layouts)
@classmethod
def from_value(cls, value):
return cls(value._global_element_spec, value._layouts_str) # pylint: disable=protected-access
def _validate_input(flattened_layouts: Sequence[layout_lib.Layout],
flattened_elem_spec: Sequence[tensor_spec.TensorSpec],
dataset_already_batched: bool):
"""Checks that the dataset's layouts and element specs are compatible.
Args:
flattened_layouts: the flattened list of layouts used to distribute the
dataset.
flattened_elem_spec: the flattened list of element specs used in the
dataset's components.
dataset_already_batched: whether the dataset to be validated is already
batched.
Raises:
ValueError: if the dataset's inputs are incompatible.
"""
if not flattened_elem_spec:
raise ValueError(
'Expected input element spec of at least one element, was empty.')
first_elem_shape = flattened_elem_spec[0].shape
for layout, elem_spec in zip(flattened_layouts, flattened_elem_spec):
if elem_spec.shape.rank is None:
raise ValueError(
'Dataset element shape must have a valid rank, got spec %s.' %
elem_spec)
# Check that layout's rank matches the element's rank. If dataset is not yet
# batched, then the layout's rank must be one greater than the element's
# rank.
expected_rank = elem_spec.shape.rank
if not dataset_already_batched:
expected_rank += 1
if layout.rank != expected_rank:
raise ValueError(
('Expected layout with rank %d for element spec %s, got layout %s. '
'Check that the dataset is not batched before passing to '
'DTensorDataset.') %
(expected_rank, elem_spec, layout.sharding_specs))
if dataset_already_batched:
# Check that the batch dimension size of all dataset elements match.
batch_dim_size = first_elem_shape.as_list()[0]
if batch_dim_size is None:
raise ValueError(
('Size of batch dimension of element spec %s is None. Ensure '
'drop_remainder=True when batching the dataset.') % elem_spec)
if elem_spec.shape.as_list()[0] != batch_dim_size:
raise ValueError(
('Size of batch dimension of element spec %s does not match '
'expected size %d.') % (elem_spec, batch_dim_size))
def _shard_counts(layout: layout_lib.Layout,
batch_dim: Optional[str] = None) -> List[int]:
"""Computes a list of the number of shards in each dimension of the layout.
The shard counts are used to slice each dataset element. The batch dimension's
count is overridden to 1 since we only consider how many shards to make
locally (within each local replica). Sharding across clients is handled by
either tf.data.Dataset's shard transformation (in the single-client case) or
tf.data service's distribute function (in the multi-client case).
Args:
layout: the layout to compute the shard counts for.
batch_dim: the name of the batch dimension of the layout, if present.
Returns:
A list of shard counts, one element per dimension of the layout.
"""
shard_counts = []
for spec in layout.sharding_specs:
if spec in (batch_dim, layout_lib.UNSHARDED):
shard_counts.append(1)
else:
shard_counts.append(layout.mesh.dim_size(spec))
return shard_counts
def _index_matrix(layout: layout_lib.Layout,
elem_spec: tensor_spec.TensorSpec) -> tensor.Tensor:
"""Computes a utility matrix to derive device-based slice offsets.
This function builds a matrix of shape `[mesh.rank, layout.rank]` for each
dataset element. This matrix can be used to slice the DTensor components
returned by the iterator according to the local device that component is to be
placed on. This can be done by multiplying the device offsets of shape
`[1, mesh.rank]` with this index matrix to get a `[1, layout.rank]` shape
tensor containing the slice offsets.
Note: the index on the batch dim is always 0 since sharding on the batch
dimension is handled by either tf.data.Dataset's shard transformation (in the
single-client case) or tf.data service's distribute function (in the
multi-client case). If there is no sharding on the batch dimension (or any
other dimension), the slice index remains 0.
Args:
layout: the layout of the dataset element.
elem_spec: the spec of the dataset element.
Returns:
The index matrix as a tensor.
"""
matrix = []
for dim in layout.mesh.dim_names:
row = [0]
for layout_idx, spec in enumerate(layout.sharding_specs[1:]):
if spec == layout_lib.UNSHARDED or spec != dim:
row.append(0)
else:
row.append(elem_spec.shape[layout_idx] // layout.mesh.dim_size(dim))
matrix.append(row)
return constant_op.constant(matrix, dtype=dtypes.int32)
def _pack_iterator_resource_dtensor(
datasets: List[Tuple[int, data_types.DatasetV2]],
layouts: Any,
mesh: layout_lib.Mesh,
num_local_devices_per_replica: int):
"""Creates a DTensor iterator resource for the per-replica datasets.
Given a list of replica ID to tf.data.Dataset mappings, this function creates
iterators for each device and then packs the underlying iterator resource
tensors into a single DTensor. This resource tensor is used by the
IteratorGetNext op to retrieve the next element in the dataset.
Args:
datasets: a list of tuples of each unique local replica ID to the dataset
object whose elements will be placed on the devices corresponding to that
replica.
layouts: a structure of DTensor layouts to be applied to the elements
returned by the underlying iterators. This can be a single layout or
(possibly nested) tuples or dictionaries of layouts, and the structure
must match the structure of the iterator elements.
mesh: the DTensor mesh to place the iterator batches on.
num_local_devices_per_replica: the number of devices in each data-parallel
replica.
Returns:
A DTensor of the underlying iterator resource tensors.
"""
host_mesh_devices = mesh.host_mesh().local_devices()
device_idx = 0
iterators = []
for _, dataset in datasets:
for idx in range(num_local_devices_per_replica):
with ops.device_v2(host_mesh_devices[device_idx]):
device_dataset = dataset.shard(
num_shards=num_local_devices_per_replica, index=idx)
iterators.append(iter(device_dataset))
device_idx += 1
if device_idx != len(host_mesh_devices):
raise ValueError(
'The `datasets` argument does not have the correct number of'
f' underlying datasets, found {device_idx} but expected'
f' {len(host_mesh_devices)}.')
host_layouts = nest.map_structure(
lambda l: layout_lib.Layout(l.sharding_specs, mesh.host_mesh()), layouts)
# Pack the iterator resource tensors into a replicated 0-dimensional DTensor
# and set the element layouts.
iterator_resources = [it._iterator_resource for it in iterators] # pylint: disable=protected-access
d_iterator_resource = api.pack(
iterator_resources,
layout_lib.Layout.replicated(mesh=mesh.host_mesh(), rank=0))
api._dtensor_device().set_iterator_element_layouts( # pylint: disable=protected-access
d_iterator_resource, nest.flatten(host_layouts))
return d_iterator_resource
@tf_export('experimental.dtensor.DTensorDataset', v1=[])
| _DTensorIteratorSpec |
python | huggingface__transformers | src/transformers/models/unispeech_sat/modeling_unispeech_sat.py | {
"start": 8993,
"end": 10493
} | class ____(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [UniSpeechSatGroupNormConvLayer(config, layer_id=0)] + [
UniSpeechSatNoLayerNormConvLayer(config, layer_id=i + 1)
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
UniSpeechSatLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
| UniSpeechSatFeatureEncoder |
python | ZoranPandovski__al-go-rithms | games/Python/paddleball.py | {
"start": 49,
"end": 1831
} | class ____:
def __init__(self, canvas, paddle, score, color): #function saved as __init__ in it to create oval. Takes parameters canvas and color
self.canvas = canvas #saves canvas var as "Canvas" to make values of canvas(tk, height=400, width=50, bd(border)= 0 and highlightthickness = 0(to make look like "Game")(see at line 26)
self.paddle = paddle
self.score = score
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)#self.id = identifier for oval if in oval class and __init__ function(oval or rect)
starts = [-3, -2, -1, 1, 2, 3]
random.shuffle(starts)
self.x = starts[0]#changing direction
self.y = -3
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.hit_bottom = False
def hit_paddle(self, pos):
paddle_pos = self.canvas.coords(self.paddle.id)
if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:
self.score.hit()
self.x += self.paddle.x
return True
return False
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id) #pos is var and coords is function of tkinter(current cooordinates of (self.id(identifier(red oval)))
if pos[1] <= 0: #changes 'y' depending on coords
self.y = 3
if pos[3] >= self.canvas_height:
self.hit_bottom = True
if self.hit_paddle(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
| Ball |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/shrinking/choicetree.py | {
"start": 4607,
"end": 4951
} | class ____:
def __init__(self) -> None:
self.children: dict[int, TreeNode] = defaultdict(TreeNode)
self.live_child_count: int | None = None
self.n: int | None = None
@property
def exhausted(self) -> bool:
return self.live_child_count == 0
DeadNode = TreeNode()
DeadNode.live_child_count = 0
| TreeNode |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum11.py | {
"start": 516,
"end": 788
} | class ____(Enum):
def __init__(self, value: int):
self._value_ = value
RED = 1
# This should generate an error because of a type mismatch.
GREEN = "green"
# This should generate an error because of a type mismatch.
BLUE = (1, "blue")
| Enum3 |
python | getsentry__sentry | src/sentry/grouping/enhancer/rules.py | {
"start": 202,
"end": 291
} | class ____(TypedDict):
match: dict[str, str]
actions: list[str]
| EnhancementRuleDict |
python | getsentry__sentry | src/sentry/monitors/processing_errors/errors.py | {
"start": 2708,
"end": 2888
} | class ____(TypedDict):
"""
Monitor was disabled and we couldn't assign a seat
"""
type: Literal[ProcessingErrorType.MONITOR_DISABLED_NO_QUOTA]
| MonitorDisabledNoQuota |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 14661,
"end": 42724
} | class ____(TypedDict):
pattern: str
expiry: str
scope: SpanDescriptionScope
redaction: SpanDescriptionRuleRedaction
def _should_extract_abnormal_mechanism(project: Project) -> bool:
return sample_modulo(
"sentry-metrics.releasehealth.abnormal-mechanism-extraction-rate",
project.organization_id,
)
def _get_desktop_browser_performance_profiles(
organization: Organization,
) -> list[dict[str, Any]]:
return [
{
"name": "Chrome",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 900.0,
"p50": 1600.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 1200.0,
"p50": 2400.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 200.0,
"p50": 400.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Chrome",
},
},
{
"name": "Firefox",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 900.0,
"p50": 1600.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 1200.0,
"p50": 2400.0,
"optional": True, # Only available on Firefox 122 and beyond
},
{
"measurement": "cls",
"weight": 0.0,
"p10": 0.1,
"p50": 0.25,
"optional": False,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 200.0,
"p50": 400.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Firefox",
},
},
{
"name": "Safari",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 900.0,
"p50": 1600.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.0,
"p10": 1200.0,
"p50": 2400.0,
"optional": False,
},
{
"measurement": "cls",
"weight": 0.0,
"p10": 0.1,
"p50": 0.25,
"optional": False,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 200.0,
"p50": 400.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Safari",
},
},
{
"name": "Edge",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 900.0,
"p50": 1600.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 1200.0,
"p50": 2400.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 200.0,
"p50": 400.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Edge",
},
},
{
"name": "Opera",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 900.0,
"p50": 1600.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 1200.0,
"p50": 2400.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 200.0,
"p50": 400.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Opera",
},
},
{
"name": "Chrome INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "or",
"inner": [
{
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Chrome",
},
{
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Google Chrome",
},
],
},
},
{
"name": "Edge INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Edge",
},
},
{
"name": "Opera INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Opera",
},
},
]
def _get_mobile_browser_performance_profiles(
organization: Organization,
) -> list[dict[str, Any]]:
return [
{
"name": "Chrome Mobile",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 1800.0,
"p50": 3000.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 2500.0,
"p50": 4000.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 800.0,
"p50": 1800.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Chrome Mobile",
},
},
{
"name": "Firefox Mobile",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 1800.0,
"p50": 3000.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 2500.0,
"p50": 4000.0,
"optional": True, # Only available on Firefox 122 and beyond
},
{
"measurement": "cls",
"weight": 0.0,
"p10": 0.1,
"p50": 0.25,
"optional": False,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 800.0,
"p50": 1800.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Firefox Mobile",
},
},
{
"name": "Safari Mobile",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 1800.0,
"p50": 3000.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.0,
"p10": 2500.0,
"p50": 4000.0,
"optional": False,
},
{
"measurement": "cls",
"weight": 0.0,
"p10": 0.1,
"p50": 0.25,
"optional": False,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 800.0,
"p50": 1800.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Mobile Safari",
},
},
{
"name": "Edge Mobile",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 1800.0,
"p50": 3000.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 2500.0,
"p50": 4000.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 800.0,
"p50": 1800.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Edge Mobile",
},
},
{
"name": "Opera Mobile",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 1800.0,
"p50": 3000.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 2500.0,
"p50": 4000.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 800.0,
"p50": 1800.0,
"optional": True,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Opera Mobile",
},
},
{
"name": "Chrome Mobile INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "or",
"inner": [
{
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Chrome Mobile",
},
],
},
},
{
"name": "Edge Mobile INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Edge Mobile",
},
},
{
"name": "Opera Mobile INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "eq",
"name": "event.contexts.browser.name",
"value": "Opera Mobile",
},
},
]
def _get_default_browser_performance_profiles(
organization: Organization,
) -> list[dict[str, Any]]:
return [
{
"name": "Default",
"scoreComponents": [
{
"measurement": "fcp",
"weight": 0.15,
"p10": 900.0,
"p50": 1600.0,
"optional": True,
},
{
"measurement": "lcp",
"weight": 0.30,
"p10": 1200.0,
"p50": 2400.0,
"optional": True,
},
{
"measurement": "cls",
"weight": 0.15,
"p10": 0.1,
"p50": 0.25,
"optional": True,
},
{
"measurement": "ttfb",
"weight": 0.10,
"p10": 200.0,
"p50": 400.0,
"optional": True,
},
],
"condition": {
"op": "and",
"inner": [],
},
},
{
"name": "Default INP",
"scoreComponents": [
{
"measurement": "inp",
"weight": 1.0,
"p10": 200.0,
"p50": 500.0,
"optional": False,
},
],
"condition": {
"op": "and",
"inner": [],
},
},
]
def _get_mobile_performance_profiles(
organization: Organization,
) -> list[dict[str, Any]]:
if not features.has(
"organizations:performance-calculate-mobile-perf-score-relay", organization
):
return []
return [
{
"name": "Mobile",
"version": "mobile.alpha",
"scoreComponents": [
{
"measurement": "time_to_initial_display",
"weight": 0.25,
"p10": 1800.0,
"p50": 3000.0,
"optional": True,
},
{
"measurement": "time_to_full_display",
"weight": 0.25,
"p10": 2500.0,
"p50": 4000.0,
"optional": True,
},
{
"measurement": "app_start_warm",
"weight": 0.25,
"p10": 200.0,
"p50": 500.0,
"optional": True,
},
{
"measurement": "app_start_cold",
"weight": 0.25,
"p10": 200.0,
"p50": 500.0,
"optional": True,
},
],
"condition": {
"op": "and",
"inner": [
{
"op": "or",
"inner": [
{
"op": "eq",
"name": "event.sdk.name",
"value": "sentry.cocoa",
},
{
"op": "eq",
"name": "event.sdk.name",
"value": "sentry.java.android",
},
],
},
{"op": "eq", "name": "event.contexts.trace.op", "value": "ui.load"},
],
},
}
]
def _get_project_config(
project: Project, project_keys: Iterable[ProjectKey] | None = None
) -> ProjectConfig:
if project.status != ObjectStatus.ACTIVE:
return ProjectConfig(project, disabled=True)
public_keys = get_public_key_configs(project_keys=project_keys)
with sentry_sdk.start_span(op="get_public_config"):
now = datetime.now(timezone.utc)
cfg = {
"disabled": False,
"slug": project.slug,
"lastFetch": now,
"lastChange": now,
"rev": uuid.uuid4().hex,
"publicKeys": public_keys,
"config": {
"allowedDomains": list(get_origins(project)),
"trustedRelays": [
r["public_key"]
for r in project.organization.get_option("sentry:trusted-relays", [])
if r
],
"piiConfig": get_pii_config(project),
"datascrubbingSettings": get_datascrubbing_settings(project),
},
"organizationId": project.organization_id,
"projectId": project.id, # XXX: Unused by Relay, required by Python store
}
config = cfg["config"]
if features.has("organizations:ingest-through-trusted-relays-only", project.organization):
config["trustedRelaySettings"] = {
"verifySignature": project.organization.get_option(
"sentry:ingest-through-trusted-relays-only",
INGEST_THROUGH_TRUSTED_RELAYS_ONLY_DEFAULT,
)
}
with sentry_sdk.start_span(op="get_exposed_features"):
if exposed_features := get_exposed_features(project):
config["features"] = exposed_features
# NOTE: Omitting dynamicSampling because of a failure increases the number
# of events forwarded by Relay, because dynamic sampling will stop filtering
# anything.
add_experimental_config(config, "sampling", get_dynamic_sampling_config, project)
# Rules to replace high cardinality transaction names
if not features.has("projects:transaction-name-clustering-disabled", project):
add_experimental_config(config, "txNameRules", get_transaction_names_config, project)
# Mark the project as ready if it has seen >= 10 clusterer runs.
# This prevents projects from prematurely marking all URL transactions as sanitized.
if get_clusterer_meta(ClustererNamespace.TRANSACTIONS, project)["runs"] >= MIN_CLUSTERER_RUNS:
config["txNameReady"] = True
config["breakdownsV2"] = project.get_option("sentry:breakdowns")
add_experimental_config(config, "metrics", get_metrics_config, project)
if _should_extract_transaction_metrics(project):
add_experimental_config(
config,
"transactionMetrics",
get_transaction_metrics_settings,
project,
config.get("breakdownsV2"),
)
# This config key is technically not specific to _transaction_ metrics,
# is however currently both only applied to transaction metrics in
# Relay, and only used to tag transaction metrics in Sentry.
add_experimental_config(
config,
"metricConditionalTagging",
get_metric_conditional_tagging_rules,
project,
)
if metric_extraction := get_metric_extraction_config(project):
config["metricExtraction"] = metric_extraction
config["sessionMetrics"] = {
"version": (
EXTRACT_ABNORMAL_MECHANISM_VERSION
if _should_extract_abnormal_mechanism(project)
else EXTRACT_METRICS_VERSION
),
}
performance_score_profiles = [
*_get_desktop_browser_performance_profiles(project.organization),
*_get_mobile_browser_performance_profiles(project.organization),
*_get_mobile_performance_profiles(project.organization),
*_get_default_browser_performance_profiles(project.organization),
]
if performance_score_profiles:
config["performanceScore"] = {"profiles": performance_score_profiles}
with sentry_sdk.start_span(op="get_filter_settings"):
if filter_settings := get_filter_settings(project):
config["filterSettings"] = filter_settings
with sentry_sdk.start_span(op="get_grouping_config_dict_for_project"):
grouping_config = get_grouping_config_dict_for_project(project)
if grouping_config is not None:
config["groupingConfig"] = grouping_config
with sentry_sdk.start_span(op="get_event_retention"):
event_retention = quotas.backend.get_event_retention(project.organization)
if event_retention is not None:
config["eventRetention"] = event_retention
with sentry_sdk.start_span(op="get_downsampled_event_retention"):
downsampled_event_retention = quotas.backend.get_downsampled_event_retention(
project.organization
)
if downsampled_event_retention is not None:
config["downsampledEventRetention"] = downsampled_event_retention
with sentry_sdk.start_span(op="get_retentions"):
retentions = quotas.backend.get_retentions(project.organization)
retentions_config = {
RETENTIONS_CONFIG_MAPPING[c]: v.to_object()
for c, v in retentions.items()
if c in RETENTIONS_CONFIG_MAPPING
}
if retentions_config:
config["retentions"] = retentions_config
with sentry_sdk.start_span(op="get_all_quotas"):
if quotas_config := get_quotas(project, keys=project_keys):
config["quotas"] = quotas_config
if features.has("organizations:log-project-config", project.organization):
try:
logger.info(
"log-project-config - get_project_config: Logging sampling feature flags for project %s in org %s.",
project.id,
project.organization.id,
extra={
"project_id": str(project.id),
"org_id": str(project.organization.id),
"sampling_rule_count": (
len(config["sampling"]["rules"]) if "sampling" in config else None
),
"dynamic_sampling_feature_flag": features.has(
"organizations:dynamic-sampling", project.organization
),
"dynamic_sampling_custom_feature_flag": features.has(
"organizations:dynamic-sampling-custom", project.organization
),
"dynamic_sampling_mode": project.organization.get_option(
"sentry:sampling_mode", None
),
"dynamic_sampling_org_target_rate": project.organization.get_option(
"sentry:target_sample_rate", None
),
"dynamic_sampling_biases": project.get_option(
"sentry:dynamic_sampling_biases", None
),
"low_volume_projects_sample_rate": get_boost_low_volume_projects_sample_rate(
org_id=project.organization.id,
project_id=project.id,
error_sample_rate_fallback=None,
),
},
)
logger.info(
"log-project-config - get_project_config: Logging project sampling config for project %s in org %s.",
project.id,
project.organization.id,
extra={
"project_sampling_config": config["sampling"] if "sampling" in config else None,
"project_id": str(project.id),
"org_id": str(project.organization.id),
"dynamic_sampling_feature_flag": features.has(
"organizations:dynamic-sampling", project.organization
),
"dynamic_sampling_custom_feature_flag": features.has(
"organizations:dynamic-sampling-custom", project.organization
),
"dynamic_sampling_mode": project.organization.get_option(
"sentry:sampling_mode", None
),
"dynamic_sampling_org_target_rate": project.organization.get_option(
"sentry:target_sample_rate", None
),
},
)
except Exception:
capture_exception()
return ProjectConfig(project, **cfg)
| SpanDescriptionRule |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 13688,
"end": 14113
} | class ____(HTTPSuccessful):
"""
subclass of :class:`~HTTPSuccessful`
This indicates that the returned metainformation in the entity-header is
not the definitive set as available from the origin server, but is
gathered from a local or a third-party copy.
code: 203, title: Non-Authoritative Information
"""
code = 203
title = 'Non-Authoritative Information'
| HTTPNonAuthoritativeInformation |
python | miyuchina__mistletoe | mistletoe/latex_token.py | {
"start": 75,
"end": 202
} | class ____(span_token.SpanToken):
pattern = re.compile(r'(\${1,2})([^$]+?)\1')
parse_inner = False
parse_group = 0
| Math |
python | apache__airflow | providers/apache/kafka/tests/integration/apache/kafka/operators/test_produce.py | {
"start": 1873,
"end": 3519
} | class ____:
"""
test ProduceToTopicOperator
"""
def test_producer_operator_test_1(self):
GROUP = "operator.producer.test.integration.test_1"
TOPIC = "operator.producer.test.integration.test_1"
t = ProduceToTopicOperator(
kafka_config_id="kafka_default",
task_id="produce_to_topic",
topic=TOPIC,
producer_function="integration.apache.kafka.operators.test_produce._producer_function",
)
t.execute(context={})
config = {
"bootstrap.servers": "broker:29092",
"group.id": GROUP,
"enable.auto.commit": False,
"auto.offset.reset": "beginning",
}
c = Consumer(config)
c.subscribe([TOPIC])
msg = c.consume()
assert msg[0].key() == b"0"
assert msg[0].value() == b"1"
def test_producer_operator_test_2(self):
GROUP = "operator.producer.test.integration.test_2"
TOPIC = "operator.producer.test.integration.test_2"
t = ProduceToTopicOperator(
kafka_config_id="kafka_default",
task_id="produce_to_topic",
topic=TOPIC,
producer_function=_producer_function,
)
t.execute(context={})
config = {
"bootstrap.servers": "broker:29092",
"group.id": GROUP,
"enable.auto.commit": False,
"auto.offset.reset": "beginning",
}
c = Consumer(config)
c.subscribe([TOPIC])
msg = c.consume()
assert msg[0].key() == b"0"
assert msg[0].value() == b"1"
| TestProduceToTopic |
python | fluentpython__example-code-2e | 05-data-classes/dataclass/resource_repr.py | {
"start": 1337,
"end": 1432
} | class ____(Enum):
BOOK = auto()
EBOOK = auto()
VIDEO = auto()
@dataclass
| ResourceType |
python | Textualize__textual | docs/examples/widgets/data_table_fixed.py | {
"start": 83,
"end": 645
} | class ____(App):
CSS = "DataTable {height: 1fr}"
def compose(self) -> ComposeResult:
yield DataTable()
def on_mount(self) -> None:
table = self.query_one(DataTable)
table.focus()
table.add_columns("A", "B", "C")
for number in range(1, 100):
table.add_row(str(number), str(number * 2), str(number * 3))
table.fixed_rows = 2
table.fixed_columns = 1
table.cursor_type = "row"
table.zebra_stripes = True
app = TableApp()
if __name__ == "__main__":
app.run()
| TableApp |
python | sympy__sympy | sympy/testing/runtests.py | {
"start": 70770,
"end": 74644
} | class ____(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib our OutputChecker class
supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub(r'(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub(r'(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
| SymPyOutputChecker |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Data.py | {
"start": 12075,
"end": 12438
} | class ____(CtrlNode):
"""Calculate the mean of an array across an axis.
"""
nodeName = 'Mean'
uiTemplate = [
('axis', 'intSpin', {'value': 0, 'min': -1, 'max': 1000000}),
]
def processData(self, data):
s = self.stateGroup.state()
ax = None if s['axis'] == -1 else s['axis']
return data.mean(axis=ax)
| Mean |
python | pypa__warehouse | tests/unit/cache/test_services.py | {
"start": 246,
"end": 2278
} | class ____:
def test_interface_matches(self):
assert verifyClass(IQueryResultsCache, RedisQueryResults)
def test_create_service(self):
request = pretend.stub(
registry=pretend.stub(settings={"db_results_cache.url": "redis://"})
)
# Create the service
service = RedisQueryResults.create_service(None, request)
assert isinstance(service, RedisQueryResults)
def test_get_missing(self, query_results_cache_service):
# Attempt to get a value that doesn't exist in the cache
result = query_results_cache_service.get("missing_key")
assert result is None
def test_set_get_simple(self, query_results_cache_service):
# Set a value in the cache
query_results_cache_service.set("test_key", {"foo": "bar"})
# Get the value from the cache
result = query_results_cache_service.get("test_key")
assert result == {"foo": "bar"}
def test_set_get_complex(self, query_results_cache_service):
# Construct a complex object to store in the cache
obj = {
"uuid": uuid.uuid4(),
"datetime": datetime.datetime.now(),
"list": [1, 2, 3],
"dict": {"key": "value"},
}
# Set the complex object in the cache
query_results_cache_service.set("complex_key", obj)
# Get the complex object from the cache
result = query_results_cache_service.get("complex_key")
# Check that the result is the "same" as the original object, except
# for the UUID and datetime, which are now strings
assert result["list"] == obj["list"]
assert result["dict"] == obj["dict"]
assert result["uuid"] == str(obj["uuid"])
assert result["datetime"] == obj["datetime"].isoformat()
assert isinstance(result["list"], list)
assert isinstance(result["dict"], dict)
assert isinstance(result["uuid"], str)
assert isinstance(result["datetime"], str)
| TestRedisQueryResults |
python | kamyu104__LeetCode-Solutions | Python/longest-unequal-adjacent-groups-subsequence-ii.py | {
"start": 68,
"end": 899
} | class ____(object):
def getWordsInLongestSubsequence(self, n, words, groups):
"""
:type n: int
:type words: List[str]
:type groups: List[int]
:rtype: List[str]
"""
def check(s1, s2):
return len(s1) == len(s2) and sum(a != b for a, b in itertools.izip(s1, s2)) == 1
dp = [[1, -1] for _ in xrange(n)]
for i in reversed(xrange(n)):
for j in xrange(i+1, n):
if groups[i] != groups[j] and check(words[j], words[i]):
dp[i] = max(dp[i], [dp[j][0]+1, j])
result = []
i = max(xrange(n), key=lambda x: dp[x])
while i != -1:
result.append(words[i])
i = dp[i][1]
return result
# Time: O(n^2)
# Space: O(n)
import itertools
# dp, backtracing
| Solution |
python | aimacode__aima-python | agents.py | {
"start": 26133,
"end": 27709
} | class ____(Environment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super().__init__()
self.status = {loc_A: random.choice(['Clean', 'Dirty']),
loc_B: random.choice(['Clean', 'Dirty'])}
def thing_classes(self):
return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent, TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""Returns the agent's location, and the location status (Dirty/Clean)."""
return agent.location, self.status[agent.location]
def execute_action(self, agent, action):
"""Change agent's location and/or location's status; track performance.
Score 10 for each dirt cleaned; -1 for each move."""
if action == 'Right':
agent.location = loc_B
agent.performance -= 1
elif action == 'Left':
agent.location = loc_A
agent.performance -= 1
elif action == 'Suck':
if self.status[agent.location] == 'Dirty':
agent.performance += 10
self.status[agent.location] = 'Clean'
def default_location(self, thing):
"""Agents start in either location at random."""
return random.choice([loc_A, loc_B])
# ______________________________________________________________________________
# The Wumpus World
| TrivialVacuumEnvironment |
python | django__django | tests/admin_views/admin.py | {
"start": 11609,
"end": 11749
} | class ____(admin.ModelAdmin):
list_display = ("name", "released")
list_editable = ("released",)
ordering = ("name",)
| VodcastAdmin |
python | google__jax | tests/pallas/pallas_test.py | {
"start": 90174,
"end": 92751
} | class ____(PallasBaseTest):
def test_simple_symbolic_matmul_export(self):
if jtu.test_device_matches(["gpu"]):
self.skipTest("Not supported on GPU.")
def sym_matmul(x, y, symbolic_grid):
symbolic_grid = symbolic_grid.shape[0]
symbolic_x_0 = x.shape[0] // symbolic_grid
symbolic_y_1 = y.shape[1] // symbolic_grid
def x_ref_block_spec_mapping(i, j):
return (i, 0)
def y_ref_block_spec_mapping(i, j):
return (0, j)
def sym_matmul_kernel(x_ref, y_ref, z_ref):
z_ref[...] = x_ref[...] @ y_ref[...]
return pl.pallas_call(
sym_matmul_kernel,
out_shape=jax.ShapeDtypeStruct((symbolic_x_0, symbolic_y_1), x.dtype),
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
in_specs=[
pl.BlockSpec(
(symbolic_x_0, x.shape[1]), x_ref_block_spec_mapping
),
pl.BlockSpec(
(y.shape[0], symbolic_y_1),
y_ref_block_spec_mapping,
),
],
out_specs=pl.BlockSpec(
(symbolic_x_0, symbolic_y_1),
lambda i, j: (i, j),
),
grid=(symbolic_grid, symbolic_grid),
),
)(x, y)
a, b, c, d, e = jax.export.symbolic_shape(
"m_dim, k_dim, n_dim, grid_size, unused_dim",
constraints=(
"mod(floordiv(m_dim, grid_size), 8) == 0",
"mod(k_dim, 128) == 0",
"mod(floordiv(n_dim, grid_size), 128) == 0",
),
)
x = jax.ShapeDtypeStruct((a, b), jax.numpy.float32)
y = jax.ShapeDtypeStruct((b, c), jax.numpy.float32)
dummy_d = jax.ShapeDtypeStruct((d, e), jax.numpy.float32)
exported_module = pl.lower_as_mlir(
jax.jit(sym_matmul), x, y, dummy_d, dynamic_shapes=True
)
assert exported_module is not None
self.assertIn(
"%arg0: tensor<?x?xf32> loc(unknown), %arg1: tensor<?x?xf32>"
" loc(unknown), %arg2: tensor<?x?xf32>",
str(exported_module),
)
x = jax.ShapeDtypeStruct((128, 1024), jax.numpy.float32)
y = jax.ShapeDtypeStruct((1024, 512), jax.numpy.float32)
dummy_d = jax.ShapeDtypeStruct((1, 1), jax.numpy.float32)
exported_module = pl.lower_as_mlir(
jax.jit(sym_matmul), x, y, dummy_d, dynamic_shapes=False
)
assert exported_module is not None
self.assertIn(
"call @sym_matmul(%arg0, %arg1)",
str(exported_module),
)
| SymbolicPallasTest |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_integration.py | {
"start": 145,
"end": 2082
} | class ____:
"""Integration tests for the main CLI entry point."""
def setup_method(self):
"""Set up test fixtures."""
self.runner = CliRunner()
def test_main_help_command(self):
"""Test that main CLI help works."""
result = self.runner.invoke(main, ["--help"])
assert result.exit_code == 0
assert "Dagster documentation tools" in result.output
assert "ls" in result.output
assert "check" in result.output
assert "watch" in result.output
def test_main_ls_symbols_integration(self):
"""Test complete ls symbols command through main CLI."""
result = self.runner.invoke(main, ["ls", "symbols", "--package", "dagster"])
assert result.exit_code == 0
assert "dagster.Component" in result.output
def test_main_check_docstrings_symbol_integration(self):
"""Test complete check docstrings command through main CLI."""
result = self.runner.invoke(main, ["check", "docstrings", "--symbol", "dagster.asset"])
assert result.exit_code == 0
assert "Validating docstring for: dagster.asset" in result.output
def test_main_check_docstrings_package_integration(self):
"""Test complete check docstrings package command through main CLI."""
result = self.runner.invoke(
main, ["check", "docstrings", "--package", "automation.dagster_docs"]
)
assert result.exit_code == 0
assert "Validating" in result.output
assert "public symbols in automation.dagster_docs" in result.output
def test_main_invalid_command_fails(self):
"""Test that invalid commands fail gracefully."""
result = self.runner.invoke(main, ["invalid-command"])
assert result.exit_code == 2 # Click's standard exit code for usage errors
assert "No such command" in result.output or "Usage:" in result.output
| TestMainCLIIntegration |
python | huggingface__transformers | src/transformers/data/processors/utils.py | {
"start": 1758,
"end": 2793
} | class ____:
"""
A single set of features of data. Property names are the same names as the corresponding inputs to a model.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
tokens.
token_type_ids: (Optional) Segment token indices to indicate first and second
portions of the inputs. Only some models use them.
label: (Optional) Label corresponding to the input. Int for classification problems,
float for regression problems.
"""
input_ids: list[int]
attention_mask: list[int] | None = None
token_type_ids: list[int] | None = None
label: int | float | None = None
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(dataclasses.asdict(self)) + "\n"
| InputFeatures |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/connectors/test_callback_connector.py | {
"start": 12336,
"end": 12489
} | class ____(Callback):
def state_dict(self):
return {"state": 1}
# Test with multiple stateful callbacks with unique state keys
| StatefulCallback |
python | huggingface__transformers | src/transformers/models/canine/modeling_canine.py | {
"start": 23121,
"end": 23701
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
| CanineIntermediate |
python | PrefectHQ__prefect | tests/test_futures.py | {
"start": 10442,
"end": 12168
} | class ____:
def test_resolve_futures_to_results_with_no_futures(self):
expr = [1, 2, 3]
result = resolve_futures_to_results(expr)
assert result == [1, 2, 3]
@pytest.mark.parametrize("_type", [list, tuple, set])
def test_resolve_futures_transforms_future_in_listlike_type(self, _type):
future = MockFuture(data="foo")
result = resolve_futures_to_results(_type(["a", future, "b"]))
assert result == _type(["a", "foo", "b"])
@pytest.mark.parametrize("_type", [dict, OrderedDict])
def test_resolve_futures_transforms_future_in_dictlike_type(self, _type):
key_future = MockFuture(data="foo")
value_future = MockFuture(data="bar")
result = resolve_futures_to_results(
_type([("a", 1), (key_future, value_future), ("b", 2)])
)
assert result == _type([("a", 1), ("foo", "bar"), ("b", 2)])
def test_resolve_futures_transforms_future_in_dataclass(self):
@dataclass
class Foo:
a: int
foo: str
b: int = 2
future = MockFuture(data="bar")
assert resolve_futures_to_results(Foo(a=1, foo=future)) == Foo(
a=1, foo="bar", b=2
)
def test_resolves_futures_in_nested_collections(self):
@dataclass
class Foo:
foo: str
nested_list: list
nested_dict: dict
future = MockFuture(data="bar")
assert resolve_futures_to_results(
Foo(foo=future, nested_list=[[future]], nested_dict={"key": [future]})
) == Foo(
foo="bar",
nested_list=[["bar"]],
nested_dict={"key": ["bar"]},
)
| TestResolveFuturesToResults |
python | pydantic__pydantic | pydantic/v1/env_settings.py | {
"start": 6076,
"end": 11232
} | class ____:
__slots__ = ('env_file', 'env_file_encoding', 'env_nested_delimiter', 'env_prefix_len')
def __init__(
self,
env_file: Optional[DotenvType],
env_file_encoding: Optional[str],
env_nested_delimiter: Optional[str] = None,
env_prefix_len: int = 0,
):
self.env_file: Optional[DotenvType] = env_file
self.env_file_encoding: Optional[str] = env_file_encoding
self.env_nested_delimiter: Optional[str] = env_nested_delimiter
self.env_prefix_len: int = env_prefix_len
def __call__(self, settings: BaseSettings) -> Dict[str, Any]: # noqa C901
"""
Build environment variables suitable for passing to the Model.
"""
d: Dict[str, Any] = {}
if settings.__config__.case_sensitive:
env_vars: Mapping[str, Optional[str]] = os.environ
else:
env_vars = {k.lower(): v for k, v in os.environ.items()}
dotenv_vars = self._read_env_files(settings.__config__.case_sensitive)
if dotenv_vars:
env_vars = {**dotenv_vars, **env_vars}
for field in settings.__fields__.values():
env_val: Optional[str] = None
for env_name in field.field_info.extra['env_names']:
env_val = env_vars.get(env_name)
if env_val is not None:
break
is_complex, allow_parse_failure = self.field_is_complex(field)
if is_complex:
if env_val is None:
# field is complex but no value found so far, try explode_env_vars
env_val_built = self.explode_env_vars(field, env_vars)
if env_val_built:
d[field.alias] = env_val_built
else:
# field is complex and there's a value, decode that as JSON, then add explode_env_vars
try:
env_val = settings.__config__.parse_env_var(field.name, env_val)
except ValueError as e:
if not allow_parse_failure:
raise SettingsError(f'error parsing env var "{env_name}"') from e
if isinstance(env_val, dict):
d[field.alias] = deep_update(env_val, self.explode_env_vars(field, env_vars))
else:
d[field.alias] = env_val
elif env_val is not None:
# simplest case, field is not complex, we only need to add the value if it was found
d[field.alias] = env_val
return d
def _read_env_files(self, case_sensitive: bool) -> Dict[str, Optional[str]]:
env_files = self.env_file
if env_files is None:
return {}
if isinstance(env_files, (str, os.PathLike)):
env_files = [env_files]
dotenv_vars = {}
for env_file in env_files:
env_path = Path(env_file).expanduser()
if env_path.is_file():
dotenv_vars.update(
read_env_file(env_path, encoding=self.env_file_encoding, case_sensitive=case_sensitive)
)
return dotenv_vars
def field_is_complex(self, field: ModelField) -> Tuple[bool, bool]:
"""
Find out if a field is complex, and if so whether JSON errors should be ignored
"""
if lenient_issubclass(field.annotation, JsonWrapper):
return False, False
if field.is_complex():
allow_parse_failure = False
elif is_union(get_origin(field.type_)) and field.sub_fields and any(f.is_complex() for f in field.sub_fields):
allow_parse_failure = True
else:
return False, False
return True, allow_parse_failure
def explode_env_vars(self, field: ModelField, env_vars: Mapping[str, Optional[str]]) -> Dict[str, Any]:
"""
Process env_vars and extract the values of keys containing env_nested_delimiter into nested dictionaries.
This is applied to a single field, hence filtering by env_var prefix.
"""
prefixes = [f'{env_name}{self.env_nested_delimiter}' for env_name in field.field_info.extra['env_names']]
result: Dict[str, Any] = {}
for env_name, env_val in env_vars.items():
if not any(env_name.startswith(prefix) for prefix in prefixes):
continue
# we remove the prefix before splitting in case the prefix has characters in common with the delimiter
env_name_without_prefix = env_name[self.env_prefix_len :]
_, *keys, last_key = env_name_without_prefix.split(self.env_nested_delimiter)
env_var = result
for key in keys:
env_var = env_var.setdefault(key, {})
env_var[last_key] = env_val
return result
def __repr__(self) -> str:
return (
f'EnvSettingsSource(env_file={self.env_file!r}, env_file_encoding={self.env_file_encoding!r}, '
f'env_nested_delimiter={self.env_nested_delimiter!r})'
)
| EnvSettingsSource |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_sheet_views7.py | {
"start": 301,
"end": 4197
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_sheet_views() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_views1(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.set_selection("A2")
self.worksheet.freeze_panes(1, 0, 20, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="1" topLeftCell="A21" activePane="bottomLeft" state="frozen"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views2(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.set_selection("A1")
self.worksheet.freeze_panes(1, 0, 20, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="1" topLeftCell="A21" activePane="bottomLeft" state="frozen"/><selection pane="bottomLeft"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views3(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.set_selection("B1")
self.worksheet.freeze_panes(0, 1, 0, 4)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" topLeftCell="E1" activePane="topRight" state="frozen"/><selection pane="topRight" activeCell="B1" sqref="B1"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views4(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(0, 1, 0, 4)
self.worksheet.set_selection("A1")
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" topLeftCell="E1" activePane="topRight" state="frozen"/><selection pane="topRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views5(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.set_selection("G4")
self.worksheet.freeze_panes(3, 6, 6, 8)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="I7" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight" activeCell="G4" sqref="G4"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_sheet_views6(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.set_selection("A1")
self.worksheet.freeze_panes(3, 6, 6, 8)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="I7" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteSheetViews |
python | ZoranPandovski__al-go-rithms | data_structures/Linked_list/Python/lst_el_to_frnt.py | {
"start": 126,
"end": 1441
} | class ____:
def __init__(self):
self.head = None
# Function to add a node
# at the beginning of Linked List
def push(self, data):
new_node = Node(data)
new_node.next = self.head
self.head = new_node
# Function to print nodes in a
# given linked list
def printList(self):
tmp = self.head
while tmp is not None:
print(tmp.data, end=", ")
tmp = tmp.next
print()
# Function to bring the last node to the front
def moveToFront(self):
tmp = self.head
sec_last = None # To maintain the track of
# the second last node
# To check whether we have not received
# the empty list or list with a single node
if not tmp or not tmp.next:
return
# Iterate till the end to get
# the last and second last node
while tmp and tmp.next:
sec_last = tmp
tmp = tmp.next
# point the next of the second
# last node to None
sec_last.next = None
# Make the last node as the first Node
tmp.next = self.head
self.head = tmp
# Driver's Code
if __name__ == '__main__':
llist = LinkedList()
# swap the 2 nodes
llist.push(5)
llist.push(4)
llist.push(3)
llist.push(2)
llist.push(1)
print("Linked List before moving last to front ")
llist.printList()
# Function call
llist.moveToFront()
print("Linked List after moving last to front ")
llist.printList()
| LinkedList |
python | kennethreitz__tablib | src/tablib/packages/dbfpy/fields.py | {
"start": 7225,
"end": 8636
} | class ____(DbfFieldDef):
"""Definition of the numeric field."""
typeCode = "N"
# XXX: now I'm not sure it was a good idea to make a class field
# `defaultValue` instead of a generic method as it was implemented
# previously -- it's ok with all types except number, cuz
# if self.decimalCount is 0, we should return 0 and 0.0 otherwise.
defaultValue = 0
def decodeValue(self, value):
"""Return a number decoded from ``value``.
If decimals is zero, value will be decoded as an integer;
or as a float otherwise.
Return:
Return value is a int (long) or float instance.
"""
value = value.strip(b' \0')
if b'.' in value:
# a float (has decimal separator)
return float(value)
elif value:
# must be an integer
return int(value)
else:
return 0
def encodeValue(self, value):
"""Return string containing encoded ``value``."""
_rv = ("%*.*f" % (self.length, self.decimalCount, value))
if len(_rv) > self.length:
_ppos = _rv.find(".")
if 0 <= _ppos <= self.length:
_rv = _rv[:self.length]
else:
raise ValueError("[%s] Numeric overflow: %s (field width: %i)"
% (self.name, _rv, self.length))
return _rv
| DbfNumericFieldDef |
python | jazzband__django-model-utils | tests/models.py | {
"start": 967,
"end": 1728
} | class ____(models.Model):
# FileField is just a handy descriptor-using field. Refs #6.
non_related_field_using_descriptor = models.FileField(upload_to="test")
related = models.ForeignKey(
InheritanceManagerTestRelated, related_name="imtests", null=True,
on_delete=models.CASCADE)
normal_field = models.TextField()
related_self = models.OneToOneField(
"self", related_name="imtests_self", null=True,
on_delete=models.CASCADE)
objects: ClassVar[InheritanceManager[InheritanceManagerTestParent]] = InheritanceManager()
def __str__(self) -> str:
return "{}({})".format(
self.__class__.__name__[len('InheritanceManagerTest'):],
self.pk,
)
| InheritanceManagerTestParent |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 1778,
"end": 2303
} | class ____(
WeaviatePermissionRequired, total=False
): # Add total=False to make all fields optional
data: Optional[PermissionData]
collections: Optional[PermissionCollections]
nodes: Optional[PermissionNodes]
backups: Optional[PermissionBackup]
replicate: Optional[PermissionReplicate]
roles: Optional[PermissionRoles]
tenants: Optional[PermissionsTenants]
users: Optional[PermissionsUsers]
aliases: Optional[PermissionsAlias]
groups: Optional[PermissionsGroups]
| WeaviatePermission |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 6811,
"end": 6954
} | class ____(PydanticTypeError):
msg_template = 'ensure this value contains valid import path or valid callable: {error_message}'
| PyObjectError |
python | tensorflow__tensorflow | tensorflow/python/saved_model/load_test.py | {
"start": 124018,
"end": 125292
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
def test_no_oom_loading_large_tenor(self, use_cpp_bindings):
# TODO(b/264882686) Fix DeferredInitModuleVariablesTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
if not config.get_soft_device_placement():
self.skipTest("This test only works for soft device placement is on")
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
ncols = 16
nrows = 32
model = _TestModel(rows=nrows, cols=ncols)
x = array_ops.zeros(shape=(ncols, 2), dtype=dtypes.float32)
y = model(x)
save.save(
model,
save_dir,
options=save_options.SaveOptions(
experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES
),
)
loaded_on_cpu = test_load(
path=save_dir,
options=load_options.LoadOptions(
experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES
),
use_cpp_bindings=use_cpp_bindings,
)
loaded_on_gpu = test_load(save_dir)
self.assertIn("CPU", loaded_on_cpu.table.device)
self.assertIn("GPU", loaded_on_gpu.table.device)
if __name__ == "__main__":
test.main()
| SavedModelLoadMemoryTests |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 250138,
"end": 365388
} | class ____(TestCase):
def _test_qconv_unpack_impl(self, qconv_prepack_fn, qconv_unpack_fn, inputs,
strides, i_pads, o_pads, channelwise):
(X_data, W_data, bias_data, groups, transposed) = inputs
(X, (X_scale, X_zero_point, X_qtype)) = X_data
(W, (W_scale, W_zero_point, W_qtype)) = W_data
(bias, (bias_scale, bias_zero_point, bias_qtype)) = bias_data
W = torch.from_numpy(W).float()
bias = torch.from_numpy(bias).float()
if channelwise and transposed:
# currently transposed conv and per-channel per quantization does not work
return
# ONEDNN only supports symmetric quantization of weight and zero output padding
if qengine_is_onednn():
W_zero_point = 0
o_pads = len(o_pads) * [0] if o_pads is not None else None
if channelwise:
if transposed:
output_channels = W.shape[1] # IC OC/G
else:
output_channels = W.shape[0] # OC IC/G
W_scale = torch.tensor([W_scale] * output_channels)
W_zero_point = torch.tensor([W_zero_point] * output_channels)
W_q = torch.quantize_per_channel(
W, scales=W_scale, zero_points=W_zero_point,
axis=int(transposed), dtype=W_qtype)
else:
W_q = torch.quantize_per_tensor(
W, scale=W_scale, zero_point=W_zero_point, dtype=W_qtype)
if isinstance(strides, int):
dilations = [1]
else:
dilations = (1,) * len(strides)
if transposed:
W_packed = qconv_prepack_fn(W_q, bias, strides, i_pads, o_pads,
dilations, groups)
else:
W_packed = qconv_prepack_fn(W_q, bias, strides, i_pads, dilations,
groups)
(W_unpacked, bias) = qconv_unpack_fn(W_packed)
# Assert equal
np.testing.assert_equal(W_q.int_repr().numpy(),
W_unpacked.int_repr().numpy())
if channelwise:
np.testing.assert_array_almost_equal(
np.float32(W_q.q_per_channel_scales().numpy()),
np.float32(W_unpacked.q_per_channel_scales().numpy()),
decimal=4)
np.testing.assert_equal(W_q.q_per_channel_zero_points(
).numpy(), W_unpacked.q_per_channel_zero_points().numpy())
else:
np.testing.assert_equal(np.float32(
W_q.q_scale()), np.float32(W_unpacked.q_scale()))
np.testing.assert_equal(
W_q.q_zero_point(), W_unpacked.q_zero_point())
def _make_qconv_tensors(
self, batch_size, input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, dilations,
X_scale, X_zero_point, W_scale, W_zero_point,
use_bias, use_channelwise, use_transpose,
device=torch.device("cpu"),
input_dtype=torch.quint8,
weight_dtype=torch.qint8,
):
assert not (use_channelwise and use_transpose), \
"Cannot generate channelwise qconv_transpose_tensors "
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
# Padded input size should be at least as big as dilated kernel
kernels = _single(kernels)
strides = _single(strides)
pads = _single(pads)
dilations = _single(dilations)
for i in range(len(kernels)):
assume(input_feature_map_shape[i] + 2 * pads[i]
>= dilations[i] * (kernels[i] - 1) + 1)
W_scale = W_scale * output_channels
W_zero_point = W_zero_point * output_channels
# Resize W_scale and W_zero_points arrays equal to output_channels
W_scale = W_scale[:output_channels]
W_zero_point = W_zero_point[:output_channels]
# For testing, we use small values for weights and for activations
# so that no overflow occurs in vpmaddubsw instruction. If the
# overflow occurs in qconv implementation and if there is no
# overflow
# In reference we can't exactly match the results with reference.
# Please see the comment in qconv implementation file
# aten/src/ATen/native/quantized/cpu/qconv.cpp for more details.
(W_value_min, W_value_max) = (-5, 5)
# the operator expects them in the format
# (output_channels, input_channels/groups, kernel_d, kernel_h, kernel_w)
# (input_channels, output_channels/groups, kernel_d, kernel_h, kernel_w)
if use_transpose:
output_shape = (input_channels, output_channels_per_group,)
else:
output_shape = (output_channels, input_channels_per_group,)
W_init = torch.randint(
W_value_min,
W_value_max,
output_shape + kernels,
device=device,
)
b_init = torch.randint(0, 10, (output_channels,), device=device)
(X_value_min, X_value_max) = (0, 4)
X_init = torch.randint(
X_value_min,
X_value_max,
(batch_size, input_channels,) + input_feature_map_shape,
device=device
)
X = X_scale * (X_init - X_zero_point).float()
if use_channelwise:
W_shape = (-1, 1) + (1,) * len(kernels)
W_scales_tensor = torch.tensor(W_scale, dtype=torch.float, device=device)
W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float, device=device)
W = W_scales_tensor.reshape(*W_shape) * (
W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float()
b = X_scale * W_scales_tensor * b_init.float()
else:
W = W_scale[0] * (W_init - W_zero_point[0]).float()
b = X_scale * W_scale[0] * b_init.float()
X_q = torch.quantize_per_tensor(
X, scale=X_scale, zero_point=X_zero_point, dtype=input_dtype)
if use_channelwise:
W_q = torch.quantize_per_channel(
W, W_scales_tensor, W_zero_points_tensor.long(), 0,
dtype=weight_dtype)
else:
W_q = torch.quantize_per_tensor(
W, scale=W_scale[0], zero_point=W_zero_point[0],
dtype=weight_dtype)
bias_float = b if use_bias else None
return (X, W), (X_q, W_q), bias_float
def _test_qconv_impl(
self, qconv_fn, qconv_prepack_fn, conv_op, batch_size,
input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, o_pads,
dilations, X_scale, X_zero_point, W_scale, W_zero_point, Y_scale,
Y_zero_point, use_bias, post_op, use_channelwise, use_transpose,
device=torch.device("cpu"),
input_dtype=torch.quint8,
weight_dtype=torch.qint8,
output_dtype=torch.quint8,
X2_scale=1.0,
X2_zero_point=128
):
# ONEDNN only supports symmetric quantization of weight
if qengine_is_onednn() and W_zero_point is not None:
W_zero_point = len(W_zero_point) * [0]
(X, W), (X_q, W_q), bias_float = self._make_qconv_tensors(
batch_size, input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels,
strides, pads, dilations, X_scale, X_zero_point, W_scale,
W_zero_point, use_bias, use_channelwise, use_transpose,
device=device, input_dtype=input_dtype, weight_dtype=weight_dtype)
if bias_float is not None:
bias_float = bias_float.to(device)
# Assign weights
W = W_q.dequantize()
X = X_q.dequantize()
conv_op.weight = torch.nn.Parameter(W, requires_grad=False)
conv_op.bias = torch.nn.Parameter(
bias_float, requires_grad=False) if use_bias else None
result_ref = conv_op(X)
if post_op == 'relu':
assert not use_transpose, "Cannot fuse ReLU with ConvTranspose"
relu = torch.nn.ReLU()
result_ref = relu(result_ref)
elif post_op == 'add':
(X_value_min, X_value_max) = (0, 4)
X2_init = torch.randint(
X_value_min,
X_value_max,
result_ref.size(),
device=device
)
X2 = X2_scale * (X2_init - X2_zero_point).float()
X2_q = torch.quantize_per_tensor(
X2, scale=X2_scale, zero_point=X2_zero_point, dtype=input_dtype)
result_ref = result_ref + X2
elif post_op == 'add_relu':
(X_value_min, X_value_max) = (0, 4)
X2_init = torch.randint(
X_value_min,
X_value_max,
result_ref.size(),
device=device
)
X2 = X2_scale * (X2_init - X2_zero_point).float()
X2_q = torch.quantize_per_tensor(
X2, scale=X2_scale, zero_point=X2_zero_point, dtype=input_dtype)
result_ref = result_ref + X2
relu = torch.nn.ReLU()
result_ref = relu(result_ref)
# Quantize reference results for comparison
result_ref_q = torch.quantize_per_tensor(
result_ref, scale=Y_scale, zero_point=Y_zero_point,
dtype=output_dtype)
if qconv_prepack_fn is not None:
if use_transpose:
W_prepack = qconv_prepack_fn(
W_q, bias_float, strides, pads, o_pads, dilations, groups)
else:
W_prepack = qconv_prepack_fn(
W_q, bias_float, strides, pads, dilations, groups)
if post_op == 'add' or post_op == 'add_relu':
Y_q = qconv_fn(
X_q,
X2_q,
W_prepack,
Y_scale,
Y_zero_point,
)
else:
Y_q = qconv_fn(
X_q,
W_prepack,
Y_scale,
Y_zero_point,
)
else:
# quantized conv op without prepacking
Y_q = qconv_fn(X_q, W_q, bias_float, strides, pads, dilations, groups, Y_scale, Y_zero_point)
# Make sure the results match
# assert_array_almost_equal compares using the following formula:
# abs(desired-actual) < 1.5 * 10**(-decimal)
# (https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_almost_equal.html)
# We use decimal = 0 to ignore off-by-1 differences between
# reference and test. Off-by-1 differences arise due to the order of
# round and zero_point addition operation, i.e., if addition
# followed by round is used by reference and round followed by
# addition is used by test, the results may differ by 1.
# For example, the result of round(2.5) + 1 is 3 while
# round(2.5 + 1) is 4 assuming the rounding mode is
# round-to-nearest, ties-to-even.
np.testing.assert_array_almost_equal(
result_ref_q.int_repr().cpu().numpy(), Y_q.int_repr().cpu().numpy(), decimal=0,
err_msg=f'''X: {X_q}, W: {W_q}, b: {bias_float}, strides: {strides},
pads: {pads}, o_pads: {o_pads}, dilations: {dilations},
groups: {groups}, y_s: {Y_scale}, y_zp: {Y_zero_point}''')
# Return the quantized data for later reuse
return X_q, W_q, bias_float
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 300),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv2d(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv2d
qconv_prepack = torch.ops.quantized.conv2d_prepack
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
act_qdtypes = [torch.quint8]
# Only qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, None,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "none", use_channelwise, False, input_dtype=X_qdtype, output_dtype=X_qdtype)
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 300),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv2d_relu(
self,
batch_size,
input_channels_per_group,
height,
width,
output_channels_per_group,
groups,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv2d_relu
qconv_prepack = torch.ops.quantized.conv2d_prepack
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
act_qdtypes = [torch.quint8]
# Only qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, None,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "relu", use_channelwise, False, input_dtype=X_qdtype, output_dtype=X_qdtype)
@skipIfNoONEDNN
def test_qconv2d_add(self):
batch_size = 3
groups_list = [1, 10]
input_channels_per_group = 2
output_channels_per_group = 2
height = 10
width = 10
kernel_h = 3
kernel_w = 3
stride_h = 2
stride_w = 2
pad_h = 1
pad_w = 1
dilation = 1
X_scale = 1.5
X_zero_point = 2
W_scale = [1.5]
W_zero_point = [-3]
Y_scale = 4.2
Y_zero_point = 0
use_bias_list = [False, True]
use_channelwise_list = [False, True]
X2_scale = 1.2
X2_zero_point_list = [0, 4]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, X2_zero_point_list)
for groups, use_bias, use_channelwise, X2_zero_point in options:
with override_quantized_engine('onednn'):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv2d_add
qconv_prepack = torch.ops.quantized.conv2d_prepack
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
X_qdtype = torch.quint8
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, None,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "add", use_channelwise, False,
input_dtype=X_qdtype, output_dtype=X_qdtype, X2_scale=X2_scale, X2_zero_point=X2_zero_point)
@skipIfNoONEDNN
def test_qconv2d_add_relu(self):
batch_size = 3
height = 10
width = 10
groups_list = [1, 10]
input_channels_per_group = 2
output_channels_per_group = 2
kernel_h = 3
kernel_w = 3
stride_h = 2
stride_w = 2
pad_h = 1
pad_w = 1
dilation = 1
X_scale = 1.5
X_zero_point = 2
W_scale = [1.5]
W_zero_point = [-3]
Y_scale = 4.2
Y_zero_point = 0
use_bias_list = [False, True]
use_channelwise_list = [False, True]
X2_scale = 1.2
X2_zero_point_list = [0, 4]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, X2_zero_point_list)
for groups, use_bias, use_channelwise, X2_zero_point in options:
with override_quantized_engine('onednn'):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
kernels = (kernel_h, kernel_w)
strides = (stride_h, stride_w)
pads = (pad_h, pad_w)
dilations = (dilation, dilation)
qconv = torch.ops.quantized.conv2d_add_relu
qconv_prepack = torch.ops.quantized.conv2d_prepack
conv_op = torch.nn.Conv2d(
input_channels,
output_channels,
kernels,
strides,
pads,
dilations,
groups,
)
X_qdtype = torch.quint8
self._test_qconv_impl(
qconv, qconv_prepack, conv_op, batch_size,
input_channels_per_group, (height, width),
output_channels_per_group, groups, kernels, strides, pads, None,
dilations, X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "add_relu", use_channelwise, False,
input_dtype=X_qdtype, output_dtype=X_qdtype, X2_scale=X2_scale, X2_zero_point=X2_zero_point)
# TODO: merge this test with test_qconv2d when CUDNN runtime flags becomes available
"""Tests the correctness of quantized 2D convolution cudnn op."""
@given(batch_size=st.integers(1, 3),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
input_channels_per_group=st.integers(1, 32),
height=st.integers(10, 16),
width=st.integers(7, 14),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
output_channels_per_group=st.integers(1, 32),
groups=st.integers(1, 1), # currently padding only supports groups=1
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
# result for dilation == 2 is not correct
# dilation=st.integers(1, 2),
# currently cudnn has only been verified to work for dilation = 1
# TODO: check backend works for dilation > 1
dilation=st.integers(1, 1),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.sampled_from([0]),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(0, 0), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.sampled_from([0]),
use_bias=st.booleans(),
# TODO: enable channelwise
use_channelwise=st.sampled_from([False]))
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
@unittest.skipIf(TEST_ROCM, "not supported on rocm.")
@unittest.skip("not currently working and feature isn't used")
    def test_qconv2d_cudnn(
        self,
        batch_size,
        input_channels_per_group,
        height,
        width,
        output_channels_per_group,
        groups,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
    ):
        """Test quantized 2D convolution on the cudnn backend.

        Runs ``quantized.conv2d`` on CUDA with qint8 activations, weights and
        outputs (the strategies above pin zero points to 0, i.e. symmetric
        int8 quantization) and compares against the fp32 ``nn.Conv2d``
        reference via ``self._test_qconv_impl``.
        """
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        pads = (pad_h, pad_w)
        dilations = (dilation, dilation)
        qconv = torch.ops.quantized.conv2d
        # fp32 reference implementation, moved to CUDA to match the backend
        # under test.
        conv_op = torch.nn.Conv2d(
            input_channels,
            output_channels,
            kernels,
            strides,
            pads,
            dilations,
            groups,
        ).to(torch.device("cuda"))
        # "none" post op, per-tensor quantization (use_channelwise is always
        # False here per the @given strategy), non-transposed conv.
        self._test_qconv_impl(
            qconv, torch.ops.quantized.conv2d_prepack, conv_op, batch_size,
            input_channels_per_group, (height, width),
            output_channels_per_group, groups, kernels, strides, pads, None,
            dilations, X_scale, X_zero_point, W_scale, W_zero_point,
            Y_scale, Y_zero_point, use_bias, "none", use_channelwise, False,
            device=torch.device("cuda"),
            input_dtype=torch.qint8, weight_dtype=torch.qint8, output_dtype=torch.qint8)
@given(batch_size=st.integers(1, 3),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
input_channels_per_group=st.integers(1, 32),
height=st.integers(10, 16),
width=st.integers(7, 14),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
output_channels_per_group=st.integers(1, 32),
groups=st.integers(1, 1), # currently padding only supports groups=1
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
# result for dilation == 2 is not correct
# dilation=st.integers(1, 2),
# currently cudnn has only been verified to work for dilation = 1
# TODO: check backend works for dilation > 1
dilation=st.integers(1, 1),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.sampled_from([0]),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(0, 0), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.sampled_from([0]),
use_bias=st.booleans(),
# TODO: enable channelwise
use_channelwise=st.sampled_from([False]))
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
@unittest.skipIf(TEST_ROCM, "not supported on rocm.")
@unittest.skip("not currently working and feature isn't used")
    def test_qconv2d_relu_cudnn(
        self,
        batch_size,
        input_channels_per_group,
        height,
        width,
        output_channels_per_group,
        groups,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
    ):
        """Test quantized 2D convolution fused with ReLU on the cudnn backend.

        Identical setup to ``test_qconv2d_cudnn`` except the op under test is
        ``quantized.conv2d_relu`` and the reference path applies a "relu"
        post op inside ``self._test_qconv_impl``.
        """
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        pads = (pad_h, pad_w)
        dilations = (dilation, dilation)
        qconv = torch.ops.quantized.conv2d_relu
        # fp32 reference conv on CUDA; ReLU is applied by the test helper.
        conv_op = torch.nn.Conv2d(
            input_channels,
            output_channels,
            kernels,
            strides,
            pads,
            dilations,
            groups,
        ).to(torch.device("cuda"))
        self._test_qconv_impl(
            qconv, torch.ops.quantized.conv2d_prepack, conv_op, batch_size,
            input_channels_per_group, (height, width),
            output_channels_per_group, groups, kernels, strides, pads, None,
            dilations, X_scale, X_zero_point, W_scale, W_zero_point,
            Y_scale, Y_zero_point, use_bias, "relu", use_channelwise, False,
            device=torch.device("cuda"),
            input_dtype=torch.qint8, weight_dtype=torch.qint8, output_dtype=torch.qint8)
@unittest.skip("used for local benchmarking, comment when we want to run it")
def test_benchmark(self):
batch_size = 16
in_channel = 64
out_channel = 64
kernel_size = 3
height = 256
width = 256
print(
"parameters:",
"batch_size:", batch_size,
"in_channel:", in_channel,
"out_channel:", out_channel,
"kernel_size:", kernel_size,
"height:", height,
"width:", width
)
conv = torch.nn.Conv2d(in_channel, out_channel, kernel_size).cuda()
input = torch.randn((batch_size, in_channel, height, width), device='cuda')
weight = conv.weight.detach()
stride = (1, 1)
padding = (0, 0)
dilation = (1, 1)
groups = 1
conv_op = torch.nn.functional.conv2d
# profile
from torch.profiler import profile, ProfilerActivity
def trace_handler(p):
output = p.key_averages().table(sort_by="self_cpu_time_total", row_limit=10)
p.export_chrome_trace("/tmp/trace_" + str(p.step_num) + ".json")
my_schedule = torch.profiler.schedule(
wait=5,
warmup=5,
active=20)
# fp32 benchmark
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=my_schedule,
on_trace_ready=trace_handler) as prof:
for _ in range(30):
conv_op(input, weight, None, stride, padding, dilation, groups)
prof.step()
print("fp32 benchmark result:")
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
# fp16 benchmark
input_fp16 = input.to(torch.float16)
weight_fp16 = input.to(torch.float16)
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=my_schedule,
on_trace_ready=trace_handler) as prof:
for _ in range(30):
conv_op(input_fp16, weight_fp16, None, stride, padding, dilation, groups)
prof.step()
print("fp16 benchmark result:")
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
input_int8 = torch.quantize_per_tensor(input, 1, 0, torch.qint8).contiguous(memory_format=torch.channels_last)
weight_int8 = torch.quantize_per_tensor(weight, 1, 0, torch.qint8).contiguous(memory_format=torch.channels_last)
scale = 1.0
zero_point = 0
conv_op = torch.ops.quantized.conv2d
weight_prepacked = torch.ops.quantized.conv2d_prepack(weight_int8, None, stride, padding, dilation, groups)
with profile(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
schedule=my_schedule,
on_trace_ready=trace_handler) as prof:
for _ in range(30):
conv_op(input_int8, weight_prepacked, scale, zero_point)
prof.step()
print("int8 benchmark result:")
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
"""Tests the correctness of quantized convolution op."""
@override_qengines
    def test_qconv_transpose1d(self):
        """Test quantized 1D transposed convolution (QNNPACK only).

        Iterates a hand-rolled grid of channel/group/kernel/stride/bias
        combinations (no hypothesis), checks the functional
        ``quantized.conv_transpose1d`` op against the fp32
        ``nn.ConvTranspose1d`` reference, and then verifies the
        ``torch.ao.nn.quantized.ConvTranspose1d`` module wrapper produces the
        same quantized output.
        """
        if not qengine_is_qnnpack():
            return  # Currently only the QNNPACK is supported
        if qengine_is_qnnpack() and IS_PPC:
            return  # QNNPACK doesn't support these
        batch_size = 2
        input_channels_per_group_list = [2, 32]
        width = 14
        output_channels_per_group_list = [2, 8]
        groups_list = [1, 3]
        kernel_list = [1, 7]
        stride_list = [1, 2]
        pad = 2
        o_pad = 0
        dilation = 1
        X_scale = 1.2
        X_zero_point = 1
        W_scale = [1.2]
        W_zero_point = [1]
        Y_scale = 4.2
        Y_zero_point = 2
        use_bias_list = [True, False]
        test_cases = itertools.product(
            input_channels_per_group_list, output_channels_per_group_list,
            groups_list, kernel_list, stride_list, use_bias_list)
        for input_channels_per_group, output_channels_per_group, \
                groups, kernel, stride, use_bias in test_cases:
            input_channels = input_channels_per_group * groups
            output_channels = output_channels_per_group * groups
            kernels = (kernel,)
            strides = (stride,)
            pads = (pad,)
            o_pads = (o_pad,)
            dilations = (dilation,)
            qconv = torch.ops.quantized.conv_transpose1d
            qconv_prepack = torch.ops.quantized.conv_transpose1d_prepack
            # fp32 reference implementation.
            conv_op = torch.nn.ConvTranspose1d(
                in_channels=input_channels,
                out_channels=output_channels,
                kernel_size=kernels,
                stride=strides,
                padding=pads,
                output_padding=o_pads,
                groups=groups,
                dilation=dilations,
                bias=use_bias
            )
            act_qdtypes = [torch.quint8]
            # Only qnnpack qengine supports qint8
            if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
                act_qdtypes.append(torch.qint8)
            for X_qdtype in act_qdtypes:
                if X_qdtype == torch.qint8:
                    # qint8 activations require symmetric (zero) weight
                    # zero points.
                    W_zero_point = [0 for i in range(len(W_zero_point))]

                X_q, W_q, bias_float = self._test_qconv_impl(
                    qconv, qconv_prepack, conv_op, batch_size,
                    input_channels_per_group, (width, ),
                    output_channels_per_group, groups, kernels, strides, pads, o_pads,
                    dilations, X_scale, X_zero_point, W_scale, W_zero_point,
                    Y_scale, Y_zero_point, use_bias, post_op="none",
                    use_channelwise=False, use_transpose=True, input_dtype=X_qdtype, output_dtype=X_qdtype)

                # check that this doesn't error
                test_conv = torch.ao.nn.quantized.ConvTranspose1d(input_channels, output_channels, 1)
                test_conv.scale = Y_scale
                test_conv(X_q)

                # Test the module implementation
                qconv_op = torch.ao.nn.quantized.ConvTranspose1d(
                    in_channels=input_channels,
                    out_channels=output_channels,
                    kernel_size=kernels,
                    stride=strides,
                    padding=pads,
                    output_padding=o_pads,
                    groups=groups,
                    dilation=dilations,
                    bias=use_bias
                )
                qconv_op.scale = Y_scale
                qconv_op.zero_point = Y_zero_point
                qconv_op.set_weight_bias(W_q, bias_float)

                # Reference: dequantize -> fp32 conv -> requantize, which the
                # quantized module output must match exactly.
                Y_dq_ref = conv_op(X_q.dequantize())
                Y_q_ref = torch.quantize_per_tensor(Y_dq_ref, scale=Y_scale,
                                                    zero_point=Y_zero_point,
                                                    dtype=X_qdtype)

                Y_q = qconv_op(X_q)
                self.assertEqual(Y_q_ref, Y_q)
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 300),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
o_pad_h=st.integers(0, 2),
o_pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans())
@override_qengines
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
    def test_qconv_transpose2d(
        self,
        batch_size,
        input_channels_per_group,
        height,
        width,
        output_channels_per_group,
        groups,
        kernel_h,
        kernel_w,
        stride_h,
        stride_w,
        pad_h,
        pad_w,
        o_pad_h,
        o_pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias):
        """Test quantized 2D transposed convolution.

        Checks the functional ``quantized.conv_transpose2d`` op against the
        fp32 ``nn.ConvTranspose2d`` reference, then verifies the
        ``torch.ao.nn.quantized.ConvTranspose2d`` module wrapper matches the
        dequantize->fp32-conv->requantize reference exactly.
        """
        if qengine_is_qnnpack() and IS_PPC:
            return  # QNNPACK doesn't support these
        # ONEDNN does not support output paddings
        if qengine_is_onednn() and (o_pad_h, o_pad_w) != (0, 0):
            return
        # ConvTranspose requires output_padding smaller than stride/dilation.
        assume(o_pad_h < stride_h and o_pad_h < dilation)
        assume(o_pad_w < stride_w and o_pad_w < dilation)

        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_h, kernel_w)
        strides = (stride_h, stride_w)
        pads = (pad_h, pad_w)
        o_pads = (o_pad_h, o_pad_w)
        dilations = (dilation, dilation)

        qconv = torch.ops.quantized.conv_transpose2d
        qconv_prepack = torch.ops.quantized.conv_transpose2d_prepack
        # fp32 reference implementation.
        conv_op = torch.nn.ConvTranspose2d(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=kernels,
            stride=strides,
            padding=pads,
            output_padding=o_pads,
            groups=groups,
            dilation=dilations,
            bias=use_bias
        )
        act_qdtypes = [torch.quint8]
        # Only qnnpack qengine supports qint8
        if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
            act_qdtypes.append(torch.qint8)

        for X_qdtype in act_qdtypes:
            if X_qdtype == torch.qint8:
                # qint8 activations require symmetric (zero) weight zero
                # points.
                W_zero_point = [0 for i in range(len(W_zero_point))]

            X_q, W_q, bias_float = self._test_qconv_impl(
                qconv, qconv_prepack, conv_op, batch_size,
                input_channels_per_group, (height, width),
                output_channels_per_group, groups, kernels, strides, pads, o_pads,
                dilations, X_scale, X_zero_point, W_scale, W_zero_point,
                Y_scale, Y_zero_point, use_bias, post_op="none",
                use_channelwise=False, use_transpose=True, input_dtype=X_qdtype, output_dtype=X_qdtype)

            # check that this doesn't error
            test_conv = torch.ao.nn.quantized.ConvTranspose2d(input_channels, output_channels, 1)
            test_conv.scale = Y_scale
            test_conv(X_q)

            # Test the module implementation
            qconv_op = torch.ao.nn.quantized.ConvTranspose2d(
                in_channels=input_channels,
                out_channels=output_channels,
                kernel_size=kernels,
                stride=strides,
                padding=pads,
                output_padding=o_pads,
                groups=groups,
                dilation=dilations,
                bias=use_bias
            )
            qconv_op.scale = Y_scale
            qconv_op.zero_point = Y_zero_point
            qconv_op.set_weight_bias(W_q, bias_float)

            Y_dq_ref = conv_op(X_q.dequantize())
            Y_q_ref = torch.quantize_per_tensor(Y_dq_ref, scale=Y_scale,
                                                zero_point=Y_zero_point,
                                                dtype=X_qdtype)

            Y_q = qconv_op(X_q)
            self.assertEqual(Y_q_ref, Y_q)
"""Tests the correctness of quantized convolution op."""
@given(batch_size=st.integers(1, 3),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
time=st.integers(2, 5),
height=st.integers(10, 16),
width=st.integers(7, 14),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16, 32]),
groups=st.integers(1, 300),
kernel_t=st.integers(1, 7),
kernel_h=st.integers(1, 7),
kernel_w=st.integers(1, 7),
stride_t=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_t=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
o_pad_t=st.integers(0, 2),
o_pad_h=st.integers(0, 2),
o_pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans())
@override_qengines
@unittest.skip(
"this is broken without changes to any relevant code, "
"we need to remove hypothesis testing in CI")
    def test_qconv_transpose3d(
        self,
        batch_size,
        input_channels_per_group,
        time,
        height,
        width,
        output_channels_per_group,
        groups,
        kernel_t,
        kernel_h,
        kernel_w,
        stride_t,
        stride_h,
        stride_w,
        pad_t,
        pad_h,
        pad_w,
        o_pad_t,
        o_pad_h,
        o_pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias):
        """Test quantized 3D transposed convolution.

        Same structure as the 2D transpose test: functional op checked
        against fp32 ``nn.ConvTranspose3d`` via ``_test_qconv_impl``, then
        the ``torch.ao.nn.quantized.ConvTranspose3d`` module is checked
        against the dequantize->fp32-conv->requantize reference. Activations
        are quint8 only here.
        """
        if qengine_is_qnnpack():
            return  # QNNPACK doesn't support this
        # ONEDNN doesn't support output paddings
        if qengine_is_onednn() and (o_pad_t, o_pad_h, o_pad_w) != (0, 0, 0):
            return
        # ConvTranspose requires output_padding smaller than stride OR
        # dilation along each dim.
        assume(o_pad_t < stride_t or o_pad_t < dilation)
        assume(o_pad_h < stride_h or o_pad_h < dilation)
        assume(o_pad_w < stride_w or o_pad_w < dilation)

        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_t, kernel_h, kernel_w)
        strides = (stride_t, stride_h, stride_w)
        pads = (pad_t, pad_h, pad_w)
        o_pads = (o_pad_t, o_pad_h, o_pad_w)
        dilations = (dilation, dilation, dilation)

        qconv = torch.ops.quantized.conv_transpose3d
        qconv_prepack = torch.ops.quantized.conv_transpose3d_prepack
        # fp32 reference implementation.
        conv_op = torch.nn.ConvTranspose3d(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=kernels,
            stride=strides,
            padding=pads,
            output_padding=o_pads,
            groups=groups,
            dilation=dilations,
            bias=use_bias
        )
        X_q, W_q, bias_float = self._test_qconv_impl(
            qconv, qconv_prepack, conv_op, batch_size,
            input_channels_per_group, (time, height, width),
            output_channels_per_group, groups, kernels, strides, pads, o_pads,
            dilations, X_scale, X_zero_point, W_scale, W_zero_point,
            Y_scale, Y_zero_point, use_bias, post_op="none",
            use_channelwise=False, use_transpose=True)

        # check that this doesn't error
        test_conv = torch.ao.nn.quantized.ConvTranspose3d(input_channels, output_channels, 1)
        test_conv.scale = Y_scale
        test_conv(X_q)

        # Test the module implementation
        qconv_op = torch.ao.nn.quantized.ConvTranspose3d(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=kernels,
            stride=strides,
            padding=pads,
            output_padding=o_pads,
            groups=groups,
            dilation=dilations,
            bias=use_bias
        )
        qconv_op.scale = Y_scale
        qconv_op.zero_point = Y_zero_point
        qconv_op.set_weight_bias(W_q, bias_float)

        Y_dq_ref = conv_op(X_q.dequantize())
        Y_q_ref = torch.quantize_per_tensor(Y_dq_ref, scale=Y_scale,
                                            zero_point=Y_zero_point,
                                            dtype=torch.quint8)

        Y_q = qconv_op(X_q)
        self.assertEqual(Y_q_ref, Y_q)
@given(
inputs=hu.tensor_conv(
spatial_dim=1, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 4),
output_channels_per_group_range=(1, 4), feature_map_range=(4, 8),
kernel_range=(1, 4), max_groups=4,
can_be_transposed=False,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride=st.integers(1, 3),
pad=st.integers(1, 2),
o_pad=st.integers(1, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv1d_unpack(self, inputs, stride, pad, o_pad, channelwise):
transposed = inputs[-1]
qengine = torch.backends.quantized.engine
if qengine not in supported_qengines:
return
if qengine == 'qnnpack':
assume(not channelwise) # QNNPACK doesn't support channelwise
else:
assume(not transposed) # Only QNNPACK supports transposed conv
if transposed:
qconv_prepack = torch.ops.quantized.conv_transpose1d_prepack
qconv_unpack = torch.ops.quantized.conv_transpose1d_unpack
else:
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv_unpack = torch.ops.quantized.conv1d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs, [stride],
[pad], [o_pad], channelwise)
@given(
inputs=hu.tensor_conv(
spatial_dim=2, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 4),
output_channels_per_group_range=(1, 4), feature_map_range=(4, 8),
kernel_range=(1, 4), max_groups=4,
can_be_transposed=True,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride=st.integers(1, 3),
pad=st.integers(0, 2),
o_pad=st.integers(0, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv2d_unpack(self, inputs, stride, pad, o_pad, channelwise):
transposed = inputs[-1]
qengine = torch.backends.quantized.engine
if qengine not in supported_qengines:
return
if qengine == 'qnnpack':
assume(not channelwise) # QNNPACK doesn't support channelwise
if transposed:
qconv_prepack = torch.ops.quantized.conv_transpose2d_prepack
qconv_unpack = torch.ops.quantized.conv_transpose2d_unpack
else:
qconv_prepack = torch.ops.quantized.conv2d_prepack
qconv_unpack = torch.ops.quantized.conv2d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs, [stride, stride],
[pad, pad], [o_pad, o_pad], channelwise)
"""Tests the correctness of quantized 1D convolution op."""
@given(batch_size=st.integers(1, 6),
input_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
output_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
groups=st.integers(1, 3),
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv1d(
self,
batch_size,
input_channels_per_group,
output_channels_per_group,
groups,
length,
kernel,
stride,
pad,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
conv1d = torch.nn.Conv1d(
input_channels,
output_channels,
kernel,
stride,
pad,
dilation,
groups,
)
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv = torch.ops.quantized.conv1d
act_qdtypes = [torch.quint8]
# Only qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
self._test_qconv_impl(
qconv, qconv_prepack, conv1d, batch_size,
input_channels_per_group, (length, ),
output_channels_per_group, groups, kernel, [stride], [pad], None,
[dilation], X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "none", use_channelwise, False,
input_dtype=X_qdtype, output_dtype=X_qdtype)
@given(batch_size=st.integers(1, 6),
input_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
output_channels_per_group=st.sampled_from((2, 4, 5, 8, 16, 32)),
groups=st.integers(1, 3),
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans())
@override_qengines
def test_qconv1d_relu(
self,
batch_size,
input_channels_per_group,
output_channels_per_group,
groups,
length,
kernel,
stride,
pad,
dilation,
X_scale,
X_zero_point,
W_scale,
W_zero_point,
Y_scale,
Y_zero_point,
use_bias,
use_channelwise,
):
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
if torch.backends.quantized.engine == 'qnnpack':
use_channelwise = False
conv1d = torch.nn.Conv1d(
input_channels,
output_channels,
kernel,
stride,
pad,
dilation,
groups,
)
qconv_prepack = torch.ops.quantized.conv1d_prepack
qconv = torch.ops.quantized.conv1d_relu
act_qdtypes = [torch.quint8]
# Only qnnpack qengine supports qint8
if qengine_is_qnnpack() and torch.backends.xnnpack.enabled:
act_qdtypes.append(torch.qint8)
for X_qdtype in act_qdtypes:
if X_qdtype == torch.qint8:
W_zero_point = [0 for i in range(len(W_zero_point))]
self._test_qconv_impl(
qconv, qconv_prepack, conv1d, batch_size,
input_channels_per_group, (length, ),
output_channels_per_group, groups, kernel, [stride], [pad], None,
[dilation], X_scale, X_zero_point, W_scale, W_zero_point,
Y_scale, Y_zero_point, use_bias, "relu", use_channelwise, False,
input_dtype=X_qdtype, output_dtype=X_qdtype)
# TODO: merge this test with test_qconv1d when CUDNN runtime flags becomes available
"""Tests the correctness of quantized 1D convolution cudnn op."""
@given(batch_size=st.integers(1, 6),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
input_channels_per_group=st.integers(1, 32),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
output_channels_per_group=st.integers(1, 32),
groups=st.integers(1, 1), # currently padding only supports groups=1
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
# currently cudnn has only been verified to work for dilation = 1
# TODO: check backend works for dilation > 1
dilation=st.integers(1, 1),
X_scale=st.floats(1.2, 1.6),
# currently conv cudnn backend is only implemented for int8 symmetric
X_zero_point=st.sampled_from([0]),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
# currently conv cudnn backend is only implemented for int8 symmetric
W_zero_point=st.lists(st.integers(0, 0), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
# currently conv cudnn backend is only implemented for int8 symmetric
Y_zero_point=st.sampled_from([0]),
use_bias=st.booleans(),
# TODO: enable channelwise
use_channelwise=st.sampled_from([False]))
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
@unittest.skipIf(TEST_ROCM, "not supported on rocm.")
@unittest.skip("not currently working and feature isn't used")
    def test_qconv1d_cudnn(
        self,
        batch_size,
        input_channels_per_group,
        output_channels_per_group,
        groups,
        length,
        kernel,
        stride,
        pad,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
    ):
        """Test quantized 1D convolution on the cudnn backend.

        Runs ``quantized.conv1d`` on CUDA with qint8 activations, weights and
        outputs (symmetric int8 — the strategies above pin zero points to 0)
        against the fp32 ``nn.Conv1d`` reference.
        """
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups

        # fp32 reference implementation on CUDA.
        conv1d = torch.nn.Conv1d(
            input_channels,
            output_channels,
            kernel,
            stride,
            pad,
            dilation,
            groups,
        ).to(torch.device("cuda"))
        qconv_prepack = torch.ops.quantized.conv1d_prepack
        qconv = torch.ops.quantized.conv1d

        self._test_qconv_impl(
            qconv, qconv_prepack, conv1d, batch_size,
            input_channels_per_group, (length, ),
            output_channels_per_group, groups, kernel, [stride], [pad], None,
            [dilation], X_scale, X_zero_point, W_scale, W_zero_point,
            Y_scale, Y_zero_point, use_bias, "none", use_channelwise, False,
            device=torch.device("cuda"),
            input_dtype=torch.qint8, weight_dtype=torch.qint8, output_dtype=torch.qint8)
@given(batch_size=st.integers(1, 6),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
input_channels_per_group=st.integers(1, 32),
# cudnn only supports multiples of 4, but we have explicitly added padding on the backend
output_channels_per_group=st.integers(1, 32),
groups=st.integers(1, 1), # currently padding only supports groups=1
length=st.integers(4, 16),
kernel=st.integers(1, 7),
stride=st.integers(1, 2),
pad=st.integers(0, 2),
# currently cudnn has only been verified to work for dilation = 1
# TODO: check backend works for dilation > 1
dilation=st.integers(1, 1),
X_scale=st.floats(1.2, 1.6),
# currently conv cudnn backend is only implemented for int8 symmetric
X_zero_point=st.sampled_from([0]),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
# currently conv cudnn backend is only implemented for int8 symmetric
W_zero_point=st.lists(st.integers(0, 0), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
# currently conv cudnn backend is only implemented for int8 symmetric
Y_zero_point=st.sampled_from([0]),
use_bias=st.booleans(),
# TODO: enable channelwise
use_channelwise=st.sampled_from([False]))
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDNN, "cudnn is not enabled.")
@unittest.skipIf(not SM80OrLater, "requires sm80 or later.")
@unittest.skipIf(TEST_ROCM, "not supported on rocm.")
@unittest.skip("not currently working and feature isn't used")
    def test_qconv1d_relu_cudnn(
        self,
        batch_size,
        input_channels_per_group,
        output_channels_per_group,
        groups,
        length,
        kernel,
        stride,
        pad,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
    ):
        """Test quantized 1D convolution fused with ReLU on the cudnn backend.

        Identical to ``test_qconv1d_cudnn`` except the op under test is
        ``quantized.conv1d_relu`` and a "relu" post op is applied on the
        reference path.
        """
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups

        # fp32 reference implementation on CUDA; ReLU is applied by the
        # test helper.
        conv1d = torch.nn.Conv1d(
            input_channels,
            output_channels,
            kernel,
            stride,
            pad,
            dilation,
            groups,
        ).to(torch.device("cuda"))
        qconv_prepack = torch.ops.quantized.conv1d_prepack
        qconv = torch.ops.quantized.conv1d_relu

        self._test_qconv_impl(
            qconv, qconv_prepack, conv1d, batch_size,
            input_channels_per_group, (length, ),
            output_channels_per_group, groups, kernel, [stride], [pad], None,
            [dilation], X_scale, X_zero_point, W_scale, W_zero_point,
            Y_scale, Y_zero_point, use_bias, "relu", use_channelwise, False,
            device=torch.device("cuda"),
            input_dtype=torch.qint8, weight_dtype=torch.qint8, output_dtype=torch.qint8)
@given(batch_size=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
D=st.integers(4, 8),
H=st.integers(4, 8),
W=st.integers(4, 8),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
groups=st.integers(1, 3),
kernel_d=st.integers(1, 4),
kernel_h=st.integers(1, 4),
kernel_w=st.integers(1, 4),
stride_d=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
    def test_qconv3d(
        self,
        batch_size,
        input_channels_per_group,
        D,
        H,
        W,
        output_channels_per_group,
        groups,
        kernel_d,
        kernel_h,
        kernel_w,
        stride_d,
        stride_h,
        stride_w,
        pad_d,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
        qengine
    ):
        """Test quantized 3D convolution against the fp32 nn.Conv3d reference.

        ``qengine`` is drawn by hypothesis rather than via
        ``@override_qengines``; unsupported engines are skipped.
        """
        if qengine not in supported_qengines:
            return

        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_d, kernel_h, kernel_w)
        strides = (stride_d, stride_h, stride_w)
        pads = (pad_d, pad_h, pad_w)
        dilations = (dilation, dilation, dilation)

        with override_quantized_engine(qengine):
            qconv = torch.ops.quantized.conv3d
            qconv_prepack = torch.ops.quantized.conv3d_prepack
            # fp32 reference implementation.
            conv_op = torch.nn.Conv3d(
                input_channels,
                output_channels,
                kernels,
                strides,
                pads,
                dilations,
                groups,
            )
            self._test_qconv_impl(
                qconv, qconv_prepack, conv_op, batch_size,
                input_channels_per_group, (D, H, W), output_channels_per_group,
                groups, kernels, strides, pads, None, dilations, X_scale,
                X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
                use_bias, "none", use_channelwise, use_transpose=False)
@given(batch_size=st.integers(1, 4),
input_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
D=st.integers(4, 8),
H=st.integers(4, 8),
W=st.integers(4, 8),
output_channels_per_group=st.sampled_from([2, 4, 5, 8, 16]),
groups=st.integers(1, 3),
kernel_d=st.integers(1, 4),
kernel_h=st.integers(1, 4),
kernel_w=st.integers(1, 4),
stride_d=st.integers(1, 2),
stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(0, 2),
pad_h=st.integers(0, 2),
pad_w=st.integers(0, 2),
dilation=st.integers(1, 2),
X_scale=st.floats(1.2, 1.6),
X_zero_point=st.integers(0, 4),
W_scale=st.lists(st.floats(0.2, 1.6), min_size=1, max_size=2),
W_zero_point=st.lists(st.integers(-5, 5), min_size=1, max_size=2),
Y_scale=st.floats(4.2, 5.6),
Y_zero_point=st.integers(0, 4),
use_bias=st.booleans(),
use_channelwise=st.booleans(),
qengine=st.sampled_from(("qnnpack", "fbgemm")))
    def test_qconv3d_relu(
        self,
        batch_size,
        input_channels_per_group,
        D,
        H,
        W,
        output_channels_per_group,
        groups,
        kernel_d,
        kernel_h,
        kernel_w,
        stride_d,
        stride_h,
        stride_w,
        pad_d,
        pad_h,
        pad_w,
        dilation,
        X_scale,
        X_zero_point,
        W_scale,
        W_zero_point,
        Y_scale,
        Y_zero_point,
        use_bias,
        use_channelwise,
        qengine
    ):
        """Test quantized 3D convolution fused with ReLU.

        Identical setup to ``test_qconv3d`` except the op under test is
        ``quantized.conv3d_relu`` and a "relu" post op is applied on the
        reference path.
        """
        if qengine not in supported_qengines:
            return

        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        kernels = (kernel_d, kernel_h, kernel_w)
        strides = (stride_d, stride_h, stride_w)
        pads = (pad_d, pad_h, pad_w)
        dilations = (dilation, dilation, dilation)

        with override_quantized_engine(qengine):
            qconv = torch.ops.quantized.conv3d_relu
            qconv_prepack = torch.ops.quantized.conv3d_prepack
            # fp32 reference implementation; ReLU is applied by the test
            # helper.
            conv_op = torch.nn.Conv3d(
                input_channels,
                output_channels,
                kernels,
                strides,
                pads,
                dilations,
                groups,
            )
            self._test_qconv_impl(
                qconv, qconv_prepack, conv_op, batch_size,
                input_channels_per_group, (D, H, W), output_channels_per_group,
                groups, kernels, strides, pads, None, dilations, X_scale,
                X_zero_point, W_scale, W_zero_point, Y_scale, Y_zero_point,
                use_bias, "relu", use_channelwise, use_transpose=False)
"""Tests the correctness of the quantized::qconv3d_unpack op."""
@given(
inputs=hu.tensor_conv(
spatial_dim=3, batch_size_range=(1, 3),
input_channels_per_group_range=(1, 3),
output_channels_per_group_range=(1, 3), feature_map_range=(3, 6),
kernel_range=(1, 3), max_groups=3,
qparams=[hu.qparams(dtypes=torch.quint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint8,
zero_point_min=0,
zero_point_max=0),
hu.qparams(dtypes=torch.qint32,
zero_point_min=0,
zero_point_max=0)]),
stride_d=st.integers(1, 2), stride_h=st.integers(1, 2),
stride_w=st.integers(1, 2),
pad_d=st.integers(1, 2), pad_h=st.integers(1, 2),
pad_w=st.integers(1, 2),
o_pad=st.integers(0, 2),
channelwise=st.booleans())
@override_qengines
def test_qconv3d_unpack(
self, inputs, stride_d, stride_h, stride_w, pad_d, pad_h, pad_w, o_pad,
channelwise
):
if qengine_is_qnnpack():
return # QNNPACK doesn't support this
transposed = inputs[-1]
if transposed:
qconv_prepack = torch.ops.quantized.conv_transpose3d_prepack
qconv_unpack = torch.ops.quantized.conv_transpose3d_unpack
else:
qconv_prepack = torch.ops.quantized.conv3d_prepack
qconv_unpack = torch.ops.quantized.conv3d_unpack
self._test_qconv_unpack_impl(
qconv_prepack, qconv_unpack, inputs,
(stride_d, stride_h, stride_w), (pad_d, pad_h, pad_w), (o_pad, o_pad, o_pad),
channelwise)
def test_conv_reorder_issue_onednn(self):
""" Ensure reorder failure issue in conv is fixed for onednn backend.
Onednn backend used to encounter reorder failure
when running conv with dynamic input shapes.
Solved by https://github.com/pytorch/pytorch/pull/86876
"""
if 'onednn' not in supported_qengines:
return
with override_quantized_engine('onednn'):
bs = 1
ic, oc = 128, 512
kh, kw = 1, 1
bias = None
strides, paddings, dilates = (1, 1), (0, 0), (1, 1)
for groups in [1, 2]:
ih, iw = 28, 28
w = torch.randn((oc * groups, ic, kh, kw))
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
x = torch.randn((bs, ic * groups, ih, iw))
qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
w_packed = torch.ops.quantized.conv2d_prepack(
qw, bias, strides, paddings, dilates, groups
)
torch.ops.quantized.conv2d(qx, w_packed, output_scale=1.0, output_zero_point=0)
ih, iw = 5, 4
x = torch.randn((bs, ic * groups, ih, iw))
qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
# The following should pass when input shape is changed
torch.ops.quantized.conv2d(qx, w_packed, output_scale=1.0, output_zero_point=0)
@skipIfNoONEDNN
def test_conv_transpose_reorder_issue_onednn(self):
with override_quantized_engine('onednn'):
bs = 1
ic, oc = 16, 33
kh, kw = 3, 3
ih, iw = 50, 100
bias = None
strides, paddings, output_paddings, dilates, groups = [2, 2], [0, 0], [0, 0], [1, 1], 1
w = torch.randn((ic, oc, kh, kw))
qw = torch.quantize_per_tensor(w, scale=1.0, zero_point=0, dtype=torch.qint8)
x = torch.randn((bs, ic, ih, iw))
qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
w_packed = torch.ops.quantized.conv_transpose2d_prepack(
qw, bias, strides, paddings, output_paddings, dilates, groups
)
torch.ops.quantized.conv_transpose2d(qx, w_packed, output_scale=1.0, output_zero_point=0)
ih, iw = 5, 4
x = torch.randn((bs, ic, ih, iw))
qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)
# The following should pass when input shape is changed
torch.ops.quantized.conv_transpose2d(qx, w_packed, output_scale=1.0, output_zero_point=0)
    def _test_qconv_impl_cpu_tensor(
        self,
        qconv,
        qconv_prepack,
        conv_op,
        input_channels_per_group=2,
        input_feature_map_shape=(),
        output_channels_per_group=2,
        groups=1,
        kernels=3,
        strides=(),
        pads=(),
        dilations=(),
        X_scale=1.3,
        X_zero_point=2,
        W_scale=(1.0,),
        W_zero_point=(0,),
        Y_scale=3.2,
        Y_zero_point=0,
        use_bias=True,
        post_op=PointwisePostOp(),
        use_channelwise=True,
        X2_scale=1.2,
        X2_zero_point=0,
        qconv_output_dtype=None,  # None, torch.float32, torch.bfloat16
        weight_in_channel_last_format=False,
        qconv_x2_dtype=None,
    ):
        """Compare a PT2E (onednn) qconv op against a float reference conv.

        The reference path runs ``conv_op`` in fp32 on dequantized tensors and
        applies ``post_op`` (optional binary "sum" with a second input X2,
        and/or a unary relu/hardtanh/hardswish/swish) eagerly; the test path
        runs ``qconv_prepack`` + ``qconv`` on plain integer tensors. Results
        must agree to within one quantization step (decimal=0 below).

        When ``qconv_output_dtype`` is torch.float32 or torch.bfloat16 the op
        under test returns a floating-point tensor, which is re-quantized here
        before comparison. Returns ``(X_q, W_q, bias_float)`` for reuse.
        """
        # ONEDNN only supports symmetric quantization of weight
        if W_zero_point is not None:
            W_zero_point = len(W_zero_point) * [0]
        fp32_output = qconv_output_dtype is torch.float32
        bfloat16_output = qconv_output_dtype is torch.bfloat16
        if fp32_output or bfloat16_output:
            # Floating-point output: force neutral output / sum-input
            # quantization parameters so the comparison stays meaningful.
            Y_scale = 1.0
            Y_zero_point = 0
            X2_scale = 1.0
            X2_zero_point = 0
        batch_size = 3
        o_pads = None
        device = torch.device("cpu")
        input_dtype = torch.quint8
        weight_dtype = torch.qint8
        output_dtype = torch.quint8
        use_transpose = False
        (X, W), (X_q, W_q), bias_float = self._make_qconv_tensors(
            batch_size,
            input_channels_per_group,
            input_feature_map_shape,
            output_channels_per_group,
            groups,
            kernels,
            strides,
            pads,
            dilations,
            X_scale,
            X_zero_point,
            W_scale,
            W_zero_point,
            use_bias,
            use_channelwise,
            use_transpose,
            device=device,
            input_dtype=input_dtype,
            weight_dtype=weight_dtype,
        )
        if bias_float is not None:
            bias_float = bias_float.to(device)
        # Assign weights: the reference conv runs on the dequantized values so
        # that both paths see exactly the same (quantization-rounded) data.
        W = W_q.dequantize()
        X = X_q.dequantize()
        conv_op.weight = torch.nn.Parameter(W, requires_grad=False)
        conv_op.bias = (
            torch.nn.Parameter(bias_float, requires_grad=False) if use_bias else None
        )
        result_ref = conv_op(X)
        X2_q = None
        # Apply the fused epilogue to the reference result.
        if post_op.binary_attr == "sum":
            (X_value_min, X_value_max) = (0, 4)
            X2_init = torch.randint(
                X_value_min, X_value_max, result_ref.size(), device=device
            )
            X2 = X2_scale * ((X2_init - X2_zero_point).float())
            X2_q = torch.quantize_per_tensor(
                X2, scale=X2_scale, zero_point=X2_zero_point, dtype=input_dtype
            )
            result_ref = result_ref + X2
            # "sum" may itself be fused with a trailing relu.
            if post_op.unary_attr == "relu":
                relu = torch.nn.ReLU()
                result_ref = relu(result_ref)
        elif post_op.unary_attr == "relu":
            assert not use_transpose, "Cannot fuse ReLU with ConvTranspose"
            relu = torch.nn.ReLU()
            result_ref = relu(result_ref)
        elif post_op.unary_attr == "hardtanh":
            assert not use_transpose, "Cannot fuse hardtanh with ConvTranspose"
            assert len(post_op.scalars) == 2, "For post op hardtanh, expect 2 parameters passed in"
            hardtanh = torch.nn.Hardtanh(min_val=post_op.scalars[0], max_val=post_op.scalars[1])
            result_ref = hardtanh(result_ref)
        elif post_op.unary_attr == "hardswish":
            assert not use_transpose, "Cannot fuse hardswish with ConvTranspose"
            hardswish = torch.nn.Hardswish()
            result_ref = hardswish(result_ref)
        elif post_op.unary_attr == "swish":
            assert not use_transpose, "Cannot fuse silu with ConvTranspose"
            silu = torch.nn.SiLU()
            result_ref = silu(result_ref)
        # Quantize reference results for comparison
        result_ref_q = torch.quantize_per_tensor(
            result_ref, scale=Y_scale, zero_point=Y_zero_point, dtype=output_dtype
        )
        # Calculate the result for 2.X path: the PT2E ops take plain integer
        # tensors plus explicit scale/zero-point arguments.
        X_q_cpu_tensor = X_q.int_repr()
        W_q_cpu_tensor = W_q.int_repr()
        weight_scale = (
            W_q.q_per_channel_scales()
            if use_channelwise
            else torch.tensor(W_q.q_scale(), dtype=torch.double, device=device)
        )
        weight_zero_point = (
            W_q.q_per_channel_zero_points()
            if use_channelwise
            else torch.tensor(W_q.q_zero_point(), dtype=torch.int64, device=device)
        )
        if weight_in_channel_last_format:
            # dim 5 => 3d conv weight, dim 4 => 2d conv weight.
            if W_q_cpu_tensor.dim() == 5:
                W_q_cpu_tensor = W_q_cpu_tensor.to(memory_format=torch.channels_last_3d)
            elif W_q_cpu_tensor.dim() == 4:
                W_q_cpu_tensor = W_q_cpu_tensor.to(memory_format=torch.channels_last)
        packed_weight = qconv_prepack(
            W_q_cpu_tensor,
            weight_scale,
            X_scale,
            X_zero_point,
            strides,
            pads,
            dilations,
            groups,
            X_q_cpu_tensor.size(),
        )
        if post_op.binary_attr == "sum":
            # Binary variant: pass the second ("accum") input. For float
            # output dtypes the accum input is dequantized first.
            X2_cpu_tensor = (
                X2_q.int_repr()
                if qconv_output_dtype is None
                else X2_q.dequantize().to(qconv_x2_dtype)
            ).contiguous(memory_format=torch.channels_last)
            Y_q_cpu_tensor = qconv(
                X_q_cpu_tensor,
                X_scale,
                X_zero_point,
                packed_weight,
                weight_scale,
                weight_zero_point,
                X2_cpu_tensor,
                bias_float,
                strides,
                pads,
                dilations,
                groups,
                Y_scale,
                Y_zero_point,
                qconv_output_dtype,
                X2_scale,
                X2_zero_point,
                post_op.binary_attr,
                post_op.alpha,
                post_op.unary_attr,
                post_op.scalars,
                post_op.algorithm,
            )
        else:
            Y_q_cpu_tensor = qconv(
                X_q_cpu_tensor,
                X_scale,
                X_zero_point,
                packed_weight,
                weight_scale,
                weight_zero_point,
                bias_float,
                strides,
                pads,
                dilations,
                groups,
                Y_scale,
                Y_zero_point,
                qconv_output_dtype,
                post_op.unary_attr,
                post_op.scalars,
                post_op.algorithm,
            )
        if fp32_output or bfloat16_output:
            # Float output: re-quantize so the comparison below works on
            # integer representations for both paths.
            self.assertTrue(Y_q_cpu_tensor.dtype == qconv_output_dtype)
            Y_q_cpu_tensor = torch.quantize_per_tensor(
                Y_q_cpu_tensor
                if fp32_output
                else Y_q_cpu_tensor.to(torch.float32), scale=Y_scale, zero_point=Y_zero_point, dtype=output_dtype
            ).int_repr()
        # Make sure the results match
        # assert_array_almost_equal compares using the following formula:
        #     abs(desired-actual) < 1.5 * 10**(-decimal)
        # (https://numpy.org/doc/stable/reference/generated/numpy.testing.assert_almost_equal.html)
        # We use decimal = 0 to ignore off-by-1 differences between
        # reference and test. Off-by-1 differences arise due to the order of
        # round and zero_point addition operation, i.e., if addition
        # followed by round is used by reference and round followed by
        # addition is used by test, the results may differ by 1.
        # For example, the result of round(2.5) + 1 is 3 while
        # round(2.5 + 1) is 4 assuming the rounding mode is
        # round-to-nearest, ties-to-even.
        np.testing.assert_array_almost_equal(
            result_ref_q.int_repr().cpu().numpy(),
            Y_q_cpu_tensor.cpu().numpy(),
            decimal=0,
            err_msg=f"""X: {X_q}, W: {W_q}, b: {bias_float}, strides: {strides},
            pads: {pads}, o_pads: {o_pads}, dilations: {dilations},
            groups: {groups}, y_s: {Y_scale}, y_zp: {Y_zero_point}, X2: {X2_q}""",
        )
        # Return the quantized data for later reuse
        return X_q, W_q, bias_float
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv1d_pt2e(self):
groups_list = [1, 3]
input_channels_per_group = 2
output_channels_per_group = 2
length = 4
kernel = 3
stride = 1
pad = 1
dilation = 1
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
if output_dtype is not None and not (use_bias and use_channelwise):
# Remove some test combination to reduce UT test time
continue
conv1d = torch.nn.Conv1d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernel,
stride,
pad,
dilation,
groups,
)
qconv = torch.ops.onednn.qconv1d_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
pointwise_post_op = PointwisePostOp()
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv1d,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=(length,),
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernel,
strides=[stride],
pads=[pad],
dilations=[dilation],
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_pt2e(self):
groups_list = [1, 3]
input_channels_per_group = 2
output_channels_per_group = 2
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
channel_last_weight_format_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(
groups_list,
use_bias_list,
use_channelwise_list,
channel_last_weight_format_list,
output_dtype_list,
)
for groups, use_bias, use_channelwise, channel_last_weight_format, output_dtype in options:
if (output_dtype is not None or channel_last_weight_format) and not (use_bias and use_channelwise):
# Remove some test combination to reduce UT test time
continue
qconv = torch.ops.onednn.qconv_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp()
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
weight_in_channel_last_format=channel_last_weight_format,
)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv3d_pt2e(self):
input_channels_per_group = 2
input_feature_map_shape = (6, 6, 6)
output_channels_per_group = 2
groups_list = [1, 3]
kernels = (3, 3, 3)
strides = (2, 2, 2)
pads = (1, 1, 1)
dilations = (1, 1, 1)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
channel_last_weight_format_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(
groups_list,
use_bias_list,
use_channelwise_list,
channel_last_weight_format_list,
output_dtype_list,
)
for groups, use_bias, use_channelwise, channel_last_weight_format, output_dtype in options:
if (output_dtype is not None or channel_last_weight_format) and not (use_bias and use_channelwise):
# Remove some test combination to reduce UT test time
continue
qconv = torch.ops.onednn.qconv3d_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv3d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp()
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
weight_in_channel_last_format=channel_last_weight_format,
)
# Test qconv with post op relu
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_relu_pt2e(self):
input_channels_per_group = 2
output_channels_per_group = 2
groups_list = [1, 10]
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
qconv = torch.ops.onednn.qconv_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(unary_attr="relu")
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
# Test qconv with post op hardtanh
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_hardtanh_pt2e(self):
input_channels_per_group = 2
output_channels_per_group = 2
groups_list = [1, 10]
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
qconv = torch.ops.onednn.qconv_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(unary_attr="hardtanh", scalars=[0.0, 6.0])
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
# Test qconv with post op swish
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_swish_pt2e(self):
input_channels_per_group = 2
output_channels_per_group = 2
groups_list = [1, 10]
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
qconv = torch.ops.onednn.qconv_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(unary_attr="swish")
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
# Test qconv with post op hardswish
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_hardswish_pt2e(self):
input_channels_per_group = 2
output_channels_per_group = 2
groups_list = [1, 10]
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
qconv = torch.ops.onednn.qconv_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(unary_attr="hardswish")
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
# Test qconv with post op sum
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_sum_pt2e(self):
groups_list = [1, 3]
input_channels_per_group = 2
output_channels_per_group = 2
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [-3]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
X2_zero_point_list = [0, 1]
options = itertools.product(
groups_list, use_bias_list, use_channelwise_list, X2_zero_point_list, output_dtype_list
)
for groups, use_bias, use_channelwise, X2_zero_point, output_dtype in options:
qconv = torch.ops.onednn.qconv2d_pointwise.binary
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(binary_attr="sum")
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
X2_zero_point=X2_zero_point,
qconv_output_dtype=output_dtype,
qconv_x2_dtype=output_dtype,
)
# Test qconv with post op sum relu
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_sum_relu_pt2e(self):
groups_list = [1, 3]
input_channels_per_group = 2
output_channels_per_group = 2
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [-3]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
X2_zero_point_list = [0, 1]
options = itertools.product(
groups_list, use_bias_list, use_channelwise_list, X2_zero_point_list
)
for groups, use_bias, use_channelwise, X2_zero_point in options:
qconv = torch.ops.onednn.qconv2d_pointwise.binary
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(binary_attr="sum", unary_attr="relu")
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
X2_zero_point=X2_zero_point,
)
# Test qconv with post op sum
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_sum_relu_float_output_pt2e(self):
groups = 1
input_channels_per_group = 2
output_channels_per_group = 2
input_feature_map_shape = (10, 10)
kernels = (3, 3)
strides = (2, 2)
pads = (1, 1)
dilations = (1, 1)
W_scale = [1.5]
W_zero_point = [-3]
use_bias_list = [False, True]
use_channelwise = True
output_dtype_list = [torch.float32, torch.bfloat16]
X2_zero_point = 0
use_relu_list = [True, False]
options = itertools.product(
use_bias_list, output_dtype_list, use_relu_list
)
for use_bias, output_dtype, use_relu in options:
qconv_x2_dtype = output_dtype
qconv = torch.ops.onednn.qconv2d_pointwise.binary
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv2d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = (
PointwisePostOp(binary_attr="sum", unary_attr="relu")
if use_relu
else PointwisePostOp(binary_attr="sum")
)
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
X2_zero_point=X2_zero_point,
qconv_output_dtype=output_dtype,
qconv_x2_dtype=qconv_x2_dtype,
)
# Test qconv1d with post op relu
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv1d_relu_pt2e(self):
input_channels_per_group = 2
output_channels_per_group = 2
groups_list = [1, 10]
input_feature_map_shape = (10,)
kernels = (3,)
strides = (2,)
pads = (1,)
dilations = (1,)
W_scale = [1.5]
W_zero_point = [0]
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
qconv = torch.ops.onednn.qconv_pointwise
qconv_prepack = torch.ops.onednn.qconv_prepack
conv_op = torch.nn.Conv1d(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernels,
strides,
pads,
dilations,
groups,
)
pointwise_post_op = PointwisePostOp(unary_attr="relu")
self._test_qconv_impl_cpu_tensor(
qconv,
qconv_prepack,
conv_op,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=input_feature_map_shape,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=kernels,
strides=strides,
pads=pads,
dilations=dilations,
W_scale=W_scale,
W_zero_point=W_zero_point,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
def _make_qconv_tensors_fp8(
self, batch_size, input_channels_per_group, input_feature_map_shape,
output_channels_per_group, groups, kernels, strides, pads, dilations,
use_bias, use_channelwise, use_transpose, bfloat16_output,
device=torch.device("cpu"),
):
assert not (use_channelwise and use_transpose), \
"Cannot generate channelwise qconv_transpose_tensors "
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
# Padded input size should be at least as big as dilated kernel
kernels = _single(kernels)
strides = _single(strides)
pads = _single(pads)
dilations = _single(dilations)
for i in range(len(kernels)):
assume(input_feature_map_shape[i] + 2 * pads[i]
>= dilations[i] * (kernels[i] - 1) + 1)
# the operator expects them in the format
# (output_channels, input_channels/groups, kernel_d, kernel_h, kernel_w)
# (input_channels, output_channels/groups, kernel_d, kernel_h, kernel_w)
if use_transpose:
output_shape = (input_channels, output_channels_per_group,)
else:
output_shape = (output_channels, input_channels_per_group,)
X = torch.rand(
(batch_size, input_channels,) + input_feature_map_shape,
device=device,
)
X_q, X_scale = _quantize_fp8e4m3(X, channelwise=False)
W = torch.randn(output_shape + kernels, device=device) * 0.1
W_q, W_scale = _quantize_fp8e4m3(W, channelwise=use_channelwise)
bias_dtype = torch.bfloat16 if bfloat16_output else torch.float
bias = torch.randn((output_channels,), dtype=bias_dtype, device=device) if use_bias else None
return X, W, X_q, W_q, X_scale, W_scale, bias
    def _test_qconv_impl_cpu_tensor_fp8(
        self,
        qconv,
        qconv_prepack,
        conv_op,
        input_channels_per_group=2,
        input_feature_map_shape=(),
        output_channels_per_group=2,
        groups=1,
        kernels=3,
        strides=(),
        pads=(),
        dilations=(),
        Y_scale=0.002,
        use_bias=True,
        post_op=PointwisePostOp(),
        use_channelwise=True,
        X2_scale=0.02,
        qconv_output_dtype=None,  # None, torch.float32, torch.bfloat16
        weight_in_channel_last_format=False,
    ):
        """FP8 (e4m3) variant of ``_test_qconv_impl_cpu_tensor``.

        All quantization is symmetric (every zero point passed to the op is
        0). The reference path runs ``conv_op`` in floating point on the
        dequantized fp8 tensors and applies ``post_op`` eagerly; the test
        path runs ``qconv_prepack`` + ``qconv``. The results are compared
        with assertEqual (atol=1e-6, rtol=1e-5) and checked for NaNs.
        """
        # We assume FP8 quantization is always symmetric
        fp32_output = qconv_output_dtype is torch.float32
        bfloat16_output = qconv_output_dtype is torch.bfloat16
        if fp32_output or bfloat16_output:
            # Floating-point output: neutral output / sum-input scales.
            Y_scale = 1.0
            X2_scale = 1.0
        batch_size = 3
        device = torch.device("cpu")
        use_transpose = False
        X, W, X_q, W_q, X_scale, W_scale, bias = self._make_qconv_tensors_fp8(
            batch_size,
            input_channels_per_group,
            input_feature_map_shape,
            output_channels_per_group,
            groups,
            kernels,
            strides,
            pads,
            dilations,
            use_bias,
            use_channelwise,
            use_transpose,
            bfloat16_output,
            device=device,
        )
        # Assign weights: reference conv runs on dequantized values so both
        # paths see the same (quantization-rounded) data.
        dqW = _dequantize_fp8e4m3(W_q, W_scale)
        dqX = _dequantize_fp8e4m3(X_q, X_scale)
        bias_float = bias.float() if use_bias and bfloat16_output else bias
        conv_op.weight = torch.nn.Parameter(dqW, requires_grad=False)
        conv_op.bias = (
            torch.nn.Parameter(bias_float, requires_grad=False) if use_bias else None
        )
        result_ref = conv_op(dqX)
        X2 = None
        X2_q = None
        # NOTE: X2_scale is reset here; for a quantized-output "sum" it is
        # re-derived from the fp8 quantization of X2 below.
        X2_scale = 1.0
        if post_op.binary_attr == "sum":
            X2_dtype = qconv_output_dtype if qconv_output_dtype else torch.float32
            X2 = torch.rand_like(result_ref, device=device, dtype=X2_dtype)
            if qconv_output_dtype is None:
                # Quantized output: the accum input is fp8-quantized too.
                X2_q, X2_scale = _quantize_fp8e4m3(X2, channelwise=False)
                X2_dq = _dequantize_fp8e4m3(X2_q, X2_scale)
                X2_scale = X2_scale.item()
            else:
                X2_dq = X2
            result_ref = result_ref + X2_dq
            # "sum" may itself be fused with a trailing relu.
            if post_op.unary_attr == "relu":
                relu = torch.nn.ReLU()
                result_ref = relu(result_ref)
        elif post_op.unary_attr == "relu":
            assert not use_transpose, "Cannot fuse ReLU with ConvTranspose"
            relu = torch.nn.ReLU()
            result_ref = relu(result_ref)
        elif post_op.unary_attr == "hardtanh":
            assert not use_transpose, "Cannot fuse hardtanh with ConvTranspose"
            assert len(post_op.scalars) == 2, "For post op hardtanh, expect 2 parameters passed in"
            hardtanh = torch.nn.Hardtanh(min_val=post_op.scalars[0], max_val=post_op.scalars[1])
            result_ref = hardtanh(result_ref)
        elif post_op.unary_attr == "hardswish":
            assert not use_transpose, "Cannot fuse hardswish with ConvTranspose"
            hardswish = torch.nn.Hardswish()
            result_ref = hardswish(result_ref)
        elif post_op.unary_attr == "swish":
            assert not use_transpose, "Cannot fuse silu with ConvTranspose"
            silu = torch.nn.SiLU()
            result_ref = silu(result_ref)
        # Quantize reference results for comparison
        if qconv_output_dtype is None:
            result_ref = _quantize_fp8e4m3(result_ref, False, Y_scale)[0]
        else:
            result_ref = result_ref.to(qconv_output_dtype)
        # Calculate the result for PT2E path
        if weight_in_channel_last_format:
            # dim 5 => 3d conv weight, dim 4 => 2d conv weight.
            if W_q.dim() == 5:
                W_q = W_q.to(memory_format=torch.channels_last_3d)
            elif W_q.dim() == 4:
                W_q = W_q.to(memory_format=torch.channels_last)
        X_scale_scalar = X_scale.item()
        packed_weight = qconv_prepack(
            W_q,
            W_scale,
            X_scale_scalar,
            0,  # X_zero_point
            strides,
            pads,
            dilations,
            groups,
            X_q.size(),
        )
        if post_op.binary_attr == "sum":
            # Binary variant: pass the accum input in channels-last layout.
            accum = (
                X2_q.contiguous(memory_format=torch.channels_last)
                if X2_q is not None
                else X2.contiguous(memory_format=torch.channels_last)
            )
            result = qconv(
                X_q,
                X_scale_scalar,
                0,  # X_zero_point
                packed_weight,
                W_scale,
                torch.zeros([], dtype=torch.int8),  # W_zero_point
                accum,
                bias,
                strides,
                pads,
                dilations,
                groups,
                Y_scale,
                0,  # Y_zero_point
                qconv_output_dtype,
                X2_scale,
                0,  # X2_zero_point
                post_op.binary_attr,
                post_op.alpha,
                post_op.unary_attr,
                post_op.scalars,
                post_op.algorithm,
            )
        else:
            result = qconv(
                X_q,
                X_scale_scalar,
                0,  # X_zero_point
                packed_weight,
                W_scale,
                torch.zeros([], dtype=torch.int8),  # W_zero_point
                bias,
                strides,
                pads,
                dilations,
                groups,
                Y_scale,
                0,  # Y_zero_point
                qconv_output_dtype,
                post_op.unary_attr,
                post_op.scalars,
                post_op.algorithm,
            )
        if fp32_output or bfloat16_output:
            self.assertTrue(result.dtype == qconv_output_dtype)
        # Compare in float and make sure the op produced no NaNs.
        self.assertEqual(result.float(), result_ref.float(), atol=1e-6, rtol=1e-5)
        assert not torch.isnan(result).any()
def _test_qconv_fp8_helper(self, nd, pointwise_post_op):
# nd = 1,2,3 -> conv1d/2d/3d
if pointwise_post_op.binary_attr != "none":
# Only conv2d supports binary post op
assert nd == 2
groups_list = [1, 3]
input_channels_per_group = 2
output_channels_per_group = 2
length = 4
kernel = 3
stride = 1
pad = 1
dilation = 1
use_bias_list = [False, True]
use_channelwise_list = [False, True]
output_dtype_list = [None, torch.float32, torch.bfloat16]
options = itertools.product(groups_list, use_bias_list, use_channelwise_list, output_dtype_list)
for groups, use_bias, use_channelwise, output_dtype in options:
if output_dtype is not None and not (use_bias and use_channelwise):
# Remove some test combination to reduce UT test time
continue
conv_mod = getattr(torch.nn, f"Conv{nd}d")(
input_channels_per_group * groups,
output_channels_per_group * groups,
kernel,
stride,
pad,
dilation,
groups,
)
qconv = (
torch.ops.onednn.qconv_pointwise
if pointwise_post_op.binary_attr == "none"
else torch.ops.onednn.qconv2d_pointwise.binary
)
qconv_prepack = torch.ops.onednn.qconv_prepack
self._test_qconv_impl_cpu_tensor_fp8(
qconv,
qconv_prepack,
conv_mod,
input_channels_per_group=input_channels_per_group,
input_feature_map_shape=(length,) * nd,
output_channels_per_group=output_channels_per_group,
groups=groups,
kernels=[kernel] * nd,
strides=[stride] * nd,
pads=[pad] * nd,
dilations=[dilation] * nd,
use_bias=use_bias,
post_op=pointwise_post_op,
use_channelwise=use_channelwise,
qconv_output_dtype=output_dtype,
)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv1d_fp8(self):
pointwise_post_op = PointwisePostOp()
self._test_qconv_fp8_helper(1, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv1d_relu_fp8(self):
pointwise_post_op = PointwisePostOp(unary_attr="relu")
self._test_qconv_fp8_helper(1, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_fp8(self):
pointwise_post_op = PointwisePostOp()
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_relu_fp8(self):
pointwise_post_op = PointwisePostOp(unary_attr="relu")
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_hardtanh_fp8(self):
pointwise_post_op = PointwisePostOp(unary_attr="hardtanh", scalars=[0.0, 6.0])
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_swish_fp8(self):
pointwise_post_op = PointwisePostOp(unary_attr="swish")
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_hardswish_fp8(self):
pointwise_post_op = PointwisePostOp(unary_attr="hardswish")
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_sum_fp8(self):
pointwise_post_op = PointwisePostOp(binary_attr="sum")
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv2d_sum_relu_fp8(self):
pointwise_post_op = PointwisePostOp(binary_attr="sum", unary_attr="relu")
self._test_qconv_fp8_helper(2, pointwise_post_op)
@unittest.skipIf(IS_FBCODE, "Skip pt2e ops in fbcode")
@skipIfNoONEDNN
def test_qconv3d_fp8(self):
pointwise_post_op = PointwisePostOp()
torch.manual_seed(0) # For reproducibility in 3D conv tests
self._test_qconv_fp8_helper(3, pointwise_post_op)
| TestQuantizedConv |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 115675,
"end": 115963
} | class ____(sgqlc.types.Enum):
"""Properties by which star connections can be ordered.
Enumeration Choices:
* `STARRED_AT`: Allows ordering a list of stars by when they were
created.
"""
__schema__ = github_schema
__choices__ = ("STARRED_AT",)
| StarOrderField |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.