language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/2920. Maximum Points After Collecting Coins From All Nodes/2920.py | {
"start": 0,
"end": 821
} | class ____:
def maximumPoints(
self,
edges: list[list[int]],
coins: list[int],
k: int,
) -> int:
MAX_COIN = 10000
MAX_HALVED = int(MAX_COIN).bit_length()
n = len(coins)
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
@functools.lru_cache(None)
def dfs(u: int, prev: int, halved: int) -> int:
# All the children will be 0, so no need to explore.
if halved > MAX_HALVED:
return 0
val = coins[u] // (1 << halved)
takeAll = val - k
takeHalf = math.floor(val / 2)
for v in graph[u]:
if v == prev:
continue
takeAll += dfs(v, u, halved)
takeHalf += dfs(v, u, halved + 1)
return max(takeAll, takeHalf)
return dfs(0, -1, 0)
| Solution |
python | pytorch__pytorch | torch/_dynamo/callback.py | {
"start": 1695,
"end": 5622
} | class ____:
start_callbacks: list[Callable[[CallbackArgs], None]] = field(default_factory=list)
end_callbacks: list[Callable[[CallbackArgs], None]] = field(default_factory=list)
__pending_callbacks_counter: int = field(default=0, init=False, repr=False)
__pending_callbacks_counter_lock: threading.Lock = field(
default_factory=threading.Lock, init=False, repr=False
)
def register_start_callback(
self, callback: Callable[[CallbackArgs], None]
) -> Callable[[CallbackArgs], None]:
"""
Register a callback function to be called when the compilation starts.
Args:
- callback (Callable): The callback function to register.
"""
self.start_callbacks.append(callback)
return callback
def register_end_callback(
self, callback: Callable[[CallbackArgs], None]
) -> Callable[[CallbackArgs], None]:
"""
Register a callback function to be called when the compilation ends.
Args:
- callback (Callable): The callback function to register.
"""
self.end_callbacks.append(callback)
return callback
def remove_start_callback(self, callback: Callable[[CallbackArgs], None]) -> None:
"""
Remove a registered start callback function.
Args:
- callback (Callable): The callback function to remove.
"""
self.start_callbacks.remove(callback)
def remove_end_callback(self, callback: Callable[[CallbackArgs], None]) -> None:
"""
Remove a registered end callback function.
Args:
- callback (Callable): The callback function to remove.
"""
self.end_callbacks.remove(callback)
def run_start_callbacks(self, args: CallbackArgs) -> None:
"""
Execute all registered start callbacks.
"""
for callback in self.start_callbacks:
callback(args)
def run_end_callbacks(self, args: CallbackArgs) -> None:
"""
Execute all registered end callbacks.
"""
for callback in self.end_callbacks:
callback(args)
@contextmanager
def install_callbacks(
self, trigger: CallbackTrigger, compile_id: str
) -> Generator[None, Any, Any]:
"""
Context manager to install the callbacks and run them when the context is exited.
"""
args = CallbackArgs(trigger, compile_id)
try:
with self.__pending_callbacks_counter_lock:
self.__pending_callbacks_counter += 1
if self.__pending_callbacks_counter == 1:
self.run_start_callbacks(args)
yield
finally:
with self.__pending_callbacks_counter_lock:
assert self.__pending_callbacks_counter > 0, (
"Pending callbacks counter cannot become negative."
)
if self.__pending_callbacks_counter == 1:
self.run_end_callbacks(args)
self.__pending_callbacks_counter -= 1
def clear(self) -> None:
"""
Clear all registered callbacks.
"""
self.start_callbacks.clear()
self.end_callbacks.clear()
assert self.__pending_callbacks_counter == 0
callback_handler = CompilationCallbackHandler()
def on_compile_start(
callback: Callable[[CallbackArgs], None],
) -> Callable[[CallbackArgs], None]:
"""
Decorator to register a callback function for the start of the compilation.
"""
callback_handler.register_start_callback(callback)
return callback
def on_compile_end(
callback: Callable[[CallbackArgs], None],
) -> Callable[[CallbackArgs], None]:
"""
Decorator to register a callback function for the end of the compilation.
"""
callback_handler.register_end_callback(callback)
return callback
| CompilationCallbackHandler |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 27167,
"end": 27599
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a block type."""
logo_url: Optional[str] = Field(None) # TODO: HttpUrl
documentation_url: Optional[str] = Field(None) # TODO: HttpUrl
description: Optional[str] = Field(None)
code_example: Optional[str] = Field(None)
@classmethod
def updatable_fields(cls) -> set[str]:
return get_class_fields_only(cls)
| BlockTypeUpdate |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 44230,
"end": 44928
} | class ____(nn.Module):
def __init__(self, config: Sam3TrackerVideoConfig, in_channels: int, out_channels: int):
super().__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=config.mask_downsampler_kernel_size,
stride=config.mask_downsampler_stride,
padding=config.mask_downsampler_padding,
)
self.layer_norm = Sam3TrackerVideoLayerNorm(out_channels, eps=1e-6, data_format="channels_first")
self.activation = ACT2FN[config.mask_downsampler_hidden_act]
def forward(self, x):
return self.activation(self.layer_norm(self.conv(x)))
| Sam3TrackerVideoMaskDownSamplerLayer |
python | django__django | django/core/cache/backends/base.py | {
"start": 1271,
"end": 14291
} | class ____:
_missing_key = object()
def __init__(self, params):
timeout = params.get("timeout", params.get("TIMEOUT", 300))
if timeout is not None:
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
options = params.get("OPTIONS", {})
max_entries = params.get("max_entries", options.get("MAX_ENTRIES", 300))
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get("cull_frequency", options.get("CULL_FREQUENCY", 3))
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
self.key_prefix = params.get("KEY_PREFIX", "")
self.version = params.get("VERSION", 1)
self.key_func = get_key_func(params.get("KEY_FUNCTION"))
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Return the timeout value usable by this backend based upon the provided
timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
elif timeout == 0:
# ticket 21147 - avoid time.time() related precision issues
timeout = -1
return None if timeout is None else time.time() + timeout
def make_key(self, key, version=None):
"""
Construct the key used by all other methods. By default, use the
key_func to generate a key (which, by default, prepends the
`key_prefix' and 'version'). A different key function can be provided
at the time of cache construction; alternatively, you can subclass the
cache backend to provide custom key making behavior.
"""
if version is None:
version = self.version
return self.key_func(key, self.key_prefix, version)
def validate_key(self, key):
"""
Warn about keys that would not be portable to the memcached
backend. This encourages (but does not force) writing backend-portable
cache code.
"""
for warning in memcache_key_warnings(key):
warnings.warn(warning, CacheKeyWarning)
def make_and_validate_key(self, key, version=None):
"""Helper to make and validate keys."""
key = self.make_key(key, version=version)
self.validate_key(key)
return key
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a value in the cache if the key does not already exist. If
timeout is given, use that timeout for the key; otherwise use the
default cache timeout.
Return True if the value was stored, False otherwise.
"""
raise NotImplementedError(
"subclasses of BaseCache must provide an add() method"
)
async def aadd(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
return await sync_to_async(self.add, thread_sensitive=True)(
key, value, timeout, version
)
def get(self, key, default=None, version=None):
"""
Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
"""
raise NotImplementedError("subclasses of BaseCache must provide a get() method")
async def aget(self, key, default=None, version=None):
return await sync_to_async(self.get, thread_sensitive=True)(
key, default, version
)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a value in the cache. If timeout is given, use that timeout for the
key; otherwise use the default cache timeout.
"""
raise NotImplementedError("subclasses of BaseCache must provide a set() method")
async def aset(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
return await sync_to_async(self.set, thread_sensitive=True)(
key, value, timeout, version
)
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
"""
Update the key's expiry time using timeout. Return True if successful
or False if the key does not exist.
"""
raise NotImplementedError(
"subclasses of BaseCache must provide a touch() method"
)
async def atouch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
return await sync_to_async(self.touch, thread_sensitive=True)(
key, timeout, version
)
def delete(self, key, version=None):
"""
Delete a key from the cache and return whether it succeeded, failing
silently.
"""
raise NotImplementedError(
"subclasses of BaseCache must provide a delete() method"
)
async def adelete(self, key, version=None):
return await sync_to_async(self.delete, thread_sensitive=True)(key, version)
def get_many(self, keys, version=None):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Return a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k, self._missing_key, version=version)
if val is not self._missing_key:
d[k] = val
return d
async def aget_many(self, keys, version=None):
"""See get_many()."""
d = {}
for k in keys:
val = await self.aget(k, self._missing_key, version=version)
if val is not self._missing_key:
d[k] = val
return d
def get_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):
"""
Fetch a given key from the cache. If the key does not exist,
add the key and set it to the default value. The default value can
also be any callable. If timeout is given, use that timeout for the
key; otherwise use the default cache timeout.
Return the value of the key stored or retrieved.
"""
val = self.get(key, self._missing_key, version=version)
if val is self._missing_key:
if callable(default):
default = default()
self.add(key, default, timeout=timeout, version=version)
# Fetch the value again to avoid a race condition if another caller
# added a value between the first get() and the add() above.
return self.get(key, default, version=version)
return val
async def aget_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):
"""See get_or_set()."""
val = await self.aget(key, self._missing_key, version=version)
if val is self._missing_key:
if callable(default):
default = default()
await self.aadd(key, default, timeout=timeout, version=version)
# Fetch the value again to avoid a race condition if another caller
# added a value between the first aget() and the aadd() above.
return await self.aget(key, default, version=version)
return val
def has_key(self, key, version=None):
"""
Return True if the key is in the cache and has not expired.
"""
return (
self.get(key, self._missing_key, version=version) is not self._missing_key
)
async def ahas_key(self, key, version=None):
return (
await self.aget(key, self._missing_key, version=version)
is not self._missing_key
)
def incr(self, key, delta=1, version=None):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
value = self.get(key, self._missing_key, version=version)
if value is self._missing_key:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
self.set(key, new_value, version=version)
return new_value
async def aincr(self, key, delta=1, version=None):
"""See incr()."""
value = await self.aget(key, self._missing_key, version=version)
if value is self._missing_key:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
await self.aset(key, new_value, version=version)
return new_value
def decr(self, key, delta=1, version=None):
"""
Subtract delta from value in the cache. If the key does not exist,
raise a ValueError exception.
"""
return self.incr(key, -delta, version=version)
async def adecr(self, key, delta=1, version=None):
return await self.aincr(key, -delta, version=version)
def __contains__(self, key):
"""
Return True if the key is in the cache and has not expired.
"""
# This is a separate method, rather than just a copy of has_key(),
# so that it always has the same functionality as has_key(), even
# if a subclass overrides it.
return self.has_key(key)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, use that timeout for the key; otherwise use the
default cache timeout.
On backends that support it, return a list of keys that failed
insertion, or an empty list if all keys were inserted successfully.
"""
for key, value in data.items():
self.set(key, value, timeout=timeout, version=version)
return []
async def aset_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
for key, value in data.items():
await self.aset(key, value, timeout=timeout, version=version)
return []
def delete_many(self, keys, version=None):
"""
Delete a bunch of values in the cache at once. For certain backends
(memcached), this is much more efficient than calling delete() multiple
times.
"""
for key in keys:
self.delete(key, version=version)
async def adelete_many(self, keys, version=None):
for key in keys:
await self.adelete(key, version=version)
def clear(self):
"""Remove *all* values from the cache at once."""
raise NotImplementedError(
"subclasses of BaseCache must provide a clear() method"
)
async def aclear(self):
return await sync_to_async(self.clear, thread_sensitive=True)()
def incr_version(self, key, delta=1, version=None):
"""
Add delta to the cache version for the supplied key. Return the new
version.
"""
if version is None:
version = self.version
value = self.get(key, self._missing_key, version=version)
if value is self._missing_key:
raise ValueError("Key '%s' not found" % key)
self.set(key, value, version=version + delta)
self.delete(key, version=version)
return version + delta
async def aincr_version(self, key, delta=1, version=None):
"""See incr_version()."""
if version is None:
version = self.version
value = await self.aget(key, self._missing_key, version=version)
if value is self._missing_key:
raise ValueError("Key '%s' not found" % key)
await self.aset(key, value, version=version + delta)
await self.adelete(key, version=version)
return version + delta
def decr_version(self, key, delta=1, version=None):
"""
Subtract delta from the cache version for the supplied key. Return the
new version.
"""
return self.incr_version(key, -delta, version)
async def adecr_version(self, key, delta=1, version=None):
return await self.aincr_version(key, -delta, version)
def close(self, **kwargs):
"""Close the cache connection"""
pass
async def aclose(self, **kwargs):
pass
memcached_error_chars_re = _lazy_re_compile(r"[\x00-\x20\x7f]")
def memcache_key_warnings(key):
if len(key) > MEMCACHE_MAX_KEY_LENGTH:
yield (
"Cache key will cause errors if used with memcached: %r "
"(longer than %s)" % (key, MEMCACHE_MAX_KEY_LENGTH)
)
if memcached_error_chars_re.search(key):
yield (
"Cache key contains characters that will cause errors if used with "
f"memcached: {key!r}"
)
| BaseCache |
python | numpy__numpy | numpy/lib/tests/test_shape_base.py | {
"start": 4305,
"end": 10206
} | class ____:
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))
def test_simple101(self):
a = np.ones((10, 101), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))
def test_3d(self):
a = np.arange(27).reshape((3, 3, 3))
assert_array_equal(apply_along_axis(np.sum, 0, a),
[[27, 30, 33], [36, 39, 42], [45, 48, 51]])
def test_preserve_subclass(self):
def double(row):
return row * 2
class MyNDArray(np.ndarray):
pass
m = np.array([[0, 1], [2, 3]]).view(MyNDArray)
expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)
result = apply_along_axis(double, 0, m)
assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
result = apply_along_axis(double, 1, m)
assert_(isinstance(result, MyNDArray))
assert_array_equal(result, expected)
def test_subclass(self):
class MinimalSubclass(np.ndarray):
data = 1
def minimal_function(array):
return array.data
a = np.zeros((6, 3)).view(MinimalSubclass)
assert_array_equal(
apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])
)
def test_scalar_array(self, cls=np.ndarray):
a = np.ones((6, 3)).view(cls)
res = apply_along_axis(np.sum, 0, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([6, 6, 6]).view(cls))
def test_0d_array(self, cls=np.ndarray):
def sum_to_0d(x):
""" Sum x, returning a 0d array of the same class """
assert_equal(x.ndim, 1)
return np.squeeze(np.sum(x, keepdims=True))
a = np.ones((6, 3)).view(cls)
res = apply_along_axis(sum_to_0d, 0, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([6, 6, 6]).view(cls))
res = apply_along_axis(sum_to_0d, 1, a)
assert_(isinstance(res, cls))
assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))
def test_axis_insertion(self, cls=np.ndarray):
def f1to2(x):
"""produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
return (x[::-1] * x[1:, None]).view(cls)
a2d = np.arange(6 * 3).reshape((6, 3))
# 2d insertion along first axis
actual = apply_along_axis(f1to2, 0, a2d)
expected = np.stack([
f1to2(a2d[:, i]) for i in range(a2d.shape[1])
], axis=-1).view(cls)
assert_equal(type(actual), type(expected))
assert_equal(actual, expected)
# 2d insertion along last axis
actual = apply_along_axis(f1to2, 1, a2d)
expected = np.stack([
f1to2(a2d[i, :]) for i in range(a2d.shape[0])
], axis=0).view(cls)
assert_equal(type(actual), type(expected))
assert_equal(actual, expected)
# 3d insertion along middle axis
a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3))
actual = apply_along_axis(f1to2, 1, a3d)
expected = np.stack([
np.stack([
f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])
], axis=0)
for j in range(a3d.shape[2])
], axis=-1).view(cls)
assert_equal(type(actual), type(expected))
assert_equal(actual, expected)
def test_subclass_preservation(self):
class MinimalSubclass(np.ndarray):
pass
self.test_scalar_array(MinimalSubclass)
self.test_0d_array(MinimalSubclass)
self.test_axis_insertion(MinimalSubclass)
def test_axis_insertion_ma(self):
def f1to2(x):
"""produces an asymmetric non-square matrix from x"""
assert_equal(x.ndim, 1)
res = x[::-1] * x[1:, None]
return np.ma.masked_where(res % 5 == 0, res)
a = np.arange(6 * 3).reshape((6, 3))
res = apply_along_axis(f1to2, 0, a)
assert_(isinstance(res, np.ma.masked_array))
assert_equal(res.ndim, 3)
assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)
assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)
assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)
def test_tuple_func1d(self):
def sample_1d(x):
return x[1], x[0]
res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))
assert_array_equal(res, np.array([[2, 1], [4, 3]]))
def test_empty(self):
# can't apply_along_axis when there's no chance to call the function
def never_call(x):
assert_(False) # should never be reached
a = np.empty((0, 0))
assert_raises(ValueError, np.apply_along_axis, never_call, 0, a)
assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)
# but it's sometimes ok with some non-zero dimensions
def empty_to_1(x):
assert_(len(x) == 0)
return 1
a = np.empty((10, 0))
actual = np.apply_along_axis(empty_to_1, 1, a)
assert_equal(actual, np.ones(10))
assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)
def test_with_iterable_object(self):
# from issue 5248
d = np.array([
[{1, 11}, {2, 22}, {3, 33}],
[{4, 44}, {5, 55}, {6, 66}]
])
actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
assert_equal(actual, expected)
# issue 8642 - assert_equal doesn't detect this!
for i in np.ndindex(actual.shape):
assert_equal(type(actual[i]), type(expected[i]))
| TestApplyAlongAxis |
python | walkccc__LeetCode | solutions/391. Perfect Rectangle/391.py | {
"start": 0,
"end": 823
} | class ____:
def isRectangleCover(self, rectangles: list[list[int]]) -> bool:
area = 0
x1 = math.inf
y1 = math.inf
x2 = -math.inf
y2 = -math.inf
corners: set[tuple[int, int]] = set()
for x, y, a, b in rectangles:
area += (a - x) * (b - y)
x1 = min(x1, x)
y1 = min(y1, y)
x2 = max(x2, a)
y2 = max(y2, b)
# the four points of the current rectangle
for point in [(x, y), (x, b), (a, y), (a, b)]:
if point in corners:
corners.remove(point)
else:
corners.add(point)
if len(corners) != 4:
return False
if ((x1, y1) not in corners or
(x1, y2) not in corners or
(x2, y1) not in corners or
(x2, y2) not in corners):
return False
return area == (x2 - x1) * (y2 - y1)
| Solution |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_oauth.py | {
"start": 49416,
"end": 81140
} | class ____(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username="eric", password="test")
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug="pip")
self.org = RemoteOrganization.objects.create(
slug="organization",
remote_id="1234",
vcs_provider=GITHUB,
)
self.privacy = settings.DEFAULT_PRIVACY_LEVEL
self.social_github_account = get(
SocialAccount,
user=self.user,
provider=GitHubProvider.id,
)
get(
SocialToken,
account=self.social_github_account,
)
self.service = GitHubService(user=self.user, account=self.social_github_account)
self.external_version = get(Version, project=self.project, type=EXTERNAL)
self.external_build = get(
Build,
project=self.project,
version=self.external_version,
commit="1234",
)
self.integration = get(
GitHubWebhook,
project=self.project,
provider_data={"url": "https://github.com/"},
)
self.provider_data = [
{
"config": {"url": "https://example.com/webhook"},
"url": "https://api.github.com/repos/test/Hello-World/hooks/12345678",
}
]
self.repo_response_data = {
"name": "testrepo",
"full_name": "testuser/testrepo",
"id": 12345678,
"description": "Test Repo",
"git_url": "git://github.com/testuser/testrepo.git",
"private": False,
"ssh_url": "ssh://git@github.com:testuser/testrepo.git",
"html_url": "https://github.com/testuser/testrepo",
"clone_url": "https://github.com/testuser/testrepo.git",
"owner": {
"type": "User",
"id": 1234,
},
"permissions": {"admin": True, "push": True, "pull": True},
}
self.repo_with_org_response_data = copy.deepcopy(self.repo_response_data)
self.repo_with_org_response_data["owner"] = {
"login": "organization",
"id": 1234,
"node_id": "a1b2c3",
"url": "https://api.github.com/orgs/organization",
"description": "",
"name": "Organization",
"company": None,
"blog": "http://organization.org",
"location": "Portland, Oregon & Worldwide. ",
"email": None,
"is_verified": False,
"html_url": "https://github.com/organization",
"created_at": "2010-08-16T19:17:46Z",
"updated_at": "2020-08-12T14:26:39Z",
"type": "Organization",
}
self.api_url = "https://api.github.com"
def test_create_remote_repository(self):
repo = self.service.create_repository(
self.repo_response_data,
privacy=self.privacy,
)
self.assertIsInstance(repo, RemoteRepository)
self.assertEqual(repo.name, "testrepo")
self.assertEqual(repo.full_name, "testuser/testrepo")
self.assertEqual(repo.remote_id, "12345678")
self.assertEqual(repo.vcs_provider, GITHUB)
self.assertEqual(repo.description, "Test Repo")
self.assertEqual(
repo.avatar_url,
settings.OAUTH_AVATAR_USER_DEFAULT_URL,
)
self.assertIn(self.user, repo.users.all())
self.assertEqual(repo.organization, None)
self.assertEqual(
repo.clone_url,
"https://github.com/testuser/testrepo.git",
)
self.assertEqual(
repo.ssh_url,
"ssh://git@github.com:testuser/testrepo.git",
)
self.assertEqual(repo.html_url, "https://github.com/testuser/testrepo")
def test_create_remote_repository_with_organization(self):
repo = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertIsInstance(repo, RemoteRepository)
self.assertEqual(repo.name, "testrepo")
self.assertEqual(repo.full_name, "testuser/testrepo")
self.assertEqual(repo.remote_id, "12345678")
self.assertEqual(repo.vcs_provider, GITHUB)
self.assertEqual(repo.description, "Test Repo")
self.assertEqual(
repo.avatar_url,
settings.OAUTH_AVATAR_USER_DEFAULT_URL,
)
self.assertIn(self.user, repo.users.all())
self.assertEqual(repo.organization, self.org)
self.assertEqual(
repo.clone_url,
"https://github.com/testuser/testrepo.git",
)
self.assertEqual(
repo.ssh_url,
"ssh://git@github.com:testuser/testrepo.git",
)
self.assertEqual(repo.html_url, "https://github.com/testuser/testrepo")
def test_create_remote_repository_with_new_organization(self):
self.org.delete()
repo = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertIsInstance(repo, RemoteRepository)
self.assertEqual(repo.name, "testrepo")
self.assertEqual(repo.full_name, "testuser/testrepo")
self.assertEqual(repo.remote_id, "12345678")
self.assertEqual(repo.vcs_provider, GITHUB)
self.assertEqual(repo.description, "Test Repo")
self.assertEqual(
repo.avatar_url,
settings.OAUTH_AVATAR_USER_DEFAULT_URL,
)
self.assertIn(self.user, repo.users.all())
self.assertEqual(
repo.clone_url,
"https://github.com/testuser/testrepo.git",
)
self.assertEqual(
repo.ssh_url,
"ssh://git@github.com:testuser/testrepo.git",
)
self.assertEqual(repo.html_url, "https://github.com/testuser/testrepo")
org = repo.organization
self.assertEqual(org.remote_id, "1234")
self.assertEqual(org.slug, "organization")
self.assertEqual(org.url, "https://github.com/organization")
def test_skip_creation_remote_repository_on_private_repos(self):
self.repo_response_data["private"] = True
github_project = self.service.create_repository(
self.repo_response_data,
privacy=self.privacy,
)
self.assertIsNone(github_project)
def test_project_was_moved_from_a_personal_account_to_an_organization(self):
github_project = self.service.create_repository(
self.repo_response_data,
privacy=self.privacy,
)
self.assertEqual(github_project.organization, None)
# Project has been moved to an organization.
self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
github_project.refresh_from_db()
self.assertEqual(github_project.organization, self.org)
def test_project_was_moved_from_an_organization_to_a_personal_account(self):
# Project belongs to an organization.
github_project = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertEqual(github_project.organization, self.org)
# Project has been moved to a personal account.
self.service.create_repository(
self.repo_response_data,
privacy=self.privacy,
)
github_project.refresh_from_db()
self.assertEqual(github_project.organization, None)
def test_project_was_moved_to_another_organization(self):
another_remote_organization = RemoteOrganization.objects.create(
slug="another",
remote_id="4321",
vcs_provider=GITHUB,
)
# Project belongs to an organization.
github_project = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertEqual(github_project.organization, self.org)
# Project was moved to another organization.
self.repo_with_org_response_data["owner"]["id"] = 4321
self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
github_project.refresh_from_db()
self.assertEqual(github_project.organization, another_remote_organization)
def test_make_organization(self):
org_json = {
"id": 12345,
"html_url": "https://github.com/testorg",
"name": "Test Org",
"email": "test@testorg.org",
"login": "testorg",
"avatar_url": "https://images.github.com/foobar",
}
org = self.service.create_organization(org_json)
self.assertIsInstance(org, RemoteOrganization)
self.assertEqual(org.slug, "testorg")
self.assertEqual(org.name, "Test Org")
self.assertEqual(org.email, "test@testorg.org")
self.assertEqual(org.avatar_url, "https://images.github.com/foobar")
self.assertEqual(org.url, "https://github.com/testorg")
def test_import_with_no_token(self):
"""User without a GitHub SocialToken does not return a service."""
services = list(GitHubService.for_user(get(User)))
self.assertEqual(services, [])
def test_multiple_users_same_repo(self):
github_project = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
user2 = User.objects.get(pk=2)
service = GitHubService(user=user2, account=get(SocialAccount, user=self.user))
github_project_2 = service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertIsInstance(github_project, RemoteRepository)
self.assertIsInstance(github_project_2, RemoteRepository)
self.assertEqual(github_project_2, github_project)
github_project_3 = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
github_project_4 = service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertIsInstance(github_project_3, RemoteRepository)
self.assertIsInstance(github_project_4, RemoteRepository)
self.assertEqual(github_project, github_project_3)
self.assertEqual(github_project_2, github_project_4)
github_project_5 = self.service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
github_project_6 = service.create_repository(
self.repo_with_org_response_data,
privacy=self.privacy,
)
self.assertIsNotNone(github_project)
self.assertEqual(github_project, github_project_5)
self.assertIsNotNone(github_project_2)
self.assertEqual(github_project_2, github_project_6)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_send_build_status_successful(self, session, mock_logger, mock_structlog):
session.post.return_value.status_code = 201
success = self.service.send_build_status(
build=self.external_build,
commit=self.external_build.commit,
status=BUILD_STATUS_SUCCESS,
)
self.assertTrue(success)
mock_structlog.contextvars.bind_contextvars.assert_called_with(http_status_code=201)
mock_logger.debug.assert_called_with(
"GitHub commit status created for project.",
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_send_build_status_on_pr_builds(self, session, mock_logger, mock_structlog):
"""Test that when status is SUCCESS but version is not built, it links to build detail page.
This happens when a build has exit code 183 (skipped) - it reports SUCCESS
to GitHub so the PR can be merged, but the version is never marked as built.
"""
# external_version.built is False by default
session.post.return_value.status_code = 201
success = self.service.send_build_status(
build=self.external_build,
commit=self.external_build.commit,
status=BUILD_STATUS_SUCCESS,
)
self.assertTrue(success)
# Verify that the target_url points to the build detail page, not the version docs
call_args = mock_structlog.contextvars.bind_contextvars.call_args_list
# Find the call with target_url
target_url = None
for call in call_args:
if 'target_url' in call[1]:
target_url = call[1]['target_url']
break
self.assertIsNotNone(target_url)
# Should link to build detail page, not version URL
self.assertIn(f'/projects/{self.project.slug}/builds/{self.external_build.pk}/', target_url)
self.assertNotIn('.readthedocs.io', target_url)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_send_build_status_404_error(self, session, mock_logger, mock_structlog):
session.post.return_value.status_code = 404
success = self.service.send_build_status(
build=self.external_build,
commit=self.external_build.commit,
status=BUILD_STATUS_SUCCESS,
)
self.assertFalse(success)
mock_structlog.contextvars.bind_contextvars.assert_called_with(http_status_code=404)
mock_logger.info.assert_called_with(
"GitHub project does not exist or user does not have permissions.",
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_send_build_status_value_error(self, session, mock_logger, mock_structlog):
session.post.side_effect = ValueError
success = self.service.send_build_status(
build=self.external_build,
commit=self.external_build.commit,
status=BUILD_STATUS_SUCCESS,
)
self.assertFalse(success)
mock_structlog.contextvars.bind_contextvars.assert_called_with(
project_slug=self.project.slug,
commit_status="success",
user_username=self.user.username,
statuses_url="https://api.github.com/repos/pypa/pip/statuses/1234",
target_url=mock.ANY,
status="success",
)
mock_logger.exception.assert_called_with(
"GitHub commit status creation failed for project.",
)
@override_settings(DEFAULT_PRIVACY_LEVEL="private")
def test_create_public_repo_when_private_projects_are_enabled(self):
"""Test ability to import ``public`` repositories under ``private`` level."""
repo = self.service.create_repository(self.repo_with_org_response_data)
self.assertEqual(repo.organization, self.org)
self.assertEqual(repo.remote_id, str(self.repo_with_org_response_data["id"]))
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_setup_webhook_successful(self, session, mock_logger, mock_structlog):
session.post.return_value.status_code = 201
session.post.return_value.json.return_value = {}
success = self.service.setup_webhook(self.project, self.integration)
self.integration.refresh_from_db()
self.assertTrue(success)
self.assertIsNotNone(self.integration.secret)
mock_structlog.contextvars.bind_contextvars.assert_called_with(http_status_code=201)
mock_logger.debug.assert_called_with(
"GitHub webhook creation successful for project.",
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_setup_webhook_404_error(self, session, mock_logger, mock_structlog):
session.post.return_value.status_code = 404
success = self.service.setup_webhook(self.project, self.integration)
self.integration.refresh_from_db()
self.assertFalse(success)
self.assertIsNotNone(self.integration.secret)
mock_structlog.contextvars.bind_contextvars.assert_called_with(http_status_code=404)
mock_logger.warning.assert_called_with(
"GitHub project does not exist or user does not have permissions.",
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_setup_webhook_value_error(self, session, mock_logger, mock_structlog):
session.post.side_effect = ValueError
self.service.setup_webhook(self.project, self.integration)
self.integration.refresh_from_db()
self.assertIsNotNone(self.integration.secret)
mock_structlog.contextvars.bind_contextvars.assert_called_with(
project_slug=self.project.slug,
integration_id=self.integration.pk,
url="https://api.github.com/repos/pypa/pip/hooks",
)
mock_logger.exception.assert_called_with(
"GitHub webhook creation failed for project.",
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_update_webhook_successful(self, session, mock_logger, mock_structlog):
session.patch.return_value.status_code = 201
session.patch.return_value.json.return_value = {}
success = self.service.update_webhook(self.project, self.integration)
self.integration.refresh_from_db()
self.assertTrue(success)
self.assertIsNotNone(self.integration.secret)
mock_structlog.contextvars.bind_contextvars.assert_called_with(
http_status_code=201,
url="https://github.com/",
)
mock_logger.info.assert_called_with(
"GitHub webhook update successful for project.",
)
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
@mock.patch("readthedocs.oauth.services.github.GitHubService.setup_webhook")
def test_update_webhook_404_error(self, setup_webhook, session):
session.patch.return_value.status_code = 404
self.service.update_webhook(self.project, self.integration)
setup_webhook.assert_called_once_with(self.project, self.integration)
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
@mock.patch("readthedocs.oauth.services.github.GitHubService.setup_webhook")
def test_update_webhook_no_provider_data(self, setup_webhook, session):
self.integration.provider_data = {}
self.integration.save()
session.patch.side_effect = AttributeError
self.service.update_webhook(self.project, self.integration)
setup_webhook.assert_called_once_with(self.project, self.integration)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_update_webhook_value_error(self, session, mock_logger, mock_structlog):
session.patch.side_effect = ValueError
self.service.update_webhook(self.project, self.integration)
self.integration.refresh_from_db()
self.assertIsNotNone(self.integration.secret)
mock_logger.exception.assert_called_with(
"GitHub webhook update failed for project."
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_get_provider_data_successful(self, session, mock_logger, mock_structlog):
self.integration.provider_data = {}
self.integration.save()
webhook_data = self.provider_data
rtd_webhook_url = "{domain}{path}".format(
domain=settings.PUBLIC_API_URL,
path=reverse(
"api_webhook",
kwargs={
"project_slug": self.project.slug,
"integration_pk": self.integration.pk,
},
),
)
webhook_data[0]["config"]["url"] = rtd_webhook_url
session.get.return_value.status_code = 200
session.get.return_value.json.return_value = webhook_data
self.service.get_provider_data(self.project, self.integration)
self.integration.refresh_from_db()
self.assertEqual(self.integration.provider_data, webhook_data[0])
mock_structlog.contextvars.bind_contextvars.assert_called_with(
project_slug=self.project.slug,
integration_id=self.integration.pk,
url="https://api.github.com/repos/pypa/pip/hooks",
)
mock_logger.info.assert_called_with(
"GitHub integration updated with provider data for project.",
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_get_provider_data_404_error(self, session, mock_logger, mock_structlog):
self.integration.provider_data = {}
self.integration.save()
session.get.return_value.status_code = 404
self.service.get_provider_data(self.project, self.integration)
self.integration.refresh_from_db()
self.assertEqual(self.integration.provider_data, {})
mock_logger.warning.assert_called_with(
"GitHub project does not exist or user does not have permissions.",
https_status_code=404,
)
@mock.patch("readthedocs.oauth.services.github.structlog")
@mock.patch("readthedocs.oauth.services.github.log")
@mock.patch("readthedocs.oauth.services.github.GitHubService.session")
def test_get_provider_data_attribute_error(self, session, mock_logger, mock_structlog):
self.integration.provider_data = {}
self.integration.save()
session.get.side_effect = AttributeError
self.service.get_provider_data(self.project, self.integration)
self.integration.refresh_from_db()
self.assertEqual(self.integration.provider_data, {})
mock_structlog.contextvars.bind_contextvars.assert_called_with(
project_slug=self.project.slug,
integration_id=self.integration.pk,
url="https://api.github.com/repos/pypa/pip/hooks",
)
mock_logger.exception.assert_called_with(
"GitHub webhook Listing failed for project.",
)
@requests_mock.Mocker(kw="request")
def test_remove_webhook_match_found(self, request):
assert self.project.repo == "https://github.com/pypa/pip"
assert self.project.slug == "pip"
request.get(
f"{self.api_url}/repos/pypa/pip/hooks",
json=[
{
"id": 1,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/github/pip/1111/",
},
},
{
"id": 2,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/pip/1111/",
},
},
{
"id": 3,
"config": {
"url": "https://app.readthedocs.org/api/v2/webhook/github/pip/1111/",
},
},
{
"id": 4,
"config": {
"url": "https://app.readthedocs.org/api/v2/webhook/pip/1111/",
},
},
{
"id": 5,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/github/another-project/1111/",
},
},
{
"id": 6,
"config": {
"url": "https://example.com/dont-delete-me/",
},
},
],
)
mock_request_deletions = [
request.delete(
f"{self.api_url}/repos/pypa/pip/hooks/1",
),
request.delete(
f"{self.api_url}/repos/pypa/pip/hooks/2",
),
request.delete(
f"{self.api_url}/repos/pypa/pip/hooks/3",
),
request.delete(
f"{self.api_url}/repos/pypa/pip/hooks/4",
),
]
assert self.service.remove_webhook(self.project) is True
for mock_request_deletion in mock_request_deletions:
assert mock_request_deletion.called_once
@requests_mock.Mocker(kw="request")
def test_remove_webhook_match_found_error_to_delete(self, request):
assert self.project.repo == "https://github.com/pypa/pip"
assert self.project.slug == "pip"
request.get(
f"{self.api_url}/repos/pypa/pip/hooks",
json=[
{
"id": 1,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/github/pip/1111/",
},
},
{
"id": 2,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/pip/1111/",
},
},
{
"id": 3,
"config": {
"url": "https://app.readthedocs.org/api/v2/webhook/github/pip/1111/",
},
},
{
"id": 4,
"config": {
"url": "https://app.readthedocs.org/api/v2/webhook/pip/1111/",
},
},
{
"id": 5,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/github/another-project/1111/",
},
},
{
"id": 6,
"config": {
"url": "https://example.com/dont-delete-me/",
},
},
],
)
mock_request_deletion = request.delete(
f"{self.api_url}/repos/pypa/pip/hooks/1",
status_code=401,
)
assert self.service.remove_webhook(self.project) is False
assert mock_request_deletion.called_once
@requests_mock.Mocker(kw="request")
def test_remove_webhook_match_not_found(self, request):
assert self.project.repo == "https://github.com/pypa/pip"
assert self.project.slug == "pip"
request.get(
f"{self.api_url}/repos/pypa/pip/hooks",
json=[
{
"id": 1,
"config": {
"url": "https://readthedocs.org/api/v2/webhook/github/another-project/1111/",
},
},
{
"id": 2,
"config": {
"url": "https://example.com/dont-delete-me/",
},
},
],
)
assert self.service.remove_webhook(self.project) is True
@requests_mock.Mocker(kw="request")
def test_update_remote_repository(self, request):
remote_repo = get(
RemoteRepository,
vcs_provider=GITHUB,
full_name="testuser/testrepo",
remote_id=self.repo_response_data["id"],
)
assert not remote_repo.users.filter(id=self.user.id).exists()
request.get(f"https://api.github.com/repositories/{remote_repo.remote_id}", json=self.repo_response_data)
self.service.update_repository(remote_repo)
remote_repo.refresh_from_db()
assert remote_repo.name == "testrepo"
assert remote_repo.full_name == "testuser/testrepo"
assert remote_repo.description == "Test Repo"
assert remote_repo.users.filter(id=self.user.id).exists()
relation = remote_repo.remote_repository_relations.get(user=self.user)
assert relation.account == self.social_github_account
assert relation.admin
@requests_mock.Mocker(kw="request")
def test_update_remote_repository_remove_user_relation(self, request):
remote_repo = get(
RemoteRepository,
vcs_provider=GITHUB,
full_name="testuser/testrepo",
remote_id=self.repo_response_data["id"],
)
get(
RemoteRepositoryRelation,
user=self.user,
account=self.social_github_account,
remote_repository=remote_repo,
admin=True,
)
assert remote_repo.users.filter(id=self.user.id).exists()
request.get(f"https://api.github.com/repositories/{remote_repo.remote_id}", status_code=404)
self.service.update_repository(remote_repo)
remote_repo.refresh_from_db()
assert remote_repo.full_name == "testuser/testrepo"
assert not remote_repo.description
assert not remote_repo.users.filter(id=self.user.id).exists()
@requests_mock.Mocker(kw="request")
def test_update_remote_repository_remove_user_relation_public_repo(self, request):
remote_repo = get(
RemoteRepository,
vcs_provider=GITHUB,
full_name="testuser/testrepo",
remote_id=self.repo_response_data["id"],
)
get(
RemoteRepositoryRelation,
user=self.user,
account=self.social_github_account,
remote_repository=remote_repo,
admin=True,
)
assert remote_repo.users.filter(id=self.user.id).exists()
for k in self.repo_response_data["permissions"]:
self.repo_response_data["permissions"][k] = False
request.get(f"https://api.github.com/repositories/{remote_repo.remote_id}", json=self.repo_response_data)
self.service.update_repository(remote_repo)
remote_repo.refresh_from_db()
assert remote_repo.name == "testrepo"
assert remote_repo.full_name == "testuser/testrepo"
assert remote_repo.description == "Test Repo"
assert not remote_repo.users.filter(id=self.user.id).exists()
| GitHubOAuthTests |
python | getsentry__sentry | src/sentry/incidents/models/alert_rule.py | {
"start": 10474,
"end": 10583
} | class ____(Enum):
ABOVE = 0
BELOW = 1
ABOVE_AND_BELOW = 2
@region_silo_model
| AlertRuleThresholdType |
python | ansible__ansible | test/units/module_utils/facts/test_ansible_collector.py | {
"start": 16212,
"end": 16679
} | class ____(TestCollectedFacts):
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
collectors = _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
c = [ExceptionThrowingCollector()] + collectors
return c
| TestExceptionCollectedFacts |
python | matplotlib__matplotlib | galleries/examples/text_labels_and_annotations/demo_text_path.py | {
"start": 603,
"end": 3928
} | class ____(PathPatch):
"""
The given image is used to draw the face of the patch. Internally,
it uses BboxImage whose clippath set to the path of the patch.
FIXME : The result is currently dpi dependent.
"""
def __init__(self, path, bbox_image, **kwargs):
super().__init__(path, **kwargs)
self.bbox_image = BboxImage(
self.get_window_extent, norm=None, origin=None)
self.bbox_image.set_data(bbox_image)
def set_facecolor(self, color):
"""Simply ignore facecolor."""
super().set_facecolor("none")
def draw(self, renderer=None):
# the clip path must be updated every draw. any solution? -JJ
self.bbox_image.set_clip_path(self._path, self.get_transform())
self.bbox_image.draw(renderer)
super().draw(renderer)
if __name__ == "__main__":
fig, (ax1, ax2) = plt.subplots(2)
# EXAMPLE 1
arr = plt.imread(get_sample_data("grace_hopper.jpg"))
text_path = TextPath((0, 0), "!?", size=150)
p = PathClippedImagePatch(text_path, arr, ec="k")
# make offset box
offsetbox = AuxTransformBox(IdentityTransform())
offsetbox.add_artist(p)
# make anchored offset box
ao = AnchoredOffsetbox(loc='upper left', child=offsetbox, frameon=True,
borderpad=0.2)
ax1.add_artist(ao)
# another text
for usetex, ypos, string in [
(False, 0.25, r"textpath supports mathtext"),
(True, 0.05, r"textpath supports \TeX"),
]:
text_path = TextPath((0, 0), string, size=20, usetex=usetex)
p1 = PathPatch(text_path, ec="w", lw=3, fc="w", alpha=0.9)
p2 = PathPatch(text_path, ec="none", fc="k")
offsetbox2 = AuxTransformBox(IdentityTransform())
offsetbox2.add_artist(p1)
offsetbox2.add_artist(p2)
ab = AnnotationBbox(offsetbox2, (0.95, ypos),
xycoords='axes fraction',
boxcoords="offset points",
box_alignment=(1., 0.),
frameon=False,
)
ax1.add_artist(ab)
ax1.imshow([[0, 1, 2], [1, 2, 3]], cmap="gist_gray_r",
interpolation="bilinear", aspect="auto")
# EXAMPLE 2
arr = np.arange(256).reshape(1, 256)
for usetex, xpos, string in [
(False, 0.25,
r"$\left[\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}\right]$!"),
(True, 0.75,
r"$\displaystyle\left[\sum_{n=1}^\infty"
r"\frac{-e^{i\pi}}{2^n}\right]$!"),
]:
text_path = TextPath((0, 0), string, size=40, usetex=usetex)
text_patch = PathClippedImagePatch(text_path, arr, ec="none")
shadow1 = Shadow(text_patch, 1, -1, fc="none", ec="0.6", lw=3)
shadow2 = Shadow(text_patch, 1, -1, fc="0.3", ec="none")
# make offset box
offsetbox = AuxTransformBox(IdentityTransform())
offsetbox.add_artist(shadow1)
offsetbox.add_artist(shadow2)
offsetbox.add_artist(text_patch)
# place the anchored offset box using AnnotationBbox
ab = AnnotationBbox(offsetbox, (xpos, 0.5), box_alignment=(0.5, 0.5))
ax2.add_artist(ab)
ax2.set_xlim(0, 1)
ax2.set_ylim(0, 1)
plt.show()
| PathClippedImagePatch |
python | jmcnamara__XlsxWriter | xlsxwriter/sharedstrings.py | {
"start": 2824,
"end": 3979
} | class ____:
"""
A class to track Excel shared strings between worksheets.
"""
def __init__(self) -> None:
self.count = 0
self.unique_count = 0
self.string_table = {}
self.string_array = []
def _get_shared_string_index(self, string):
""" " Get the index of the string in the Shared String table."""
if string not in self.string_table:
# String isn't already stored in the table so add it.
index = self.unique_count
self.string_table[string] = index
self.count += 1
self.unique_count += 1
return index
# String exists in the table.
index = self.string_table[string]
self.count += 1
return index
def _get_shared_string(self, index):
""" " Get a shared string from the index."""
return self.string_array[index]
def _sort_string_data(self) -> None:
""" " Sort the shared string data and convert from dict to list."""
self.string_array = sorted(self.string_table, key=self.string_table.__getitem__)
self.string_table = {}
| SharedStringTable |
python | doocs__leetcode | solution/1800-1899/1855.Maximum Distance Between a Pair of Values/Solution.py | {
"start": 0,
"end": 276
} | class ____:
def maxDistance(self, nums1: List[int], nums2: List[int]) -> int:
ans = 0
nums2 = nums2[::-1]
for i, v in enumerate(nums1):
j = len(nums2) - bisect_left(nums2, v) - 1
ans = max(ans, j - i)
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/ibert/configuration_ibert.py | {
"start": 930,
"end": 5783
} | class ____(PreTrainedConfig):
"""
This is the configuration class to store the configuration of a [`IBertModel`]. It is used to instantiate a I-BERT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the IBERT
[kssteven/ibert-roberta-base](https://huggingface.co/kssteven/ibert-roberta-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`IBertModel`]
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`IBertModel`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
quant_mode (`bool`, *optional*, defaults to `False`):
Whether to quantize the model or not.
force_dequant (`str`, *optional*, defaults to `"none"`):
Force dequantize specific nonlinear layer. Dequantized layers are then executed with full precision.
`"none"`, `"gelu"`, `"softmax"`, `"layernorm"` and `"nonlinear"` are supported. As default, it is set as
`"none"`, which does not dequantize any layers. Please specify `"gelu"`, `"softmax"`, or `"layernorm"` to
dequantize GELU, Softmax, or LayerNorm, respectively. `"nonlinear"` will dequantize all nonlinear layers,
i.e., GELU, Softmax, and LayerNorm.
"""
model_type = "ibert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
quant_mode=False,
force_dequant="none",
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.quant_mode = quant_mode
self.force_dequant = force_dequant
__all__ = ["IBertConfig"]
| IBertConfig |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/literal.py | {
"start": 96,
"end": 390
} | class ____(Enum):
a = 1
b = 2
T = TypeVar('T', bound=Literal[1234, 'abcd'])
"""docstring"""
U = TypeVar('U', bound=Literal[MyEnum.a, MyEnum.b])
"""docstring"""
def bar(x: Literal[1234, 'abcd']):
"""docstring"""
def foo(x: Literal[MyEnum.a, MyEnum.b]):
"""docstring"""
| MyEnum |
python | google__pytype | pytype/rewrite/overlays/special_builtins.py | {
"start": 1129,
"end": 1526
} | class ____(abstract.PytdFunction):
"""reveal_type implementation."""
def call_with_mapped_args(
self, mapped_args: abstract.MappedArgs[abstract.FrameType],
) -> abstract.SimpleReturn:
obj = mapped_args.argdict['obj']
stack = _stack(mapped_args.frame)
self._ctx.errorlog.reveal_type(stack, node=None, var=obj)
return abstract.SimpleReturn(self._ctx.consts[None])
| RevealType |
python | openai__openai-python | src/openai/types/video.py | {
"start": 368,
"end": 1630
} | class ____(BaseModel):
id: str
"""Unique identifier for the video job."""
completed_at: Optional[int] = None
"""Unix timestamp (seconds) for when the job completed, if finished."""
created_at: int
"""Unix timestamp (seconds) for when the job was created."""
error: Optional[VideoCreateError] = None
"""Error payload that explains why generation failed, if applicable."""
expires_at: Optional[int] = None
"""Unix timestamp (seconds) for when the downloadable assets expire, if set."""
model: VideoModel
"""The video generation model that produced the job."""
object: Literal["video"]
"""The object type, which is always `video`."""
progress: int
"""Approximate completion percentage for the generation task."""
prompt: Optional[str] = None
"""The prompt that was used to generate the video."""
remixed_from_video_id: Optional[str] = None
"""Identifier of the source video if this video is a remix."""
seconds: VideoSeconds
"""Duration of the generated clip in seconds."""
size: VideoSize
"""The resolution of the generated video."""
status: Literal["queued", "in_progress", "completed", "failed"]
"""Current lifecycle status of the video job."""
| Video |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 101773,
"end": 103692
} | class ____(GroupedElement, FromClause):
"""Represent a grouping of a FROM clause"""
_traverse_internals: _TraverseInternalsType = [
("element", InternalTraversal.dp_clauseelement)
]
element: FromClause
def __init__(self, element: FromClause):
self.element = coercions.expect(roles.FromClauseRole, element)
@util.ro_non_memoized_property
def columns(
self,
) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
return self.element.columns
@util.ro_non_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
return self.element.columns
@property
def primary_key(self) -> Iterable[NamedColumn[Any]]:
return self.element.primary_key
@property
def foreign_keys(self) -> Iterable[ForeignKey]:
return self.element.foreign_keys
def is_derived_from(self, fromclause: Optional[FromClause]) -> bool:
return self.element.is_derived_from(fromclause)
def alias(
self, name: Optional[str] = None, flat: bool = False
) -> NamedFromGrouping:
return NamedFromGrouping(self.element.alias(name=name, flat=flat))
def _anonymous_fromclause(self, **kw: Any) -> FromGrouping:
return FromGrouping(self.element._anonymous_fromclause(**kw))
@util.ro_non_memoized_property
def _hide_froms(self) -> Iterable[FromClause]:
return self.element._hide_froms
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return self.element._from_objects
def __getstate__(self) -> Dict[str, FromClause]:
return {"element": self.element}
def __setstate__(self, state: Dict[str, FromClause]) -> None:
self.element = state["element"]
if TYPE_CHECKING:
def self_group(
self, against: Optional[OperatorType] = None
) -> Self: ...
| FromGrouping |
python | pytorch__pytorch | torchgen/gen_lazy_tensor.py | {
"start": 7169,
"end": 22730
} | class ____:
node_base: str = "Node"
node_base_hdr: str | None = None
shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
tensor_class: str = "torch::lazy::LazyTensor"
tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
lazy_ir_generator: type[GenLazyIR] = GenLazyIR
native_func_definition_generator: type[GenLazyNativeFuncDefinition] = (
GenLazyNativeFuncDefinition
)
backend_name: str = "TorchScript"
def main() -> None:
parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
parser.add_argument(
"-s",
"--source-yaml",
"--source_yaml",
help="path to source yaml file containing operator external definitions",
)
parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
parser.add_argument(
"--dry-run", "--dry_run", type=bool, default=False, help="output directory"
)
parser.add_argument(
"--impl-path",
"--impl_path",
type=str,
default=None,
help="path to the source C++ file containing kernel definitions",
)
parser.add_argument(
"--gen-ts-lowerings",
"--gen_ts_lowerings",
action="store_true",
help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
)
parser.add_argument(
"--node-base",
"--node_base",
type=str,
default=default_args.node_base,
help="Name of backend specific custom Lazy IR Node base class",
)
parser.add_argument(
"--node-base-hdr",
"--node_base_hdr",
type=str,
default=default_args.node_base_hdr,
help="Path to header file defining custom Lazy IR Node base class",
)
parser.add_argument(
"--shape-inference-hdr",
"--shape_inference_hdr",
type=str,
default=default_args.shape_inference_hdr,
help="Path to header file defining custom Lazy shape inference functions",
)
parser.add_argument(
"--tensor-class",
"--tensor_class",
type=str,
default=default_args.tensor_class,
help="Name of backend specific custom Lazy Tensor class",
)
parser.add_argument(
"--tensor-class-hdr",
"--tensor_class_hdr",
type=str,
default=default_args.tensor_class_hdr,
help="Path to header file defining custom Lazy Tensor class",
)
parser.add_argument(
"--backend-name",
"--backend_name",
type=str,
default=default_args.backend_name,
help="Name of the backend to generate",
)
options = parser.parse_args()
# Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
torch_root = Path(__file__).absolute().parents[2]
aten_path = str(torch_root / "aten" / "src" / "ATen")
lazy_ir_generator: type[GenLazyIR] = default_args.lazy_ir_generator
if options.gen_ts_lowerings:
lazy_ir_generator = GenTSLazyIR
native_func_definition_generator: type[GenLazyNativeFuncDefinition] = (
default_args.native_func_definition_generator
)
run_gen_lazy_tensor(
aten_path,
options.source_yaml,
options.output_dir,
options.dry_run,
options.impl_path,
options.node_base,
options.node_base_hdr,
options.tensor_class,
options.tensor_class_hdr,
options.shape_inference_hdr,
lazy_ir_generator,
native_func_definition_generator,
options.backend_name,
)
def run_gen_lazy_tensor(
aten_path: str,
source_yaml: str,
output_dir: str,
dry_run: bool,
impl_path: str | None,
node_base: str = default_args.node_base,
node_base_hdr: str | None = default_args.node_base_hdr,
tensor_class: str = default_args.tensor_class,
tensor_class_hdr: str = default_args.tensor_class_hdr,
shape_inference_hdr: str = default_args.shape_inference_hdr,
lazy_ir_generator: type[GenLazyIR] = default_args.lazy_ir_generator,
native_func_definition_generator: type[
GenLazyNativeFuncDefinition
] = default_args.native_func_definition_generator,
# build_in_tree is true for TS backend and affects include paths
build_in_tree: bool = False,
# per_operator_headers changes whether ATen/Functions.h or individual operator headers are used
# it must match how ATen was built
per_operator_headers: bool = False,
backend_name: str = default_args.backend_name,
gen_forced_fallback_code: bool = False,
use_lazy_shape: bool = True,
# the following arguments are temporary customization points for xla backend migration.
# do not rely on them otherwise, they should be removed once migration is complete
backend_namespace: str = "torch::lazy",
get_tensorlist: str = "GetTensorList",
get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
try_get_tensor: str = "TryGetLtcTensor",
metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
create_tensor: str = "LazyTensor::Create",
create_from_first_tensor: bool = False,
create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
lazy_value_class: str = "torch::lazy::Value",
lazy_tensor_ptr: str = "LazyTensorPtr",
get_device_fn: str = "torch::lazy::GetBackendDevice",
) -> None:
lv_tokens = lazy_value_class.split("::")
lv_class = lv_tokens[-1]
lv_ns = "::".join(lv_tokens[:-1])
setValueT(BaseCppType(lv_ns, lv_class))
template_dir = os.path.join(aten_path, "templates")
def make_file_manager(install_dir: str) -> FileManager:
return FileManager(
install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
)
fm = make_file_manager(output_dir)
native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
native_functions, backend_indices = (
parsed_yaml.native_functions,
parsed_yaml.backend_indices,
)
grouped_native_functions = get_grouped_native_functions(native_functions)
def sort_native_function(f: NativeFunctionsGroup | NativeFunction) -> str:
"""
We sort the native function because of the note in concat_map_codegen.
TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
"""
func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
return str(func.name.name)
grouped_native_functions = sorted(
grouped_native_functions, key=sort_native_function
)
parsed_backend_yaml = parse_backend_yaml(
source_yaml, grouped_native_functions, backend_indices
)
backend_key = parsed_backend_yaml.backend_key
autograd_key = parsed_backend_yaml.autograd_key
cpp_namespace = parsed_backend_yaml.cpp_namespace
backend_indices = parsed_backend_yaml.backend_indices
# the following 3 keys are all processed differently
# for full_codegen, we generate IR, kernels, etc
# for ir_gen, we generate only IR
# non_native is used to register kernels not declared in
# native_functions.yaml
full_codegen, non_native, ir_gen = parse_native_functions_keys(
source_yaml, grouped_native_functions
)
def concat_map_codegen(
func: Callable[[NativeFunction], Sequence[str]],
xs: Iterable[NativeFunctionsGroup | NativeFunction],
ops_list: list[OperatorName] = full_codegen,
) -> Iterator[str]:
"""
We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
only code-gen additional entries for the inplace variant for the native functions.
"""
for x in xs:
fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
for f in fs:
if f.func.name in ops_list:
yield from func(f)
selector = SelectiveBuilder.get_nop_selector()
assert backend_key is not None
class_name = backend_indices[backend_key].native_function_class_name()
if impl_path is not None:
error_on_missing_kernels(
native_functions,
backend_indices,
backend_key,
autograd_key,
class_name,
impl_path,
full_codegen,
)
""" Validate Shape Inference Definitions
Generated lazy native functions all perform shape inference, by first using a meta:: kernel
if available for that op, and otherwise using a 'compute_shape_{op}' function instead. The generator
knows the call signature for compute_shape_{op} because it matches the nativefunction (and meta::) signature,
so it just has to check whether the op is structured and generate a call for one or the other. It's up to the dev
to supply the missing compute_shape_{op} function, but the codegen at least warns you about this and provides
the expected signature which can be copy-pasted into shape_inference.h.
compute_shape_{op} functions are handwritten and should be replaced over time as ops get ported
to structured kernels.
See torch/csrc/lazy/core/shape_inference.cpp #READ THIS! for more information.
"""
if shape_inference_hdr is not None:
expected_shape_infr_decls = list(
concat_map_codegen(
dest.GenLazyShapeInferenceDefinition(
backend_indices[backend_key], tensor_class
),
grouped_native_functions,
)
)
validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
assert class_name is not None
# Generate nativefunction declarations
# Note, eager registrations is set to False for the lazy TS backend as another LTC backend
# may want to register their own lazy kernels instead of registering the TS ones.
# The registration will lazily happen when init_ts_backend is called.
gen_dispatchkey_nativefunc_headers(
fm,
class_name,
cpp_namespace,
backend_indices,
grouped_native_functions,
backend_key,
autograd_key,
backend_name,
)
# Generate Dispatcher registrations which hook up the nativefunctions
for dispatch_key in (
[backend_key] if autograd_key is None else [backend_key, autograd_key]
):
gen_dispatcher_registrations(
fm,
output_dir,
class_name,
backend_indices,
grouped_native_functions,
backend_key,
dispatch_key,
selector,
build_in_tree=build_in_tree,
per_operator_headers=per_operator_headers,
backend_name=backend_name,
eager_registration=False,
)
# Generate native function impls that build IR nodes
ns_helper = NamespaceHelper(cpp_namespace)
fm.write_with_template(
f"{backend_key}NativeFunctions.cpp",
"DispatchKeyNativeFunctions.cpp",
lambda: {
"includes": [
f"#include <{path}>"
for path in [
tensor_class_hdr,
shape_inference_hdr,
"ATen/Functions.h",
"ATen/native/TensorConversions.h",
"ATen/NativeFunctions.h",
"ATen/CompositeExplicitAutogradNonFunctionalFunctions.h",
"ATen/MetaFunctions.h",
"ATen/Operators.h",
"ATen/native/CPUFallback.h",
"torch/csrc/lazy/core/ir_builder.h",
"torch/csrc/lazy/core/lazy_graph_executor.h",
"torch/csrc/lazy/core/metrics.h",
"torch/csrc/lazy/core/shape.h",
f"{output_dir}/{backend_key}NativeFunctions.h",
f"{output_dir}/LazyIr.h",
]
+ (
["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
if gen_forced_fallback_code
else []
)
],
"helper_fns": get_ltc_helper_fns(),
"native_functions_include": "",
"namespace_prologue": ns_helper.prologue,
"namespace_epilogue": ns_helper.epilogue,
"native_function_definitions": list(
concat_map_codegen(
native_func_definition_generator(
f"{backend_key}NativeFunctions",
backend_indices[backend_key],
tensor_class,
gen_forced_fallback_code,
backend_namespace,
get_tensorlist,
get_tensor_or_wrap_number,
try_get_tensor,
metrics_counter,
create_tensor,
create_from_first_tensor,
create_aten_from_ltc_tensor,
tuple_aten_from_ltc_tensors,
lazy_tensor_ptr,
get_device_fn,
),
grouped_native_functions,
)
),
},
)
# Generate IR node classes
lazy_ir_obj = lazy_ir_generator(
backend_indices[backend_key], backend_name, node_base, use_lazy_shape
)
fm.write_with_template(
"LazyIr.h",
"LazyIr.h",
lambda: {
"lazy_ir_sysinc": [
f"#include <{path}>"
for path in [
"ATen/core/Formatting.h",
"c10/core/ScalarType.h",
"torch/csrc/lazy/core/hash.h",
"torch/csrc/lazy/core/ir.h",
"torch/csrc/lazy/core/shape.h",
"optional",
"vector",
]
],
"lazy_ir_inc": [f'#include "{node_base_hdr}"']
if node_base_hdr is not None
else [],
"ir_declarations": list(
concat_map_codegen(
lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
)
),
"namespace_prologue": ns_helper.prologue,
"namespace_epilogue": ns_helper.epilogue,
},
)
# Generate Non Native IR Node classes
fm.write_with_template(
"LazyNonNativeIr.h",
"LazyNonNativeIr.h",
lambda: {
"lazy_non_native_ir_inc": [
f"#include <{path}>"
for path in [
"torch/csrc/lazy/core/ir.h",
"torch/csrc/lazy/core/ir_builder.h",
"torch/csrc/lazy/core/internal_ops/ltc_ops.h",
"torch/csrc/lazy/core/shape_inference.h",
]
+ ([node_base_hdr] if node_base_hdr else [])
if path
],
"non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
non_native, lazy_ir_obj
),
"namespace_prologue": ns_helper.prologue,
"namespace_epilogue": ns_helper.epilogue,
},
)
if __name__ == "__main__":
main()
| default_args |
python | python-openxml__python-docx | src/docx/opc/part.py | {
"start": 5711,
"end": 7543
} | class ____:
"""Provides a way for client code to specify a subclass of |Part| to be constructed
by |Unmarshaller| based on its content type and/or a custom callable.
Setting ``PartFactory.part_class_selector`` to a callable object will cause that
object to be called with the parameters ``content_type, reltype``, once for each
part in the package. If the callable returns an object, it is used as the class for
that part. If it returns |None|, part class selection falls back to the content type
map defined in ``PartFactory.part_type_for``. If no class is returned from either of
these, the class contained in ``PartFactory.default_part_type`` is used to construct
the part, which is by default ``opc.package.Part``.
"""
part_class_selector: Callable[[str, str], Type[Part] | None] | None
part_type_for: dict[str, Type[Part]] = {}
default_part_type = Part
def __new__(
cls,
partname: PackURI,
content_type: str,
reltype: str,
blob: bytes,
package: Package,
):
PartClass: Type[Part] | None = None
if cls.part_class_selector is not None:
part_class_selector = cls_method_fn(cls, "part_class_selector")
PartClass = part_class_selector(content_type, reltype)
if PartClass is None:
PartClass = cls._part_cls_for(content_type)
return PartClass.load(partname, content_type, blob, package)
@classmethod
def _part_cls_for(cls, content_type: str):
"""Return the custom part class registered for `content_type`, or the default
part class if no custom class is registered for `content_type`."""
if content_type in cls.part_type_for:
return cls.part_type_for[content_type]
return cls.default_part_type
| PartFactory |
python | pypa__packaging | tests/test_version.py | {
"start": 1755,
"end": 26831
} | class ____:
@pytest.mark.parametrize("version", VERSIONS)
def test_valid_versions(self, version: str) -> None:
Version(version)
@pytest.mark.parametrize(
"version",
[
# Non sensical versions should be invalid
"french toast",
# Versions with invalid local versions
"1.0+a+",
"1.0++",
"1.0+_foobar",
"1.0+foo&asd",
"1.0+1+1",
],
)
def test_invalid_versions(self, version: str) -> None:
with pytest.raises(InvalidVersion):
Version(version)
@pytest.mark.parametrize(
("version", "normalized"),
[
# Various development release incarnations
("1.0dev", "1.0.dev0"),
("1.0.dev", "1.0.dev0"),
("1.0dev1", "1.0.dev1"),
("1.0-dev", "1.0.dev0"),
("1.0-dev1", "1.0.dev1"),
("1.0DEV", "1.0.dev0"),
("1.0.DEV", "1.0.dev0"),
("1.0DEV1", "1.0.dev1"),
("1.0.DEV1", "1.0.dev1"),
("1.0-DEV", "1.0.dev0"),
("1.0-DEV1", "1.0.dev1"),
# Various alpha incarnations
("1.0a", "1.0a0"),
("1.0.a", "1.0a0"),
("1.0.a1", "1.0a1"),
("1.0-a", "1.0a0"),
("1.0-a1", "1.0a1"),
("1.0alpha", "1.0a0"),
("1.0.alpha", "1.0a0"),
("1.0.alpha1", "1.0a1"),
("1.0-alpha", "1.0a0"),
("1.0-alpha1", "1.0a1"),
("1.0A", "1.0a0"),
("1.0.A", "1.0a0"),
("1.0.A1", "1.0a1"),
("1.0-A", "1.0a0"),
("1.0-A1", "1.0a1"),
("1.0ALPHA", "1.0a0"),
("1.0.ALPHA", "1.0a0"),
("1.0.ALPHA1", "1.0a1"),
("1.0-ALPHA", "1.0a0"),
("1.0-ALPHA1", "1.0a1"),
# Various beta incarnations
("1.0b", "1.0b0"),
("1.0.b", "1.0b0"),
("1.0.b1", "1.0b1"),
("1.0-b", "1.0b0"),
("1.0-b1", "1.0b1"),
("1.0beta", "1.0b0"),
("1.0.beta", "1.0b0"),
("1.0.beta1", "1.0b1"),
("1.0-beta", "1.0b0"),
("1.0-beta1", "1.0b1"),
("1.0B", "1.0b0"),
("1.0.B", "1.0b0"),
("1.0.B1", "1.0b1"),
("1.0-B", "1.0b0"),
("1.0-B1", "1.0b1"),
("1.0BETA", "1.0b0"),
("1.0.BETA", "1.0b0"),
("1.0.BETA1", "1.0b1"),
("1.0-BETA", "1.0b0"),
("1.0-BETA1", "1.0b1"),
# Various release candidate incarnations
("1.0c", "1.0rc0"),
("1.0.c", "1.0rc0"),
("1.0.c1", "1.0rc1"),
("1.0-c", "1.0rc0"),
("1.0-c1", "1.0rc1"),
("1.0rc", "1.0rc0"),
("1.0.rc", "1.0rc0"),
("1.0.rc1", "1.0rc1"),
("1.0-rc", "1.0rc0"),
("1.0-rc1", "1.0rc1"),
("1.0C", "1.0rc0"),
("1.0.C", "1.0rc0"),
("1.0.C1", "1.0rc1"),
("1.0-C", "1.0rc0"),
("1.0-C1", "1.0rc1"),
("1.0RC", "1.0rc0"),
("1.0.RC", "1.0rc0"),
("1.0.RC1", "1.0rc1"),
("1.0-RC", "1.0rc0"),
("1.0-RC1", "1.0rc1"),
# Various post release incarnations
("1.0post", "1.0.post0"),
("1.0.post", "1.0.post0"),
("1.0post1", "1.0.post1"),
("1.0-post", "1.0.post0"),
("1.0-post1", "1.0.post1"),
("1.0POST", "1.0.post0"),
("1.0.POST", "1.0.post0"),
("1.0POST1", "1.0.post1"),
("1.0r", "1.0.post0"),
("1.0rev", "1.0.post0"),
("1.0.POST1", "1.0.post1"),
("1.0.r1", "1.0.post1"),
("1.0.rev1", "1.0.post1"),
("1.0-POST", "1.0.post0"),
("1.0-POST1", "1.0.post1"),
("1.0-5", "1.0.post5"),
("1.0-r5", "1.0.post5"),
("1.0-rev5", "1.0.post5"),
# Local version case insensitivity
("1.0+AbC", "1.0+abc"),
# Integer Normalization
("1.01", "1.1"),
("1.0a05", "1.0a5"),
("1.0b07", "1.0b7"),
("1.0c056", "1.0rc56"),
("1.0rc09", "1.0rc9"),
("1.0.post000", "1.0.post0"),
("1.1.dev09000", "1.1.dev9000"),
("00!1.2", "1.2"),
("0100!0.0", "100!0.0"),
# Various other normalizations
("v1.0", "1.0"),
(" v1.0\t\n", "1.0"),
],
)
def test_normalized_versions(self, version: str, normalized: str) -> None:
assert str(Version(version)) == normalized
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0.dev456", "1.0.dev456"),
("1.0a1", "1.0a1"),
("1.0a2.dev456", "1.0a2.dev456"),
("1.0a12.dev456", "1.0a12.dev456"),
("1.0a12", "1.0a12"),
("1.0b1.dev456", "1.0b1.dev456"),
("1.0b2", "1.0b2"),
("1.0b2.post345.dev456", "1.0b2.post345.dev456"),
("1.0b2.post345", "1.0b2.post345"),
("1.0rc1.dev456", "1.0rc1.dev456"),
("1.0rc1", "1.0rc1"),
("1.0", "1.0"),
("1.0.post456.dev34", "1.0.post456.dev34"),
("1.0.post456", "1.0.post456"),
("1.0.1", "1.0.1"),
("0!1.0.2", "1.0.2"),
("1.0.3+7", "1.0.3+7"),
("0!1.0.4+8.0", "1.0.4+8.0"),
("1.0.5+9.5", "1.0.5+9.5"),
("1.2+1234.abc", "1.2+1234.abc"),
("1.2+123456", "1.2+123456"),
("1.2+123abc", "1.2+123abc"),
("1.2+123abc456", "1.2+123abc456"),
("1.2+abc", "1.2+abc"),
("1.2+abc123", "1.2+abc123"),
("1.2+abc123def", "1.2+abc123def"),
("1.1.dev1", "1.1.dev1"),
("7!1.0.dev456", "7!1.0.dev456"),
("7!1.0a1", "7!1.0a1"),
("7!1.0a2.dev456", "7!1.0a2.dev456"),
("7!1.0a12.dev456", "7!1.0a12.dev456"),
("7!1.0a12", "7!1.0a12"),
("7!1.0b1.dev456", "7!1.0b1.dev456"),
("7!1.0b2", "7!1.0b2"),
("7!1.0b2.post345.dev456", "7!1.0b2.post345.dev456"),
("7!1.0b2.post345", "7!1.0b2.post345"),
("7!1.0rc1.dev456", "7!1.0rc1.dev456"),
("7!1.0rc1", "7!1.0rc1"),
("7!1.0", "7!1.0"),
("7!1.0.post456.dev34", "7!1.0.post456.dev34"),
("7!1.0.post456", "7!1.0.post456"),
("7!1.0.1", "7!1.0.1"),
("7!1.0.2", "7!1.0.2"),
("7!1.0.3+7", "7!1.0.3+7"),
("7!1.0.4+8.0", "7!1.0.4+8.0"),
("7!1.0.5+9.5", "7!1.0.5+9.5"),
("7!1.1.dev1", "7!1.1.dev1"),
],
)
def test_version_str_repr(self, version: str, expected: str) -> None:
assert str(Version(version)) == expected
assert repr(Version(version)) == f"<Version({expected!r})>"
def test_version_rc_and_c_equals(self) -> None:
assert Version("1.0rc1") == Version("1.0c1")
@pytest.mark.parametrize("version", VERSIONS)
def test_version_hash(self, version: str) -> None:
assert hash(Version(version)) == hash(Version(version))
@pytest.mark.parametrize(
("version", "public"),
[
("1.0", "1.0"),
("1.0.dev0", "1.0.dev0"),
("1.0.dev6", "1.0.dev6"),
("1.0a1", "1.0a1"),
("1.0a1.post5", "1.0a1.post5"),
("1.0a1.post5.dev6", "1.0a1.post5.dev6"),
("1.0rc4", "1.0rc4"),
("1.0.post5", "1.0.post5"),
("1!1.0", "1!1.0"),
("1!1.0.dev6", "1!1.0.dev6"),
("1!1.0a1", "1!1.0a1"),
("1!1.0a1.post5", "1!1.0a1.post5"),
("1!1.0a1.post5.dev6", "1!1.0a1.post5.dev6"),
("1!1.0rc4", "1!1.0rc4"),
("1!1.0.post5", "1!1.0.post5"),
("1.0+deadbeef", "1.0"),
("1.0.dev6+deadbeef", "1.0.dev6"),
("1.0a1+deadbeef", "1.0a1"),
("1.0a1.post5+deadbeef", "1.0a1.post5"),
("1.0a1.post5.dev6+deadbeef", "1.0a1.post5.dev6"),
("1.0rc4+deadbeef", "1.0rc4"),
("1.0.post5+deadbeef", "1.0.post5"),
("1!1.0+deadbeef", "1!1.0"),
("1!1.0.dev6+deadbeef", "1!1.0.dev6"),
("1!1.0a1+deadbeef", "1!1.0a1"),
("1!1.0a1.post5+deadbeef", "1!1.0a1.post5"),
("1!1.0a1.post5.dev6+deadbeef", "1!1.0a1.post5.dev6"),
("1!1.0rc4+deadbeef", "1!1.0rc4"),
("1!1.0.post5+deadbeef", "1!1.0.post5"),
],
)
def test_version_public(self, version: str, public: str) -> None:
assert Version(version).public == public
@pytest.mark.parametrize(
("version", "base_version"),
[
("1.0", "1.0"),
("1.0.dev0", "1.0"),
("1.0.dev6", "1.0"),
("1.0a1", "1.0"),
("1.0a1.post5", "1.0"),
("1.0a1.post5.dev6", "1.0"),
("1.0rc4", "1.0"),
("1.0.post5", "1.0"),
("1!1.0", "1!1.0"),
("1!1.0.dev6", "1!1.0"),
("1!1.0a1", "1!1.0"),
("1!1.0a1.post5", "1!1.0"),
("1!1.0a1.post5.dev6", "1!1.0"),
("1!1.0rc4", "1!1.0"),
("1!1.0.post5", "1!1.0"),
("1.0+deadbeef", "1.0"),
("1.0.dev6+deadbeef", "1.0"),
("1.0a1+deadbeef", "1.0"),
("1.0a1.post5+deadbeef", "1.0"),
("1.0a1.post5.dev6+deadbeef", "1.0"),
("1.0rc4+deadbeef", "1.0"),
("1.0.post5+deadbeef", "1.0"),
("1!1.0+deadbeef", "1!1.0"),
("1!1.0.dev6+deadbeef", "1!1.0"),
("1!1.0a1+deadbeef", "1!1.0"),
("1!1.0a1.post5+deadbeef", "1!1.0"),
("1!1.0a1.post5.dev6+deadbeef", "1!1.0"),
("1!1.0rc4+deadbeef", "1!1.0"),
("1!1.0.post5+deadbeef", "1!1.0"),
],
)
def test_version_base_version(self, version: str, base_version: str) -> None:
assert Version(version).base_version == base_version
@pytest.mark.parametrize(
("version", "epoch"),
[
("1.0", 0),
("1.0.dev0", 0),
("1.0.dev6", 0),
("1.0a1", 0),
("1.0a1.post5", 0),
("1.0a1.post5.dev6", 0),
("1.0rc4", 0),
("1.0.post5", 0),
("1!1.0", 1),
("1!1.0.dev6", 1),
("1!1.0a1", 1),
("1!1.0a1.post5", 1),
("1!1.0a1.post5.dev6", 1),
("1!1.0rc4", 1),
("1!1.0.post5", 1),
("1.0+deadbeef", 0),
("1.0.dev6+deadbeef", 0),
("1.0a1+deadbeef", 0),
("1.0a1.post5+deadbeef", 0),
("1.0a1.post5.dev6+deadbeef", 0),
("1.0rc4+deadbeef", 0),
("1.0.post5+deadbeef", 0),
("1!1.0+deadbeef", 1),
("1!1.0.dev6+deadbeef", 1),
("1!1.0a1+deadbeef", 1),
("1!1.0a1.post5+deadbeef", 1),
("1!1.0a1.post5.dev6+deadbeef", 1),
("1!1.0rc4+deadbeef", 1),
("1!1.0.post5+deadbeef", 1),
],
)
def test_version_epoch(self, version: str, epoch: int) -> None:
assert Version(version).epoch == epoch
@pytest.mark.parametrize(
("version", "release"),
[
("1.0", (1, 0)),
("1.0.dev0", (1, 0)),
("1.0.dev6", (1, 0)),
("1.0a1", (1, 0)),
("1.0a1.post5", (1, 0)),
("1.0a1.post5.dev6", (1, 0)),
("1.0rc4", (1, 0)),
("1.0.post5", (1, 0)),
("1!1.0", (1, 0)),
("1!1.0.dev6", (1, 0)),
("1!1.0a1", (1, 0)),
("1!1.0a1.post5", (1, 0)),
("1!1.0a1.post5.dev6", (1, 0)),
("1!1.0rc4", (1, 0)),
("1!1.0.post5", (1, 0)),
("1.0+deadbeef", (1, 0)),
("1.0.dev6+deadbeef", (1, 0)),
("1.0a1+deadbeef", (1, 0)),
("1.0a1.post5+deadbeef", (1, 0)),
("1.0a1.post5.dev6+deadbeef", (1, 0)),
("1.0rc4+deadbeef", (1, 0)),
("1.0.post5+deadbeef", (1, 0)),
("1!1.0+deadbeef", (1, 0)),
("1!1.0.dev6+deadbeef", (1, 0)),
("1!1.0a1+deadbeef", (1, 0)),
("1!1.0a1.post5+deadbeef", (1, 0)),
("1!1.0a1.post5.dev6+deadbeef", (1, 0)),
("1!1.0rc4+deadbeef", (1, 0)),
("1!1.0.post5+deadbeef", (1, 0)),
],
)
def test_version_release(self, version: str, release: tuple[int, int]) -> None:
assert Version(version).release == release
@pytest.mark.parametrize(
("version", "local"),
[
("1.0", None),
("1.0.dev0", None),
("1.0.dev6", None),
("1.0a1", None),
("1.0a1.post5", None),
("1.0a1.post5.dev6", None),
("1.0rc4", None),
("1.0.post5", None),
("1!1.0", None),
("1!1.0.dev6", None),
("1!1.0a1", None),
("1!1.0a1.post5", None),
("1!1.0a1.post5.dev6", None),
("1!1.0rc4", None),
("1!1.0.post5", None),
("1.0+deadbeef", "deadbeef"),
("1.0.dev6+deadbeef", "deadbeef"),
("1.0a1+deadbeef", "deadbeef"),
("1.0a1.post5+deadbeef", "deadbeef"),
("1.0a1.post5.dev6+deadbeef", "deadbeef"),
("1.0rc4+deadbeef", "deadbeef"),
("1.0.post5+deadbeef", "deadbeef"),
("1!1.0+deadbeef", "deadbeef"),
("1!1.0.dev6+deadbeef", "deadbeef"),
("1!1.0a1+deadbeef", "deadbeef"),
("1!1.0a1.post5+deadbeef", "deadbeef"),
("1!1.0a1.post5.dev6+deadbeef", "deadbeef"),
("1!1.0rc4+deadbeef", "deadbeef"),
("1!1.0.post5+deadbeef", "deadbeef"),
],
)
def test_version_local(self, version: str, local: str | None) -> None:
assert Version(version).local == local
@pytest.mark.parametrize(
("version", "pre"),
[
("1.0", None),
("1.0.dev0", None),
("1.0.dev6", None),
("1.0a1", ("a", 1)),
("1.0a1.post5", ("a", 1)),
("1.0a1.post5.dev6", ("a", 1)),
("1.0rc4", ("rc", 4)),
("1.0.post5", None),
("1!1.0", None),
("1!1.0.dev6", None),
("1!1.0a1", ("a", 1)),
("1!1.0a1.post5", ("a", 1)),
("1!1.0a1.post5.dev6", ("a", 1)),
("1!1.0rc4", ("rc", 4)),
("1!1.0.post5", None),
("1.0+deadbeef", None),
("1.0.dev6+deadbeef", None),
("1.0a1+deadbeef", ("a", 1)),
("1.0a1.post5+deadbeef", ("a", 1)),
("1.0a1.post5.dev6+deadbeef", ("a", 1)),
("1.0rc4+deadbeef", ("rc", 4)),
("1.0.post5+deadbeef", None),
("1!1.0+deadbeef", None),
("1!1.0.dev6+deadbeef", None),
("1!1.0a1+deadbeef", ("a", 1)),
("1!1.0a1.post5+deadbeef", ("a", 1)),
("1!1.0a1.post5.dev6+deadbeef", ("a", 1)),
("1!1.0rc4+deadbeef", ("rc", 4)),
("1!1.0.post5+deadbeef", None),
],
)
def test_version_pre(self, version: str, pre: None | tuple[str, int]) -> None:
assert Version(version).pre == pre
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0.dev0", True),
("1.0.dev1", True),
("1.0a1.dev1", True),
("1.0b1.dev1", True),
("1.0c1.dev1", True),
("1.0rc1.dev1", True),
("1.0a1", True),
("1.0b1", True),
("1.0c1", True),
("1.0rc1", True),
("1.0a1.post1.dev1", True),
("1.0b1.post1.dev1", True),
("1.0c1.post1.dev1", True),
("1.0rc1.post1.dev1", True),
("1.0a1.post1", True),
("1.0b1.post1", True),
("1.0c1.post1", True),
("1.0rc1.post1", True),
("1.0", False),
("1.0+dev", False),
("1.0.post1", False),
("1.0.post1+dev", False),
],
)
def test_version_is_prerelease(self, version: str, expected: bool) -> None:
assert Version(version).is_prerelease is expected
@pytest.mark.parametrize(
("version", "dev"),
[
("1.0", None),
("1.0.dev0", 0),
("1.0.dev6", 6),
("1.0a1", None),
("1.0a1.post5", None),
("1.0a1.post5.dev6", 6),
("1.0rc4", None),
("1.0.post5", None),
("1!1.0", None),
("1!1.0.dev6", 6),
("1!1.0a1", None),
("1!1.0a1.post5", None),
("1!1.0a1.post5.dev6", 6),
("1!1.0rc4", None),
("1!1.0.post5", None),
("1.0+deadbeef", None),
("1.0.dev6+deadbeef", 6),
("1.0a1+deadbeef", None),
("1.0a1.post5+deadbeef", None),
("1.0a1.post5.dev6+deadbeef", 6),
("1.0rc4+deadbeef", None),
("1.0.post5+deadbeef", None),
("1!1.0+deadbeef", None),
("1!1.0.dev6+deadbeef", 6),
("1!1.0a1+deadbeef", None),
("1!1.0a1.post5+deadbeef", None),
("1!1.0a1.post5.dev6+deadbeef", 6),
("1!1.0rc4+deadbeef", None),
("1!1.0.post5+deadbeef", None),
],
)
def test_version_dev(self, version: str, dev: int | None) -> None:
assert Version(version).dev == dev
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0", False),
("1.0.dev0", True),
("1.0.dev6", True),
("1.0a1", False),
("1.0a1.post5", False),
("1.0a1.post5.dev6", True),
("1.0rc4", False),
("1.0.post5", False),
("1!1.0", False),
("1!1.0.dev6", True),
("1!1.0a1", False),
("1!1.0a1.post5", False),
("1!1.0a1.post5.dev6", True),
("1!1.0rc4", False),
("1!1.0.post5", False),
("1.0+deadbeef", False),
("1.0.dev6+deadbeef", True),
("1.0a1+deadbeef", False),
("1.0a1.post5+deadbeef", False),
("1.0a1.post5.dev6+deadbeef", True),
("1.0rc4+deadbeef", False),
("1.0.post5+deadbeef", False),
("1!1.0+deadbeef", False),
("1!1.0.dev6+deadbeef", True),
("1!1.0a1+deadbeef", False),
("1!1.0a1.post5+deadbeef", False),
("1!1.0a1.post5.dev6+deadbeef", True),
("1!1.0rc4+deadbeef", False),
("1!1.0.post5+deadbeef", False),
],
)
def test_version_is_devrelease(self, version: str, expected: bool) -> None:
assert Version(version).is_devrelease is expected
@pytest.mark.parametrize(
("version", "post"),
[
("1.0", None),
("1.0.dev0", None),
("1.0.dev6", None),
("1.0a1", None),
("1.0a1.post5", 5),
("1.0a1.post5.dev6", 5),
("1.0rc4", None),
("1.0.post5", 5),
("1!1.0", None),
("1!1.0.dev6", None),
("1!1.0a1", None),
("1!1.0a1.post5", 5),
("1!1.0a1.post5.dev6", 5),
("1!1.0rc4", None),
("1!1.0.post5", 5),
("1.0+deadbeef", None),
("1.0.dev6+deadbeef", None),
("1.0a1+deadbeef", None),
("1.0a1.post5+deadbeef", 5),
("1.0a1.post5.dev6+deadbeef", 5),
("1.0rc4+deadbeef", None),
("1.0.post5+deadbeef", 5),
("1!1.0+deadbeef", None),
("1!1.0.dev6+deadbeef", None),
("1!1.0a1+deadbeef", None),
("1!1.0a1.post5+deadbeef", 5),
("1!1.0a1.post5.dev6+deadbeef", 5),
("1!1.0rc4+deadbeef", None),
("1!1.0.post5+deadbeef", 5),
],
)
def test_version_post(self, version: str, post: int | None) -> None:
assert Version(version).post == post
@pytest.mark.parametrize(
("version", "expected"),
[
("1.0.dev1", False),
("1.0", False),
("1.0+foo", False),
("1.0.post1.dev1", True),
("1.0.post1", True),
],
)
def test_version_is_postrelease(self, version: str, expected: bool) -> None:
assert Version(version).is_postrelease is expected
@pytest.mark.parametrize(
("left", "right", "op"),
# Below we'll generate every possible combination of VERSIONS that
# should be True for the given operator
itertools.chain.from_iterable(
# Verify that the less than (<) operator works correctly
[
[(x, y, operator.lt) for y in VERSIONS[i + 1 :]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the less than equal (<=) operator works correctly
[
[(x, y, operator.le) for y in VERSIONS[i:]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the equal (==) operator works correctly
[[(x, x, operator.eq) for x in VERSIONS]]
+
# Verify that the not equal (!=) operator works correctly
[
[(x, y, operator.ne) for j, y in enumerate(VERSIONS) if i != j]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the greater than equal (>=) operator works correctly
[
[(x, y, operator.ge) for y in VERSIONS[: i + 1]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the greater than (>) operator works correctly
[
[(x, y, operator.gt) for y in VERSIONS[:i]]
for i, x in enumerate(VERSIONS)
]
),
)
def test_comparison_true(
self, left: str, right: str, op: Callable[[Version, Version], bool]
) -> None:
assert op(Version(left), Version(right))
@pytest.mark.parametrize(
("left", "right", "op"),
# Below we'll generate every possible combination of VERSIONS that
# should be False for the given operator
itertools.chain.from_iterable(
# Verify that the less than (<) operator works correctly
[
[(x, y, operator.lt) for y in VERSIONS[: i + 1]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the less than equal (<=) operator works correctly
[
[(x, y, operator.le) for y in VERSIONS[:i]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the equal (==) operator works correctly
[
[(x, y, operator.eq) for j, y in enumerate(VERSIONS) if i != j]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the not equal (!=) operator works correctly
[[(x, x, operator.ne) for x in VERSIONS]]
+
# Verify that the greater than equal (>=) operator works correctly
[
[(x, y, operator.ge) for y in VERSIONS[i + 1 :]]
for i, x in enumerate(VERSIONS)
]
+
# Verify that the greater than (>) operator works correctly
[
[(x, y, operator.gt) for y in VERSIONS[i:]]
for i, x in enumerate(VERSIONS)
]
),
)
def test_comparison_false(
self, left: str, right: str, op: Callable[[Version, Version], bool]
) -> None:
assert not op(Version(left), Version(right))
@pytest.mark.parametrize("op", ["lt", "le", "eq", "ge", "gt", "ne"])
def test_dunder_op_returns_notimplemented(self, op: str) -> None:
method = getattr(Version, f"__{op}__")
assert method(Version("1"), 1) is NotImplemented
@pytest.mark.parametrize(("op", "expected"), [("eq", False), ("ne", True)])
def test_compare_other(self, op: str, expected: bool) -> None:
other = pretend.stub(**{f"__{op}__": lambda _: NotImplemented})
assert getattr(operator, op)(Version("1"), other) is expected
def test_major_version(self) -> None:
assert Version("2.1.0").major == 2
def test_minor_version(self) -> None:
assert Version("2.1.0").minor == 1
assert Version("2").minor == 0
def test_micro_version(self) -> None:
assert Version("2.1.3").micro == 3
assert Version("2.1").micro == 0
assert Version("2").micro == 0
| TestVersion |
python | altair-viz__altair | altair/datasets/_loader.py | {
"start": 8568,
"end": 10292
} | class ____(Loader[IntoDataFrameT, IntoFrameT]):
@overload
def __call__( # pyright: ignore[reportOverlappingOverload]
self,
name: Dataset | LiteralString,
suffix: Extension | None = ...,
/,
backend: None = ...,
**kwds: Any,
) -> IntoDataFrameT: ...
@overload
def __call__(
self,
name: Dataset | LiteralString,
suffix: Extension | None = ...,
/,
backend: Literal["polars"] = ...,
**kwds: Any,
) -> pl.DataFrame: ...
@overload
def __call__(
self,
name: Dataset | LiteralString,
suffix: Extension | None = ...,
/,
backend: Literal["pandas", "pandas[pyarrow]"] = ...,
**kwds: Any,
) -> pd.DataFrame: ...
@overload
def __call__(
self,
name: Dataset | LiteralString,
suffix: Extension | None = ...,
/,
backend: Literal["pyarrow"] = ...,
**kwds: Any,
) -> pa.Table: ...
def __call__(
self,
name: Dataset | LiteralString,
suffix: Extension | None = None,
/,
backend: _Backend | None = None,
**kwds: Any,
) -> IntoDataFrameT | pl.DataFrame | pd.DataFrame | pa.Table:
if backend is None:
return super().__call__(name, suffix, **kwds)
else:
return self.from_backend(backend)(name, suffix, **kwds)
load: _Load[Any, Any]
def __getattr__(name):
if name == "load":
reader = _reader.infer_backend()
global load
load = _Load.from_reader(reader)
return load
else:
msg = f"module {__name__!r} has no attribute {name!r}"
raise AttributeError(msg)
| _Load |
python | gevent__gevent | src/greentest/3.12/test_ftplib.py | {
"start": 2347,
"end": 3552
} | class ____(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = bytearray()
self.encoding = baseclass.encoding
def handle_read(self):
new_data = self.recv(1024)
self.baseclass.last_received_data += new_data
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.shutdown()
self.dtp_conn_closed = True
def push(self, what):
if self.baseclass.next_data is not None:
what = self.baseclass.next_data
self.baseclass.next_data = None
if not what:
return self.close_when_done()
super(DummyDTPHandler, self).push(what.encode(self.encoding))
def handle_error(self):
default_error_handler()
def shutdown(self):
self.close()
| DummyDTPHandler |
python | getsentry__sentry | src/sentry/api/endpoints/project_rule_actions.py | {
"start": 1470,
"end": 7224
} | class ____(ProjectEndpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ISSUES
permission_classes = (ProjectAlertRulePermission,)
def post(self, request: Request, project) -> Response:
"""
Creates a dummy event/group and activates the actions given by request body
{method} {path}
{{
"actions": []
"name": string
}}
"""
serializer = DummyRuleSerializer(
context={"project": project, "organization": project.organization}, data=request.data
)
if not serializer.is_valid():
raise ValidationError(serializer.errors)
data = serializer.validated_data
if len(data.get("actions", [])) == 0:
raise ValidationError("No actions to perform.")
for action in data.get("actions"):
action["skipDigests"] = True
data.update(
{
"conditions": [],
"filters": [],
"actionMatch": "all",
"filterMatch": "all",
"frequency": 30,
}
)
rule = Rule(id=TEST_NOTIFICATION_ID, project=project, data=data, label=data.get("name"))
# Cast to GroupEvent rather than Event to match expected types
test_event = create_sample_event(
project, platform=project.platform, default="javascript", tagged=True
)
group_event = GroupEvent.from_event(
event=test_event,
group=test_event.group,
)
if should_fire_workflow_actions(project.organization, ErrorGroupType.type_id):
return self.execute_future_on_test_event_workflow_engine(group_event, rule)
else:
return self.execute_future_on_test_event(group_event, rule)
def execute_future_on_test_event(
self,
test_event: GroupEvent,
rule: Rule,
) -> Response:
"""
A slightly modified version of utils.safe.safe_execute that handles
IntegrationFormErrors, and returns a body with `{ actions: [<error info>] }`.
This is used in our Alert Rule UI to display errors to the user.
"""
action_exceptions = []
for callback, futures in activate_downstream_actions(rule, test_event).values():
try:
callback(test_event, futures)
except Exception as exc:
callback_name = getattr(callback, "__name__", str(callback))
cls_name = callback.__class__.__name__
logger = logging.getLogger(f"sentry.test_rule.{cls_name.lower()}")
# safe_execute logs these as exceptions, which can result in
# noisy sentry issues, so log with a warning instead.
if isinstance(exc, REPORTABLE_ERROR_TYPES):
logger.warning(
"%s.test_alert.integration_error", callback_name, extra={"exc": exc}
)
# IntegrationFormErrors should be safe to propagate via the API
action_exceptions.append(str(exc))
else:
# If we encounter some unexpected exception, we probably
# don't want to continue executing more callbacks.
logger.warning(
"%s.test_alert.unexpected_exception", callback_name, exc_info=True
)
error_id = sentry_sdk.capture_exception(exc)
action_exceptions.append(
f"An unexpected error occurred. Error ID: '{error_id}'"
)
break
status = None
data = None
# Presence of "actions" here means we have exceptions to surface to the user
if len(action_exceptions) > 0:
status = 400
data = {"actions": action_exceptions}
return Response(status=status, data=data)
def execute_future_on_test_event_workflow_engine(
self,
test_event: GroupEvent,
rule: Rule,
) -> Response:
"""
Invoke the workflow_engine to send a test notification.
This method will lookup the corresponding workflow for a given rule then invoke the notification action.
"""
action_exceptions = []
actions = rule.data.get("actions", [])
workflow = Workflow(
id=TEST_NOTIFICATION_ID,
name="Test Workflow",
organization=rule.project.organization,
)
event_data = WorkflowEventData(
event=test_event,
group=test_event.group,
)
for action_blob in actions:
try:
action = translate_rule_data_actions_to_notification_actions(
[action_blob], skip_failures=False
)[0]
action.id = TEST_NOTIFICATION_ID
# Annotate the action with the workflow id
setattr(action, "workflow_id", workflow.id)
except REPORTABLE_ERROR_TYPES as e:
action_exceptions.append(str(e))
continue
except Exception as e:
error_id = sentry_sdk.capture_exception(e)
action_exceptions.append(f"An unexpected error occurred. Error ID: '{error_id}'")
continue
action_exceptions.extend(test_fire_action(action, event_data))
status = None
data = None
if len(action_exceptions) > 0:
status = 400
data = {"actions": action_exceptions}
return Response(status=status, data=data)
| ProjectRuleActionsEndpoint |
python | pytorch__pytorch | torch/nn/modules/upsampling.py | {
"start": 8335,
"end": 9878
} | class ____(Upsample):
r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels.
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as it's constructor argument.
When :attr:`size` is given, it is the output size of the image `(h, w)`.
Args:
size (int or Tuple[int, int], optional): output spatial sizes
scale_factor (float or Tuple[float, float], optional): multiplier for
spatial size.
.. warning::
This class is deprecated in favor of :func:`~nn.functional.interpolate`.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor
Examples::
>>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
>>> input
tensor([[[[1., 2.],
[3., 4.]]]])
>>> m = nn.UpsamplingNearest2d(scale_factor=2)
>>> m(input)
tensor([[[[1., 1., 2., 2.],
[1., 1., 2., 2.],
[3., 3., 4., 4.],
[3., 3., 4., 4.]]]])
"""
def __init__(
self,
size: Optional[_size_2_t] = None,
scale_factor: Optional[_ratio_2_t] = None,
) -> None:
super().__init__(size, scale_factor, mode="nearest")
| UpsamplingNearest2d |
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 1285,
"end": 1485
} | class ____(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
serializer_class = NoteSerializer
lookup_field = 'text__contains'
lookup_url_kwarg = 'text'
| KWargedNoteViewSet |
python | kamyu104__LeetCode-Solutions | Python/sum-of-total-strength-of-wizards.py | {
"start": 1040,
"end": 2361
} | class ____(object):
def totalStrength(self, strength):
"""
:type strength: List[int]
:rtype: int
"""
MOD = 10**9+7
prefix, prefix2 = [0]*(len(strength)+1), [0]*(len(strength)+1)
for i in xrange(len(strength)):
prefix[i+1] = (prefix[i]+strength[i])%MOD
prefix2[i+1] = (prefix2[i]+strength[i]*(i+1))%MOD
suffix, suffix2 = [0]*(len(strength)+1), [0]*(len(strength)+1)
for i in reversed(xrange(len(strength))):
suffix[i] = (suffix[i+1]+strength[i])%MOD
suffix2[i] = (suffix2[i+1]+strength[i]*(len(strength)-i))%MOD
stk, result = [-1], 0
for i in xrange(len(strength)+1):
while stk[-1] != -1 and (i == len(strength) or strength[stk[-1]] >= strength[i]):
x, y, z = stk[-2]+1, stk.pop(), i-1
# assert(all(strength[j] >= strength[y] for j in xrange(x, y+1)))
# assert(all(strength[j] > strength[y] for j in xrange(y+1, z+1)))
result = (result+(strength[y]*((z-y+1)*((prefix2[y+1]-prefix2[x])-x*(prefix[y+1]-prefix[x]))+
(y-x+1)*((suffix2[y+1]-suffix2[z+1])-(len(strength)-(z+1))*(suffix[y+1]-suffix[z+1])))))%MOD
stk.append(i)
return result
| Solution2 |
python | Textualize__textual | docs/examples/widgets/text_area_example.py | {
"start": 190,
"end": 385
} | class ____(App):
def compose(self) -> ComposeResult:
yield TextArea.code_editor(TEXT, language="python")
app = TextAreaExample()
if __name__ == "__main__":
app.run()
| TextAreaExample |
python | huggingface__transformers | src/transformers/models/cohere/modular_cohere.py | {
"start": 12026,
"end": 15871
} | class ____(LlamaForCausalLM):
def __init__(self, config):
super().__init__(config)
self.model = CohereModel(config)
self.logit_scale = config.logit_scale
self.tie_word_embeddings = config.tie_word_embeddings
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>> from transformers import AutoTokenizer, CohereForCausalLM
>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
>> prompt = "Hey, are you conscious? Can you talk to me?"
>> inputs = tokenizer(prompt, return_tensors="pt")
>> # Generate
>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
logits = logits * self.logit_scale # main diff from Llama
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"CohereForCausalLM",
"CohereModel",
"CoherePreTrainedModel", # noqa: F822
]
| CohereForCausalLM |
python | pytorch__pytorch | test/test_xpu.py | {
"start": 1685,
"end": 24242
} | class ____(TestCase):
expandable_segments = False
def test_device_behavior(self):
current_device = torch.xpu.current_device()
torch.xpu.set_device(current_device)
self.assertEqual(current_device, torch.xpu.current_device())
@unittest.skipIf(not TEST_MULTIXPU, "only one GPU detected")
def test_multi_device_behavior(self):
current_device = torch.xpu.current_device()
target_device = (current_device + 1) % torch.xpu.device_count()
with torch.xpu.device(target_device):
self.assertEqual(target_device, torch.xpu.current_device())
self.assertEqual(current_device, torch.xpu.current_device())
with torch.xpu._DeviceGuard(target_device):
self.assertEqual(target_device, torch.xpu.current_device())
self.assertEqual(current_device, torch.xpu.current_device())
def test_get_device_properties(self):
current_device = torch.xpu.current_device()
device_properties = torch.xpu.get_device_properties(current_device)
self.assertEqual(device_properties, torch.xpu.get_device_properties(None))
self.assertEqual(device_properties, torch.xpu.get_device_properties())
device_name = torch.xpu.get_device_name(current_device)
self.assertEqual(device_name, torch.xpu.get_device_name(None))
self.assertEqual(device_name, torch.xpu.get_device_name())
device_capability = torch.xpu.get_device_capability(current_device)
self.assertTrue(device_capability["device_id"] > 0)
self.assertTrue(device_capability["max_work_group_size"] > 0)
self.assertTrue(device_capability["max_num_sub_groups"] > 0)
self.assertEqual(
device_properties.driver_version, device_capability["driver_version"]
)
self.assertEqual(device_properties.has_fp16, device_capability["has_fp16"])
self.assertEqual(device_properties.has_fp64, device_capability["has_fp64"])
self.assertEqual(
device_properties.has_atomic64, device_capability["has_atomic64"]
)
self.assertEqual(
device_properties.has_bfloat16_conversions,
device_capability["has_bfloat16_conversions"],
)
self.assertEqual(
device_properties.has_subgroup_matrix_multiply_accumulate,
device_capability["has_subgroup_matrix_multiply_accumulate"],
)
self.assertEqual(
device_properties.has_subgroup_matrix_multiply_accumulate_tensor_float32,
device_capability["has_subgroup_matrix_multiply_accumulate_tensor_float32"],
)
self.assertEqual(
device_properties.has_subgroup_2d_block_io,
device_capability["has_subgroup_2d_block_io"],
)
if int(torch.version.xpu) >= 20250000:
self.assertEqual(
device_properties.architecture,
device_capability["architecture"],
)
self.assertEqual(
len(str(device_properties.uuid)), 36
) # xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
self.assertEqual(len(device_properties.uuid.bytes), 16)
@unittest.skipIf(IS_WINDOWS, "not applicable to Windows (only fails with fork)")
def test_wrong_xpu_fork(self):
stderr = TestCase.runWithPytorchAPIUsageStderr(
"""\
import torch
from torch.multiprocessing import Process
def run(rank):
torch.xpu.set_device(rank)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
# it would work fine without the line below
torch.xpu.set_device(0)
p = Process(target=run, args=(rank,))
p.start()
processes.append(p)
for p in processes:
p.join()
"""
)
self.assertRegex(stderr, "Cannot re-initialize XPU in forked subprocess.")
@unittest.skipIf(
IS_WINDOWS, "Only for lazy initialization on Linux, not applicable on Windows."
)
def test_lazy_init(self):
"""Validate that no XPU calls are made during `import torch` call"""
def check_output(script: str) -> str:
return (
subprocess.check_output([sys.executable, "-c", script])
.decode("ascii")
.strip()
)
test_script = """\
import torch
from torch.multiprocessing import Process
import copy
def run_model(model, input):
input_xpu = input.clone().to('xpu')
model_xpu = copy.deepcopy(model).to('xpu')
loss_xpu = model_xpu(input_xpu).sum()
loss = model(input).sum()
torch.testing.assert_close(loss_xpu.cpu(), loss)
def test_multi_process(model, input):
p = Process(target=run_model, args=(model, input))
p.start()
p.join()
assert p.exitcode == 0
input = torch.rand(32, 3, 224, 224)
model = torch.nn.Sequential(
torch.nn.Conv2d(3, 64, 3, stride=2),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2, 2),
)
if __name__ == "__main__":
test_multi_process(model, input)
test_multi_process(model, input)
print(torch.xpu.device_count())
"""
# XPU have extra lines, so get the last line, refer https://github.com/intel/torch-xpu-ops/issues/2261
rc = check_output(test_script).splitlines()[-1]
self.assertEqual(rc, str(torch.xpu.device_count()))
def test_streams(self):
s0 = torch.xpu.Stream()
torch.xpu.set_stream(s0)
s1 = torch.xpu.current_stream()
self.assertEqual(s0, s1)
s2 = torch.xpu.Stream()
self.assertFalse(s0 == s2)
torch.xpu.set_stream(s2)
with torch.xpu.stream(s0):
self.assertEqual(s0, torch.xpu.current_stream())
self.assertEqual(s2, torch.xpu.current_stream())
def test_stream_priority(self):
low, high = torch.xpu.Stream.priority_range()
s0 = torch.xpu.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device("xpu:0"), s0.device)
s1 = torch.xpu.Stream(device=0, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device("xpu:0"), s1.device)
def test_stream_event_repr(self):
s = torch.xpu.current_stream()
self.assertTrue("torch.xpu.Stream" in str(s))
e = torch.xpu.Event()
self.assertTrue("torch.xpu.Event(uninitialized)" in str(e))
s.record_event(e)
self.assertTrue("torch.xpu.Event" in str(e))
def test_events(self):
stream = torch.xpu.current_stream()
event = torch.xpu.Event()
self.assertTrue(event.query())
stream.record_event(event)
event.synchronize()
self.assertTrue(event.query())
start_event = torch.xpu.Event(enable_timing=True)
end_event = torch.xpu.Event(enable_timing=True)
stream.record_event(start_event)
time.sleep(0.1)
stream.record_event(end_event)
torch.xpu.synchronize()
if int(torch.version.xpu) >= 20250000:
self.assertGreater(start_event.elapsed_time(end_event), 0)
else:
with self.assertRaisesRegex(
NotImplementedError,
"elapsed_time of XPUEvent requires PyTorch to be built with SYCL compiler version 2025.0.0 or newer.",
):
start_event.elapsed_time(end_event)
event = torch.xpu.Event(enable_timing=True)
self.assertEqual(event.sycl_event, 0)
self.assertEqual(event.event_id, 0)
event.record()
self.assertNotEqual(event.sycl_event, 0)
self.assertNotEqual(event.event_id, 0)
self.assertEqual(event.sycl_event, event.event_id)
def test_generic_stream_event(self):
stream = torch.Stream("xpu")
self.assertEqual(stream.device_index, torch.xpu.current_device())
xpu_stream = torch.xpu.Stream(
stream_id=stream.stream_id,
device_index=stream.device_index,
device_type=stream.device_type,
)
self.assertIsInstance(xpu_stream, torch.Stream)
self.assertTrue(issubclass(type(xpu_stream), torch.Stream))
self.assertTrue(torch.Stream in type(xpu_stream).mro())
self.assertEqual(stream.stream_id, xpu_stream.stream_id)
self.assertNotEqual(stream.stream_id, torch.xpu.current_stream().stream_id)
event1 = torch.Event("xpu", enable_timing=True)
event2 = torch.Event("xpu", enable_timing=True)
self.assertEqual(event1.event_id, 0)
a = torch.randn(1000)
b = torch.randn(1000)
with torch.xpu.stream(xpu_stream):
a_xpu = a.to("xpu", non_blocking=True)
b_xpu = b.to("xpu", non_blocking=True)
self.assertEqual(stream.stream_id, torch.xpu.current_stream().stream_id)
event1.record(stream)
event1.synchronize()
self.assertTrue(event1.query())
c_xpu = a_xpu + b_xpu
# Here intendionly records another stream.
event2.record()
event2.synchronize()
self.assertTrue(event2.query())
self.assertNotEqual(event1.event_id, event2.event_id)
self.assertEqual(c_xpu.cpu(), a + b)
if int(torch.version.xpu) >= 20250000:
self.assertGreater(event1.elapsed_time(event2), 0)
else:
with self.assertRaisesRegex(
NotImplementedError,
"elapsedTime requires PyTorch to be built with SYCL compiler version 2025.0.0 or newer.",
):
event1.elapsed_time(event2)
xpu_event = torch.xpu.Event()
self.assertIsInstance(xpu_event, torch.Event)
self.assertTrue(issubclass(type(xpu_event), torch.Event))
self.assertTrue(torch.Event in type(xpu_event).mro())
def test_stream_compatibility(self):
s1 = torch.xpu.Stream()
s2 = torch.xpu.Stream()
torch.accelerator.set_stream(s1)
self.assertEqual(torch.accelerator.current_stream().stream_id, s1.stream_id)
torch.accelerator.set_stream(s2)
self.assertEqual(torch.accelerator.current_stream().stream_id, s2.stream_id)
with self.assertRaisesRegex(RuntimeError, "The device index is out of range"):
torch.accelerator.current_stream(torch.accelerator.device_count())
def test_device_context_manager(self):
prev_device = torch.xpu.current_device()
with torch.accelerator.device_index(None):
self.assertEqual(torch.xpu.current_device(), prev_device)
self.assertEqual(torch.xpu.current_device(), prev_device)
with torch.accelerator.device_index(0):
self.assertEqual(torch.xpu.current_device(), 0)
self.assertEqual(torch.xpu.current_device(), prev_device)
@unittest.skipIf(not TEST_MULTIXPU, "only one GPU detected")
def test_multi_device_context_manager(self):
src_device = 0
dst_device = 1
torch.xpu.set_device(src_device)
with torch.accelerator.device_index(dst_device):
self.assertEqual(torch.xpu.current_device(), 1)
self.assertEqual(torch.xpu.current_device(), src_device)
def test_stream_context_manager(self):
prev_stream = torch.xpu.current_stream()
with torch.xpu.Stream() as stream:
self.assertEqual(stream, torch.xpu.current_stream())
self.assertEqual(prev_stream, torch.xpu.current_stream())
@unittest.skipIf(not TEST_MULTIXPU, "only one GPU detected")
def test_multi_device_stream_context_manager(self):
src_device = 0
dst_device = 1
torch.xpu.set_device(src_device)
src_prev_stream = torch.xpu.current_stream(src_device)
dst_prev_stream = torch.xpu.current_stream(dst_device)
with torch.xpu.Stream(dst_device) as dst_stream:
self.assertEqual(dst_device, torch.xpu.current_device())
self.assertEqual(dst_stream, torch.xpu.current_stream())
self.assertEqual(src_prev_stream, torch.xpu.current_stream(src_device))
self.assertEqual(src_device, torch.xpu.current_device())
self.assertEqual(src_prev_stream, torch.xpu.current_stream())
self.assertEqual(dst_prev_stream, torch.xpu.current_stream(dst_device))
def test_generator(self):
torch.manual_seed(2024)
g_state0 = torch.xpu.get_rng_state()
torch.manual_seed(1234)
g_state1 = torch.xpu.get_rng_state()
self.assertNotEqual(g_state0, g_state1)
torch.xpu.manual_seed(2024)
g_state2 = torch.xpu.get_rng_state()
self.assertEqual(g_state0, g_state2)
torch.xpu.set_rng_state(g_state1)
self.assertEqual(g_state1, torch.xpu.get_rng_state())
torch.manual_seed(1234)
torch.xpu.set_rng_state(g_state0)
self.assertEqual(2024, torch.xpu.initial_seed())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).xpu()
y = torch.zeros(2, 5, dtype=torch.int, device="xpu")
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertEqual(q_copy[0].dtype, torch.float)
self.assertEqual(q_copy[1].dtype, torch.int)
self.assertEqual(q_copy[2].dtype, torch.float)
self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
self.assertTrue(isinstance(q_copy[3]._untyped_storage, torch.UntypedStorage))
q_copy[1].fill_(10)
y.fill_(10)
self.assertEqual(q_copy[3], y.storage())
def test_serialization_array_with_empty(self):
x = [
torch.randn(4, 4).xpu(),
torch.tensor([], dtype=torch.float, device=torch.device("xpu")),
]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
def test_out_of_memory(self):
if self.expandable_segments:
self.skipTest("Skipping OOM test for expandable segments allocator.")
tensor = torch.zeros(1024, device="xpu") # noqa: F841
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device="xpu")
with self.assertRaisesRegex(RuntimeError, "XPU out of memory."):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device="xpu")
def test_raises_oom(self):
if self.expandable_segments:
self.skipTest("Skipping OOM test for expandable segments allocator.")
torch.xpu.memory.empty_cache()
with self.assertRaises(torch.OutOfMemoryError):
torch.empty(1024 * 1024 * 1024 * 1024, device="xpu")
@serialTest()
def test_set_per_process_memory_fraction(self):
gc.collect()
torch.xpu.empty_cache()
total_memory = torch.xpu.get_device_properties().total_memory
fraction = 0.5
orig_fraction = torch.xpu.get_per_process_memory_fraction()
with self.assertRaisesRegex(ValueError, "invalid fraction:"):
torch.xpu.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "invalid fraction:"):
torch.xpu.set_per_process_memory_fraction(1.1)
torch.xpu.set_per_process_memory_fraction(fraction)
allowed_memory = int(total_memory * 0.49)
reserved_memory = torch.xpu.memory_reserved()
application_memory = allowed_memory - reserved_memory
tensor = torch.empty(application_memory, dtype=torch.int8, device="xpu")
del tensor
gc.collect()
torch.xpu.empty_cache()
self.assertEqual(fraction, torch.xpu.get_per_process_memory_fraction())
application_memory = int(total_memory * 0.51)
with self.assertRaises(torch.OutOfMemoryError):
_ = torch.empty(application_memory, dtype=torch.int8, device="xpu")
torch.xpu.set_per_process_memory_fraction(orig_fraction)
def test_memory_allocation(self):
torch.xpu.empty_cache()
prev_allocated = torch.xpu.memory_allocated()
prev_reserved = torch.xpu.memory_reserved()
self.assertGreaterEqual(prev_allocated, 0)
self.assertGreaterEqual(prev_reserved, 0)
a = torch.ones(10, device="xpu")
self.assertGreater(torch.xpu.memory_allocated(), prev_allocated)
self.assertGreaterEqual(torch.xpu.memory_reserved(), prev_reserved)
del a
self.assertEqual(torch.xpu.memory_allocated(), prev_allocated)
torch.xpu.empty_cache()
self.assertLessEqual(torch.xpu.memory_reserved(), prev_reserved)
torch.xpu.reset_accumulated_memory_stats()
# Activate 1kB memory
prev_active_current = torch.xpu.memory_stats()["active_bytes.all.current"]
a = torch.randn(256, device="xpu")
# Detect if the current active memory is 1kB
self.assertEqual(
torch.xpu.memory_stats()["active_bytes.all.current"],
1024 + prev_active_current,
)
self.assertEqual(torch.xpu.memory_stats()["active_bytes.all.freed"], 0)
del a
self.assertEqual(
torch.xpu.memory_stats()["active_bytes.all.current"], prev_active_current
)
self.assertEqual(torch.xpu.memory_stats()["active_bytes.all.freed"], 1024)
@unittest.skipIf(not TEST_MULTIXPU, "only one GPU detected")
def test_device_memory_allocated(self):
device_count = torch.xpu.device_count()
current_alloc = [torch.xpu.memory_allocated(idx) for idx in range(device_count)]
a = torch.ones(10, device="xpu:0")
self.assertGreater(torch.xpu.memory_allocated(0), current_alloc[0])
self.assertTrue(
all(
torch.xpu.memory_allocated(idx) == current_alloc[idx]
for idx in range(1, device_count)
)
)
del a
def test_memory_stats(self):
gc.collect()
torch.xpu.empty_cache()
torch.xpu.reset_peak_memory_stats()
torch.xpu.reset_accumulated_memory_stats()
prev_allocated = torch.accelerator.memory_allocated()
prev_reserved = torch.accelerator.memory_reserved()
prev_max_allocated = torch.accelerator.max_memory_allocated()
prev_max_reserved = torch.accelerator.max_memory_reserved()
self.assertEqual(prev_allocated, prev_max_allocated)
self.assertEqual(prev_reserved, prev_max_reserved)
# Activate 1kB memory
prev_active_current = torch.accelerator.memory_stats()[
"active_bytes.all.current"
]
tmp = torch.randn(256, device="xpu")
# Detect if the current active memory is 1kB
self.assertEqual(
torch.accelerator.memory_stats()["active_bytes.all.current"],
1024 + prev_active_current,
)
self.assertEqual(torch.accelerator.memory_stats()["active_bytes.all.freed"], 0)
del tmp
gc.collect()
torch.accelerator.empty_cache()
self.assertEqual(
torch.accelerator.memory_stats()["active_bytes.all.current"],
prev_active_current,
)
self.assertEqual(
torch.accelerator.memory_stats()["active_bytes.all.freed"], 1024
)
torch.accelerator.reset_peak_memory_stats()
self.assertEqual(torch.accelerator.max_memory_allocated(), prev_max_allocated)
self.assertEqual(torch.accelerator.max_memory_reserved(), prev_max_reserved)
@unittest.skipIf(
int(torch.version.xpu) < 20250000,
"Test requires SYCL compiler version 2025.0.0 or newer.",
)
def test_mem_get_info(self):
torch.xpu.synchronize()
torch.xpu.empty_cache()
before_free_bytes, before_total_bytes = torch.xpu.mem_get_info()
# increasing to 1MB to force acquiring a new block.
torch.randn(1024 * 256, device="xpu")
torch.xpu.synchronize()
after_free_bytes, after_total_bytes = torch.xpu.mem_get_info()
self.assertGreaterEqual(before_free_bytes, after_free_bytes)
self.assertEqual(before_total_bytes, after_total_bytes)
def test_get_arch_list(self):
arch_list = torch.xpu.get_arch_list()
if not arch_list:
return
flags = torch.xpu.get_gencode_flags()
for arch in arch_list:
self.assertTrue(arch in flags)
@unittest.skipIf(not TEST_MULTIXPU, "only one GPU detected")
def test_can_device_access_peer(self):
device_count = torch.xpu.device_count()
for device in range(device_count):
for peer in range(device_count):
self.assertEqual(
torch.xpu.can_device_access_peer(device, peer),
torch.xpu.can_device_access_peer(peer, device),
)
def test_torch_version_xpu(self):
self.assertEqual(len(torch.version.xpu), 8)
compiler_version = int(torch.version.xpu)
self.assertGreater(compiler_version, 20230000)
if IS_LINUX:
library = find_library_location("libtorch_xpu.so")
cmd = f"ldd {library} | grep libsycl"
results = subprocess.check_output(cmd, shell=True).strip().split(b"\n")
# There should be only one libsycl.so
self.assertEqual(len(results), 1)
for result in results:
self.assertTrue(b"libsycl.so" in result)
def test_dlpack_conversion(self):
if self.expandable_segments:
self.skipTest("Skipping DLPack test for expandable segments allocator.")
x = make_tensor((5,), dtype=torch.float32, device="xpu")
if IS_WINDOWS and int(torch.version.xpu) < 20250000:
with self.assertRaisesRegex(
NotImplementedError,
"Default context is not supported on XPU by default on Windows for SYCL compiler versions earlier than 2025.0.0.",
):
torch.to_dlpack(x)
else:
z = torch.from_dlpack(torch.to_dlpack(x))
z[0] = z[0] + 1.0
self.assertEqual(z, x)
@unittest.skipIf(not TEST_XPU, "XPU not available, skipping tests")
| TestXpu |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_project_uptime_alert_index.py | {
"start": 628,
"end": 12627
} | class ____(ProjectUptimeAlertIndexBaseEndpointTest):
method = "post"
def test(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
name="test",
environment="uptime-prod",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1500,
body=None,
)
detector = Detector.objects.get(id=resp.data["id"])
uptime_subscription = get_uptime_subscription(detector)
assert detector.name == "test"
assert detector.config["environment"] == "uptime-prod"
assert detector.owner_user_id == self.user.id
assert detector.owner_team_id is None
assert detector.config["mode"] == UptimeMonitorMode.MANUAL
assert detector.config["recovery_threshold"] == DEFAULT_RECOVERY_THRESHOLD
assert detector.config["downtime_threshold"] == DEFAULT_DOWNTIME_THRESHOLD
assert uptime_subscription.url == "http://sentry.io"
assert uptime_subscription.interval_seconds == 60
assert uptime_subscription.timeout_ms == 1500
assert uptime_subscription.body is None
assert uptime_subscription.trace_sampling is False
def test_set_trace_sampling(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
name="test",
environment="uptime-prod",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1500,
body=None,
trace_sampling=True,
)
detector = Detector.objects.get(id=resp.data["id"])
uptime_subscription = get_uptime_subscription(detector)
assert uptime_subscription.trace_sampling is True
def test_custom_thresholds(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
name="test",
environment="uptime-prod",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1500,
recovery_threshold=2,
downtime_threshold=5,
)
detector = Detector.objects.get(id=resp.data["id"])
assert detector.config["recovery_threshold"] == 2
assert detector.config["downtime_threshold"] == 5
def test_no_environment(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
body=None,
)
detector = Detector.objects.get(id=resp.data["id"])
assert detector.config.get("environment") is None
def test_no_owner(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
url="http://sentry.io",
owner=None,
interval_seconds=60,
timeout_ms=1000,
)
detector = Detector.objects.get(id=resp.data["id"])
assert detector.owner_user_id is None
assert detector.owner_team_id is None
# Test without passing the owner
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
url="http://getsentry.com",
interval_seconds=60,
timeout_ms=1000,
)
detector = Detector.objects.get(id=resp.data["id"])
assert detector.owner_user_id is None
assert detector.owner_team_id is None
def test_mode_no_superadmin(self) -> None:
resp = self.get_error_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE,
status_code=400,
)
assert resp.data == {
"mode": [ErrorDetail(string="Only superusers can modify `mode`", code="invalid")]
}
def test_mode_superadmin(self) -> None:
self.login_as(self.user, superuser=True)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
mode=UptimeMonitorMode.AUTO_DETECTED_ACTIVE,
)
detector = Detector.objects.get(id=resp.data["id"])
uptime_subscription = get_uptime_subscription(detector)
assert detector.name == "test"
assert detector.owner_user_id == self.user.id
assert detector.owner_team_id is None
assert detector.config["mode"] == UptimeMonitorMode.AUTO_DETECTED_ACTIVE
assert uptime_subscription.url == "http://sentry.io"
assert uptime_subscription.interval_seconds == 60
assert uptime_subscription.timeout_ms == 1000
def test_headers_body_method(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
method="POST",
body='{"key": "value"}',
headers=[["header", "value"]],
)
detector = Detector.objects.get(id=resp.data["id"])
uptime_subscription = get_uptime_subscription(detector)
assert uptime_subscription.body == '{"key": "value"}'
assert uptime_subscription.headers == [["header", "value"]]
def test_headers_body_method_already_exists(self) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
method="POST",
body='{"key": "value"}',
headers=[["header", "value"]],
)
detector = Detector.objects.get(id=resp.data["id"])
uptime_subscription = get_uptime_subscription(detector)
new_proj = self.create_project()
resp = self.get_success_response(
self.organization.slug,
new_proj.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
method="POST",
body='{"key": "value"}',
headers=[["header", "value"]],
)
new_detector = Detector.objects.get(id=resp.data["id"])
new_uptime_subscription = get_uptime_subscription(new_detector)
assert uptime_subscription.id != new_uptime_subscription.id
assert new_detector.project_id != detector.project_id
resp = self.get_success_response(
self.organization.slug,
new_proj.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
method="POST",
body='{"key": "value"}',
headers=[["header", "different value"]],
)
newer_detector = Detector.objects.get(id=resp.data["id"])
newer_uptime_subscription = get_uptime_subscription(newer_detector)
assert newer_uptime_subscription.id != new_uptime_subscription.id
def test_headers_invalid_format(self) -> None:
resp = self.get_error_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
method="POST",
body='{"key": "value"}',
headers={"header", "value"},
status_code=400,
)
assert resp.data == {
"headers": [ErrorDetail(string="Expected array of header tuples.", code="invalid")]
}
def test_size_too_big(self) -> None:
resp = self.get_error_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
method="POST",
body="body" * 250,
headers=[["header", "value"]],
)
assert resp.data == {
"nonFieldErrors": [
ErrorDetail(
string=f"Request is too large, max size is {MAX_REQUEST_SIZE_BYTES} bytes",
code="invalid",
)
]
}
def test_over_limit(self) -> None:
with mock.patch(
"sentry.uptime.subscriptions.subscriptions.MAX_MANUAL_SUBSCRIPTIONS_PER_ORG", new=1
):
self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
owner=f"user:{self.user.id}",
)
self.get_error_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
url="http://santry.io",
interval_seconds=60,
timeout_ms=1000,
owner=f"user:{self.user.id}",
)
@mock.patch(
"sentry.quotas.backend.assign_seat",
return_value=Outcome.RATE_LIMITED,
)
def test_no_seat_assignment(self, _mock_assign_seat: mock.MagicMock) -> None:
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=1000,
owner=f"user:{self.user.id}",
)
detector = Detector.objects.get(id=resp.data["id"])
assert detector.enabled is False
def test_timeout_too_large(self) -> None:
resp = self.get_error_response(
self.organization.slug,
self.project.slug,
environment=self.environment.name,
name="test",
owner=f"user:{self.user.id}",
url="http://sentry.io",
interval_seconds=60,
timeout_ms=60_001,
method="POST",
body="body",
headers=[["header", "value"]],
)
assert resp.data == {
"timeoutMs": [
ErrorDetail(
string="Ensure this value is less than or equal to 60000.",
code="max_value",
)
]
}
| ProjectUptimeAlertIndexPostEndpointTest |
python | bokeh__bokeh | tests/unit/bokeh/model/test_model.py | {
"start": 3537,
"end": 4963
} | class ____:
def test_fails_with_unknown_event_name(self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
with pytest.raises(ValueError):
m.js_on_event("foo", cb)
def test_with_multple_callbacks(self) -> None:
cb1 = CustomJS(code="foo")
cb2 = CustomJS(code="bar")
m = SomeModel()
m.js_on_event("document_ready", cb1, cb2)
assert m.js_event_callbacks == {"document_ready": [cb1, cb2]}
def test_with_multple_callbacks_separately(self) -> None:
cb1 = CustomJS(code="foo")
cb2 = CustomJS(code="bar")
m = SomeModel()
m.js_on_event("document_ready", cb1)
assert m.js_event_callbacks == {"document_ready": [cb1]}
m.js_on_event("document_ready", cb2)
assert m.js_event_callbacks == {"document_ready": [cb1, cb2]}
def test_ignores_dupe_callbacks(self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
m.js_on_event("document_ready", cb, cb)
assert m.js_event_callbacks == {"document_ready": [cb]}
def test_ignores_dupe_callbacks_separately(self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
m.js_on_event("document_ready", cb)
assert m.js_event_callbacks == {"document_ready": [cb]}
m.js_on_event("document_ready", cb)
assert m.js_event_callbacks == {"document_ready": [cb]}
| Test_js_on_event |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/distribute.py | {
"start": 1496,
"end": 3958
} | class ____(dataset_ops.UnaryDataset):
"""A `Dataset` that shards the `Dataset` automatically.
This dataset takes in an existing dataset and tries to automatically figure
out how to shard the dataset in a multi-worker scenario using graph rewrites.
If the AutoShardPolicy is set to FILE, it walks up the dataset graph until
it finds a reader dataset, then inserts a ShardDataset op before that node
so that each worker only sees some files.
If the AutoShardPolicy is set to DATA, it inserts a ShardDataset op at the
end of the input pipeline, before any terminal PrefetchDataset if there is
one. Additionally, if there is a RebatchDatasetV2 in the input pipeline, it
is written to legacy RebatchDataset for correctness reasons, since
RebatchDatasetV2 is incompatible with data sharding.
If the AutoShardPolicy is set to AUTO, it tries to do file-based sharding.
If it cannot find a reader dataset, it falls back to doing data-based
sharding.
If the AutoShardPolicy is set to OFF, it does nothing.
Attributes:
num_workers: Total number of workers to shard this dataset across.
index: The current worker index (out of the total number of workers) this
dataset is for.
num_replicas: The total number of replicas across all workers. This is used
only when sharding by data (either DATA or AUTO) in order to rewrite
RebatchDatasetV2 to RebatchDataset.
Raises:
NotFoundError: If we cannot find a suitable reader dataset to begin
automatically sharding the dataset.
"""
def __init__(self, input_dataset, num_workers, index, num_replicas=None):
self._input_dataset = input_dataset
self._element_spec = input_dataset.element_spec
variant_tensor = ged_ops.auto_shard_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
num_workers=num_workers,
index=index,
auto_shard_policy=int(
input_dataset.options().experimental_distribute.auto_shard_policy),
num_replicas=num_replicas,
**self._flat_structure)
super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._element_spec
def _AutoShardDatasetV1(input_dataset, num_workers, index, num_replicas=None): # pylint: disable=invalid-name
return dataset_ops.DatasetV1Adapter(
_AutoShardDataset(input_dataset, num_workers, index, num_replicas))
| _AutoShardDataset |
python | weaviate__weaviate-python-client | weaviate/config.py | {
"start": 1913,
"end": 2169
} | class ____(BaseModel):
"""Timeouts for the different operations in the client."""
query: Union[int, float] = Field(default=30, ge=0)
insert: Union[int, float] = Field(default=90, ge=0)
init: Union[int, float] = Field(default=2, ge=0)
| Timeout |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/test_post_votes.py | {
"start": 1093,
"end": 6825
} | class ____(TestCase):
@property
def _config(self):
return (
ConfigBuilder()
.with_basic_auth_credentials("user@example.com", "password")
.with_subdomain("d3v-airbyte")
.with_start_date(ab_datetime_now().subtract(timedelta(weeks=104)))
.build()
)
def get_authenticator(self, config):
return ApiTokenAuthenticator(email=config["credentials"]["email"], password=config["credentials"]["api_token"])
@HttpMocker()
def test_given_one_page_when_read_posts_comments_then_return_records(self, http_mocker):
"""
A normal full refresh sync without pagination
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
PostsVotesResponseBuilder.posts_votes_response().with_record(PostsVotesRecordBuilder.posts_votes_record()).build(),
)
output = read_stream("post_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 1
@HttpMocker()
def test_given_403_error_when_read_posts_comments_then_skip_stream(self, http_mocker):
"""
Get a 403 error and then skip the stream
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
ErrorResponseBuilder.response_with_status(403).build(),
)
output = read_stream("post_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
assert output.get_stream_statuses("post_votes")[-1] == AirbyteStreamStatus.INCOMPLETE
assert any(
[
"failed with status code '403' and error message" in error
for error in get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
]
)
@HttpMocker()
def test_given_404_error_when_read_posts_comments_then_skip_stream(self, http_mocker):
"""
Get a 404 error and skip the stream
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
ErrorResponseBuilder.response_with_status(404).build(),
)
output = read_stream("post_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
assert output.get_stream_statuses("post_votes")[-1] == AirbyteStreamStatus.INCOMPLETE
assert any(
[
"failed with status code '404' and error message" in error
for error in get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
]
)
@HttpMocker()
def test_given_500_error_when_read_posts_comments_then_stop_syncing(self, http_mocker):
"""
Get a 500 error and stop the stream
"""
api_token_authenticator = self.get_authenticator(self._config)
# todo: Add this back once the CDK supports conditional streams on an endpoint
# _ = given_ticket_forms(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
posts_record_builder = given_posts(http_mocker, string_to_datetime(self._config["start_date"]), api_token_authenticator)
post = posts_record_builder.build()
http_mocker.get(
PostsVotesRequestBuilder.posts_votes_endpoint(api_token_authenticator, post["id"])
.with_start_time(self._config["start_date"])
.with_page_size(100)
.build(),
ErrorResponseBuilder.response_with_status(500).build(),
)
with patch("time.sleep", return_value=None):
output = read_stream("post_votes", SyncMode.full_refresh, self._config)
assert len(output.records) == 0
error_logs = get_log_messages_by_log_level(output.logs, LogLevel.ERROR)
assert any(["Internal server error" in error for error in error_logs])
@freezegun.freeze_time(_NOW.isoformat())
| TestPostsVotesStreamFullRefresh |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-sagemaker-endpoint/llama_index/llms/sagemaker_endpoint/base.py | {
"start": 1178,
"end": 11596
} | class ____(LLM):
r"""
SageMaker LLM.
Examples:
`pip install llama-index-llms-sagemaker-endpoint`
```python
from llama_index.llms.sagemaker import SageMakerLLM
# hooks for HuggingFaceH4/zephyr-7b-beta
# different models may require different formatting
def messages_to_prompt(messages):
prompt = ""
for message in messages:
if message.role == 'system':
prompt += f"<|system|>\n{message.content}</s>\n"
elif message.role == 'user':
prompt += f"<|user|>\n{message.content}</s>\n"
elif message.role == 'assistant':
prompt += f"<|assistant|>\n{message.content}</s>\n"
# ensure we start with a system prompt, insert blank if needed
if not prompt.startswith("<|system|>\n"):
prompt = "<|system|>\n</s>\n" + prompt
# add final assistant prompt
prompt = prompt + "<|assistant|>\n"
return prompt
def completion_to_prompt(completion):
return f"<|system|>\n</s>\n<|user|>\n{completion}</s>\n<|assistant|>\n"
# Additional setup for SageMakerLLM class
model_name = "HuggingFaceH4/zephyr-7b-beta"
api_key = "your_api_key"
region = "your_region"
llm = SageMakerLLM(
model_name=model_name,
api_key=api_key,
region=region,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
)
```
"""
endpoint_name: str = Field(description="SageMaker LLM endpoint name")
endpoint_kwargs: Dict[str, Any] = Field(
default={},
description="Additional kwargs for the invoke_endpoint request.",
)
model_kwargs: Dict[str, Any] = Field(
default={},
description="kwargs to pass to the model.",
)
content_handler: BaseIOHandler = Field(
default=DEFAULT_IO_HANDLER,
description="used to serialize input, deserialize output, and remove a prefix.",
)
profile_name: Optional[str] = Field(
description="The name of aws profile to use. If not given, then the default profile is used."
)
aws_access_key_id: Optional[str] = Field(description="AWS Access Key ID to use")
aws_secret_access_key: Optional[str] = Field(
description="AWS Secret Access Key to use"
)
aws_session_token: Optional[str] = Field(description="AWS Session Token to use")
region_name: Optional[str] = Field(
description="AWS region name to use. Uses region configured in AWS CLI if not passed"
)
max_retries: Optional[int] = Field(
default=3,
description="The maximum number of API retries.",
ge=0,
)
timeout: Optional[float] = Field(
default=60.0,
description="The timeout, in seconds, for API requests.",
ge=0,
)
_client: Any = PrivateAttr()
_completion_to_prompt: Callable[[str, Optional[str]], str] = PrivateAttr()
def __init__(
self,
endpoint_name: str,
endpoint_kwargs: Optional[Dict[str, Any]] = {},
model_kwargs: Optional[Dict[str, Any]] = {},
content_handler: Optional[BaseIOHandler] = DEFAULT_IO_HANDLER,
profile_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
aws_region_name: Optional[str] = None,
max_retries: Optional[int] = 3,
timeout: Optional[float] = 60.0,
temperature: Optional[float] = 0.5,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[
Callable[[Sequence[ChatMessage]], str]
] = LLAMA_MESSAGES_TO_PROMPT,
completion_to_prompt: Callable[
[str, Optional[str]], str
] = LLAMA_COMPLETION_TO_PROMPT,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
if not endpoint_name:
raise ValueError(
"Missing required argument:`endpoint_name`"
" Please specify the endpoint_name"
)
endpoint_kwargs = endpoint_kwargs or {}
model_kwargs = model_kwargs or {}
model_kwargs["temperature"] = temperature
content_handler = content_handler
callback_manager = callback_manager or CallbackManager([])
region_name = kwargs.pop("region_name", None)
if region_name is not None:
warnings.warn(
"Kwarg `region_name` is deprecated and will be removed in a future version. "
"Please use `aws_region_name` instead.",
DeprecationWarning,
)
if not aws_region_name:
aws_region_name = region_name
super().__init__(
endpoint_name=endpoint_name,
endpoint_kwargs=endpoint_kwargs,
model_kwargs=model_kwargs,
content_handler=content_handler,
profile_name=profile_name,
region_name=aws_region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
timeout=timeout,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._completion_to_prompt = completion_to_prompt
self._client = get_aws_service_client(
service_name="sagemaker-runtime",
profile_name=profile_name,
region_name=aws_region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
max_retries=max_retries,
timeout=timeout,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
model_kwargs = {**self.model_kwargs, **kwargs}
if not formatted:
prompt = self._completion_to_prompt(prompt, self.system_prompt)
request_body = self.content_handler.serialize_input(prompt, model_kwargs)
response = self._client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=request_body,
ContentType=self.content_handler.content_type,
Accept=self.content_handler.accept,
**self.endpoint_kwargs,
)
response["Body"] = self.content_handler.deserialize_output(response["Body"])
text = self.content_handler.remove_prefix(response["Body"], prompt)
return CompletionResponse(
text=text,
raw=response,
additional_kwargs={
"model_kwargs": model_kwargs,
"endpoint_kwargs": self.endpoint_kwargs,
},
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
model_kwargs = {**self.model_kwargs, **kwargs}
if not formatted:
prompt = self._completion_to_prompt(prompt, self.system_prompt)
request_body = self.content_handler.serialize_input(prompt, model_kwargs)
def gen() -> CompletionResponseGen:
raw_text = ""
prev_clean_text = ""
for response in self._client.invoke_endpoint_with_response_stream(
EndpointName=self.endpoint_name,
Body=request_body,
ContentType=self.content_handler.content_type,
Accept=self.content_handler.accept,
**self.endpoint_kwargs,
)["Body"]:
delta = self.content_handler.deserialize_streaming_output(
response["PayloadPart"]["Bytes"]
)
raw_text += delta
clean_text = self.content_handler.remove_prefix(raw_text, prompt)
delta = clean_text[len(prev_clean_text) :]
prev_clean_text = clean_text
yield CompletionResponse(text=clean_text, delta=delta, raw=response)
return gen()
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
prompt = self.messages_to_prompt(messages)
completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response_gen)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
raise NotImplementedError
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
raise NotImplementedError
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError
@classmethod
def class_name(cls) -> str:
return "SageMakerLLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
model_name=self.endpoint_name,
)
# Deprecated, kept for backwards compatibility
SageMakerLLMEndPoint = SageMakerLLM
| SageMakerLLM |
python | getsentry__sentry | tests/sentry/integrations/jira/test_search_endpoint.py | {
"start": 312,
"end": 8160
} | class ____(APITestCase):
@cached_property
def integration(self):
integration = self.create_provider_integration(
provider="jira",
name="Jira Cloud",
metadata={
"oauth_client_id": "oauth-client-id",
"shared_secret": "a-super-secret-key-from-atlassian",
"base_url": "https://example.atlassian.net",
"domain_name": "example.atlassian.net",
},
)
integration.add_organization(self.organization, self.user)
return integration
@responses.activate
def test_issue_search_text(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/search/jql/",
body=StubService.get_stub_json("jira", "search_response.json"),
content_type="json",
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
resp = self.client.get(f"{path}?field=externalIssue&query=test")
assert resp.status_code == 200
assert resp.data == [{"label": "(HSP-1) this is a test issue summary", "value": "HSP-1"}]
@responses.activate
def test_issue_search_id(self) -> None:
def responder(request):
query = parse_qs(urlparse(request.url).query)
assert 'id="hsp-1"' == query["jql"][0]
data = StubService.get_stub_json("jira", "search_response.json")
return 200, {}, data
responses.add_callback(
responses.GET,
"https://example.atlassian.net/rest/api/2/search/jql/",
callback=responder,
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
# queries come through from the front end lowercased, so HSP-1 -> hsp-1
for field in ("externalIssue", "parent"):
resp = self.client.get(f"{path}?field={field}&query=hsp-1")
assert resp.status_code == 200
assert resp.data == [
{"label": "(HSP-1) this is a test issue summary", "value": "HSP-1"}
]
@responses.activate
def test_issue_search_error(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/search/jql/",
status=500,
body="Totally broken",
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
for field in ("externalIssue", "parent"):
resp = self.client.get(f"{path}?field={field}&query=test")
assert resp.status_code == 400
assert resp.data == {"detail": "Something went wrong while communicating with Jira"}
@responses.activate
def test_assignee_search(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/project",
json=[{"key": "HSP", "id": "10000"}],
)
def responder(request):
query = parse_qs(urlparse(request.url).query)
assert "HSP" == query["project"][0]
assert "bob" == query["query"][0]
data = StubService.get_stub_json("jira", "user_search_response.json")
return 200, {}, data
responses.add_callback(
responses.GET,
"https://example.atlassian.net/rest/api/2/user/assignable/search",
callback=responder,
content_type="json",
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
resp = self.client.get(f"{path}?project=10000&field=assignee&query=bob")
assert resp.status_code == 200
assert resp.data == [{"value": "deadbeef123", "label": "Bobby - bob@example.org"}]
@responses.activate
def test_assignee_search_error(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/project",
json=[{"key": "HSP", "id": "10000"}],
)
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/user/assignable/search",
status=500,
body="Bad things",
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
resp = self.client.get(f"{path}?project=10000&field=assignee&query=bob")
assert resp.status_code == 400
@responses.activate
def test_customfield_search(self) -> None:
def responder(request):
query = parse_qs(urlparse(request.url).query)
assert "cf[0123]" == query["fieldName"][0]
assert "sp" == query["fieldValue"][0]
return 200, {}, '{"results": [{"displayName": "<b>Sp</b>rint 1 (1)", "value": "1"}]}'
responses.add_callback(
responses.GET,
"https://example.atlassian.net/rest/api/2/jql/autocompletedata/suggestions",
callback=responder,
content_type="application/json",
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
resp = self.client.get(f"{path}?field=customfield_0123&query=sp")
assert resp.status_code == 200
assert resp.data == [{"label": "Sprint 1 (1)", "value": "1"}]
@responses.activate
def test_customfield_search_error(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/jql/autocompletedata/suggestions",
status=500,
body="Totally broken",
)
org = self.organization
self.login_as(self.user)
path = reverse("sentry-extensions-jira-search", args=[org.slug, self.integration.id])
resp = self.client.get(f"{path}?field=customfield_0123&query=sp")
assert resp.status_code == 400
assert resp.data == {
"detail": "Unable to fetch autocomplete for customfield_0123 from Jira"
}
@responses.activate
def test_project_search_with_pagination(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/project/search",
json={
"values": [
{"id": "10000", "key": "EX", "name": "Example"},
],
"total": 2,
},
)
self.login_as(self.user)
path = reverse(
"sentry-extensions-jira-search", args=[self.organization.slug, self.integration.id]
)
resp = self.client.get(f"{path}?field=project&query=example")
assert resp.status_code == 200
assert resp.data == [
{"label": "EX - Example", "value": "10000"},
]
@responses.activate
def test_project_search_error_with_pagination(self) -> None:
responses.add(
responses.GET,
"https://example.atlassian.net/rest/api/2/project/search",
status=500,
body="susge",
)
self.login_as(self.user)
path = reverse(
"sentry-extensions-jira-search", args=[self.organization.slug, self.integration.id]
)
resp = self.client.get(f"{path}?field=project&query=example")
assert resp.status_code == 400
assert resp.data == {"detail": "Unable to fetch projects from Jira"}
| JiraSearchEndpointTest |
python | streamlit__streamlit | lib/tests/streamlit/runtime/credentials_test.py | {
"start": 1195,
"end": 14785
} | class ____(unittest.TestCase):
"""Credentials Class Unittest class."""
def setUp(self):
"""Setup."""
# Credentials._singleton should be None here, but a mis-behaving
# test may have left it intact.
Credentials._singleton = None
def tearDown(self) -> None:
Credentials._singleton = None
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_constructor(self):
"""Test Credentials constructor."""
c = Credentials()
assert c._conf_file == MOCK_PATH
assert c.activation is None
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_get_current(self):
"""Test Credentials.get_current."""
Credentials._singleton = None
c = Credentials.get_current()
assert Credentials._singleton == c
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_constructor_runs_twice(self):
"""Test Credentials constructor runs twice."""
Credentials._singleton = None
Credentials()
with pytest.raises(RuntimeError) as e:
Credentials()
assert (
str(e.value)
== "Credentials already initialized. Use .get_current() instead"
)
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_load(self):
"""Test Credentials.load()."""
data = textwrap.dedent(
"""
[general]
email = "user@domain.com"
"""
).strip()
m = mock_open(read_data=data)
with patch("streamlit.runtime.credentials.open", m, create=True):
c = Credentials.get_current()
c.load()
assert c.activation.email == "user@domain.com"
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_load_empty(self):
"""Test Credentials.load() with empty email"""
data = textwrap.dedent(
"""
[general]
email = ""
"""
).strip()
m = mock_open(read_data=data)
with patch("streamlit.runtime.credentials.open", m, create=True):
c = Credentials.get_current()
c.load()
assert c.activation.email == ""
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_load_twice(self):
"""Test Credentials.load() called twice."""
c = Credentials.get_current()
c.activation = _Activation("some_email", True)
with patch("streamlit.runtime.credentials._LOGGER") as p:
c.load()
p.error.assert_called_once_with(
"Credentials already loaded. Not rereading file."
)
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_load_file_not_found(self):
"""Test Credentials.load() with FileNotFoundError."""
with patch("streamlit.runtime.credentials.open") as m:
m.side_effect = FileNotFoundError()
c = Credentials.get_current()
c.activation = None
with pytest.raises(RuntimeError) as e:
c.load()
assert (
str(e.value)
== 'Credentials not found. Please run "streamlit activate".'
)
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_load_permission_denied(self):
"""Test Credentials.load() with Perission denied."""
with patch("streamlit.runtime.credentials.open") as m:
m.side_effect = PermissionError(
"[Errno 13] Permission denied: ~/.streamlit/credentials.toml"
)
c = Credentials.get_current()
c.activation = None
expected_msg = (
f"\nUnable to load credentials from {MOCK_PATH}.\n"
'Run "streamlit reset" and try again.\n'
)
with pytest.raises(RuntimeError, match=re.escape(expected_msg)):
c.load()
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_check_activated_already_loaded(self):
"""Test Credentials.check_activated() already loaded."""
c = Credentials.get_current()
c.activation = _Activation("some_email", True)
with patch("streamlit.runtime.credentials._exit") as p:
c._check_activated(auto_resolve=False)
p.assert_not_called()
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_check_activated_false(self):
"""Test Credentials.check_activated() not activated."""
c = Credentials.get_current()
c.activation = _Activation("some_email", False)
with patch("streamlit.runtime.credentials._exit") as p:
c._check_activated(auto_resolve=False)
p.assert_called_once_with("Activation email not valid.")
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_check_activated_error(self):
"""Test Credentials.check_activated() has an error."""
c = Credentials.get_current()
c.activation = _Activation("some_email", True)
with (
patch.object(c, "load", side_effect=Exception("Some error")),
patch("streamlit.runtime.credentials._exit") as p,
):
c._check_activated(auto_resolve=False)
p.assert_called_once_with("Some error")
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_save(self):
"""Test Credentials.save()."""
c = Credentials.get_current()
c.activation = _Activation("some_email", True)
truth = textwrap.dedent(
"""
[general]
email = "some_email"
"""
).lstrip()
streamlit_root_path = os.path.join(
"/mock/home/folder", file_util.CONFIG_FOLDER_NAME
)
# patch streamlit.*.os.makedirs instead of os.makedirs for py35 compat
with (
patch(
"streamlit.runtime.credentials.open", mock_open(), create=True
) as file_open,
patch("streamlit.runtime.credentials.os.makedirs") as make_dirs,
):
c.save()
make_dirs.assert_called_once_with(streamlit_root_path, exist_ok=True)
file_open.return_value.write.assert_called_once_with(truth)
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_activate_already_activated(self):
"""Test Credentials.activate() already activated."""
c = Credentials.get_current()
c.activation = _Activation("some_email", True)
with patch("streamlit.runtime.credentials._LOGGER") as p:
with pytest.raises(SystemExit):
c.activate()
assert p.error.call_count == 2
assert p.error.call_args_list[1] == call("Already activated")
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_activate_already_activated_not_valid(self):
"""Test Credentials.activate() already activated but not valid."""
c = Credentials.get_current()
c.activation = _Activation("some_email", False)
with patch("streamlit.runtime.credentials._LOGGER") as p:
with pytest.raises(SystemExit):
c.activate()
assert p.error.call_count == 2
assert str(p.error.call_args_list[1])[0:27] == "call('Activation not valid."
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_activate(self):
"""Test Credentials.activate()"""
c = Credentials.get_current()
c.activation = None
with (
patch.object(c, "load", side_effect=RuntimeError("Some error")),
patch.object(c, "save") as patched_save,
patch(PROMPT) as patched_prompt,
):
patched_prompt.side_effect = ["user@domain.com"]
c.activate()
patched_save.assert_called_once()
assert c.activation.email == "user@domain.com"
assert c.activation.is_valid
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_reset(self):
"""Test Credentials.reset()."""
c = Credentials.get_current()
with patch("streamlit.runtime.credentials.os.remove") as p:
Credentials.reset()
p.assert_called_once_with(MOCK_PATH)
assert c == Credentials.get_current()
@patch(
"streamlit.runtime.credentials.file_util.get_streamlit_file_path", mock_get_path
)
def test_Credentials_reset_error(self):
"""Test Credentials.reset() with error."""
with (
patch(
"streamlit.runtime.credentials.os.remove",
side_effect=OSError("some error"),
),
patch("streamlit.runtime.credentials._LOGGER") as p,
):
Credentials.reset()
p.exception.assert_called_once_with("Error removing credentials file.")
@tempdir()
def test_email_send(self, temp_dir):
"""Test that saving a new Credential sends an email"""
with requests_mock.mock() as m:
m.get(
"https://data.streamlit.io/metrics.json",
status_code=200,
json={"url": "https://www.example.com"},
)
m.post("https://www.example.com", status_code=200)
creds: Credentials = Credentials.get_current() # type: ignore
creds._conf_file = str(Path(temp_dir.path) / "config.toml")
creds.activation = _verify_email("email@example.com")
creds.save()
# Check that metrics url fetched
first_request = m.request_history[0]
assert first_request.method == "GET"
assert first_request.url == "https://data.streamlit.io/metrics.json"
# Check that email sent to the url fetched
last_request = m.request_history[-1]
assert last_request.method == "POST"
assert last_request.url == "https://www.example.com/"
assert '"userId": "email@example.com"' in last_request.text
@tempdir()
def test_email_failed_metrics_fetch(self, temp_dir):
"""Test that saving a new Credential does not send an email if metrics fetch fails"""
with requests_mock.mock() as m:
m.get("https://data.streamlit.io/metrics.json", status_code=404)
creds: Credentials = Credentials.get_current()
creds._conf_file = str(Path(temp_dir.path) / "config.toml")
creds.activation = _verify_email("email@example.com")
with self.assertLogs(
"streamlit.runtime.credentials", level="ERROR"
) as mock_logger:
creds.save()
assert len(m.request_history) == 1
assert len(mock_logger.output) == 1
assert "Failed to fetch metrics URL" in mock_logger.output[0]
@tempdir()
def test_email_not_send(self, temp_dir):
"""
Test that saving a new Credential does not send an email if the email is invalid
"""
with requests_mock.mock() as m:
m.get(
"https://data.streamlit.io/metrics.json",
status_code=200,
json={"url": "https://www.example.com"},
)
m.post("https://www.example.com", status_code=200)
creds: Credentials = Credentials.get_current() # type: ignore
creds._conf_file = str(Path(temp_dir.path) / "config.toml")
creds.activation = _verify_email("some_email")
creds.save()
assert len(m.request_history) == 0
@tempdir()
def test_email_send_exception_handling(self, temp_dir):
"""
Test that saving a new Credential catches and logs failures from the segment
endpoint
"""
with requests_mock.mock() as m:
m.get(
"https://data.streamlit.io/metrics.json",
status_code=200,
json={"url": "https://www.example.com"},
)
m.post("https://www.example.com", status_code=403)
creds: Credentials = Credentials.get_current() # type: ignore
creds._conf_file = str(Path(temp_dir.path) / "config.toml")
creds.activation = _verify_email("email@example.com")
with self.assertLogs(
"streamlit.runtime.credentials", level="ERROR"
) as mock_logger:
creds.save()
assert len(mock_logger.output) == 1
assert "Error saving email" in mock_logger.output[0]
| CredentialsClassTest |
python | readthedocs__readthedocs.org | readthedocs/search/migrations/0002_add_total_results_field.py | {
"start": 149,
"end": 511
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("search", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="searchquery",
name="total_results",
field=models.IntegerField(default=0, null=True, verbose_name="Total results"),
),
]
| Migration |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-earnings-call-transcript/llama_index/readers/earnings_call_transcript/base.py | {
"start": 242,
"end": 1464
} | class ____(BaseReader):
def __init__(self, year: int, ticker: str, quarter: str):
"""
Get the earning call transcripts for a given company, in a given year and quarter.
Args:
year (int): Year of the transcript
ticker (str): ticker symbol of the stock
quarter (str): quarter
"""
curr_year = datetime.now().year
assert year <= curr_year, "The year should be less than current year"
assert quarter in [
"Q1",
"Q2",
"Q3",
"Q4",
], 'The quarter should from the list ["Q1","Q2","Q3","Q4"]'
self.year = year
self.ticker = ticker
self.quarter = quarter
def load_data(self) -> List[Document]:
resp_dict, speakers_list = get_earnings_transcript(
self.quarter, self.ticker, self.year
)
return Document(
text=resp_dict["content"],
extra_info={
"ticker": resp_dict["symbol"],
"quarter": "Q" + str(resp_dict["quarter"]),
"date_time": resp_dict["date"],
"speakers_list": speakers_list,
},
)
| EarningsCallTranscript |
python | django__django | tests/user_commands/management/commands/subparser_required.py | {
"start": 54,
"end": 513
} | class ____(BaseCommand):
def add_arguments(self, parser):
subparsers_1 = parser.add_subparsers(dest="subcommand_1")
parser_foo_1 = subparsers_1.add_parser("foo_1")
subparsers_2 = parser_foo_1.add_subparsers(dest="subcommand_2")
parser_foo_2 = subparsers_2.add_parser("foo_2")
parser_foo_2.add_argument("--bar", required=True)
def handle(self, *args, **options):
self.stdout.write(",".join(options))
| Command |
python | pypa__pipenv | pipenv/patched/pip/_internal/configuration.py | {
"start": 2661,
"end": 14080
} | class ____:
"""Handles management of configuration.
Provides an interface to accessing and managing configuration files.
This class converts provides an API that takes "section.key-name" style
keys and stores the value associated with it as "key-name" under the
section "section".
This allows for a clean interface wherein the both the section and the
key-name are preserved in an easy to manage form in the configuration files
and the data stored is also nice.
"""
def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None:
super().__init__()
if load_only is not None and load_only not in VALID_LOAD_ONLY:
raise ConfigurationError(
"Got invalid value for load_only - should be one of {}".format(
", ".join(map(repr, VALID_LOAD_ONLY))
)
)
self.isolated = isolated
self.load_only = load_only
# Because we keep track of where we got the data from
self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = {
variant: [] for variant in OVERRIDE_ORDER
}
self._config: Dict[Kind, Dict[str, Any]] = {
variant: {} for variant in OVERRIDE_ORDER
}
self._modified_parsers: List[Tuple[str, RawConfigParser]] = []
def load(self) -> None:
"""Loads configuration from configuration files and environment"""
self._load_config_files()
if not self.isolated:
self._load_environment_vars()
def get_file_to_edit(self) -> Optional[str]:
"""Returns the file with highest priority in configuration"""
assert self.load_only is not None, "Need to be specified a file to be editing"
try:
return self._get_parser_to_modify()[0]
except IndexError:
return None
def items(self) -> Iterable[Tuple[str, Any]]:
"""Returns key-value pairs like dict.items() representing the loaded
configuration
"""
return self._dictionary.items()
def get_value(self, key: str) -> Any:
"""Get a value from the configuration."""
orig_key = key
key = _normalize_name(key)
try:
return self._dictionary[key]
except KeyError:
# disassembling triggers a more useful error message than simply
# "No such key" in the case that the key isn't in the form command.option
_disassemble_key(key)
raise ConfigurationError(f"No such key - {orig_key}")
def set_value(self, key: str, value: Any) -> None:
"""Modify a value in the configuration."""
key = _normalize_name(key)
self._ensure_have_load_only()
assert self.load_only
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
# Modify the parser and the configuration
if not parser.has_section(section):
parser.add_section(section)
parser.set(section, name, value)
self._config[self.load_only][key] = value
self._mark_as_modified(fname, parser)
def unset_value(self, key: str) -> None:
"""Unset a value in the configuration."""
orig_key = key
key = _normalize_name(key)
self._ensure_have_load_only()
assert self.load_only
if key not in self._config[self.load_only]:
raise ConfigurationError(f"No such key - {orig_key}")
fname, parser = self._get_parser_to_modify()
if parser is not None:
section, name = _disassemble_key(key)
if not (
parser.has_section(section) and parser.remove_option(section, name)
):
# The option was not removed.
raise ConfigurationError(
"Fatal Internal error [id=1]. Please report as a bug."
)
# The section may be empty after the option was removed.
if not parser.items(section):
parser.remove_section(section)
self._mark_as_modified(fname, parser)
del self._config[self.load_only][key]
def save(self) -> None:
"""Save the current in-memory state."""
self._ensure_have_load_only()
for fname, parser in self._modified_parsers:
logger.info("Writing to %s", fname)
# Ensure directory exists.
ensure_dir(os.path.dirname(fname))
# Ensure directory's permission(need to be writeable)
try:
with open(fname, "w") as f:
parser.write(f)
except OSError as error:
raise ConfigurationError(
f"An error occurred while writing to the configuration file "
f"{fname}: {error}"
)
#
# Private routines
#
def _ensure_have_load_only(self) -> None:
if self.load_only is None:
raise ConfigurationError("Needed a specific file to be modifying.")
logger.debug("Will be working with %s variant only", self.load_only)
@property
def _dictionary(self) -> Dict[str, Any]:
"""A dictionary representing the loaded configuration."""
# NOTE: Dictionaries are not populated if not loaded. So, conditionals
# are not needed here.
retval = {}
for variant in OVERRIDE_ORDER:
retval.update(self._config[variant])
return retval
def _load_config_files(self) -> None:
"""Loads configuration from configuration files"""
config_files = dict(self.iter_config_files())
if config_files[kinds.ENV][0:1] == [os.devnull]:
logger.debug(
"Skipping loading configuration files due to "
"environment's PIP_CONFIG_FILE being os.devnull"
)
return
for variant, files in config_files.items():
for fname in files:
# If there's specific variant set in `load_only`, load only
# that variant, not the others.
if self.load_only is not None and variant != self.load_only:
logger.debug("Skipping file '%s' (variant: %s)", fname, variant)
continue
parser = self._load_file(variant, fname)
# Keeping track of the parsers used
self._parsers[variant].append((fname, parser))
def _load_file(self, variant: Kind, fname: str) -> RawConfigParser:
logger.verbose("For variant '%s', will try loading '%s'", variant, fname)
parser = self._construct_parser(fname)
for section in parser.sections():
items = parser.items(section)
self._config[variant].update(self._normalized_keys(section, items))
return parser
def _construct_parser(self, fname: str) -> RawConfigParser:
parser = configparser.RawConfigParser()
# If there is no such file, don't bother reading it but create the
# parser anyway, to hold the data.
# Doing this is useful when modifying and saving files, where we don't
# need to construct a parser.
if os.path.exists(fname):
locale_encoding = locale.getpreferredencoding(False)
try:
parser.read(fname, encoding=locale_encoding)
except UnicodeDecodeError:
# See https://github.com/pypa/pip/issues/4963
raise ConfigurationFileCouldNotBeLoaded(
reason=f"contains invalid {locale_encoding} characters",
fname=fname,
)
except configparser.Error as error:
# See https://github.com/pypa/pip/issues/4893
raise ConfigurationFileCouldNotBeLoaded(error=error)
return parser
def _load_environment_vars(self) -> None:
"""Loads configuration from environment variables"""
self._config[kinds.ENV_VAR].update(
self._normalized_keys(":env:", self.get_environ_vars())
)
def _normalized_keys(
self, section: str, items: Iterable[Tuple[str, Any]]
) -> Dict[str, Any]:
"""Normalizes items to construct a dictionary with normalized keys.
This routine is where the names become keys and are made the same
regardless of source - configuration files or environment.
"""
normalized = {}
for name, val in items:
key = section + "." + _normalize_name(name)
normalized[key] = val
return normalized
def get_environ_vars(self) -> Iterable[Tuple[str, str]]:
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if key.startswith("PIP_"):
name = key[4:].lower()
if name not in ENV_NAMES_IGNORED:
yield name, val
# XXX: This is patched in the tests.
def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]:
"""Yields variant and configuration files associated with it.
This should be treated like items of a dictionary. The order
here doesn't affect what gets overridden. That is controlled
by OVERRIDE_ORDER. However this does control the order they are
displayed to the user. It's probably most ergonomic to display
things in the same order as OVERRIDE_ORDER
"""
# SMELL: Move the conditions out of this function
env_config_file = os.environ.get("PIP_CONFIG_FILE", None)
config_files = get_configuration_files()
yield kinds.GLOBAL, config_files[kinds.GLOBAL]
# per-user config is not loaded when env_config_file exists
should_load_user_config = not self.isolated and not (
env_config_file and os.path.exists(env_config_file)
)
if should_load_user_config:
# The legacy config file is overridden by the new config file
yield kinds.USER, config_files[kinds.USER]
# virtualenv config
yield kinds.SITE, config_files[kinds.SITE]
if env_config_file is not None:
yield kinds.ENV, [env_config_file]
else:
yield kinds.ENV, []
def get_values_in_config(self, variant: Kind) -> Dict[str, Any]:
"""Get values present in a config file"""
return self._config[variant]
def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]:
# Determine which parser to modify
assert self.load_only
parsers = self._parsers[self.load_only]
if not parsers:
# This should not happen if everything works correctly.
raise ConfigurationError(
"Fatal Internal error [id=2]. Please report as a bug."
)
# Use the highest priority parser.
return parsers[-1]
# XXX: This is patched in the tests.
def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None:
file_parser_tuple = (fname, parser)
if file_parser_tuple not in self._modified_parsers:
self._modified_parsers.append(file_parser_tuple)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._dictionary!r})"
| Configuration |
python | pyparsing__pyparsing | examples/inv_regex.py | {
"start": 1434,
"end": 1565
} | class ____:
def make_generator(self):
def dot_gen():
yield from printables
return dot_gen
| DotEmitter |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/validators/actions/test_ticketing.py | {
"start": 1112,
"end": 1244
} | class ____(BaseTicketingActionValidatorTest):
__test__ = True
provider = Action.Type.JIRA_SERVER
| TestJiraServerActionValidator |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_latex.py | {
"start": 26198,
"end": 29010
} | class ____:
def test_to_latex_with_formatters(self):
df = DataFrame(
{
"datetime64": [
datetime(2016, 1, 1),
datetime(2016, 2, 5),
datetime(2016, 3, 3),
],
"float": [1.0, 2.0, 3.0],
"int": [1, 2, 3],
"object": [(1, 2), True, False],
}
)
formatters = {
"datetime64": lambda x: x.strftime("%Y-%m"),
"float": lambda x: f"[{x: 4.1f}]",
"int": lambda x: f"0x{x:x}",
"object": lambda x: f"-{x!s}-",
"__index__": lambda x: f"index: {x}",
}
result = df.to_latex(formatters=dict(formatters))
expected = _dedent(
r"""
\begin{tabular}{llrrl}
\toprule
& datetime64 & float & int & object \\
\midrule
index: 0 & 2016-01 & [ 1.0] & 0x1 & -(1, 2)- \\
index: 1 & 2016-02 & [ 2.0] & 0x2 & -True- \\
index: 2 & 2016-03 & [ 3.0] & 0x3 & -False- \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_float_format_no_fixed_width_3decimals(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
result = df.to_latex(float_format="%.3f")
expected = _dedent(
r"""
\begin{tabular}{lr}
\toprule
& x \\
\midrule
0 & 0.200 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
def test_to_latex_float_format_no_fixed_width_integer(self):
# GH 22270
df = DataFrame({"x": [100.0]})
result = df.to_latex(float_format="%.0f")
expected = _dedent(
r"""
\begin{tabular}{lr}
\toprule
& x \\
\midrule
0 & 100 \\
\bottomrule
\end{tabular}
"""
)
assert result == expected
@pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
def test_to_latex_na_rep_and_float_format(self, na_rep):
df = DataFrame(
[
["A", 1.2225],
["A", None],
],
columns=["Group", "Data"],
)
result = df.to_latex(na_rep=na_rep, float_format="{:.2f}".format)
expected = _dedent(
rf"""
\begin{{tabular}}{{llr}}
\toprule
& Group & Data \\
\midrule
0 & A & 1.22 \\
1 & A & {na_rep} \\
\bottomrule
\end{{tabular}}
"""
)
assert result == expected
| TestToLatexFormatters |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 11102,
"end": 12316
} | class ____(AttributeHandler):
minimum_path_length = 2
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
stacktrace = event.interfaces.get("stacktrace")
if stacktrace:
stacks = [stacktrace]
else:
stacks = [
getattr(e, "stacktrace")
for e in getattr(event.interfaces.get("exception"), "values", [])
if getattr(e, "stacktrace", None)
]
result = []
for st in stacks:
for frame in st.frames:
if path[1] in ("filename", "module", "abs_path", "package"):
value = getattr(frame, path[1], None)
if value is not None:
result.append(value)
elif path[1] == "code":
if frame.pre_context:
result.extend(frame.pre_context)
if frame.context_line:
result.append(frame.context_line)
if frame.post_context:
result.extend(frame.post_context)
return result
@attribute_registry.register("device")
| StacktraceAttributeHandler |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 112655,
"end": 113437
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.greater(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
return KerasTensor(output_shape, dtype="bool")
@keras_export(["keras.ops.greater", "keras.ops.numpy.greater"])
def greater(x1, x2):
"""Return the truth value of `x1 > x2` element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise comparison of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Greater().symbolic_call(x1, x2)
return backend.numpy.greater(x1, x2)
| Greater |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 796942,
"end": 798033
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"description",
"how_it_works",
"name",
"primary_listing_count",
"resource_path",
"secondary_listing_count",
"slug",
"url",
)
description = sgqlc.types.Field(String, graphql_name="description")
how_it_works = sgqlc.types.Field(String, graphql_name="howItWorks")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
primary_listing_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="primaryListingCount"
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
secondary_listing_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="secondaryListingCount"
)
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
| MarketplaceCategory |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 654228,
"end": 654656
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("EnterpriseServerUserAccountEmail", graphql_name="node")
"""The item at the end of the edge."""
| EnterpriseServerUserAccountEmailEdge |
python | pytorch__pytorch | torch/nn/attention/flex_attention.py | {
"start": 9856,
"end": 10098
} | class ____(NamedTuple):
"""Auxiliary outputs from flex_attention operation.
Fields will be None if not requested, or contain the tensor if requested.
"""
lse: Tensor | None = None
max_scores: Tensor | None = None
| AuxOutput |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/documentation/models.py | {
"start": 556,
"end": 948
} | class ____:
def __init__(self, header: str):
self.header = header
self._content: List[str] = []
@property
def content(self) -> List[str]:
return self._content
@content.setter
def content(self, content: str) -> None:
self._content.append(content)
def __repr__(self) -> str:
return f"{self.header}: {self.content}"
| SectionContent |
python | django__django | django/contrib/gis/db/models/fields.py | {
"start": 11856,
"end": 12037
} | class ____(GeometryField):
geom_type = "MULTIPOLYGON"
geom_class = MultiPolygon
form_class = forms.MultiPolygonField
description = _("Multi polygon")
| MultiPolygonField |
python | Lightning-AI__lightning | tests/tests_pytorch/plugins/test_amp_plugins.py | {
"start": 1944,
"end": 2110
} | class ____(torch.optim.SGD):
def step(self, *args, pl_module=None):
pl_module.check_grads_clipped()
return super().step(*args)
| TestClippingOptimizer |
python | doocs__leetcode | solution/0000-0099/0004.Median of Two Sorted Arrays/Solution.py | {
"start": 0,
"end": 695
} | class ____:
def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
def f(i: int, j: int, k: int) -> int:
if i >= m:
return nums2[j + k - 1]
if j >= n:
return nums1[i + k - 1]
if k == 1:
return min(nums1[i], nums2[j])
p = k // 2
x = nums1[i + p - 1] if i + p - 1 < m else inf
y = nums2[j + p - 1] if j + p - 1 < n else inf
return f(i + p, j, k - p) if x < y else f(i, j + p, k - p)
m, n = len(nums1), len(nums2)
a = f(0, 0, (m + n + 1) // 2)
b = f(0, 0, (m + n + 2) // 2)
return (a + b) / 2
| Solution |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_details.py | {
"start": 85138,
"end": 91863
} | class ____(AlertRuleDetailsBase):
method = "put"
def test_sentry_app(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
sentry_app = self.create_sentry_app(
name="foo", organization=self.organization, is_alertable=True, verify_install=False
)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=self.organization, user=self.user
)
self.login_as(self.user)
alert_rule = self.alert_rule
# We need the IDs to force update instead of create, so we just get the rule using our own API. Like frontend would.
serialized_alert_rule = self.get_serialized_alert_rule()
serialized_alert_rule["name"] = "ValidSentryAppTestRule"
serialized_alert_rule["triggers"][0]["actions"][0] = {
"type": "sentry_app",
"targetType": "sentry_app",
"targetIdentifier": sentry_app.id,
"sentryAppId": sentry_app.id,
}
with self.feature("organizations:incidents"):
resp = self.get_success_response(
self.organization.slug, alert_rule.id, **serialized_alert_rule
)
alert_rule.refresh_from_db()
alert_rule.name = "ValidSentryAppTestRule"
assert resp.data == serialize(alert_rule)
assert resp.data["triggers"][0]["actions"][0]["sentryAppId"] == sentry_app.id
def test_no_config_sentry_app(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
sentry_app = self.create_sentry_app(is_alertable=True)
self.create_sentry_app_installation(
slug=sentry_app.slug, organization=self.organization, user=self.user
)
self.login_as(self.user)
test_params = {
**self.valid_params,
"triggers": [
{
"actions": [
{
"type": "sentry_app",
"targetType": "sentry_app",
"targetIdentifier": sentry_app.id,
"sentryAppId": sentry_app.id,
}
],
"alertThreshold": 300,
"label": "critical",
}
],
}
with self.feature(["organizations:incidents", "organizations:performance-view"]):
self.get_success_response(
self.organization.slug,
self.alert_rule.id,
status_code=200,
**test_params,
)
@responses.activate
def test_success_response_from_sentry_app(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
responses.add(
method=responses.POST,
url="https://example.com/sentry/alert-rule",
status=202,
)
sentry_app = self.create_sentry_app(
name="foo",
organization=self.organization,
schema={
"elements": [
self.create_alert_rule_action_schema(),
]
},
)
install = self.create_sentry_app_installation(
slug="foo", organization=self.organization, user=self.user
)
sentry_app_settings = [
{"name": "title", "value": "test title"},
{"name": "description", "value": "test description"},
]
test_params = self.valid_params.copy()
test_params["triggers"] = [
{
"actions": [
{
"type": "sentry_app",
"targetType": "sentry_app",
"targetIdentifier": sentry_app.id,
"hasSchemaFormConfig": True,
"sentryAppId": sentry_app.id,
"sentryAppInstallationUuid": install.uuid,
"settings": sentry_app_settings,
}
],
"alertThreshold": 300,
"label": "critical",
}
]
with self.feature(["organizations:incidents", "organizations:performance-view"]):
self.get_success_response(
self.organization.slug,
self.alert_rule.id,
status_code=200,
**test_params,
)
@responses.activate
def test_error_response_from_sentry_app(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
error_message = "Everything is broken!"
responses.add(
method=responses.POST,
url="https://example.com/sentry/alert-rule",
status=500,
json={"message": error_message},
)
sentry_app = self.create_sentry_app(
name="foo",
organization=self.organization,
schema={
"elements": [
self.create_alert_rule_action_schema(),
]
},
)
install = self.create_sentry_app_installation(
slug="foo", organization=self.organization, user=self.user
)
sentry_app_settings = [
{"name": "title", "value": "test title"},
{"name": "description", "value": "test description"},
]
test_params = self.valid_params.copy()
test_params["triggers"] = [
{
"actions": [
{
"type": "sentry_app",
"targetType": "sentry_app",
"targetIdentifier": sentry_app.id,
"hasSchemaFormConfig": True,
"sentryAppId": sentry_app.id,
"sentryAppInstallationUuid": install.uuid,
"settings": sentry_app_settings,
}
],
"alertThreshold": 300,
"label": "critical",
}
]
with self.feature(["organizations:incidents", "organizations:performance-view"]):
resp = self.get_response(self.organization.slug, self.alert_rule.id, **test_params)
assert resp.status_code == 500
assert error_message in resp.data["detail"]
| AlertRuleDetailsSentryAppPutEndpointTest |
python | django-haystack__django-haystack | haystack/backends/whoosh_backend.py | {
"start": 2455,
"end": 31852
} | class ____(BaseSearchBackend):
# Word reserved by Whoosh for special use.
RESERVED_WORDS = ("AND", "NOT", "OR", "TO")
# Characters reserved by Whoosh for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
"\\",
"+",
"-",
"&&",
"||",
"!",
"(",
")",
"{",
"}",
"[",
"]",
"^",
'"',
"~",
"*",
"?",
":",
".",
)
def __init__(self, connection_alias, **connection_options):
super().__init__(connection_alias, **connection_options)
self.setup_complete = False
self.use_file_storage = True
self.post_limit = getattr(connection_options, "POST_LIMIT", 128 * 1024 * 1024)
self.path = connection_options.get("PATH")
if connection_options.get("STORAGE", "file") != "file":
self.use_file_storage = False
if self.use_file_storage and not self.path:
raise ImproperlyConfigured(
"You must specify a 'PATH' in your settings for connection '%s'."
% connection_alias
)
self.log = logging.getLogger("haystack")
def setup(self):
"""
Defers loading until needed.
"""
from haystack import connections
new_index = False
# Make sure the index is there.
if self.use_file_storage and not os.path.exists(self.path):
try:
os.makedirs(self.path)
except Exception:
raise IOError(
"The directory of your Whoosh index '%s' (cwd='%s') cannot be created for the current user/group."
% (self.path, os.getcwd())
)
new_index = True
if self.use_file_storage and not os.access(self.path, os.W_OK):
raise IOError(
"The path to your Whoosh index '%s' is not writable for the current user/group."
% self.path
)
if self.use_file_storage:
self.storage = FileStorage(self.path)
else:
global LOCALS
if getattr(LOCALS, "RAM_STORE", None) is None:
LOCALS.RAM_STORE = RamStorage()
self.storage = LOCALS.RAM_STORE
self.content_field_name, self.schema = self.build_schema(
connections[self.connection_alias].get_unified_index().all_searchfields()
)
self.parser = QueryParser(self.content_field_name, schema=self.schema)
self.parser.add_plugins([FuzzyTermPlugin])
if new_index is True:
self.index = self.storage.create_index(self.schema)
else:
try:
self.index = self.storage.open_index(schema=self.schema)
except index.EmptyIndexError:
self.index = self.storage.create_index(self.schema)
self.setup_complete = True
def build_schema(self, fields):
schema_fields = {
ID: WHOOSH_ID(stored=True, unique=True),
DJANGO_CT: WHOOSH_ID(stored=True),
DJANGO_ID: WHOOSH_ID(stored=True),
}
# Grab the number of keys that are hard-coded into Haystack.
# We'll use this to (possibly) fail slightly more gracefully later.
initial_key_count = len(schema_fields)
content_field_name = ""
for _, field_class in fields.items():
if field_class.is_multivalued:
if field_class.indexed is False:
schema_fields[field_class.index_fieldname] = IDLIST(
stored=True, field_boost=field_class.boost
)
else:
schema_fields[field_class.index_fieldname] = KEYWORD(
stored=True,
commas=True,
scorable=True,
field_boost=field_class.boost,
)
elif field_class.field_type in ["date", "datetime"]:
schema_fields[field_class.index_fieldname] = DATETIME(
stored=field_class.stored, sortable=True
)
elif field_class.field_type == "integer":
schema_fields[field_class.index_fieldname] = NUMERIC(
stored=field_class.stored,
numtype=int,
field_boost=field_class.boost,
)
elif field_class.field_type == "float":
schema_fields[field_class.index_fieldname] = NUMERIC(
stored=field_class.stored,
numtype=float,
field_boost=field_class.boost,
)
elif field_class.field_type == "boolean":
# Field boost isn't supported on BOOLEAN as of 1.8.2.
schema_fields[field_class.index_fieldname] = BOOLEAN(
stored=field_class.stored
)
elif field_class.field_type == "ngram":
schema_fields[field_class.index_fieldname] = NGRAM(
minsize=3,
maxsize=15,
stored=field_class.stored,
field_boost=field_class.boost,
)
elif field_class.field_type == "edge_ngram":
schema_fields[field_class.index_fieldname] = NGRAMWORDS(
minsize=2,
maxsize=15,
at="start",
stored=field_class.stored,
field_boost=field_class.boost,
)
else:
schema_fields[field_class.index_fieldname] = TEXT(
stored=True,
analyzer=field_class.analyzer or StemmingAnalyzer(),
field_boost=field_class.boost,
sortable=True,
)
if field_class.document is True:
content_field_name = field_class.index_fieldname
schema_fields[field_class.index_fieldname].spelling = True
# Fail more gracefully than relying on the backend to die if no fields
# are found.
if len(schema_fields) <= initial_key_count:
raise SearchBackendError(
"No fields were found in any search_indexes. Please correct this before attempting to search."
)
return (content_field_name, Schema(**schema_fields))
def update(self, index, iterable, commit=True):
    """Add or refresh documents for ``iterable`` objects in the Whoosh index.

    Args:
        index: The haystack ``SearchIndex`` used to prepare each object.
        iterable: Model instances to (re)index.
        commit: Accepted for backend-API compatibility only; Whoosh is
            always committed here regardless of this flag (see note below).
    """
    if not self.setup_complete:
        self.setup()

    # Work against the newest generation of the on-disk index.
    self.index = self.index.refresh()
    # AsyncWriter queues writes on a background thread, which avoids
    # write-lock contention with concurrent readers/writers.
    writer = AsyncWriter(self.index)

    for obj in iterable:
        try:
            doc = index.full_prepare(obj)
        except SkipDocument:
            self.log.debug("Indexing for object `%s` skipped", obj)
        else:
            # Really make sure it's unicode, because Whoosh won't have it any
            # other way.
            for key in doc:
                doc[key] = self._from_python(doc[key])

            # Document boosts aren't supported in Whoosh 2.5.0+.
            if "boost" in doc:
                del doc["boost"]

            try:
                writer.update_document(**doc)
            except Exception:
                if not self.silently_fail:
                    raise

                # We'll log the object identifier but won't include the actual object
                # to avoid the possibility of that generating encoding errors while
                # processing the log message:
                self.log.exception(
                    "Preparing object for update",
                    extra={"data": {"index": index, "object": get_identifier(obj)}},
                )

    if len(iterable) > 0:
        # For now, commit no matter what, as we run into locking issues otherwise.
        writer.commit()

        # Wait for the async writer's background thread so the changes are
        # actually flushed before we return.
        if writer.ident is not None:
            writer.join()
def remove(self, obj_or_string, commit=True):
    """Delete a single object's document from the index.

    ``obj_or_string`` may be a model instance or an identifier string;
    ``commit`` is accepted for API compatibility only.
    """
    if not self.setup_complete:
        self.setup()

    self.index = self.index.refresh()
    doc_id = get_identifier(obj_or_string)

    try:
        # Match the stored ID field exactly and drop every hit.
        parsed = self.parser.parse('%s:"%s"' % (ID, doc_id))
        self.index.delete_by_query(q=parsed)
    except Exception:
        if not self.silently_fail:
            raise

        self.log.exception(
            "Failed to remove document '%s' from Whoosh",
            doc_id,
        )
def clear(self, models=None, commit=True):
    """Remove documents from the index.

    With ``models=None`` the whole index is wiped; otherwise only the
    documents belonging to the given model classes are deleted.
    ``commit`` is accepted for API compatibility only.
    """
    if not self.setup_complete:
        self.setup()

    self.index = self.index.refresh()

    if models is not None:
        assert isinstance(models, (list, tuple))

    try:
        if models is None:
            self.delete_index()
        else:
            # One "content-type" clause per model, OR-ed together.
            models_to_delete = [
                "%s:%s" % (DJANGO_CT, get_model_ct(model)) for model in models
            ]
            self.index.delete_by_query(
                q=self.parser.parse(" OR ".join(models_to_delete))
            )
    except Exception:
        if not self.silently_fail:
            raise

        if models is None:
            self.log.exception("Failed to clear Whoosh index")
        else:
            self.log.exception(
                "Failed to clear Whoosh index of models '%s'",
                ",".join(models_to_delete),
            )
def delete_index(self):
    """Wipe the entire index and rebuild an empty one.

    Per the Whoosh mailing list, deleting the index files outright is
    much more efficient than removing every document individually.
    """
    if self.use_file_storage:
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
    else:
        self.storage.clean()

    # Recreate everything.
    self.setup()
def optimize(self):
    """Compact the Whoosh index (merge segments) for faster searching."""
    if not self.setup_complete:
        self.setup()

    refreshed = self.index.refresh()
    self.index = refreshed
    refreshed.optimize()
def calculate_page(self, start_offset=0, end_offset=None):
    """Translate a Haystack result slice into Whoosh paging parameters.

    Whoosh paginates with 1-based page numbers rather than offsets, so a
    ``[start_offset:end_offset]`` slice is converted into the
    ``(page_num, page_length)`` pair expected by ``searcher.search_page``.

    Args:
        start_offset: Index of the first desired result; ``None`` is
            treated as ``0``.
        end_offset: Index one past the last desired result. ``None``
            means "effectively everything" (capped at 1,000,000);
            values <= 0 are clamped to 1 because Whoosh requires a
            positive page length.

    Returns:
        Tuple ``(page_num, page_length)``; ``page_num`` is 1-based.
    """
    # Prevent against Whoosh throwing an error. Requires an end_offset
    # greater than 0.
    if end_offset is not None and end_offset <= 0:
        end_offset = 1

    # Normalize the open-ended defaults.
    if end_offset is None:
        end_offset = 1000000

    if start_offset is None:
        start_offset = 0

    page_num = 0
    page_length = end_offset - start_offset

    if page_length > 0:
        # Integer floor division instead of int(start / length): the old
        # float round-trip can lose precision for very large offsets.
        # Offsets are non-negative in practice, so floor division matches
        # the previous truncation behavior.
        page_num = start_offset // page_length

    # Increment because Whoosh uses 1-based page numbers.
    page_num += 1

    return page_num, page_length
@log_query
def search(
    self,
    query_string,
    sort_by=None,
    start_offset=0,
    end_offset=None,
    fields="",
    highlight=False,
    facets=None,
    date_facets=None,
    query_facets=None,
    narrow_queries=None,
    spelling_query=None,
    within=None,
    dwithin=None,
    distance_point=None,
    models=None,
    limit_to_registered_models=None,
    result_class=None,
    **kwargs
):
    """Run ``query_string`` against the Whoosh index.

    Returns a dict with at least ``results`` and ``hits`` keys (plus
    ``facets``/``spelling_suggestion`` via ``_process_results`` when the
    index has documents). The spatial parameters (``within``, ``dwithin``,
    ``distance_point``) are accepted for backend-API compatibility but are
    not used by this backend. ``query_facets`` is likewise unsupported and
    only triggers a warning.
    """
    if not self.setup_complete:
        self.setup()

    # A zero length query should return no results.
    if len(query_string) == 0:
        return {"results": [], "hits": 0}

    query_string = force_str(query_string)

    # A one-character query (non-wildcard) gets nabbed by a stopwords
    # filter and should yield zero results.
    if len(query_string) <= 1 and query_string != "*":
        return {"results": [], "hits": 0}

    reverse = False

    if sort_by is not None:
        # Determine if we need to reverse the results and if Whoosh can
        # handle what it's being asked to sort by. Reversing is an
        # all-or-nothing action, unfortunately.
        sort_by_list = []
        reverse_counter = 0

        # Count descending ("-"-prefixed) fields first so mixed
        # directions can be rejected up front.
        for order_by in sort_by:
            if order_by.startswith("-"):
                reverse_counter += 1

        if reverse_counter and reverse_counter != len(sort_by):
            raise SearchBackendError(
                "Whoosh requires all order_by fields"
                " to use the same sort direction"
            )

        # Strip the "-" prefix; the first field decides the (uniform)
        # sort direction.
        for order_by in sort_by:
            if order_by.startswith("-"):
                sort_by_list.append(order_by[1:])

                if len(sort_by_list) == 1:
                    reverse = True
            else:
                sort_by_list.append(order_by)

                if len(sort_by_list) == 1:
                    reverse = False

        sort_by = sort_by_list

    # Build Whoosh faceting (grouping) objects; facet_types remembers
    # which output bucket ("fields"/"dates") each facet belongs in.
    group_by = []
    facet_types = {}

    if facets is not None:
        group_by += [
            FieldFacet(facet, allow_overlap=True, maptype=Count) for facet in facets
        ]
        facet_types.update(dict.fromkeys(facets, "fields"))

    if date_facets is not None:

        def _fixup_datetime(dt):
            # Whoosh's DateRangeFacet needs datetimes; promote plain
            # dates to midnight datetimes.
            if isinstance(dt, datetime):
                return dt
            if isinstance(dt, date):
                return datetime(dt.year, dt.month, dt.day)
            raise ValueError

        for key, value in date_facets.items():
            start = _fixup_datetime(value["start_date"])
            end = _fixup_datetime(value["end_date"])
            gap_by = value["gap_by"]
            gap_amount = value.get("gap_amount", 1)
            # e.g. gap_by="month", gap_amount=2 -> RelativeDelta(months=2)
            gap = RelativeDelta(**{"%ss" % gap_by: gap_amount})
            group_by.append(DateRangeFacet(key, start, end, gap, maptype=Count))
            facet_types[key] = "dates"

    if query_facets is not None:
        warnings.warn(
            "Whoosh does not handle query faceting.", Warning, stacklevel=2
        )

    narrowed_results = None
    self.index = self.index.refresh()

    if limit_to_registered_models is None:
        limit_to_registered_models = getattr(
            settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
        )

    if models and len(models):
        model_choices = sorted(get_model_ct(model) for model in models)
    elif limit_to_registered_models:
        # Using narrow queries, limit the results to only models handled
        # with the current routers.
        model_choices = self.build_models_list()
    else:
        model_choices = []

    if len(model_choices) > 0:
        if narrow_queries is None:
            narrow_queries = set()

        # Restrict hits to the chosen models via the stored content-type
        # field.
        narrow_queries.add(
            " OR ".join(["%s:%s" % (DJANGO_CT, rm) for rm in model_choices])
        )

    narrow_searcher = None

    if narrow_queries is not None:
        # Potentially expensive? I don't see another way to do it in Whoosh...
        narrow_searcher = self.index.searcher()

        # Run each narrow query and intersect the result sets; an empty
        # intersection short-circuits to "no results".
        for nq in narrow_queries:
            recent_narrowed_results = narrow_searcher.search(
                self.parser.parse(force_str(nq)), limit=None
            )

            if len(recent_narrowed_results) <= 0:
                return {"results": [], "hits": 0}

            if narrowed_results is not None:
                narrowed_results.filter(recent_narrowed_results)
            else:
                narrowed_results = recent_narrowed_results

    self.index = self.index.refresh()

    if self.index.doc_count():
        searcher = self.index.searcher()
        parsed_query = self.parser.parse(query_string)

        # In the event of an invalid/stopworded query, recover gracefully.
        if parsed_query is None:
            return {"results": [], "hits": 0}

        page_num, page_length = self.calculate_page(start_offset, end_offset)

        search_kwargs = {
            "pagelen": page_length,
            "sortedby": sort_by,
            "reverse": reverse,
            "groupedby": group_by,
        }

        # Handle the case where the results have been narrowed.
        if narrowed_results is not None:
            search_kwargs["filter"] = narrowed_results

        try:
            raw_page = searcher.search_page(parsed_query, page_num, **search_kwargs)
        except ValueError:
            if not self.silently_fail:
                raise

            return {"results": [], "hits": 0, "spelling_suggestion": None}

        # Because as of Whoosh 2.5.1, it will return the wrong page of
        # results if you request something too high. :(
        if raw_page.pagenum < page_num:
            return {"results": [], "hits": 0, "spelling_suggestion": None}

        results = self._process_results(
            raw_page,
            highlight=highlight,
            query_string=query_string,
            spelling_query=spelling_query,
            result_class=result_class,
            facet_types=facet_types,
        )
        searcher.close()

        if hasattr(narrow_searcher, "close"):
            narrow_searcher.close()

        return results
    else:
        # Empty index: still honor spelling suggestions when enabled.
        if self.include_spelling:
            if spelling_query:
                spelling_suggestion = self.create_spelling_suggestion(
                    spelling_query
                )
            else:
                spelling_suggestion = self.create_spelling_suggestion(query_string)
        else:
            spelling_suggestion = None

        return {
            "results": [],
            "hits": 0,
            "spelling_suggestion": spelling_suggestion,
        }
def more_like_this(
    self,
    model_instance,
    additional_query_string=None,
    start_offset=0,
    end_offset=None,
    models=None,
    limit_to_registered_models=None,
    result_class=None,
    **kwargs
):
    """Find documents similar to ``model_instance`` via Whoosh's MLT.

    Looks up the instance's own document by ID, asks Whoosh for
    more-like-this matches on the content field, optionally narrows by
    model/extra query, and pages the outcome through
    ``_process_results``.
    """
    if not self.setup_complete:
        self.setup()

    # Match the search normally made, handling DB to search naming.
    field_name = self.content_field_name
    narrow_queries = set()
    narrowed_results = None
    self.index = self.index.refresh()

    if limit_to_registered_models is None:
        limit_to_registered_models = getattr(
            settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
        )

    if models and len(models):
        model_choices = sorted(get_model_ct(model) for model in models)
    elif limit_to_registered_models:
        # Using narrow queries, limit the results to only models handled
        # with the current routers.
        model_choices = self.build_models_list()
    else:
        model_choices = []

    if len(model_choices) > 0:
        # NOTE(review): dead check -- narrow_queries was initialized to a
        # set() above, so it can never be None here.
        if narrow_queries is None:
            narrow_queries = set()

        narrow_queries.add(
            " OR ".join(["%s:%s" % (DJANGO_CT, rm) for rm in model_choices])
        )

    if additional_query_string and additional_query_string != "*":
        narrow_queries.add(additional_query_string)

    narrow_searcher = None

    if narrow_queries is not None:
        # Potentially expensive? I don't see another way to do it in Whoosh...
        narrow_searcher = self.index.searcher()

        # Intersect the narrow-query result sets; an empty one
        # short-circuits to "no results".
        for nq in narrow_queries:
            recent_narrowed_results = narrow_searcher.search(
                self.parser.parse(force_str(nq)), limit=None
            )

            if len(recent_narrowed_results) <= 0:
                return {"results": [], "hits": 0}

            # NOTE(review): truthiness test here vs. `is not None` in
            # search() -- an empty (falsy) result set would be replaced
            # rather than filtered, though the len() check above already
            # rules that out.
            if narrowed_results:
                narrowed_results.filter(recent_narrowed_results)
            else:
                narrowed_results = recent_narrowed_results

    page_num, page_length = self.calculate_page(start_offset, end_offset)

    self.index = self.index.refresh()
    raw_results = EmptyResults()

    searcher = None
    if self.index.doc_count():
        # Find this instance's own document, then ask Whoosh for
        # similar ones based on the content field.
        query = "%s:%s" % (ID, get_identifier(model_instance))
        searcher = self.index.searcher()
        parsed_query = self.parser.parse(query)
        results = searcher.search(parsed_query)

        if len(results):
            raw_results = results[0].more_like_this(field_name, top=end_offset)

        # Handle the case where the results have been narrowed.
        if narrowed_results is not None and hasattr(raw_results, "filter"):
            raw_results.filter(narrowed_results)

    try:
        raw_page = ResultsPage(raw_results, page_num, page_length)
    except ValueError:
        if not self.silently_fail:
            raise

        return {"results": [], "hits": 0, "spelling_suggestion": None}

    # Because as of Whoosh 2.5.1, it will return the wrong page of
    # results if you request something too high. :(
    if raw_page.pagenum < page_num:
        return {"results": [], "hits": 0, "spelling_suggestion": None}

    results = self._process_results(raw_page, result_class=result_class)

    if searcher:
        searcher.close()

    if hasattr(narrow_searcher, "close"):
        narrow_searcher.close()

    return results
def _process_results(
    self,
    raw_page,
    highlight=False,
    query_string="",
    spelling_query=None,
    result_class=None,
    facet_types=None,
):
    """Convert a Whoosh ``ResultsPage`` into Haystack's result dict.

    Builds ``result_class`` instances for every hit whose model is still
    registered with Haystack (others are dropped and subtracted from the
    hit count), converts stored field values back to Python, and
    assembles facet counts and an optional spelling suggestion.

    Returns a dict with ``results``, ``hits``, ``facets`` and
    ``spelling_suggestion`` keys.
    """
    from haystack import connections

    results = []

    # It's important to grab the hits first before slicing. Otherwise, this
    # can cause pagination failures.
    hits = len(raw_page)

    if result_class is None:
        result_class = SearchResult

    spelling_suggestion = None
    unified_index = connections[self.connection_alias].get_unified_index()
    indexed_models = unified_index.get_indexed_models()

    facets = {}
    if facet_types:
        facets = {
            "fields": {},
            "dates": {},
            "queries": {},
        }

        for facet_fieldname in raw_page.results.facet_names():
            group = raw_page.results.groups(facet_fieldname)
            # facet_types maps each facet name to its output bucket
            # ("fields" or "dates"), as recorded by search().
            facet_type = facet_types[facet_fieldname]

            # Extract None item for later processing, if present.
            none_item = group.pop(None, None)

            # Sort by count descending, then facet value ascending.
            lst = facets[facet_type][facet_fieldname] = sorted(
                group.items(), key=(lambda itm: (-itm[1], itm[0]))
            )

            if none_item is not None:
                # Inject None item back into the results, keeping the
                # list ordered by descending count (None can't be
                # compared to the other keys, hence the manual insert).
                none_entry = (None, none_item)
                if not lst or lst[-1][1] >= none_item:
                    lst.append(none_entry)
                else:
                    for i, value in enumerate(lst):
                        if value[1] < none_item:
                            lst.insert(i, none_entry)
                            break

    for doc_offset, raw_result in enumerate(raw_page):
        # Missing scores default to 0.
        score = raw_page.score(doc_offset) or 0
        app_label, model_name = raw_result[DJANGO_CT].split(".")
        additional_fields = {}
        model = haystack_get_model(app_label, model_name)

        if model and model in indexed_models:
            # Convert each stored value back to Python, preferring the
            # index field's own convert() when one exists.
            for key, value in raw_result.items():
                index = unified_index.get_index(model)
                string_key = str(key)

                if string_key in index.fields and hasattr(
                    index.fields[string_key], "convert"
                ):
                    # Special-cased due to the nature of KEYWORD fields.
                    if index.fields[string_key].is_multivalued:
                        if value is None or len(value) == 0:
                            additional_fields[string_key] = []
                        else:
                            additional_fields[string_key] = value.split(",")
                    else:
                        additional_fields[string_key] = index.fields[
                            string_key
                        ].convert(value)
                else:
                    additional_fields[string_key] = self._to_python(value)

            # Internal bookkeeping fields are passed positionally to the
            # result class, not as extra attributes.
            del additional_fields[DJANGO_CT]
            del additional_fields[DJANGO_ID]

            if highlight:
                sa = StemmingAnalyzer()
                formatter = WhooshHtmlFormatter("em")
                terms = [token.text for token in sa(query_string)]

                whoosh_result = whoosh_highlight(
                    additional_fields.get(self.content_field_name),
                    terms,
                    sa,
                    ContextFragmenter(),
                    formatter,
                )
                additional_fields["highlighted"] = {
                    self.content_field_name: [whoosh_result]
                }

            result = result_class(
                app_label,
                model_name,
                raw_result[DJANGO_ID],
                score,
                **additional_fields
            )
            results.append(result)
        else:
            # Stale document for an unregistered model: drop it and keep
            # the reported hit count honest.
            hits -= 1

    if self.include_spelling:
        if spelling_query:
            spelling_suggestion = self.create_spelling_suggestion(spelling_query)
        else:
            spelling_suggestion = self.create_spelling_suggestion(query_string)

    return {
        "results": results,
        "hits": hits,
        "facets": facets,
        "spelling_suggestion": spelling_suggestion,
    }
def create_spelling_suggestion(self, query_string):
    """Build a "did you mean" string by correcting each query term.

    Returns ``None`` for an empty query; otherwise a space-joined string
    of the best suggestion for every word that has one.
    """
    reader = self.index.reader()
    corrector = reader.corrector(self.content_field_name)
    cleaned_query = force_str(query_string)

    if not query_string:
        return None

    # Strip out anything Whoosh would treat as query syntax before
    # asking for corrections.
    for reserved_word in self.RESERVED_WORDS:
        cleaned_query = cleaned_query.replace(reserved_word, "")

    for reserved_char in self.RESERVED_CHARACTERS:
        cleaned_query = cleaned_query.replace(reserved_char, "")

    # Correct term by term, keeping only words that yield a suggestion.
    corrected = []

    for word in cleaned_query.split():
        candidates = corrector.suggest(word, limit=1)

        if candidates:
            corrected.append(candidates[0])

    return " ".join(corrected)
def _from_python(self, value):
"""
Converts Python values to a string for Whoosh.
Code courtesy of pysolr.
"""
if hasattr(value, "strftime"):
if not hasattr(value, "hour"):
value = datetime(value.year, value.month, value.day, 0, 0, 0)
elif isinstance(value, bool):
if value:
value = "true"
else:
value = "false"
elif isinstance(value, (list, tuple)):
value = ",".join([force_str(v) for v in value])
elif isinstance(value, (int, float)):
# Leave it alone.
pass
else:
value = force_str(value)
return value
def _to_python(self, value):
"""
Converts values from Whoosh to native Python values.
A port of the same method in pysolr, as they deal with data the same way.
"""
if value == "true":
return True
elif value == "false":
return False
if value and isinstance(value, str):
possible_datetime = DATETIME_REGEX.search(value)
if possible_datetime:
date_values = possible_datetime.groupdict()
for dk, dv in date_values.items():
date_values[dk] = int(dv)
return datetime(
date_values["year"],
date_values["month"],
date_values["day"],
date_values["hour"],
date_values["minute"],
date_values["second"],
)
try:
# Attempt to use json to load the values.
converted_value = json.loads(value)
# Try to handle most built-in types.
if isinstance(
converted_value,
(list, tuple, set, dict, int, float, complex),
):
return converted_value
except Exception:
# If it fails (SyntaxError or its ilk) or we don't trust it,
# continue on.
pass
return value
| WhooshSearchBackend |
python | scipy__scipy | scipy/special/tests/test_gammainc.py | {
"start": 266,
"end": 2678
} | class ____:
@pytest.mark.parametrize('a, x', INVALID_POINTS)
def test_domain(self, a, x):
assert np.isnan(sc.gammainc(a, x))
def test_a_eq_0_x_gt_0(self):
assert sc.gammainc(0, 1) == 1
@pytest.mark.parametrize('a, x, desired', [
(np.inf, 1, 0),
(np.inf, 0, 0),
(np.inf, np.inf, np.nan),
(1, np.inf, 1)
])
def test_infinite_arguments(self, a, x, desired):
result = sc.gammainc(a, x)
if np.isnan(desired):
assert np.isnan(result)
else:
assert result == desired
@pytest.mark.parametrize("x", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan])
def test_a_nan(self, x):
assert np.isnan(sc.gammainc(np.nan, x))
@pytest.mark.parametrize("a", [-np.inf, -1.0, -0.0, 0.0, np.inf, np.nan])
def test_x_nan(self, a):
assert np.isnan(sc.gammainc(a, np.nan))
def test_infinite_limits(self):
# Test that large arguments converge to the hard-coded limits
# at infinity.
assert_allclose(
sc.gammainc(1000, 100),
sc.gammainc(np.inf, 100),
atol=1e-200, # Use `atol` since the function converges to 0.
rtol=0
)
assert sc.gammainc(100, 1000) == sc.gammainc(100, np.inf)
def test_x_zero(self):
a = np.arange(1, 10)
assert_array_equal(sc.gammainc(a, 0), 0)
def test_limit_check(self):
result = sc.gammainc(1e-10, 1)
limit = sc.gammainc(0, 1)
assert np.isclose(result, limit)
def gammainc_line(self, x):
# The line a = x where a simpler asymptotic expansion (analog
# of DLMF 8.12.15) is available.
c = np.array([-1/3, -1/540, 25/6048, 101/155520,
-3184811/3695155200, -2745493/8151736420])
res = 0
xfac = 1
for ck in c:
res -= ck*xfac
xfac /= x
res /= np.sqrt(2*np.pi*x)
res += 0.5
return res
def test_line(self):
x = np.logspace(np.log10(25), 300, 500)
a = x
dataset = np.vstack((a, x, self.gammainc_line(x))).T
FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
def test_roundtrip(self):
a = np.logspace(-5, 10, 100)
x = np.logspace(-5, 10, 100)
y = sc.gammaincinv(a, sc.gammainc(a, x))
assert_allclose(x, y, rtol=1e-10)
| TestGammainc |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_scalar_ctors.py | {
"start": 637,
"end": 1706
} | class ____(TestCase):
@xpassIfTorchDynamo_np # (reason="XXX: floats from strings")
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single("1.234")
fdouble = np.double("1.234")
assert_almost_equal(fsingle, 1.234)
assert_almost_equal(fdouble, 1.234)
@xpassIfTorchDynamo_np # (reason="XXX: floats from strings")
def test_floating_overflow(self):
"""Strings containing an unrepresentable float overflow"""
fhalf = np.half("1e10000")
assert_equal(fhalf, np.inf)
fsingle = np.single("1e10000")
assert_equal(fsingle, np.inf)
fdouble = np.double("1e10000")
assert_equal(fdouble, np.inf)
fhalf = np.half("-1e10000")
assert_equal(fhalf, -np.inf)
fsingle = np.single("-1e10000")
assert_equal(fsingle, -np.inf)
fdouble = np.double("-1e10000")
assert_equal(fdouble, -np.inf)
def test_bool(self):
with pytest.raises(TypeError):
np.bool_(False, garbage=True)
| TestFromString |
python | realpython__materials | python-unittest/stack.py | {
"start": 0,
"end": 399
} | class ____:
def __init__(self, items=None):
self.items = list(items) if items is not None else []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def __len__(self):
return len(self.items)
def __iter__(self):
return iter(self.items)
def __reversed__(self):
return reversed(self.items)
| Stack |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_cross_op_test.py | {
"start": 37662,
"end": 45796
} | class ____(BaseSparseCrossOpTest):
@test_util.run_deprecated_v1
def test_hashed_zero_bucket_no_hash_key(self):
sp_inp_1 = self._sparse_tensor([['batch1-FC1-F1']])
sp_inp_2 = self._sparse_tensor([['batch1-FC2-F1']])
sp_inp_3 = self._sparse_tensor([['batch1-FC3-F1']])
inds, vals, shapes = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
num_buckets=0,
salt=[1, 1],
strong_hash=False)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[9186962005966787372]])
out = sparse_tensor.SparseTensor(inds, vals, shapes)
with self.cached_session():
self._assert_sparse_tensor_equals(expected_out, self.evaluate(out))
# salt is not being used when `strong_hash` is False.
inds_2, vals_2, shapes_2 = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
num_buckets=0,
salt=[137, 173],
strong_hash=False)
out_2 = sparse_tensor.SparseTensor(inds_2, vals_2, shapes_2)
with self.cached_session():
self._assert_sparse_tensor_equals(expected_out, self.evaluate(out_2))
@test_util.run_deprecated_v1
def test_hashed_output(self):
sp_inp_1 = self._sparse_tensor([['batch1-FC1-F1']])
sp_inp_2 = self._sparse_tensor([['batch1-FC2-F1']])
sp_inp_3 = self._sparse_tensor([['batch1-FC3-F1']])
inds, vals, shapes = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
num_buckets=100,
salt=[137, 173],
strong_hash=False)
# Check actual hashed output to prevent unintentional hashing changes.
expected_out = self._sparse_tensor([[79]])
out = sparse_tensor.SparseTensor(inds, vals, shapes)
with self.cached_session():
self._assert_sparse_tensor_equals(expected_out, self.evaluate(out))
@test_util.run_deprecated_v1
def test_hashed_has_no_collision(self):
"""Tests that fingerprint concatenation has no collisions."""
# Although the last 10 bits of 359 and 1024+359 are identical.
# As a result, all the crosses shouldn't collide.
t1 = constant_op.constant([[359], [359 + 1024]], dtype=dtypes.int64)
t2 = constant_op.constant(
[list(range(10)), list(range(10))], dtype=dtypes.int64)
inds, vals, shapes = gen_sparse_ops.sparse_cross_hashed(
indices=[],
values=[],
shapes=[],
dense_inputs=[t2, t1],
num_buckets=1024,
salt=[137, 173],
strong_hash=False)
cross = sparse_tensor.SparseTensor(inds, vals, shapes)
cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
with session.Session():
values = self.evaluate(cross_dense)
self.assertTrue(numpy.not_equal(values[0], values[1]).all())
def test_hashed_3x1x2(self):
"""Tests 3x1x2 permutation with hashed output."""
sp_inp_1 = self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']])
sp_inp_2 = self._sparse_tensor([['batch1-FC2-F1']])
sp_inp_3 = self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
inds, vals, shapes = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
num_buckets=1000,
salt=[137, 173],
strong_hash=False)
output = sparse_tensor.SparseTensor(inds, vals, shapes)
with self.cached_session():
out = self.evaluate(output)
self.assertEqual(6, len(out.values))
self.assertAllEqual([[0, i] for i in range(6)], out.indices)
self.assertTrue(all(x < 1000 and x >= 0 for x in out.values))
all_values_are_different = len(out.values) == len(set(out.values))
self.assertTrue(all_values_are_different)
def test_hashed_different_salt(self):
sp_inp_1 = self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']])
sp_inp_2 = self._sparse_tensor([['batch1-FC2-F1']])
sp_inp_3 = self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
inds, vals, shapes = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
strong_hash=False,
num_buckets=1000,
salt=[137, 173])
output = sparse_tensor.SparseTensor(inds, vals, shapes)
inds_2, vals_2, shapes_2 = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
strong_hash=True,
num_buckets=1000,
salt=[137, 1])
output_2 = sparse_tensor.SparseTensor(inds_2, vals_2, shapes_2)
with self.cached_session():
out = self.evaluate(output)
out_2 = self.evaluate(output_2)
self.assertAllEqual(out.indices, out_2.indices)
self.assertNotAllEqual(out.values, out_2.values)
def test_sep_ignored_in_hashed_out(self):
sp_inp_1 = self._sparse_tensor(
[['batch1-FC1-F1', 'batch1-FC1-F2', 'batch1-FC1-F3']])
sp_inp_2 = self._sparse_tensor([['batch1-FC2-F1']])
sp_inp_3 = self._sparse_tensor([['batch1-FC3-F1', 'batch1-FC3-F2']])
inds, vals, shapes = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
strong_hash=True,
num_buckets=1000,
salt=[137, 173])
output = sparse_tensor.SparseTensor(inds, vals, shapes)
inds_2, vals_2, shapes_2 = gen_sparse_ops.sparse_cross_hashed(
indices=[sp_inp_1.indices, sp_inp_2.indices, sp_inp_3.indices],
values=[sp_inp_1.values, sp_inp_2.values, sp_inp_3.values],
shapes=[
sp_inp_1.dense_shape, sp_inp_2.dense_shape, sp_inp_3.dense_shape
],
dense_inputs=[],
strong_hash=True,
num_buckets=1000,
salt=[137, 173])
output_2 = sparse_tensor.SparseTensor(inds_2, vals_2, shapes_2)
with self.cached_session():
out = self.evaluate(output)
out_2 = self.evaluate(output_2)
self.assertAllEqual(out.indices, out_2.indices)
self.assertAllEqual(out.values, out_2.values)
def test_sparse_cross_hashed_empty_seed(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError, r'Input "salt" must have length 2'
):
indices = []
values = []
shapes = []
dense_inputs = []
num_buckets = -461
salt = []
strong_hash = False
op = gen_sparse_ops.sparse_cross_hashed(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
num_buckets=num_buckets,
salt=salt,
strong_hash=strong_hash,
)
self.evaluate(op)
if __name__ == '__main__':
test.main()
| SparseCrossHashedOpTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 57147,
"end": 57464
} | class ____(ReturnTypeFromArgs[_T]): # noqa: A001
"""The SQL POW() function which performs the power operator.
E.g.:
.. sourcecode:: pycon+sql
>>> print(select(func.pow(2, 8)))
{printsql}SELECT pow(:pow_2, :pow_3) AS pow_1
.. versionadded:: 2.1
"""
inherit_cache = True
| pow |
python | joke2k__faker | faker/providers/job/ko_KR/__init__.py | {
"start": 181,
"end": 9694
} | class ____(BaseProvider):
jobs = [
"의회의원/고위공무원 및 공공단체임원",
"기업고위임원",
"정부행정 관리자",
"경영지원 관리자",
"기타 행정 및 경영지원 관리자",
"연구 관리자",
"교육 관리자",
"법률/경찰/소방 및 교도 관리자",
"보험 및 금융 관리자",
"보건의료관련 관리자",
"사회복지관련 관리자",
"문화/예술/디자인 및 영상관련 관리자",
"정보통신관련 관리자",
"기타 전문서비스 관리자",
"건설 및 광업 관련 관리자",
"전기/가스 및 수도 관련 관리자",
"제품 생산관련 관리자",
"기타 건설/전기 및 생산 관련 관리자",
"영업 및 판매 관련 관리자",
"운송관련 관리자",
"숙박/여행/오락 및 스포츠 관련 관리자",
"음식서비스관련 관리자",
"환경/청소 및 경비 관련 관리자",
"기타 판매 및 고객 서비스 관리자",
"생명과학 연구원",
"자연과학 연구원",
"인문과학 연구원",
"사회과학 연구원",
"생명과학 시험원",
"농림어업관련 시험원",
"자연과학 시험원",
"컴퓨터 하드웨어 기술자 및 연구원",
"통신공학 기술자 및 연구원",
"컴퓨터시스템 설계 및 분석가",
"시스템 소프트웨어 개발자",
"응용 소프트웨어 개발자",
"데이터베이스 개발자",
"네트워크시스템 개발자",
"컴퓨터 보안 전문가",
"웹 및 멀티미디어 기획자",
"웹 개발자",
"정보 시스템 운영자",
"통신 및 방송송출 장비 기사",
"건축가 및 건축공학 기술자",
"토목공학 기술자",
"조경 기술자",
"도시 및 교통설계 전문가",
"측량 및 지리정보 전문가",
"건설자재 시험원",
"화학공학 기술자 및 연구원",
"화학공학 시험원",
"금속 / 재료공학 연구원 및 기술자",
"금속 / 재료공학 시험원",
"환경공학 기술자 및 연구원",
"환경공학 시험원",
"전기공학 기술자 및 연구원",
"전자공학 기술자 및 연구원",
"기계공학 기술자 및 연구원",
"전기/전자 및 기계 공학 시험원",
"산업안전 및 위험 관리원",
"보건위생 및 환경 검사원",
"비파괴 검사원",
"항공기 조종사",
"선장/항해사 및 도선사",
"관제사",
"식품공학 기술자 및 연구원",
"섬유공학 기술자 및 연구원",
"가스/에너지 기술자 및 연구원",
"소방공학 기술자 및 연구원",
"식품/섬유 공학 및 에너지 시험원",
"캐드원",
"기타 공학관련 기술자 및 시험원",
"전문 의사",
"일반 의사",
"한의사",
"치과 의사",
"수의사",
"약사 및 한약사",
"간호사",
"영양사",
"임상병리사",
"방사선사",
"치과기공사",
"치과위생사",
"의지보조기기사",
"물리 및 작업 치료사",
"임상 심리사 및 기타 치료사",
"응급구조사",
"위생사",
"안경사",
"의무기록사",
"간호조무사",
"안마사",
"사회복지사",
"보육 교사",
"직업상담사 및 취업 알선원",
"상담 전문가 및 청소년 지도사",
"시민 단체 활동가",
"기타 사회복지관련 종사원",
"성직자",
"기타 종교관련 종사자",
"대학 교수",
"대학 시간강사",
"중/고등학교 교사",
"초등학교 교사",
"특수교육 교사",
"유치원 교사",
"문리 및 어학 강사",
"컴퓨터 강사",
"기술 및 기능계 강사",
"예능 강사",
"학습지 및 방문 교사",
"기타 문리/기술 및 예능 강사",
"장학관/연구관 및 교육 관련 전문가",
"대학 교육조교",
"보조 교사 및 기타 교사",
"판사 및 검사",
"변호사",
"법무사 및 집행관",
"변리사",
"정부 및 공공 행정 전문가",
"인사 및 노사 관련 전문가",
"회계사",
"세무사",
"관세사",
"경영 및 진단 전문가",
"투자 및 신용 분석가",
"자산 운용가",
"보험 및 금융 상품 개발자",
"증권 및 외환 딜러",
"손해사정인",
"기타 금융 및 보험 관련 전문가",
"상품기획 전문가",
"여행상품 개발자",
"광고 및 홍보 전문가",
"조사 전문가",
"행사기획자",
"감정평가 전문가",
"해외 영업원",
"기술 영업원",
"상품중개인 및 경매사",
"부동산 컨설턴트 및 중개인",
"기타 기술영업 및 중개 관련 종사자",
"작가 및 관련 전문가",
"번역가",
"통역가",
"기자 및 논설위원",
"출판물 전문가",
"큐레이터 및 문화재 보존원",
"사서 및 기록물관리사",
"감독 및 기술감독",
"배우 및 모델",
"아나운서 및 리포터",
"촬영기사",
"음향 및 녹음 기사",
"영상/녹화 및 편집 기사",
"조명기사 및 영사기사",
"기타 연극/영화 및 영상 관련 종사자",
"화가 및 조각가",
"사진기자 및 사진가",
"만화가 및 만화영화 작가",
"국악 및 전통예능인",
"지휘자/작곡가 및 연주가",
"가수 및 성악가",
"무용가 및 안무가",
"제품 디자이너",
"패션 디자이너",
"실내장식 디자이너",
"시각 디자이너",
"웹 및 멀티미디어 디자이너",
"경기감독 및 코치",
"직업 운동선수",
"경기심판 및 경기기록원",
"스포츠 및 레크레이션 강사",
"기타 스포츠 및 레크레이션 관련 전문가",
"연예인 및 스포츠 매니저",
"마술사 및 기타 문화/ 예술 관련 종사자",
"조세행정 사무원",
"관세행정 사무원",
"병무행정 사무원",
"국가/지방 및 공공행정 사무원",
"기획 및 마케팅 사무원",
"인사 및 교육/훈련 사무원",
"자재관리 사무원",
"생산 및 품질 관리 사무원",
"무역 사무원",
"운송 사무원",
"총무 사무원",
"회계 사무원",
"경리 사무원",
"비서",
"전산 자료 입력원 및 사무 보조원",
"출납창구 사무원",
"보험 심사원 및 사무원",
"금융관련 사무원",
"신용 추심원",
"법률관련 사무원",
"감사 사무원",
"통계관련 사무원",
"여행 사무원",
"안내 / 접수 사무원 및 전화교환원",
"고객 상담 및 모니터 요원",
"기타 사무원",
"경찰관",
"소방관",
"소년보호관 및 교도관",
"경호원",
"청원 경찰",
"무인 경비원",
"기타 경호 및 보안 관련 종사원",
"간병인",
"기타 의료/복지 관련 서비스 종사원",
"이용사",
"미용사",
"피부미용 및 체형관리사",
"메이크업 아티스트 및 분장사",
"애완동물 미용사",
"기타 미용관련 서비스 종사원",
"결혼 상담원 및 웨딩플래너",
"혼례 종사원",
"장례 상담원 및 장례 지도사",
"기타 이미용/예식 및 의료보조 서비스 종사원",
"항공기 객실승무원",
"선박 및 열차 객실승무원",
"여행 및 관광통역 안내원",
"숙박시설 서비스원",
"오락시설 서비스원",
"기타 여가 및 스포츠 관련 종사원",
"한식 주방장 및 조리사",
"중식 주방장 및 조리사",
"양식 주방장 및 조리사",
"일식 주방장 및 조리사",
"기타 주방장 및 조리사",
"바텐더",
"웨이터",
"기타 음식서비스 종사원",
"자동차 영업원",
"제품 및 광고 영업원",
"보험 설계사 및 간접투자증권 판매인",
"상점 판매원",
"매표원 및 복권 판매원",
"매장계산원 및 요금정산원",
"상품 대여원",
"방문 판매원",
"통신서비스판매원",
"텔레마케터",
"인터넷 판매원",
"노점 및 이동 판매원",
"홍보 도우미 및 판촉원",
"곡식작물 재배원",
"채소 및 특용작물 재배원",
"과수작물 재배원",
"원예작물 재배원",
"조경원",
"낙농업관련 종사원",
"가축 사육 종사원",
"기타 사육관련 종사원",
"조림/영림 및 벌목원",
"임산물채취 및 기타 임업 관련 종사원",
"양식원",
"어부 및 해녀",
"제빵원 및 제과원",
"떡제조원",
"정육원 및 도축원",
"식품 및 담배 등급원",
"김치 및 밑반찬 제조 종사원",
"기타 식품가공관련 종사원",
"패턴사",
"재단사",
"재봉사",
"제화원",
"기타 섬유 및 가죽 관련 기능 종사원",
"한복 제조원",
"양장 및 양복 제조원",
"모피 및 가죽의복 제조원",
"의복/가죽 및 모피 수선원",
"기타 의복 제조원",
"목제품 제조관련 종사원",
"가구 제조 및 수리원",
"악기제조 및 조율사",
"간판 제작 및 설치원",
"금형원",
"주조원",
"단조원",
"제관원",
"판금원",
"용접원",
"자동차 정비원",
"항공기 정비원",
"선박 정비원",
"철도 기관차 및 전동차 정비원",
"기타 운송장비 정비원",
"공업기계 설치 및 정비원",
"승강기 설치 및 정비원",
"물품 이동 장비 설치 및 정비원",
"냉동/냉장 /공조기 설치 및 정비원",
"보일러 설치 및 정비원",
"건설 및 광업기계 설치 및 정비원",
"농업용 및 기타 기계장비 설치 및 정비원",
"가전제품 설치 및 수리원",
"기타 전기/전자기기 설치 및 수리원",
"산업전공",
"내선전공",
"외선전공",
"강구조물 가공원 및 건립원",
"경량 철골공",
"철근공",
"콘크리트공",
"건축 석공",
"건축 목공",
"조적공 및 석재 부설원",
"기타 건설관련 기능 종사원",
"미장공",
"방수공",
"단열공",
"바닥재 시공원",
"도배공 및 유리 부착원",
"건축 도장공",
"섀시 조립 및 설치원",
"기타 건축마감관련 기능 종사원",
"광원/채석원 및 석재 절단원",
"철로 설치 및 보수원",
"기타 채굴 및 토목 관련 종사자",
"영상 및 관련 장비 설치 및 수리원",
"통신 및 관련 장비 설치 및 수리원",
"통신/방송 및 인터넷 케이블 설치 및 수리원",
"공예원",
"귀금속 및 보석 세공원",
"건설 배관공",
"공업 배관공",
"기타 배관공",
"배관 세정원 및 방역원",
"기타 기능관련 종사원",
"제분 및 도정 관련 기계 조작원",
"곡물가공제품 기계 조작원",
"육류/어패류 및 낙농품 가공 기계조작원",
"과실 및 채소 관련 기계조작원",
"음료 제조관련 기계 조작원",
"기타 식품가공관련 기계조작원",
"섬유제조 기계조작원",
"표백 및 염색 관련 조작원",
"직조기 및 편직기 조작원",
"신발제조기 조작원 및 조립원",
"기타 직물 및 신발 관련 기계조작원 및 조립원",
"세탁관련 기계조작원",
"석유 및 천연가스제조 관련 제어장치 조작원",
"화학물 가공장치 조작원",
"기타 석유 및 화학물 가공장치 조작원",
"화학제품 생산기 조작원",
"타이어 및 고무제품 생산기 조작원",
"플라스틱제품 생산기 조작원",
"고무 및 플라스틱 제품 조립원",
"주조기 조작원",
"단조기 조작원",
"용접기 조작원",
"금속가공관련 제어장치 조작원",
"금속가공 기계조작원",
"제관기 조작원",
"판금기 조작원",
"도장기 조작원",
"도금 및 금속분무기 조작원",
"유리제조 및 가공기 조작원",
"점토제품 생산기 조작원",
"시멘트 및 광물제품 제조기 조작원",
"광석 및 석제품 가공기 조작원",
"기타 비금속제품관련 생산기 조작원",
"금속공작기계 조작원",
"냉/난방 관련 설비 조작원",
"자동조립라인 및 산업용 로봇 조작원",
"자동차 조립원",
"자동차 부분품 조립원",
"운송장비 조립원",
"일반기계 조립원",
"금속기계부품 조립원",
"발전 및 배전장치 조작원",
"전기 및 전자 설비 조작원",
"전기 부품 및 제품제조 기계조작원",
"전자 부품 및 제품 제조 기계조작원",
"전기/전자 부품 및 제품 조립원",
"철도 및 전동차 기관사",
"화물열차 차장 및 관련 종사원",
"택시 운전원",
"버스 운전원",
"화물차 및 특수차 운전원",
"기타 자동차 운전원",
"물품이동 장비 조작원",
"건설 및 채굴 기계 운전원",
"선박 갑판승무원 및 관련 종사원",
"상/하수도 처리장치 조작원",
"재활용 처리 및 소각로 조작원",
"목재 가공관련 기계 조작원",
"가구조립원",
"펄프 및 종이 제조장치 조작원",
"종이제품 생산기 조작원",
"기타 목재 및 종이 관련 기계조작원",
"인쇄기 조작원",
"사진인화 및 현상기 조작원",
"기타 제조관련 기계 조작원",
"건설 및 광업 단순 종사원",
"하역 및 적재 단순 종사원",
"우편물 집배원",
"택배원",
"음식 배달원",
"기타 배달원",
"제조관련 단순 종사원",
"청소원",
"환경 미화원 및 재활용품 수거원",
"경비원",
"검표원",
"가사 도우미",
"육아 도우미",
"패스트푸드원",
"주방 보조원",
"주유원",
"기타 판매관련 단순 종사원",
"농림어업관련 단순 종사원",
"계기 검침원 및 가스점검원",
"수금원",
"주차 관리원 및 안내원",
"구두 미화원",
"세탁원 및 다림질원",
"기타 서비스관련 단순 종사원",
"영관급 이상",
"위관급",
"장기 부사관 및 준위",
]
| Provider |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 13135,
"end": 13563
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "ScheduleNotFoundError"
schedule_name = graphene.NonNull(graphene.String)
def __init__(self, schedule_name):
super().__init__()
self.schedule_name = check.str_param(schedule_name, "schedule_name")
self.message = f"Schedule {self.schedule_name} could not be found."
| GrapheneScheduleNotFoundError |
python | huggingface__transformers | tests/models/pegasus_x/test_modeling_pegasus_x.py | {
"start": 2017,
"end": 7785
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=50,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
3,
)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = PegasusXConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
stagger_local_blocks=False,
)
inputs_dict = prepare_pegasus_x_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = PegasusXModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = PegasusXModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = PegasusXEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = PegasusXDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| PegasusXModelTester |
python | celery__celery | t/unit/app/test_app.py | {
"start": 1318,
"end": 1434
} | class ____:
FOO = 1
BAR = 2
object_config = ObjectConfig()
dict_config = {'FOO': 10, 'BAR': 20}
| ObjectConfig |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_array_ops_test.py | {
"start": 41797,
"end": 43884
} | class ____(test.TestCase):
def setUp(self):
super(ArrayManipulationTest, self).setUp()
self.array_transforms = [
lambda x: x,
ops.convert_to_tensor,
np.array,
np_array_ops.array,
]
def testBroadcastTo(self):
def run_test(arr, shape):
for fn in self.array_transforms:
arg1 = fn(arr)
self.match(
np_array_ops.broadcast_to(arg1, shape),
np.broadcast_to(arg1, shape))
run_test(1, 2)
run_test(1, (2, 2))
run_test([1, 2], (2, 2))
run_test([[1], [2]], (2, 2))
run_test([[1, 2]], (3, 2))
run_test([[[1, 2]], [[3, 4]], [[5, 6]]], (3, 4, 2))
def testIx_(self):
possible_arys = [[True, True], [True, False], [False, False],
list(range(5)), np_array_ops.empty(0, dtype=np.int64)]
for r in range(len(possible_arys)):
for arys in itertools.combinations_with_replacement(possible_arys, r):
tnp_ans = np_array_ops.ix_(*arys)
onp_ans = np.ix_(*arys)
for t, o in zip(tnp_ans, onp_ans):
self.match(t, o)
def match_shape(self, actual, expected, msg=None):
if msg:
msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
msg, expected.shape, actual.shape)
self.assertEqual(actual.shape, expected.shape, msg=msg)
def match_dtype(self, actual, expected, msg=None):
if msg:
msg = 'Dtype match failed for: {}. Expected: {} Actual: {}.'.format(
msg, expected.dtype, actual.dtype)
self.assertEqual(actual.dtype, expected.dtype, msg=msg)
def match(self, actual, expected, msg=None):
msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
if msg:
msg = '{} {}'.format(msg_, msg)
else:
msg = msg_
self.assertIsInstance(actual, np_arrays.ndarray)
self.match_dtype(actual, expected, msg)
self.match_shape(actual, expected, msg)
if not actual.shape.rank:
self.assertEqual(actual.tolist(), expected.tolist())
else:
self.assertSequenceEqual(actual.tolist(), expected.tolist())
| ArrayManipulationTest |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 22704,
"end": 26783
} | class ____(ZoomPanBase):
"""A Tool for zooming using a rectangle selector."""
description = 'Zoom to rectangle'
image = 'mpl-data/images/zoom_to_rect'
default_keymap = property(lambda self: mpl.rcParams['keymap.zoom'])
cursor = cursors.SELECT_REGION
radio_group = 'default'
def __init__(self, *args):
super().__init__(*args)
self._ids_zoom = []
def _cancel_action(self):
for zoom_id in self._ids_zoom:
self.figure.canvas.mpl_disconnect(zoom_id)
self.toolmanager.trigger_tool('rubberband', self)
self.figure.canvas.draw_idle()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
def _press(self, event):
"""Callback for mouse button presses in zoom-to-rectangle mode."""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom:
self._cancel_action()
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._cancel_action()
return
x, y = event.x, event.y
self._xypress = []
for i, a in enumerate(self.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a._get_view()))
id1 = self.figure.canvas.mpl_connect(
'motion_notify_event', self._mouse_move)
id2 = self.figure.canvas.mpl_connect(
'key_press_event', self._switch_on_zoom_mode)
id3 = self.figure.canvas.mpl_connect(
'key_release_event', self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self._mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self._mouse_move(event)
def _mouse_move(self, event):
"""Callback for mouse moves in zoom-to-rectangle mode."""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, view = self._xypress[0]
(x1, y1), (x2, y2) = np.clip(
[[lastx, lasty], [x, y]], a.bbox.min, a.bbox.max)
if self._zoom_mode == "x":
y1, y2 = a.bbox.intervaly
elif self._zoom_mode == "y":
x1, x2 = a.bbox.intervalx
self.toolmanager.trigger_tool(
'rubberband', self, data=(x1, y1, x2, y2))
def _release(self, event):
"""Callback for mouse button releases in zoom-to-rectangle mode."""
for zoom_id in self._ids_zoom:
self.figure.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
self._cancel_action()
return
done_ax = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, _ind, view = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._cancel_action()
return
# detect twinx, twiny Axes and avoid double zooming
twinx = any(a.get_shared_x_axes().joined(a, a1) for a1 in done_ax)
twiny = any(a.get_shared_y_axes().joined(a, a1) for a1 in done_ax)
done_ax.append(a)
if self._button_pressed == 1:
direction = 'in'
elif self._button_pressed == 3:
direction = 'out'
else:
continue
a._set_view_from_bbox((lastx, lasty, x, y), direction,
self._zoom_mode, twinx, twiny)
self._zoom_mode = None
self.toolmanager.get_tool(_views_positions).push_current()
self._cancel_action()
| ToolZoom |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_ip_address_in_network.py | {
"start": 1936,
"end": 4893
} | class ____(ColumnMapExpectation):
"""Expect column values to be IP addresses in the specified network ranges."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_in": [
"192.168.0.0",
"192.168.0.1",
"192.168.0.2",
"192.168.0.3",
"192.168.0.254",
],
"some_other": [
"213.181.199.16",
"213.181.199.16",
"213.181.199.16",
"213.181.199.16",
"142.250.180.206",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "all_in",
"ip_network": ["192.168.0.0/24", "54.33.0.0/17"],
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "some_other",
"ip_network": ["192.168.0.0/24"],
"mostly": 0.9,
},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.ip_address_in_network"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"ip_network",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
}
success_keys = (
"ip_network",
"mostly",
)
if __name__ == "__main__":
ExpectColumnValuesIpAddressInNetwork().print_diagnostic_checklist()
| ExpectColumnValuesIpAddressInNetwork |
python | huggingface__transformers | tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py | {
"start": 8517,
"end": 9136
} | class ____(ASTFeatureExtractionTest):
def test_using_audio_utils(self):
# Tests that it uses audio_utils instead of torchaudio
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
self.assertTrue(hasattr(feat_extract, "window"))
self.assertTrue(hasattr(feat_extract, "mel_filters"))
from transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer import (
is_speech_available,
)
self.assertFalse(is_speech_available())
| ASTFeatureExtractionWithoutTorchaudioTest |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 52391,
"end": 76075
} | class ____:
"""The logic to shard inputs, execute a replicated model, returning outputs."""
__slots__ = ['xla_executable', 'name', 'backend', 'in_handler', 'out_handler',
'has_unordered_effects', 'ordered_effects', 'keepalive',
'has_host_callbacks', '_local_devices', 'kept_var_idx',
'mut', 'pgle_profiler', '__weakref__']
def __init__(self, xla_executable, name, backend, in_handler: InputsHandler,
out_handler: ResultsHandler,
unordered_effects: list[core.Effect],
ordered_effects: list[core.Effect], keepalive: Any,
has_host_callbacks: bool, kept_var_idx: set[int],
mut: MutationData | None,
pgle_profiler: profiler.PGLEProfiler | None = None):
self.xla_executable = xla_executable
self.name = name
self.backend = backend
self.in_handler = in_handler
self.out_handler = out_handler
self.has_unordered_effects = bool(unordered_effects)
self.ordered_effects = ordered_effects
self._local_devices = self.xla_executable.local_devices()
self.keepalive = keepalive
self.has_host_callbacks = has_host_callbacks
self.kept_var_idx = kept_var_idx
self.mut = mut
self.pgle_profiler = pgle_profiler
def _add_tokens_to_inputs(self, input_bufs):
if self.ordered_effects:
tokens = [
dispatch.runtime_tokens.get_token_input(eff, self._local_devices)._buf
for eff in self.ordered_effects
]
input_bufs = [*tokens, *input_bufs]
return input_bufs
def _handle_token_bufs(self, token_bufs, sharded_token):
# token_bufs: Sequence[Sequence[tokenArray]], for each effect the returned
# token buffers.
# sharded_token: ShardedToken, containing the RuntimeTokens for each device
for i, device in enumerate(self._local_devices):
dispatch.runtime_tokens.set_output_runtime_token(
device, sharded_token.get_token(i))
for eff, token_buf in zip(self.ordered_effects, token_bufs):
assert len(token_buf) > 0
if len(token_buf) == 1:
dispatch.runtime_tokens.set_token_result(eff, core.Token(token_buf[0]))
else:
token_devices = []
for token in token_buf:
assert isinstance(token.sharding, sharding_impls.SingleDeviceSharding)
token_devices.append(token.sharding._device_assignment[0])
s = NamedSharding(Mesh(token_devices, 'x'), P('x'))
global_token_array = array.make_array_from_single_device_arrays(
(0,), s, token_buf
)
dispatch.runtime_tokens.set_token_result(
eff, core.Token(global_token_array)
)
@profiler.annotate_function
def __call__(self, *args):
if config.no_execution.value:
raise RuntimeError(
f"JAX tried to execute function {self.name}, but the no_execution config "
"option is set")
args = [x for i, x in enumerate(args) if i in self.kept_var_idx]
if self.mut:
args = [*args, *self.mut.in_mut]
input_bufs = self.in_handler(args)
with profiler.PGLEProfiler.trace(self.pgle_profiler):
if (self.ordered_effects or self.has_unordered_effects
or self.has_host_callbacks):
input_bufs = self._add_tokens_to_inputs(input_bufs)
results = self.xla_executable.execute_sharded(input_bufs, with_tokens=True)
result_token_bufs = results.disassemble_prefix_into_single_device_arrays(
len(self.ordered_effects))
sharded_runtime_token = results.consume_token()
self._handle_token_bufs(result_token_bufs, sharded_runtime_token)
else:
results = self.xla_executable.execute_sharded(input_bufs)
if dispatch.needs_check_special():
out_arrays = results.disassemble_into_single_device_arrays()
for arrays in out_arrays:
dispatch.check_special(self.name, arrays)
out = self.out_handler(out_arrays)
else:
out = results.consume_with_handlers(self.out_handler.handlers)
if (self.pgle_profiler is not None and self.pgle_profiler.is_running()
and len(out) > 0):
out[0].block_until_ready()
if self.mut is None:
return out
else:
out_ = []
for i, o in zip(self.mut.out_mut, out):
if i is not None:
try: args[i]._refs._buf._replace_with(o) # type: ignore
except AttributeError: pass # TODO(mattjj): remove float0
else:
out_.append(o)
return out_
xla_pmap_p = core.MapPrimitive('xla_pmap')
xla_pmap = xla_pmap_p.bind
xla_pmap_p.def_impl(xla_pmap_impl)
def _pmap_partial_eval_custom_params_updater(
unks_in, inst_in, kept_outs_known, kept_outs_staged, num_res, params_known,
params_staged):
# prune inputs to jaxpr_known according to unks_in
donated_invars_known, _ = partition_list(unks_in, params_known['donated_invars'])
in_axes_known, _ = partition_list(unks_in, params_known['in_axes'])
_, out_axes_known = partition_list(kept_outs_known, params_known['out_axes'])
out_axes_known = out_axes_known + [0] * num_res
new_params_known = dict(params_known, in_axes=tuple(in_axes_known),
out_axes=tuple(out_axes_known),
donated_invars=tuple(donated_invars_known))
# added num_res new inputs to jaxpr_staged, pruning according to inst_in
_, donated_invars_staged = partition_list(inst_in, params_staged['donated_invars'])
donated_invars_staged = [False] * num_res + donated_invars_staged
_, in_axes_staged = partition_list(inst_in, params_staged['in_axes'])
in_axes_staged = [0] * num_res + in_axes_staged
_, out_axes_staged = partition_list(kept_outs_staged, params_staged['out_axes'])
new_params_staged = dict(params_staged, in_axes=tuple(in_axes_staged),
out_axes=tuple(out_axes_staged),
donated_invars=tuple(donated_invars_staged))
return new_params_known, new_params_staged
def _pmap_partial_eval_custom_res_maker(params_known, aval):
return core.unmapped_aval(params_known['axis_size'], 0, aval)
def _pmap_dce_rule(used_outputs, eqn):
# just like pe.dce_jaxpr_call_rule, except handles in_axes / out_axes
if not any(used_outputs) and not pe.has_effects(eqn):
return [False] * len(eqn.invars), None
axis_name = eqn.params["axis_name"]
with core.extend_axis_env_nd([(axis_name, eqn.params["global_axis_size"])]):
new_jaxpr, used_inputs = pe.dce_jaxpr(eqn.params['call_jaxpr'], used_outputs)
_, donated_invars = partition_list(used_inputs, eqn.params['donated_invars'])
_, in_axes = partition_list(used_inputs, eqn.params['in_axes'])
_, out_axes = partition_list(used_outputs, eqn.params['out_axes'])
new_params = dict(eqn.params, call_jaxpr=new_jaxpr,
donated_invars=tuple(donated_invars),
in_axes=tuple(in_axes), out_axes=tuple(out_axes))
if not any(used_inputs) and not any(used_outputs) and not new_jaxpr.effects:
return used_inputs, None
else:
effs = core.filter_named_axis_effects(new_jaxpr.effects, {axis_name})
new_eqn = pe.new_jaxpr_eqn(
[v for v, used in zip(eqn.invars, used_inputs) if used],
[v for v, used in zip(eqn.outvars, used_outputs) if used],
eqn.primitive, new_params, effs, eqn.source_info)
return used_inputs, new_eqn
def _xla_call_partial_eval_update_params(
params: core.ParamDict, kept_inputs: Sequence[bool], num_new_inputs: int
) -> core.ParamDict:
donated_invars = params['donated_invars']
if not kept_inputs and donated_invars:
# JaxprTrace.post_process_call creates a call with no input tracers
donated_invars = (False,) * num_new_inputs
else:
assert len(kept_inputs) == len(donated_invars)
# JaxprTrace.process_call drops known input tracers
donated_invars = [d for d, kept in zip(donated_invars, kept_inputs) if kept]
# Any new inputs are prepended to the left, so mark those as not donated.
donated_invars = [False] * num_new_inputs + donated_invars
return dict(params, donated_invars=tuple(donated_invars))
def xla_call_jvp_update_params(params, nz_tangents):
donated_invars = params['donated_invars']
donated_tangents = [d for d, nz in zip(donated_invars, nz_tangents) if nz]
new_donated_invars = (*donated_invars, *donated_tangents)
return dict(params, donated_invars=new_donated_invars)
def _xla_call_linearize_update_params(params, num_new_inputs, nz_tangents):
donated_invars_prev = params['donated_invars']
donated_invars = (*(False for _ in range(num_new_inputs)),
*(d for d, nz in zip(donated_invars_prev, nz_tangents) if nz))
return dict(params, donated_invars=donated_invars)
def _xla_call_transpose_update_params(params, undef_primals, nonzero_cts):
donated_invars = params['donated_invars']
donated_primals = [d for d, u in zip(donated_invars, undef_primals) if not u]
donated_cotangents = [False for nz in nonzero_cts if nz]
return dict(params, donated_invars=(*donated_primals, *donated_cotangents))
# Set param update handlers to update `donated_invars` just like xla_call_p
pe.call_param_updaters[xla_pmap_p] = _xla_call_partial_eval_update_params
pe.partial_eval_jaxpr_custom_rules[xla_pmap_p] = \
partial(pe.call_partial_eval_custom_rule,
'call_jaxpr', _pmap_partial_eval_custom_params_updater,
res_aval=_pmap_partial_eval_custom_res_maker)
pe.dce_rules[xla_pmap_p] = _pmap_dce_rule
ad.call_param_updaters[xla_pmap_p] = xla_call_jvp_update_params
ad.call_linearize_param_updaters[xla_pmap_p] = _xla_call_linearize_update_params
ad.call_transpose_param_updaters[xla_pmap_p] = _xla_call_transpose_update_params
ad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)
def _unravel_index_hlo(axis_env):
div = mlir.ir_constant(
np.array(axis_env.nreps // math.prod(axis_env.sizes), np.uint32))
mod = mlir.ir_constant(np.array(axis_env.sizes[-1], np.uint32))
return hlo.remainder(hlo.divide(hlo.replica_id(), div), mod)
def _hlo_shard(aval, axis_env, x, in_axis):
if aval is core.abstract_token:
return x
elif isinstance(aval, core.ShapedArray):
if dtypes.issubdtype(aval.dtype, dtypes.extended):
aval = core.physical_element_aval(aval.dtype)
dims = list(aval.shape)
zero = mlir.ir_constant(np.zeros((), dtype=np.uint32))
idxs = [zero] * len(dims)
idxs.insert(in_axis, _unravel_index_hlo(axis_env))
dims_unsqueezed = dims.copy()
dims_unsqueezed.insert(in_axis, 1)
dynamic_slice_result = hlo.dynamic_slice(
x, idxs, mlir.dense_int_array(dims_unsqueezed))
return hlo.reshape(mlir.aval_to_ir_type(aval), dynamic_slice_result)
else:
raise TypeError(aval)
def _axis_read(axis_env, axis_name):
try:
return max(i for i, name in enumerate(axis_env.names) if name == axis_name)
except ValueError:
raise NameError(f"unbound axis name: {axis_name}") from None
def axis_groups(axis_env: sharding_impls.AxisEnv, name) -> tuple[tuple[int, ...]]:
if not isinstance(name, (list, tuple)):
name = (name,)
mesh_axes = tuple(unsafe_map(partial(_axis_read, axis_env), name))
trailing_size, ragged = divmod(axis_env.nreps, math.prod(axis_env.sizes))
assert not ragged
mesh_spec = axis_env.sizes + (trailing_size,)
return _axis_groups(mesh_spec, mesh_axes)
def _axis_groups(mesh_spec, mesh_axes):
"""Computes replica group ids for a collective performed over a subset of the mesh.
Args:
mesh_spec: A sequence of integers representing the mesh shape.
mesh_axes: A sequence of integers between 0 and `len(mesh_spec)` (exclusive)
indicating over which axes the collective is performed.
Returns:
A tuple of replica groups (i.e. tuples containing replica ids).
"""
iota = np.arange(math.prod(mesh_spec)).reshape(mesh_spec)
groups = np.reshape(
np.moveaxis(iota, mesh_axes, np.arange(len(mesh_axes))),
(math.prod(np.take(mesh_spec, mesh_axes)), -1))
return tuple(unsafe_map(tuple, groups.T))
# TODO(b/110096942): more efficient gather
def _hlo_unshard(ctx: mlir.LoweringRuleContext, aval, axis_env, out_axis, x):
if aval is core.abstract_token:
return x
elif isinstance(aval, core.ShapedArray):
dims = list(aval.shape)
padded_aval = aval.update(shape=[axis_env.sizes[-1]] + dims)
padded = mlir.full_like_aval(ctx, 0, padded_aval)
zero = mlir.ir_constant(np.zeros((), dtype=np.uint32))
idxs = [_unravel_index_hlo(axis_env)] + [zero] * len(dims)
broadcast_result = hlo.broadcast(x, mlir.dense_int_array([1]))
padded = hlo.dynamic_update_slice(padded, broadcast_result, idxs)
replica_groups = mlir.dense_int_elements(
axis_groups(axis_env, axis_env.names[-1]))
out = hlo.cross_replica_sum(padded, replica_groups)
if out_axis != 0:
# TODO(apaszke,mattjj): Change the indices to DynamicUpdateSlice instead
perm = list(range(1, len(dims)))
perm.insert(out_axis, 0)
transposed_dims = list(dims)
transposed_dims.insert(out_axis, axis_env.sizes[-1])
out = hlo.transpose(out, mlir.dense_int_array(perm))
return out
else:
raise TypeError(aval)
def _extend_axis_env(env: sharding_impls.AxisEnv, name, size: int):
return sharding_impls.AxisEnv(env.nreps, env.names + (name,),
env.sizes + (size,))
def _pmap_lowering(ctx: mlir.LoweringRuleContext, *in_nodes, axis_name,
axis_size, global_axis_size, devices, name,
call_jaxpr: core.Jaxpr, backend=None, in_axes, out_axes,
donated_invars, is_explicit_global_axis_size):
del donated_invars # Unused.
mlir.check_backend_matches(backend, ctx.module_context.platforms)
# We in-line here rather than generating a Call HLO as in the xla_call
# translation rule just because the extra tuple stuff is a pain.
if ctx.module_context.axis_env.names and devices is not None:
raise ValueError("Nested pmap with explicit devices argument.")
new_env = _extend_axis_env(ctx.module_context.axis_env, axis_name,
global_axis_size)
# Shard the in_nodes that are mapped
in_avals = [v.aval for v in call_jaxpr.invars]
in_nodes_sharded = (
_hlo_shard(aval, new_env, in_node, in_axis)
if in_axis is not None else in_node
for aval, in_node, in_axis in zip(in_avals, in_nodes, in_axes))
with core.extend_axis_env_nd([(axis_name, global_axis_size)]):
sub_ctx = ctx.module_context.replace(
axis_context=sharding_impls.ReplicaAxisContext(new_env))
sharded_outs, _ = mlir.jaxpr_subcomp(
sub_ctx, call_jaxpr,
ctx.name_stack.extend(util.wrap_name('pmap', name)),
mlir.TokenSet(), (), *in_nodes_sharded,
dim_var_values=ctx.dim_var_values, const_lowering=ctx.const_lowering)
out_avals = [v.aval for v in call_jaxpr.outvars]
outs = [_hlo_unshard(ctx, aval, new_env, out_axis, shard)
for aval, out_axis, shard in zip(out_avals, out_axes, sharded_outs)]
return outs
mlir.register_lowering(xla_pmap_p, _pmap_lowering)
def tile_aval_nd(axis_sizes, in_axes: ArrayMapping, aval):
assert isinstance(aval, ShapedArray)
shape = list(aval.shape)
for name, axis in in_axes.items():
assert shape[axis] % axis_sizes[name] == 0
shape[axis] //= axis_sizes[name]
return aval.update(shape=tuple(shape))
def untile_aval_nd(axis_sizes, out_axes: ArrayMapping, aval):
assert isinstance(aval, ShapedArray)
shape = list(aval.shape)
for name, axis in out_axes.items():
shape[axis] *= axis_sizes[name]
return aval.update(shape=tuple(shape))
def mesh_local_to_global(mesh, axes: ArrayMapping, aval):
return untile_aval_nd(mesh.shape, axes,
tile_aval_nd(mesh.local_mesh.shape, axes, aval))
def mesh_global_to_local(mesh, axes: ArrayMapping, aval):
return untile_aval_nd(mesh.local_mesh.shape, axes,
tile_aval_nd(mesh.shape, axes, aval))
full_to_shard_p = core.Primitive('full_to_shard')
@full_to_shard_p.def_abstract_eval
def _full_to_shard_abstract_eval(x, axes, mesh, **_):
# TODO: Assert x is a global aval! Or ideally check that it's global in dims from axes!
return tile_aval_nd(mesh.shape, axes, x)
def manual_proto(
aval: core.ShapedArray,
manual_axes_set: frozenset[sharding_impls.MeshAxisName], mesh: Mesh):
"""Create an OpSharding proto that declares all mesh axes from `axes` as manual
and all others as replicated.
"""
named_mesh_shape = mesh.shape
mesh_shape = list(named_mesh_shape.values())
axis_order = {axis: i for i, axis in enumerate(mesh.axis_names)}
manual_axes = sorted(manual_axes_set, key=str)
replicated_axes = [axis for axis in mesh.axis_names
if axis not in manual_axes_set]
tad_perm = ([axis_order[a] for a in replicated_axes] +
[axis_order[a] for a in manual_axes])
tad_shape = [1] * aval.ndim
tad_shape.append(math.prod([named_mesh_shape[a] for a in replicated_axes]))
tad_shape.append(math.prod([named_mesh_shape[a] for a in manual_axes]))
proto = xc.OpSharding()
proto.type = xc.OpSharding.Type.OTHER
proto.tile_assignment_dimensions = tad_shape
proto.iota_reshape_dims = mesh_shape
proto.iota_transpose_perm = tad_perm
proto.last_tile_dims = [xc.OpSharding.Type.REPLICATED, xc.OpSharding.Type.MANUAL]
return proto
@partial(mlir.register_lowering, full_to_shard_p)
def _full_to_shard_lowering(ctx, x, *, axes: ArrayMapping, mesh: Mesh,
manual_axes: frozenset[sharding_impls.MeshAxisName]):
# TODO: Can we short-circuit for replicated values? Probably not.
aval_in, = ctx.avals_in
aval_out, = ctx.avals_out
sharding_proto = (
NamedSharding(mesh, array_mapping_to_axis_resources(axes))
._to_xla_hlo_sharding(aval_in.ndim).to_proto())
unspecified_dims = set(range(aval_in.ndim)) - set(axes.values())
sx = mlir.wrap_with_sharding_op(ctx, x, aval_in, sharding_proto,
unspecified_dims=unspecified_dims)
proto = manual_proto(aval_in, manual_axes, mesh)
return (mlir.wrap_with_full_to_shard_op(ctx, sx, aval_out, proto,
unspecified_dims=unspecified_dims),)
shard_to_full_p = core.Primitive('shard_to_full')
@shard_to_full_p.def_abstract_eval
def _shard_to_full_abstract_eval(x, axes, mesh, **_):
# TODO: Assert x is a global aval! Or ideally check that it's global in dims from axes!
return untile_aval_nd(mesh.shape, axes, x)
@partial(mlir.register_lowering, shard_to_full_p)
def _shard_to_full_lowering(ctx: mlir.LoweringRuleContext, x, *, axes: ArrayMapping, mesh: Mesh,
manual_axes: frozenset[sharding_impls.MeshAxisName]):
aval_in, = ctx.avals_in
aval_out, = ctx.avals_out
proto = manual_proto(aval_in, manual_axes, mesh) # type: ignore
unspecified_dims = set(range(aval_in.ndim)) - set(axes.values()) # type: ignore
sx = mlir.wrap_with_sharding_op(ctx, x, aval_in, proto,
unspecified_dims=unspecified_dims)
sharding_proto = (
NamedSharding(mesh, array_mapping_to_axis_resources(axes))
._to_xla_hlo_sharding(aval_out.ndim).to_proto())
return (mlir.wrap_with_shard_to_full_op(ctx, sx, aval_out, sharding_proto,
unspecified_dims),)
def check_if_any_auto(
shardings: Iterable[(JSharding | AUTO | UnspecifiedValue)]) -> bool:
for s in shardings:
if isinstance(s, AUTO):
return True
return False
ShardingInfo = tuple[
Union[JSharding, UnspecifiedValue, AUTO],
stages.MismatchType,
Union[Any, None], # Any is dispatch.SourceInfo to avoid circular imports
]
def get_default_device() -> xc.Device:
if isinstance(config.default_device.value, str):
return xb.get_backend(config.default_device.value).local_devices()[0]
else:
return config.default_device.value or xb.local_devices()[0]
def _get_and_check_device_assignment(
shardings: Iterable[ShardingInfo],
context_devices: Sequence[xc.Device] | None,
) -> tuple[xc.Client, tuple[xc.Device, ...] | None, int]:
first_sharding_info = None
context_devices = () if context_devices is None else tuple(context_devices)
abstract_mesh = None
any_concrete_sharding = True if context_devices else False
for sh, s_type, source_info in shardings:
if isinstance(sh, UnspecifiedValue):
continue
elif isinstance(sh, NamedSharding) and isinstance(sh.mesh, AbstractMesh):
if (abstract_mesh is not None and not sh.mesh.empty and
abstract_mesh.size != sh.mesh.size):
raise ValueError("AbstractMesh should be of the same size across all "
f"shardings. Got {abstract_mesh} and {sh.mesh}")
abstract_mesh = sh.mesh
else:
any_concrete_sharding = True
arr_device_assignment = sh._device_assignment
if first_sharding_info is None:
first_sharding_info = (arr_device_assignment, s_type, source_info)
if not context_devices:
if first_sharding_info[0] != arr_device_assignment:
raise stages.DeviceAssignmentMismatchError([
stages.DeviceAssignmentMismatch(*first_sharding_info),
stages.DeviceAssignmentMismatch(
arr_device_assignment, s_type, source_info)])
else:
if context_devices != arr_device_assignment:
raise stages.DeviceAssignmentMismatchError([
stages.DeviceAssignmentMismatch(
context_devices, stages.MismatchType.CONTEXT_DEVICES, None),
stages.DeviceAssignmentMismatch(
arr_device_assignment, s_type, source_info)])
if first_sharding_info is None and context_devices:
device_assignment = context_devices
elif first_sharding_info is None:
device_assignment = (get_default_device(),)
else:
device_assignment = first_sharding_info[0] # type: ignore
backend = xb.get_device_backend(device_assignment[0])
if (any_concrete_sharding and abstract_mesh is not None and
len(device_assignment) != abstract_mesh.size):
raise ValueError(
f"AbstractMesh size: {abstract_mesh.size} does not match the"
f" device assignment size: {len(device_assignment)}")
if any_concrete_sharding or abstract_mesh is None:
return backend, device_assignment, len(device_assignment) # type: ignore
else:
return backend, None, abstract_mesh.size
MaybeSharding = Union[JSharding, UnspecifiedValue]
def prune_unused_inputs(
jaxpr: core.Jaxpr,
) -> tuple[core.Jaxpr, set[int], set[int]]:
used_outputs = [True] * len(jaxpr.outvars)
new_jaxpr, used_consts, used_inputs = pe.dce_jaxpr_consts(jaxpr, used_outputs)
kept_const_idx = {i for i, b in enumerate(used_consts) if b}
kept_var_idx = {i for i, b in enumerate(used_inputs) if b}
return new_jaxpr, kept_const_idx, kept_var_idx
@weakref_lru_cache
def _dce_jaxpr(closed_jaxpr, keep_unused, donated_invars, auto_spmd_lowering):
assert isinstance(closed_jaxpr, core.ClosedJaxpr)
jaxpr = closed_jaxpr.jaxpr
consts = closed_jaxpr.consts
in_avals = closed_jaxpr.in_avals
if (keep_unused or auto_spmd_lowering or
any(hasattr(a, "shape") and not core.is_constant_shape(a.shape)
for a in in_avals)):
kept_var_idx = set(range(len(in_avals)))
else:
jaxpr, kept_const_idx, kept_var_idx = prune_unused_inputs(jaxpr)
consts = [c for i, c in enumerate(consts) if i in kept_const_idx]
donated_invars = tuple(x for i, x in enumerate(donated_invars) if i in kept_var_idx)
del kept_const_idx
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
return closed_jaxpr, donated_invars, kept_var_idx
| ExecuteReplicated |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/pareto.py | {
"start": 3693,
"end": 11678
} | class ____:
"""Maintains an approximate pareto front of ConjectureData objects. That
is, we try to maintain a collection of objects such that no element of the
collection is pareto dominated by any other. In practice we don't quite
manage that, because doing so is computationally very expensive. Instead
we maintain a random sample of data objects that are "rarely" dominated by
any other element of the collection (roughly, no more than about 10%).
Only valid test cases are considered to belong to the pareto front - any
test case with a status less than valid is discarded.
Note that the pareto front is potentially quite large, and currently this
will store the entire front in memory. This is bounded by the number of
valid examples we run, which is max_examples in normal execution, and
currently we do not support workflows with large max_examples which have
large values of max_examples very well anyway, so this isn't a major issue.
In future we may weish to implement some sort of paging out to disk so that
we can work with larger fronts.
Additionally, because this is only an approximate pareto front, there are
scenarios where it can be much larger than the actual pareto front. There
isn't a huge amount we can do about this - checking an exact pareto front
is intrinsically quadratic.
"Most" of the time we should be relatively close to the true pareto front,
say within an order of magnitude, but it's not hard to construct scenarios
where this is not the case. e.g. suppose we enumerate all valid test cases
in increasing shortlex order as s_1, ..., s_n, ... and have scores f and
g such that f(s_i) = min(i, N) and g(s_i) = 1 if i >= N, then the pareto
front is the set {s_1, ..., S_N}, but the only element of the front that
will dominate s_i when i > N is S_N, which we select with probability
1 / N. A better data structure could solve this, but at the cost of more
expensive operations and higher per element memory use, so we'll wait to
see how much of a problem this is in practice before we try that.
"""
def __init__(self, random: Random) -> None:
self.__random = random
self.__eviction_listeners: list[Callable[[ConjectureResult], None]] = []
self.front: SortedList[ConjectureResult] = SortedList(
key=lambda d: sort_key(d.nodes)
)
self.__pending: ConjectureResult | None = None
def add(self, data: ConjectureData | ConjectureResult | _Overrun) -> bool:
"""Attempts to add ``data`` to the pareto front. Returns True if
``data`` is now in the front, including if data is already in the
collection, and False otherwise"""
if data.status < Status.VALID:
return False
assert not isinstance(data, _Overrun)
data = data.as_result()
assert not isinstance(data, _Overrun)
if not self.front:
self.front.add(data)
return True
if data in self.front:
return True
# We add data to the pareto front by adding it unconditionally and then
# doing a certain amount of randomized "clear down" - testing a random
# set of elements (currently 10) to see if they are dominated by
# something else in the collection. If they are, we remove them.
self.front.add(data)
assert self.__pending is None
try:
self.__pending = data
# We maintain a set of the current exact pareto front of the
# values we've sampled so far. When we sample a new element we
# either add it to this exact pareto front or remove it from the
# collection entirely.
front = LazySequenceCopy(self.front)
# We track which values we are going to remove and remove them all
# at the end so the shape of the front doesn't change while we're
# using it.
to_remove: list[ConjectureResult] = []
# We now iteratively sample elements from the approximate pareto
# front to check whether they should be retained. When the set of
# dominators gets too large we have sampled at least 10 elements
# and it gets too expensive to continue, so we consider that enough
# due diligence.
i = self.front.index(data)
# First we attempt to look for values that must be removed by the
# addition of the data. These are necessarily to the right of it
# in the list.
failures = 0
while i + 1 < len(front) and failures < 10:
j = self.__random.randrange(i + 1, len(front))
candidate = front.pop(j)
dom = dominance(data, candidate)
assert dom != DominanceRelation.RIGHT_DOMINATES
if dom == DominanceRelation.LEFT_DOMINATES:
to_remove.append(candidate)
failures = 0
else:
failures += 1
# Now we look at the points up to where we put data in to see if
# it is dominated. While we're here we spend some time looking for
# anything else that might be dominated too, compacting down parts
# of the list.
dominators = [data]
while i >= 0 and len(dominators) < 10:
front.swap(i, self.__random.randint(0, i))
candidate = front[i]
already_replaced = False
j = 0
while j < len(dominators):
v = dominators[j]
dom = dominance(candidate, v)
if dom == DominanceRelation.LEFT_DOMINATES:
if not already_replaced:
already_replaced = True
dominators[j] = candidate
j += 1
else: # pragma: no cover # flaky, by test_database_contains_only_pareto_front
dominators[j], dominators[-1] = (
dominators[-1],
dominators[j],
)
dominators.pop()
to_remove.append(v)
elif dom == DominanceRelation.RIGHT_DOMINATES:
to_remove.append(candidate)
break
elif dom == DominanceRelation.EQUAL:
break
else:
j += 1
else:
dominators.append(candidate)
i -= 1
for v in to_remove:
self._remove(v)
return data in self.front
finally:
self.__pending = None
def on_evict(self, f: Callable[[ConjectureResult], None]) -> None:
"""Register a listener function that will be called with data when it
gets removed from the front because something else dominates it."""
self.__eviction_listeners.append(f)
def __contains__(self, data: object) -> bool:
if not isinstance(data, (ConjectureData, ConjectureResult)):
return False
result = data.as_result()
if isinstance(result, _Overrun):
return False
return result in self.front
def __iter__(self) -> Iterator[ConjectureResult]:
return iter(self.front)
def __getitem__(self, i: int) -> ConjectureResult:
return self.front[i]
def __len__(self) -> int:
return len(self.front)
def _remove(self, data: ConjectureResult) -> None:
try:
self.front.remove(data)
except ValueError:
return
if data is not self.__pending:
for f in self.__eviction_listeners:
f(data)
| ParetoFront |
python | paramiko__paramiko | paramiko/server.py | {
"start": 1078,
"end": 25190
} | class ____:
"""
This class defines an interface for controlling the behavior of Paramiko
in server mode.
Methods on this class are called from Paramiko's primary thread, so you
shouldn't do too much work in them. (Certainly nothing that blocks or
sleeps.)
"""
def check_channel_request(self, kind, chanid):
"""
Determine if a channel request of a given type will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
If you allow channel requests (and an ssh server that didn't would be
useless), you should also override some of the channel request methods
below, which are used to determine which services will be allowed on
a given channel:
- `check_channel_pty_request`
- `check_channel_shell_request`
- `check_channel_subsystem_request`
- `check_channel_window_change_request`
- `check_channel_x11_request`
- `check_channel_forward_agent_request`
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param str kind:
the kind of channel the client would like to open (usually
``"session"``).
:param int chanid: ID of the channel
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def get_allowed_auths(self, username):
"""
Return a list of authentication methods supported by the server.
This list is sent to clients attempting to authenticate, to inform them
of authentication methods that might be successful.
The "list" is actually a string of comma-separated names of types of
authentication. Possible values are ``"password"``, ``"publickey"``,
and ``"none"``.
The default implementation always returns ``"password"``.
:param str username: the username requesting authentication.
:return: a comma-separated `str` of authentication types
"""
return "password"
def check_auth_none(self, username):
"""
Determine if a client may open channels with no (further)
authentication.
Return ``AUTH_FAILED`` if the client must authenticate, or
``AUTH_SUCCESSFUL`` if it's okay for the client to not
authenticate.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the client.
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds.
:rtype: int
"""
return AUTH_FAILED
def check_auth_password(self, username, password):
"""
Determine if a given username and password supplied by the client is
acceptable for use in authentication.
Return ``AUTH_FAILED`` if the password is not accepted,
``AUTH_SUCCESSFUL`` if the password is accepted and completes
the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this key is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client.
:param str password: the password given by the client.
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the password auth is
successful, but authentication must continue.
:rtype: int
"""
return AUTH_FAILED
def check_auth_publickey(self, username, key):
"""
Determine if a given key supplied by the client is acceptable for use
in authentication. You should override this method in server mode to
check the username and key and decide if you would accept a signature
made using this key.
Return ``AUTH_FAILED`` if the key is not accepted,
``AUTH_SUCCESSFUL`` if the key is accepted and completes the
authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this password is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
Note that you don't have to actually verify any key signtature here.
If you're willing to accept the key, Paramiko will do the work of
verifying the client's signature.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param .PKey key: the key object provided by the client
:return:
``AUTH_FAILED`` if the client can't authenticate with this key;
``AUTH_SUCCESSFUL`` if it can; ``AUTH_PARTIALLY_SUCCESSFUL`` if it
can authenticate with this key but must continue with
authentication
:rtype: int
"""
return AUTH_FAILED
def check_auth_interactive(self, username, submethods):
"""
Begin an interactive authentication challenge, if supported. You
should override this method in server mode if you want to support the
``"keyboard-interactive"`` auth type, which requires you to send a
series of questions for the client to answer.
Return ``AUTH_FAILED`` if this auth method isn't supported. Otherwise,
you should return an `.InteractiveQuery` object containing the prompts
and instructions for the user. The response will be sent via a call
to `check_auth_interactive_response`.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param str submethods:
a comma-separated list of methods preferred by the client (usually
empty)
:return:
``AUTH_FAILED`` if this auth method isn't supported; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_interactive_response(self, responses):
"""
Continue or finish an interactive authentication challenge, if
supported. You should override this method in server mode if you want
to support the ``"keyboard-interactive"`` auth type.
Return ``AUTH_FAILED`` if the responses are not accepted,
``AUTH_SUCCESSFUL`` if the responses are accepted and complete
the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this set of responses is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
If you wish to continue interactive authentication with more questions,
you may return an `.InteractiveQuery` object, which should cause the
client to respond with more answers, calling this method again. This
cycle can continue indefinitely.
The default implementation always returns ``AUTH_FAILED``.
:param responses: list of `str` responses from the client
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the interactive auth
is successful, but authentication must continue; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_gssapi_with_mic(
self, username, gss_authenticated=AUTH_FAILED, cc_file=None
):
"""
Authenticate the given user to the server if he is a valid krb5
principal.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_filename: The krb5 client credentials cache filename
:return: ``AUTH_FAILED`` if the user is not authenticated otherwise
``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss`
:note: : We are just checking in L{AuthHandler} that the given user is
a valid krb5 principal!
We don't check if the krb5 principal is allowed to log in on
the server, because there is no way to do that in python. So
if you develop your own SSH server with paramiko for a certain
platform like Linux, you should call C{krb5_kuserok()} in
your local kerberos library to make sure that the
krb5_principal has an account on the server and is allowed to
log in as a user.
:see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_gssapi_keyex(
self, username, gss_authenticated=AUTH_FAILED, cc_file=None
):
"""
Authenticate the given user to the server if he is a valid krb5
principal and GSS-API Key Exchange was performed.
If GSS-API Key Exchange was not performed, this authentication method
won't be available.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
:param str cc_filename: The krb5 client credentials cache filename
:return: ``AUTH_FAILED`` if the user is not authenticated otherwise
``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss` `.kex_gss`
:note: : We are just checking in L{AuthHandler} that the given user is
a valid krb5 principal!
We don't check if the krb5 principal is allowed to log in on
the server, because there is no way to do that in python. So
if you develop your own SSH server with paramiko for a certain
platform like Linux, you should call C{krb5_kuserok()} in
your local kerberos library to make sure that the
krb5_principal has an account on the server and is allowed
to log in as a user.
:see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def enable_auth_gssapi(self):
"""
Overwrite this function in your SSH server to enable GSSAPI
authentication.
The default implementation always returns false.
:returns bool: Whether GSSAPI authentication is enabled.
:see: `.ssh_gss`
"""
UseGSSAPI = False
return UseGSSAPI
def check_port_forward_request(self, address, port):
"""
Handle a request for port forwarding. The client is asking that
connections to the given address and port be forwarded back across
this ssh connection. An address of ``"0.0.0.0"`` indicates a global
address (any address associated with this server) and a port of ``0``
indicates that no specific port is requested (usually the OS will pick
a port).
The default implementation always returns ``False``, rejecting the
port forwarding request. If the request is accepted, you should return
the port opened for listening.
:param str address: the requested address
:param int port: the requested port
:return:
the port number (`int`) that was opened for listening, or ``False``
to reject
"""
return False
def cancel_port_forward_request(self, address, port):
"""
The client would like to cancel a previous port-forwarding request.
If the given address and port is being forwarded across this ssh
connection, the port should be closed.
:param str address: the forwarded address
:param int port: the forwarded port
"""
pass
def check_global_request(self, kind, msg):
"""
Handle a global request of the given ``kind``. This method is called
in server mode and client mode, whenever the remote host makes a global
request. If there are any arguments to the request, they will be in
``msg``.
There aren't any useful global requests defined, aside from port
forwarding, so usually this type of request is an extension to the
protocol.
If the request was successful and you would like to return contextual
data to the remote host, return a tuple. Items in the tuple will be
sent back with the successful result. (Note that the items in the
tuple can only be strings, ints, or bools.)
The default implementation always returns ``False``, indicating that it
does not support any global requests.
.. note:: Port forwarding requests are handled separately, in
`check_port_forward_request`.
:param str kind: the kind of global request being made.
:param .Message msg: any extra arguments to the request.
:return:
``True`` or a `tuple` of data if the request was granted; ``False``
otherwise.
"""
return False
# ...Channel requests...
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight, modes
):
"""
Determine if a pseudo-terminal of the given dimensions (usually
requested for shell access) can be provided on the given channel.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str term: type of terminal requested (for example, ``"vt100"``).
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return:
``True`` if the pseudo-terminal has been allocated; ``False``
otherwise.
"""
return False
def check_channel_shell_request(self, channel):
"""
Determine if a shell will be provided to the client on the given
channel. If this method returns ``True``, the channel should be
connected to the stdin/stdout of a shell (or something that acts like
a shell).
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:return:
``True`` if this channel is now hooked up to a shell; ``False`` if
a shell can't or won't be provided.
"""
return False
def check_channel_exec_request(self, channel, command):
"""
Determine if a shell command will be executed for the client. If this
method returns ``True``, the channel should be connected to the stdin,
stdout, and stderr of the shell command.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:param str command: the command to execute.
:return:
``True`` if this channel is now hooked up to the stdin, stdout, and
stderr of the executing command; ``False`` if the command will not
be executed.
.. versionadded:: 1.1
"""
return False
def check_channel_subsystem_request(self, channel, name):
"""
Determine if a requested subsystem will be provided to the client on
the given channel. If this method returns ``True``, all future I/O
through this channel will be assumed to be connected to the requested
subsystem. An example of a subsystem is ``sftp``.
The default implementation checks for a subsystem handler assigned via
`.Transport.set_subsystem_handler`.
If one has been set, the handler is invoked and this method returns
``True``. Otherwise it returns ``False``.
.. note:: Because the default implementation uses the `.Transport` to
identify valid subsystems, you probably won't need to override this
method.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str name: name of the requested subsystem.
:return:
``True`` if this channel is now hooked up to the requested
subsystem; ``False`` if that subsystem can't or won't be provided.
"""
transport = channel.get_transport()
handler_class, args, kwargs = transport._get_subsystem_handler(name)
if handler_class is None:
return False
handler = handler_class(channel, name, self, *args, **kwargs)
handler.start()
return True
def check_channel_window_change_request(
self, channel, width, height, pixelwidth, pixelheight
):
"""
Determine if the pseudo-terminal on the given channel can be resized.
This only makes sense if a pty was previously allocated on it.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return: ``True`` if the terminal was resized; ``False`` if not.
"""
return False
def check_channel_x11_request(
self,
channel,
single_connection,
auth_protocol,
auth_cookie,
screen_number,
):
"""
Determine if the client will be provided with an X11 session. If this
method returns ``True``, X11 applications should be routed through new
SSH channels, using `.Transport.open_x11_channel`.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the X11 request arrived on
:param bool single_connection:
``True`` if only a single X11 channel should be opened, else
``False``.
:param str auth_protocol: the protocol used for X11 authentication
:param str auth_cookie: the cookie used to authenticate to X11
:param int screen_number: the number of the X11 screen to connect to
:return: ``True`` if the X11 session was opened; ``False`` if not
"""
return False
def check_channel_forward_agent_request(self, channel):
"""
Determine if the client will be provided with an forward agent session.
If this method returns ``True``, the server will allow SSH Agent
forwarding.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on
:return: ``True`` if the AgentForward was loaded; ``False`` if not
If ``True`` is returned, the server should create an
:class:`AgentServerProxy` to access the agent.
"""
return False
def check_channel_direct_tcpip_request(self, chanid, origin, destination):
"""
Determine if a local port forwarding channel will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The origin and destination parameters are (ip_address, port) tuples
that correspond to both ends of the TCP connection in the forwarding
tunnel.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param int chanid: ID of the channel
:param tuple origin:
2-tuple containing the IP address and port of the originator
(client side)
:param tuple destination:
2-tuple containing the IP address and port of the destination
(server side)
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_channel_env_request(self, channel, name, value):
"""
Check whether a given environment variable can be specified for the
given channel. This method should return ``True`` if the server
is willing to set the specified environment variable. Note that
some environment variables (e.g., PATH) can be exceedingly
dangerous, so blindly allowing the client to set the environment
is almost certainly not a good idea.
The default implementation always returns ``False``.
:param channel: the `.Channel` the env request arrived on
:param str name: name
:param str value: Channel value
:returns: A boolean
"""
return False
def get_banner(self):
"""
A pre-login banner to display to the user. The message may span
multiple lines separated by crlf pairs. The language should be in
rfc3066 style, for example: en-US
The default implementation always returns ``(None, None)``.
:returns: A tuple containing the banner and language code.
.. versionadded:: 2.3
"""
return (None, None)
| ServerInterface |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 7931,
"end": 8371
} | class ____(ProjectMixin, UpdateView):
form_class = UpdateProjectForm
success_message = _("Project settings updated")
template_name = "projects/project_edit.html"
def get_success_url(self):
return reverse("projects_detail", args=[self.object.slug])
def get_form(self, data=None, files=None, **kwargs):
kwargs["user"] = self.request.user
return super().get_form(data, files, **kwargs)
| ProjectUpdate |
python | Lightning-AI__lightning | src/lightning/pytorch/profilers/xla.py | {
"start": 793,
"end": 3004
} | class ____(Profiler):
STEP_FUNCTIONS = {"validation_step", "test_step", "predict_step"}
RECORD_FUNCTIONS = {
"training_step",
"backward",
"validation_step",
"test_step",
"predict_step",
}
def __init__(self, port: int = 9012) -> None:
"""XLA Profiler will help you debug and optimize training workload performance for your models using Cloud TPU
performance tools.
Args:
port: the port to start the profiler server on. An exception is
raised if the provided port is invalid or busy.
"""
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
super().__init__(dirpath=None, filename=None)
self.port = port
self._recording_map: dict = {}
self._step_recoding_map: dict = {}
self._start_trace: bool = False
@override
def start(self, action_name: str) -> None:
import torch_xla.debug.profiler as xp
# The action name is formatted as '[TYPE]{class name}.{hook name}'
# Example: [LightningModule]BoringModel.training_step
if action_name.split(".")[-1] in self.RECORD_FUNCTIONS:
if not self._start_trace:
self.server = xp.start_server(self.port)
self._start_trace = True
if action_name.split(".")[-1] in self.STEP_FUNCTIONS:
step = self._get_step_num(action_name)
recording = xp.StepTrace(action_name, step_num=step)
else:
recording = xp.Trace(action_name)
recording.__enter__()
self._recording_map[action_name] = recording
@override
def stop(self, action_name: str) -> None:
if action_name in self._recording_map:
self._recording_map[action_name].__exit__(None, None, None)
del self._recording_map[action_name]
def _get_step_num(self, action_name: str) -> int:
if action_name not in self._step_recoding_map:
self._step_recoding_map[action_name] = 1
else:
self._step_recoding_map[action_name] += 1
return self._step_recoding_map[action_name]
| XLAProfiler |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_dag_processor_command.py | {
"start": 1055,
"end": 2815
} | class ____:
"""
Tests the CLI interface and that it correctly calls the DagProcessor
"""
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
@conf_vars({("core", "load_examples"): "False"})
@mock.patch("airflow.cli.commands.dag_processor_command.DagProcessorJobRunner")
def test_start_job(self, mock_runner):
"""Ensure that DagProcessorJobRunner is started"""
mock_runner.return_value.job_type = "DagProcessorJob"
args = self.parser.parse_args(["dag-processor"])
dag_processor_command.dag_processor(args)
mock_runner.return_value._execute.assert_called()
@conf_vars({("core", "load_examples"): "False"})
@mock.patch("airflow.cli.commands.dag_processor_command.DagProcessorJobRunner")
def test_bundle_names_passed(self, mock_runner, configure_testing_dag_bundle):
mock_runner.return_value.job_type = "DagProcessorJob"
args = self.parser.parse_args(["dag-processor", "--bundle-name", "testing"])
with configure_testing_dag_bundle(os.devnull):
dag_processor_command.dag_processor(args)
assert mock_runner.call_args.kwargs["processor"].bundle_names_to_parse == ["testing"]
@mock.patch("airflow.cli.hot_reload.run_with_reloader")
def test_dag_processor_with_dev_flag(self, mock_reloader):
"""Ensure that dag-processor with --dev flag uses hot-reload"""
args = self.parser.parse_args(["dag-processor", "--dev"])
dag_processor_command.dag_processor(args)
# Verify that run_with_reloader was called
mock_reloader.assert_called_once()
# The callback function should be callable
assert callable(mock_reloader.call_args[0][0])
| TestDagProcessorCommand |
python | matplotlib__matplotlib | lib/matplotlib/colorbar.py | {
"start": 5887,
"end": 7612
} | class ____:
"""
Shrink the Axes if there are triangular or rectangular extends.
"""
def __init__(self, cbar):
self._cbar = cbar
self._orig_locator = cbar.ax._axes_locator
def __call__(self, ax, renderer):
if self._orig_locator is not None:
pos = self._orig_locator(ax, renderer)
else:
pos = ax.get_position(original=True)
if self._cbar.extend == 'neither':
return pos
y, extendlen = self._cbar._proportional_y()
if not self._cbar._extend_lower():
extendlen[0] = 0
if not self._cbar._extend_upper():
extendlen[1] = 0
len = sum(extendlen) + 1
shrink = 1 / len
offset = extendlen[0] / len
# we need to reset the aspect ratio of the axes to account
# of the extends...
if hasattr(ax, '_colorbar_info'):
aspect = ax._colorbar_info['aspect']
else:
aspect = False
# now shrink and/or offset to take into account the
# extend tri/rectangles.
if self._cbar.orientation == 'vertical':
if aspect:
self._cbar.ax.set_box_aspect(aspect*shrink)
pos = pos.shrunk(1, shrink).translated(0, offset * pos.height)
else:
if aspect:
self._cbar.ax.set_box_aspect(1/(aspect * shrink))
pos = pos.shrunk(shrink, 1).translated(offset * pos.width, 0)
return pos
def get_subplotspec(self):
# make tight_layout happy..
return (
self._cbar.ax.get_subplotspec()
or getattr(self._orig_locator, "get_subplotspec", lambda: None)())
@_docstring.interpd
| _ColorbarAxesLocator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/auth.py | {
"start": 223,
"end": 1313
} | class ____(TokenAuthenticator):
def __init__(self, config, url_base):
self.config = config
self.url_auth = url_base + "login"
self.token = ""
def generate_cache_token(
self,
):
r = requests.get(
self.url_auth,
auth=HTTPBasicAuth(self.config.get("credentials").get("username"), self.config.get("credentials").get("password")),
)
if r.status_code == 200:
self.token = r.json().get("OB-TOKEN-V1")
else:
raise ConnectionError(r.json().get("message"))
def get_auth_header(self) -> Mapping[dict, Any]:
if self.config.get("credentials").get("type") == "access_token":
self.token = self.config.get("credentials").get("access_token")
return {"OB-TOKEN-V1": "{}".format(self.token)}
else:
if self.token:
return {"OB-TOKEN-V1": "{}".format(self.token)}
else:
self.generate_cache_token()
return {"OB-TOKEN-V1": "{}".format(self.token)}
| OutbrainAmplifyAuthenticator |
python | getsentry__responses | responses/registries.py | {
"start": 277,
"end": 2588
} | class ____:
def __init__(self) -> None:
self._responses: List["BaseResponse"] = []
@property
def registered(self) -> List["BaseResponse"]:
return self._responses
def reset(self) -> None:
self._responses = []
def find(
self, request: "PreparedRequest"
) -> Tuple[Optional["BaseResponse"], List[str]]:
found = None
found_match = None
match_failed_reasons = []
for i, response in enumerate(self.registered):
match_result, reason = response.matches(request)
if match_result:
if found is None:
found = i
found_match = response
else:
if self.registered[found].call_count > 0:
# that assumes that some responses were added between calls
self.registered.pop(found)
found_match = response
break
# Multiple matches found. Remove & return the first response.
return self.registered.pop(found), match_failed_reasons
else:
match_failed_reasons.append(reason)
return found_match, match_failed_reasons
def add(self, response: "BaseResponse") -> "BaseResponse":
if any(response is resp for resp in self.registered):
# if user adds multiple responses that reference the same instance.
# do a comparison by memory allocation address.
# see https://github.com/getsentry/responses/issues/479
response = copy.deepcopy(response)
self.registered.append(response)
return response
def remove(self, response: "BaseResponse") -> List["BaseResponse"]:
removed_responses = []
while response in self.registered:
self.registered.remove(response)
removed_responses.append(response)
return removed_responses
def replace(self, response: "BaseResponse") -> "BaseResponse":
try:
index = self.registered.index(response)
except ValueError:
raise ValueError(f"Response is not registered for URL {response.url}")
self.registered[index] = response
return response
| FirstMatchRegistry |
python | joke2k__faker | faker/providers/phone_number/tr_TR/__init__.py | {
"start": 49,
"end": 349
} | class ____(PhoneNumberProvider):
formats = (
"+90(###)#######",
"+90 (###) #######",
"0### ### ## ##",
"0##########",
"0###-### ####",
"(###)### ####",
"### # ###",
"+90(###)###-####x###",
"+90(###)###-####x####",
)
| Provider |
python | walkccc__LeetCode | solutions/3075. Maximize Happiness of Selected Children/3075.py | {
"start": 0,
"end": 263
} | class ____:
def maximumHappinessSum(self, happiness: list[int], k: int) -> int:
ans = 0
decremented = 0
happiness.sort(reverse=True)
for i in range(k):
ans += max(0, happiness[i] - decremented)
decremented += 1
return ans
| Solution |
python | django__django | tests/tasks/test_custom_backend.py | {
"start": 1023,
"end": 2529
} | class ____(SimpleTestCase):
def test_using_correct_backend(self):
self.assertEqual(default_task_backend, task_backends["default"])
self.assertIsInstance(task_backends["default"], CustomBackend)
self.assertEqual(default_task_backend.alias, "default")
self.assertEqual(default_task_backend.options, {"prefix": "PREFIX: "})
@mock.patch.multiple(CustomBackend, supports_async_task=False)
def test_enqueue_async_task_on_non_async_backend(self):
with self.assertRaisesMessage(
InvalidTask, "Backend does not support async Tasks."
):
default_task_backend.validate_task(test_tasks.noop_task_async)
def test_backend_does_not_support_priority(self):
with self.assertRaisesMessage(
InvalidTask, "Backend does not support setting priority of tasks."
):
test_tasks.noop_task.using(priority=10)
def test_options(self):
with self.assertLogs(__name__, level="INFO") as captured_logs:
test_tasks.noop_task.enqueue()
self.assertEqual(len(captured_logs.output), 1)
self.assertIn("PREFIX: Task enqueued", captured_logs.output[0])
def test_no_enqueue(self):
with self.assertRaisesMessage(
TypeError,
"Can't instantiate abstract class CustomBackendNoEnqueue "
"without an implementation for abstract method 'enqueue'",
):
test_tasks.noop_task.using(backend="no_enqueue")
| CustomBackendTestCase |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 75079,
"end": 75938
} | class ____(PerceiverAbstractDecoder):
"""
Baseline projection decoder (no cross-attention).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config):
super().__init__()
self.classifier = nn.Linear(config.d_latents, config.num_labels)
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return None
def forward(
self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
# (batch_size, num_latents, d_latents) -> (batch_size, d_latents)
z = torch.mean(z, dim=1)
# (batch_size, d_latents) -> (batch_size, config.num_labels)
logits = self.classifier(z)
return logits
| PerceiverProjectionDecoder |
python | jazzband__django-oauth-toolkit | tests/test_implicit.py | {
"start": 1395,
"end": 9017
} | class ____(BaseTest):
def test_pre_auth_valid_client_default_scopes(self):
"""
Test response for a valid client_id with response_type: token and default_scopes
"""
self.client.login(username="test_user", password="123456")
query_data = {
"client_id": self.application.client_id,
"response_type": "token",
"state": "random_state_string",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 200)
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["scope"].value(), "read")
def test_pre_auth_valid_client(self):
"""
Test response for a valid client_id with response_type: token
"""
self.client.login(username="test_user", password="123456")
query_data = {
"client_id": self.application.client_id,
"response_type": "token",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://example.org")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "read write")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_pre_auth_invalid_client(self):
"""
Test error for an invalid client_id with response_type: token
"""
self.client.login(username="test_user", password="123456")
query_data = {
"client_id": "fakeclientid",
"response_type": "token",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 400)
def test_pre_auth_default_redirect(self):
"""
Test for default redirect uri if omitted from query string with response_type: token
"""
self.client.login(username="test_user", password="123456")
self.application.redirect_uris = "http://localhost"
self.application.save()
query_data = {
"client_id": self.application.client_id,
"response_type": "token",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 200)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://localhost")
def test_pre_auth_forbibben_redirect(self):
"""
Test error when passing a forbidden redirect_uri in query string with response_type: token
"""
self.client.login(username="test_user", password="123456")
query_data = {
"client_id": self.application.client_id,
"response_type": "token",
"redirect_uri": "http://forbidden.it",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 400)
def test_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: token
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org#", response["Location"])
self.assertIn("access_token=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
def test_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_data = {
"client_id": self.application.client_id,
"response_type": "token",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org#", response["Location"])
self.assertIn("access_token=", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
def test_token_post_auth_deny(self):
"""
Test error when resource owner deny access
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"response_type": "token",
"allow": False,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("error=access_denied", response["Location"])
def test_implicit_redirection_uri_with_querystring(self):
"""
Tests that a redirection uri with query string is allowed
and query string is retained on redirection.
See https://rfc-editor.org/rfc/rfc6749.html#section-3.1.2
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com?foo=bar",
"response_type": "token",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.com?foo=bar", response["Location"])
self.assertIn("access_token=", response["Location"])
def test_implicit_fails_when_redirect_uri_path_is_invalid(self):
"""
Tests that a redirection uri is matched using scheme + netloc + path
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.com/a?foo=bar",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 400)
@pytest.mark.oauth2_settings(presets.DEFAULT_SCOPES_RO)
| TestImplicitAuthorizationCodeView |
python | huggingface__transformers | src/transformers/models/sew/modeling_sew.py | {
"start": 4656,
"end": 6375
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
stride=config.squeeze_factor,
)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = weight_norm(self.conv, name="weight", dim=2)
if hasattr(self.conv, "parametrizations"):
weight_g = self.conv.parametrizations.weight.original0
weight_v = self.conv.parametrizations.weight.original1
else:
weight_g = self.conv.weight_g
weight_v = self.conv.weight_v
deepspeed.zero.register_external_parameter(self, weight_v)
deepspeed.zero.register_external_parameter(self, weight_g)
else:
self.conv = weight_norm(self.conv, name="weight", dim=2)
self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
| SEWPositionalConvEmbedding |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/base_env.py | {
"start": 12198,
"end": 12769
} | class ____(_ActionTupleBase):
"""
An object whose fields correspond to actions of different types.
Continuous and discrete actions are numpy arrays of type float32 and
int32, respectively and are type checked on construction.
Dimensions are of (n_agents, continuous_size) and (n_agents, discrete_size),
respectively. Note, this also holds when continuous or discrete size is
zero.
"""
@property
def discrete_dtype(self) -> np.dtype:
"""
The dtype of a discrete action.
"""
return np.int32
| ActionTuple |
python | joke2k__faker | faker/providers/geo/__init__.py | {
"start": 163,
"end": 71507
} | class ____(BaseProvider):
"""
land_coords data extracted from geonames.org, under the Creative Commons Attribution 3.0 License.
Coordinates are in decimal format for mapping purposes.
Country code is in Alpha 2 format (https://www.nationsonline.org/oneworld/country_code_list.htm).
Timezones are canonical (https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
"""
land_coords: Tuple[PlaceType, ...] = (
("42.50729", "1.53414", "les Escaldes", "AD", "Europe/Andorra"),
("36.21544", "65.93249", "Sar-e Pul", "AF", "Asia/Kabul"),
("40.49748", "44.7662", "Hrazdan", "AM", "Asia/Yerevan"),
("-11.78333", "19.91667", "Luena", "AO", "Africa/Luanda"),
("-37.32167", "-59.13316", "Tandil", "AR", "America/Argentina/Buenos_Aires"),
(
"-34.74785",
"-58.70072",
"Pontevedra",
"AR",
"America/Argentina/Buenos_Aires",
),
("-34.64966", "-58.38341", "Barracas", "AR", "America/Argentina/Buenos_Aires"),
("-54.8", "-68.3", "Ushuaia", "AR", "America/Argentina/Ushuaia"),
("-31.25033", "-61.4867", "Rafaela", "AR", "America/Argentina/Cordoba"),
("-31.4488", "-60.93173", "Esperanza", "AR", "America/Argentina/Cordoba"),
("-34.64167", "-60.47389", "Chacabuco", "AR", "America/Argentina/Buenos_Aires"),
("-27.4338", "-65.61427", "Aguilares", "AR", "America/Argentina/Tucuman"),
("47.05", "15.46667", "Sankt Peter", "AT", "Europe/Vienna"),
("48.25", "16.4", "Floridsdorf", "AT", "Europe/Vienna"),
("-31.95224", "115.8614", "Perth", "AU", "Australia/Perth"),
("-37.9", "145.18333", "Wheelers Hill", "AU", "Australia/Melbourne"),
("-33.88096", "151.07986", "Strathfield", "AU", "Australia/Sydney"),
("-34.88422", "150.60036", "Nowra", "AU", "Australia/Sydney"),
("-25.54073", "152.70493", "Maryborough", "AU", "Australia/Brisbane"),
("-34.28853", "146.05093", "Griffith", "AU", "Australia/Sydney"),
("-33.79176", "151.08057", "Eastwood", "AU", "Australia/Sydney"),
("-37.88333", "145.06667", "Carnegie", "AU", "Australia/Melbourne"),
("-33.75881", "150.99292", "Baulkham Hills", "AU", "Australia/Sydney"),
("-27.50578", "153.10236", "Carindale", "AU", "Australia/Brisbane"),
("-32.05251", "115.88782", "Willetton", "AU", "Australia/Perth"),
("-38.16604", "145.13643", "Frankston South", "AU", "Australia/Melbourne"),
("38.45598", "48.87498", "Astara", "AZ", "Asia/Baku"),
("41.09246", "45.36561", "Qazax", "AZ", "Asia/Baku"),
("44.75874", "19.21437", "Bijeljina", "BA", "Europe/Sarajevo"),
("23.9028", "89.11943", "Kushtia", "BD", "Asia/Dhaka"),
("22.83957", "91.84128", "Manikchari", "BD", "Asia/Dhaka"),
("50.8", "3.16667", "Wevelgem", "BE", "Europe/Brussels"),
("51.12794", "4.21372", "Temse", "BE", "Europe/Brussels"),
("50.71229", "4.52529", "Rixensart", "BE", "Europe/Brussels"),
("50.74497", "3.20639", "Mouscron", "BE", "Europe/Brussels"),
("51.24197", "4.82313", "Lille", "BE", "Europe/Brussels"),
("51.03427", "5.37429", "Houthalen", "BE", "Europe/Brussels"),
("50.56149", "4.69889", "Gembloux", "BE", "Europe/Brussels"),
("50.88506", "4.07601", "Denderleeuw", "BE", "Europe/Brussels"),
("51.21187", "4.25633", "Beveren", "BE", "Europe/Brussels"),
("41.57439", "24.71204", "Smolyan", "BG", "Europe/Sofia"),
("43.4125", "23.225", "Montana", "BG", "Europe/Sofia"),
("42.7", "27.25", "Aytos", "BG", "Europe/Sofia"),
("8.88649", "2.59753", "Tchaourou", "BJ", "Africa/Porto-Novo"),
("-21.44345", "-65.71875", "Tupiza", "BO", "America/La_Paz"),
("-0.71667", "-48.52333", "Soure", "BR", "America/Belem"),
("-8.05389", "-34.88111", "Recife", "BR", "America/Recife"),
("-4.42472", "-41.45861", "Pedro II", "BR", "America/Fortaleza"),
("-3.14306", "-58.44417", "Itacoatiara", "BR", "America/Manaus"),
("-4.16694", "-40.7475", "Guaraciaba do Norte", "BR", "America/Fortaleza"),
("-8.66667", "-35.71667", "Catende", "BR", "America/Recife"),
("-8.28333", "-35.03333", "Cabo", "BR", "America/Recife"),
("-4.24444", "-42.29444", "Barras", "BR", "America/Fortaleza"),
("-3.20333", "-52.20639", "Altamira", "BR", "America/Santarem"),
("-20.87306", "-48.29694", "Viradouro", "BR", "America/Sao_Paulo"),
("-22.97056", "-46.99583", "Valinhos", "BR", "America/Sao_Paulo"),
("-10.95817", "-38.79084", "Tucano", "BR", "America/Bahia"),
("-28.81833", "-52.51028", "Soledade", "BR", "America/Sao_Paulo"),
("-23.44361", "-51.87389", "Sarandi", "BR", "America/Sao_Paulo"),
("-22.45667", "-47.53028", "Santa Gertrudes", "BR", "America/Sao_Paulo"),
("-11.48472", "-37.93278", "Rio Real", "BR", "America/Bahia"),
("-19.32556", "-41.25528", "Resplendor", "BR", "America/Sao_Paulo"),
("-26.22861", "-52.67056", "Pato Branco", "BR", "America/Sao_Paulo"),
("-25.42944", "-50.00639", "Palmeira", "BR", "America/Sao_Paulo"),
("-12.91667", "-39.25", "Muritiba", "BR", "America/Bahia"),
("-21.41222", "-42.19667", "Miracema", "BR", "America/Sao_Paulo"),
("-28.44917", "-52.2", "Marau", "BR", "America/Sao_Paulo"),
("-22.92306", "-53.13722", "Loanda", "BR", "America/Sao_Paulo"),
("-10.91722", "-37.65", "Lagarto", "BR", "America/Maceio"),
("-19.72806", "-50.19556", "Iturama", "BR", "America/Sao_Paulo"),
("-21.205", "-41.88778", "Itaperuna", "BR", "America/Sao_Paulo"),
("-20.25333", "-43.80139", "Itabirito", "BR", "America/Sao_Paulo"),
("-28.24", "-48.67028", "Imbituba", "BR", "America/Sao_Paulo"),
("-22.53722", "-42.98194", "Guapimirim", "BR", "America/Sao_Paulo"),
("-19.7625", "-44.31389", "Esmeraldas", "BR", "America/Sao_Paulo"),
("-25.42778", "-49.27306", "Curitiba", "BR", "America/Sao_Paulo"),
("-14.66463", "-52.35558", "Nova Xavantina", "BR", "America/Cuiaba"),
("-29.2975", "-51.50361", "Carlos Barbosa", "BR", "America/Sao_Paulo"),
("-15.675", "-38.94722", "Canavieiras", "BR", "America/Bahia"),
("-17.74431", "-48.62789", "Caldas Novas", "BR", "America/Sao_Paulo"),
("-23.7975", "-48.59278", "Buri", "BR", "America/Sao_Paulo"),
("-10.90889", "-37.03861", "Barra dos Coqueiros", "BR", "America/Maceio"),
("-22.57306", "-47.1725", "Artur Nogueira", "BR", "America/Sao_Paulo"),
("-10.91111", "-37.07167", "Aracaju", "BR", "America/Maceio"),
("-21.42917", "-45.94722", "Alfenas", "BR", "America/Sao_Paulo"),
("-8.76194", "-63.90389", "Porto Velho", "BR", "America/Porto_Velho"),
("-21.44236", "27.46153", "Tonota", "BW", "Africa/Gaborone"),
("55.1904", "30.2049", "Vitebsk", "BY", "Europe/Minsk"),
("53.5942", "25.8191", "Novogrudok", "BY", "Europe/Minsk"),
("52.4089", "31.3237", "Dobrush", "BY", "Europe/Minsk"),
("45.43341", "-73.86586", "Beaconsfield", "CA", "America/Toronto"),
("46.23899", "-63.13414", "Charlottetown", "CA", "America/Halifax"),
("45.4473", "-73.75335", "Dorval", "CA", "America/Toronto"),
("49.88307", "-119.48568", "Kelowna", "CA", "America/Vancouver"),
("43.86682", "-79.2663", "Markham", "CA", "America/Toronto"),
("42.8334", "-80.38297", "Norfolk County", "CA", "America/Toronto"),
("45.44868", "-73.81669", "Pointe-Claire", "CA", "America/Toronto"),
("45.40008", "-73.58248", "Sainte-Catherine", "CA", "America/Toronto"),
("53.51684", "-113.3187", "Sherwood Park", "CA", "America/Edmonton"),
("50.26729", "-119.27337", "Vernon", "CA", "America/Vancouver"),
("46.1351", "-60.1831", "Sydney", "CA", "America/Glace_Bay"),
("0.76755", "24.43973", "Yangambi", "CD", "Africa/Lubumbashi"),
("-8.73508", "24.99798", "Kamina", "CD", "Africa/Lubumbashi"),
("0.49113", "29.47306", "Beni", "CD", "Africa/Lubumbashi"),
("-4.5833", "15.16554", "Kasangulu", "CD", "Africa/Kinshasa"),
("4.94273", "15.87735", "Carnot", "CF", "Africa/Bangui"),
("-4.26613", "15.28318", "Brazzaville", "CG", "Africa/Brazzaville"),
("46.18396", "6.10237", "Onex", "CH", "Europe/Zurich"),
("47.30997", "8.52462", "Adliswil", "CH", "Europe/Zurich"),
("5.84752", "-5.682", "Lakota", "CI", "Africa/Abidjan"),
("5.27247", "-3.59625", "Bonoua", "CI", "Africa/Abidjan"),
("-33.59217", "-70.6996", "San Bernardo", "CL", "America/Santiago"),
("-30.60106", "-71.19901", "Ovalle", "CL", "America/Santiago"),
("-32.45242", "-71.23106", "La Ligua", "CL", "America/Santiago"),
("-36.9256", "-73.02841", "Chiguayante", "CL", "America/Santiago"),
("4.96667", "10.7", "Tonga", "CM", "Africa/Douala"),
("3.51667", "11.5", "Mbalmayo", "CM", "Africa/Douala"),
("4.2475", "9.00472", "Idenao", "CM", "Africa/Douala"),
("46.51872", "86.00214", "Hoxtolgay", "CN", "Asia/Urumqi"),
("36.81667", "117.81667", "Zhoucun", "CN", "Asia/Shanghai"),
("34.86472", "117.55417", "Zaozhuang", "CN", "Asia/Shanghai"),
("23.73333", "114.68333", "Heyuan", "CN", "Asia/Shanghai"),
("34.65918", "109.22921", "Yanliang", "CN", "Asia/Shanghai"),
("38.40917", "112.73333", "Xinzhou", "CN", "Asia/Shanghai"),
("33.78333", "114.51667", "Wacheng", "CN", "Asia/Shanghai"),
("27.85", "112.9", "Xiangtan", "CN", "Asia/Shanghai"),
("37.19723", "122.05228", "Tianfu", "CN", "Asia/Shanghai"),
("34.85", "117.33333", "Taozhuang", "CN", "Asia/Shanghai"),
("35.64889", "117.27583", "Sishui", "CN", "Asia/Shanghai"),
("27.34089", "117.4831", "Shaowu", "CN", "Asia/Shanghai"),
("37.30553", "120.82747", "Zhuangyuan", "CN", "Asia/Shanghai"),
("35.50056", "117.63083", "Pingyi", "CN", "Asia/Shanghai"),
("27.92333", "118.53333", "Pucheng", "CN", "Asia/Shanghai"),
("24.28859", "116.11768", "Meizhou", "CN", "Asia/Shanghai"),
("37.65181", "120.33063", "Longgang", "CN", "Asia/Shanghai"),
("23.29549", "113.82465", "Licheng", "CN", "Asia/Shanghai"),
("36.19278", "117.65694", "Laiwu", "CN", "Asia/Shanghai"),
("30.35028", "112.19028", "Jingzhou", "CN", "Asia/Shanghai"),
("32.50611", "120.14278", "Jiangyan", "CN", "Asia/Shanghai"),
("30.24706", "115.04814", "Huangshi", "CN", "Asia/Shanghai"),
("37.73222", "115.70111", "Hengshui", "CN", "Asia/Shanghai"),
("28.88162", "120.03308", "Guli", "CN", "Asia/Shanghai"),
("23.02677", "113.13148", "Foshan", "CN", "Asia/Shanghai"),
("35.85", "117.7", "Dongdu", "CN", "Asia/Shanghai"),
("32.54278", "111.50861", "Danjiangkou", "CN", "Asia/Shanghai"),
("35.20889", "111.73861", "Changzhi", "CN", "Asia/Shanghai"),
("34.56861", "105.89333", "Beidao", "CN", "Asia/Shanghai"),
("29.98869", "122.20488", "Zhoushan", "CN", "Asia/Shanghai"),
("40.66482", "122.22833", "Yingkou", "CN", "Asia/Shanghai"),
("46.08333", "122.08333", "Ulanhot", "CN", "Asia/Shanghai"),
("45.35", "126.28333", "Shuangcheng", "CN", "Asia/Shanghai"),
("41.09822", "120.74792", "Nanpiao", "CN", "Asia/Shanghai"),
("41.27194", "123.17306", "Liaoyang", "CN", "Asia/Shanghai"),
("41.94175", "123.50266", "Hushitai", "CN", "Asia/Shanghai"),
("40.85158", "122.74754", "Haicheng", "CN", "Asia/Shanghai"),
("42.64031", "125.51176", "Dongfeng", "CN", "Asia/Shanghai"),
("45.75279", "130.57211", "Boli", "CN", "Asia/Shanghai"),
("31.64615", "120.74221", "Changshu City", "CN", "Asia/Shanghai"),
("7.83389", "-72.47417", "Villa del Rosario", "CO", "America/Bogota"),
("6.46838", "-73.26022", "Socorro", "CO", "America/Bogota"),
("8.79577", "-75.69947", "San Carlos", "CO", "America/Bogota"),
("10.98778", "-74.95472", "Puerto Colombia", "CO", "America/Bogota"),
("4.73245", "-74.26419", "Madrid", "CO", "America/Bogota"),
("5.20856", "-74.73584", "Honda", "CO", "America/Bogota"),
("10.15031", "-73.9614", "El Copey", "CO", "America/Bogota"),
("3.8801", "-77.03116", "Buenaventura", "CO", "America/Bogota"),
("5.6561", "-75.87877", "Andes", "CO", "America/Bogota"),
("9.92787", "-84.13722", "San Rafael", "CR", "America/Costa_Rica"),
("10.63504", "-85.43772", "Liberia", "CR", "America/Costa_Rica"),
("23.15678", "-81.24441", "Varadero", "CU", "America/Havana"),
("20.14298", "-77.43532", "Media Luna", "CU", "America/Havana"),
("23.04419", "-82.00919", "Jaruco", "CU", "America/Havana"),
("22.98212", "-80.58556", "Corralillo", "CU", "America/Havana"),
("23.0072", "-82.4017", "Boyeros", "CU", "America/Havana"),
("50.50301", "13.63617", "Most", "CZ", "Europe/Prague"),
("50.23271", "12.87117", "Karlovy Vary", "CZ", "Europe/Prague"),
("50.073658", "14.418540", "Praha", "CZ", "Europe/Prague"),
("49.144482", "15.006139", "Jindřichův Hradec", "CZ", "Europe/Prague"),
("48.975658", "14.480255", "České Budějovice", "CZ", "Europe/Prague"),
("50.511002", "14.150558", "Terezín", "CZ", "Europe/Prague"),
("49.183239", "15.454273", "Telč", "CZ", "Europe/Prague"),
("49.952431", "15.268654", "Kutná Hora", "CZ", "Europe/Prague"),
("49.593777", "17.250879", "Olomouc", "CZ", "Europe/Prague"),
("49.738430", "13.373637", "Plzeň", "CZ", "Europe/Prague"),
("48.812737", "14.317466", "Český Krumlov", "CZ", "Europe/Prague"),
("49.195061", "16.606836", "Brno", "CZ", "Europe/Prague"),
("50.598427", "13.610242", "Litvínov", "CZ", "Europe/Prague"),
("49.820923", "18.262524", "Ostrava", "CZ", "Europe/Prague"),
("51.04962", "12.1369", "Zeitz", "DE", "Europe/Berlin"),
("52.59319", "13.32127", "Wittenau", "DE", "Europe/Berlin"),
("50.82709", "6.9747", "Wesseling", "DE", "Europe/Berlin"),
("50.9803", "11.32903", "Weimar", "DE", "Europe/Berlin"),
("52.86147", "9.5926", "Walsrode", "DE", "Europe/Berlin"),
("51.88333", "8.51667", "Verl", "DE", "Europe/Berlin"),
("48.07667", "8.64409", "Trossingen", "DE", "Europe/Berlin"),
("48.78232", "9.17702", "Stuttgart", "DE", "Europe/Berlin"),
("53.59337", "9.47629", "Stade", "DE", "Europe/Berlin"),
("50.80019", "7.20769", "Siegburg", "DE", "Europe/Berlin"),
("51.21667", "6.26667", "Schwalmtal", "DE", "Europe/Berlin"),
("54.52156", "9.5586", "Schleswig", "DE", "Europe/Berlin"),
("50.72043", "11.34046", "Rudolstadt", "DE", "Europe/Berlin"),
("48.49144", "9.20427", "Reutlingen", "DE", "Europe/Berlin"),
("51.20219", "7.36027", "Radevormwald", "DE", "Europe/Berlin"),
("48.46458", "9.22796", "Pfullingen", "DE", "Europe/Berlin"),
("51.30001", "13.10984", "Oschatz", "DE", "Europe/Berlin"),
("51.47805", "6.8625", "Oberhausen", "DE", "Europe/Berlin"),
("50.23805", "8.86704", "Nidderau", "DE", "Europe/Berlin"),
("48.73218", "11.18709", "Neuburg an der Donau", "DE", "Europe/Berlin"),
("47.98372", "10.18527", "Memmingen", "DE", "Europe/Berlin"),
("50.80904", "8.77069", "Marburg an der Lahn", "DE", "Europe/Berlin"),
("49.5099", "6.74549", "Losheim", "DE", "Europe/Berlin"),
("48.52961", "12.16179", "Landshut", "DE", "Europe/Berlin"),
("51.19139", "6.51352", "Korschenbroich", "DE", "Europe/Berlin"),
("52.2", "8.63333", "Kirchlengern", "DE", "Europe/Berlin"),
("50.23019", "8.77155", "Karben", "DE", "Europe/Berlin"),
("50.09019", "8.4493", "Hofheim am Taunus", "DE", "Europe/Berlin"),
("52.61131", "13.31783", "Hermsdorf", "DE", "Europe/Berlin"),
("48.35149", "8.96317", "Hechingen", "DE", "Europe/Berlin"),
("53.63333", "9.85", "Halstenbek", "DE", "Europe/Berlin"),
("52.21099", "7.02238", "Gronau", "DE", "Europe/Berlin"),
("52.47774", "10.5511", "Gifhorn", "DE", "Europe/Berlin"),
("48.06919", "11.37703", "Gauting", "DE", "Europe/Berlin"),
("48.35693", "10.98461", "Friedberg", "DE", "Europe/Berlin"),
("51.168", "7.973", "Finnentrop", "DE", "Europe/Berlin"),
("49.13645", "8.91229", "Eppingen", "DE", "Europe/Berlin"),
("48.28259", "9.72749", "Ehingen", "DE", "Europe/Berlin"),
("52.4581", "13.28702", "Dahlem", "DE", "Europe/Berlin"),
("51.08468", "7.11393", "Burscheid", "DE", "Europe/Berlin"),
("49.03685", "8.70745", "Bretten", "DE", "Europe/Berlin"),
("49.68369", "8.61839", "Bensheim", "DE", "Europe/Berlin"),
("53.94313", "10.30215", "Bad Segeberg", "DE", "Europe/Berlin"),
("50.64336", "7.2278", "Bad Honnef", "DE", "Europe/Berlin"),
("49.97704", "9.15214", "Aschaffenburg", "DE", "Europe/Berlin"),
("48.21644", "9.02596", "Albstadt", "DE", "Europe/Berlin"),
("52.53048", "13.29371", "Charlottenburg-Nord", "DE", "Europe/Berlin"),
("53.6052", "10.03988", "Barmbek-Nord", "DE", "Europe/Berlin"),
("11.15583", "42.7125", "'Ali Sabieh", "DJ", "Africa/Djibouti"),
("55.67938", "12.53463", "Frederiksberg", "DK", "Europe/Copenhagen"),
(
"18.20854",
"-71.10077",
"Santa Cruz de Barahona",
"DO",
"America/Santo_Domingo",
),
("36.76639", "3.47717", "Boumerdas", "DZ", "Africa/Algiers"),
("36.72544", "3.55665", "Thenia", "DZ", "Africa/Algiers"),
("34.15429", "3.50309", "Messaad", "DZ", "Africa/Algiers"),
("35.21222", "2.31889", "Ksar Chellala", "DZ", "Africa/Algiers"),
("35.06544", "1.04945", "Frenda", "DZ", "Africa/Algiers"),
("36.06386", "4.62744", "El Achir", "DZ", "Africa/Algiers"),
("36.76775", "2.95924", "Cheraga", "DZ", "Africa/Algiers"),
("36.27462", "4.85668", "Bordj Zemoura", "DZ", "Africa/Algiers"),
("36.61954", "4.08282", "Beni Douala", "DZ", "Africa/Algiers"),
("-2.13404", "-79.59415", "Milagro", "EC", "America/Guayaquil"),
("-2.90055", "-79.00453", "Cuenca", "EC", "America/Guayaquil"),
("59.37722", "28.19028", "Narva", "EE", "Europe/Tallinn"),
("26.67319", "31.4976", "Juhaynah", "EG", "Africa/Cairo"),
("31.20176", "29.91582", "Alexandria", "EG", "Africa/Cairo"),
("39.96348", "-4.83076", "Talavera de la Reina", "ES", "Europe/Madrid"),
("37.35813", "-6.03731", "San Juan de Aznalfarache", "ES", "Europe/Madrid"),
("38.68712", "-4.10734", "Puertollano", "ES", "Europe/Madrid"),
("38.38479", "-0.76773", "Novelda", "ES", "Europe/Madrid"),
("27.76056", "-15.58602", "Maspalomas", "ES", "Atlantic/Canary"),
("38.47917", "-1.325", "Jumilla", "ES", "Europe/Madrid"),
("38.96667", "-0.18333", "Gandia", "ES", "Europe/Madrid"),
("38.10558", "-1.86343", "Caravaca", "ES", "Europe/Madrid"),
("37.49073", "-2.77259", "Baza", "ES", "Europe/Madrid"),
("42.64685", "-5.55835", "Villaquilambre", "ES", "Europe/Madrid"),
("42.06166", "-1.60452", "Tudela", "ES", "Europe/Madrid"),
("40.42386", "-3.53261", "San Fernando de Henares", "ES", "Europe/Madrid"),
("41.15612", "1.10687", "Reus", "ES", "Europe/Madrid"),
("41.91738", "3.1631", "Palafrugell", "ES", "Europe/Madrid"),
("43.32686", "-2.98884", "Leioa", "ES", "Europe/Madrid"),
("43.31667", "-2.68333", "Gernika-Lumo", "ES", "Europe/Madrid"),
("43.48961", "-8.2194", "Ferrol", "ES", "Europe/Madrid"),
("41.63976", "2.35739", "Cardedeu", "ES", "Europe/Madrid"),
("40.70995", "0.57856", "Amposta", "ES", "Europe/Madrid"),
("37.13548", "-3.67029", "Las Gabias", "ES", "Europe/Madrid"),
("42.8139", "-1.64295", "Segundo Ensanche", "ES", "Europe/Madrid"),
("41.41204", "2.18247", "el Camp de l'Arpa del Clot", "ES", "Europe/Madrid"),
("11.85", "38.01667", "Debre Tabor", "ET", "Africa/Addis_Ababa"),
("6.03333", "37.55", "Arba Minch", "ET", "Africa/Addis_Ababa"),
("65.84811", "24.14662", "Tornio", "FI", "Europe/Helsinki"),
("60.18427", "24.95034", "Kallio", "FI", "Europe/Helsinki"),
("60.2052", "24.6522", "Espoo", "FI", "Europe/Helsinki"),
("45.51667", "4.86667", "Vienne", "FR", "Europe/Paris"),
("44.92801", "4.8951", "Valence", "FR", "Europe/Paris"),
("44.80477", "-0.59543", "Talence", "FR", "Europe/Paris"),
("48.77644", "2.29026", "Sceaux", "FR", "Europe/Paris"),
("50.75", "2.25", "Saint-Omer", "FR", "Europe/Paris"),
("45.69558", "4.7934", "Saint-Genis-Laval", "FR", "Europe/Paris"),
("48.8765", "2.18967", "Rueil-Malmaison", "FR", "Europe/Paris"),
("48", "-4.1", "Quimper", "FR", "Europe/Paris"),
("43.11667", "1.6", "Pamiers", "FR", "Europe/Paris"),
("46.32313", "-0.45877", "Niort", "FR", "Europe/Paris"),
("43.61092", "3.87723", "Montpellier", "FR", "Europe/Paris"),
("48.98333", "2.61667", "Mitry-Mory", "FR", "Europe/Paris"),
("48.86667", "2.08333", "Marly-le-Roi", "FR", "Europe/Paris"),
("46.67535", "5.55575", "Lons-le-Saunier", "FR", "Europe/Paris"),
("43.32393", "5.4584", "Les Olives", "FR", "Europe/Paris"),
("48.8222", "2.12213", "Le Chesnay", "FR", "Europe/Paris"),
("48.90472", "2.2469", "La Garenne-Colombes", "FR", "Europe/Paris"),
("48.98994", "2.1699", "Herblay", "FR", "Europe/Paris"),
("48.98693", "2.44892", "Gonesse", "FR", "Europe/Paris"),
("48.79325", "2.29275", "Fontenay-aux-Roses", "FR", "Europe/Paris"),
("49.28669", "1.00288", "Elbeuf", "FR", "Europe/Paris"),
("43.71032", "-1.05366", "Dax", "FR", "Europe/Paris"),
("43.61058", "1.33467", "Colomiers", "FR", "Europe/Paris"),
("43.83125", "5.03586", "Cavaillon", "FR", "Europe/Paris"),
("45.73333", "4.91667", "Bron", "FR", "Europe/Paris"),
("48.90982", "2.45012", "Bobigny", "FR", "Europe/Paris"),
("48.77275", "5.16108", "Bar-le-Duc", "FR", "Europe/Paris"),
("43.67681", "4.63031", "Arles", "FR", "Europe/Paris"),
("41.91886", "8.73812", "Ajaccio", "FR", "Europe/Paris"),
("43.2907", "5.4384", "Marseille 11", "FR", "Europe/Paris"),
("-1.63333", "13.58357", "Franceville", "GA", "Africa/Libreville"),
("53.19146", "-2.52398", "Winsford", "GB", "Europe/London"),
("51.26", "-2.1875", "Westbury", "GB", "Europe/London"),
("51.84819", "1.26738", "Walton-on-the-Naze", "GB", "Europe/London"),
("52.41667", "0.75", "Thetford", "GB", "Europe/London"),
("51.39323", "0.47713", "Strood", "GB", "Europe/London"),
("50.79205", "-1.08593", "Southsea", "GB", "Europe/London"),
("53.78333", "-1.06667", "Selby", "GB", "Europe/London"),
("55.82885", "-4.21376", "Rutherglen", "GB", "Europe/London"),
("53.00974", "-3.05814", "Rhosllanerchrugog", "GB", "Europe/London"),
("53.83333", "-2.98333", "Poulton-le-Fylde", "GB", "Europe/London"),
("50.11861", "-5.53715", "Penzance", "GB", "Europe/London"),
("50.82882", "-0.32247", "Lancing", "GB", "Europe/London"),
("51.40148", "-1.32471", "Newbury", "GB", "Europe/London"),
("53.49389", "-1.29243", "Mexborough", "GB", "Europe/London"),
("50.75767", "-1.5443", "Lymington", "GB", "Europe/London"),
("53.69786", "-2.68758", "Leyland", "GB", "Europe/London"),
("53.7446", "-0.33525", "Kingston upon Hull", "GB", "Europe/London"),
("57.47908", "-4.22398", "Inverness", "GB", "Europe/London"),
("51.62907", "-0.74934", "High Wycombe", "GB", "Europe/London"),
("51.38673", "0.30367", "Hartley", "GB", "Europe/London"),
("52.66277", "-2.01111", "Great Wyrley", "GB", "Europe/London"),
("53.38333", "-0.76667", "Gainsborough", "GB", "Europe/London"),
("50.7236", "-3.52751", "Exeter", "GB", "Europe/London"),
("52.68333", "0.93333", "East Dereham", "GB", "Europe/London"),
("51.35084", "-1.99421", "Devizes", "GB", "Europe/London"),
("50.76306", "-1.29772", "Cowes", "GB", "Europe/London"),
("51.78967", "1.15597", "Clacton-on-Sea", "GB", "Europe/London"),
("53.46506", "-1.47217", "Chapletown", "GB", "Europe/London"),
("51.64316", "-0.36053", "Bushey", "GB", "Europe/London"),
("52.48173", "-2.12139", "Brierley Hill", "GB", "Europe/London"),
("53.81667", "-3.05", "Blackpool", "GB", "Europe/London"),
("53.0233", "-1.48119", "Belper", "GB", "Europe/London"),
("51.65", "-0.2", "Barnet", "GB", "Europe/London"),
("56.56317", "-2.58736", "Arbroath", "GB", "Europe/London"),
("57.14369", "-2.09814", "Aberdeen", "GB", "Europe/London"),
("51.39148", "-0.29825", "Surbiton", "GB", "Europe/London"),
("51.42708", "-0.91979", "Lower Earley", "GB", "Europe/London"),
("55.82737", "-4.0573", "Viewpark", "GB", "Europe/London"),
("41.82143", "41.77921", "Kobuleti", "GE", "Asia/Tbilisi"),
("5.30383", "-1.98956", "Tarkwa", "GH", "Africa/Accra"),
("7.06273", "-1.4001", "Mampong", "GH", "Africa/Accra"),
("6.46346", "-2.31938", "Bibiani", "GH", "Africa/Accra"),
("13.56667", "-15.6", "Farafenni", "GM", "Africa/Banjul"),
("9.535", "-13.68778", "Camayenne", "GN", "Africa/Conakry"),
("14.93333", "-91.11667", "Chichicastenango", "GT", "America/Guatemala"),
("22.37066", "114.10479", "Tsuen Wan", "HK", "Asia/Hong_Kong"),
("15.48131", "-86.57415", "Olanchito", "HN", "America/Tegucigalpa"),
("43.50891", "16.43915", "Split", "HR", "Europe/Zagreb"),
("18.65297", "-72.09391", "Thomazeau", "HT", "America/Port-au-Prince"),
("18.57677", "-72.22625", "Croix-des-Bouquets", "HT", "America/Port-au-Prince"),
("3.3285", "99.1625", "Tebingtinggi", "ID", "Asia/Jakarta"),
("3.7278", "98.6738", "Labuhan Deli", "ID", "Asia/Jakarta"),
("-7.51611", "109.05389", "Wangon", "ID", "Asia/Jakarta"),
("3.31332", "117.59152", "Tarakan", "ID", "Asia/Makassar"),
("-6.91806", "106.92667", "Sukabumi", "ID", "Asia/Jakarta"),
("-1.26424", "104.09701", "Simpang", "ID", "Asia/Jakarta"),
("-7.0981", "109.3243", "Randudongkal", "ID", "Asia/Jakarta"),
("0.51667", "101.44167", "Pekanbaru", "ID", "Asia/Jakarta"),
("-7.01833", "107.60389", "Pameungpeuk", "ID", "Asia/Jakarta"),
("-8.43333", "114.33333", "Muncar", "ID", "Asia/Jakarta"),
("-3.5403", "118.9707", "Majene", "ID", "Asia/Makassar"),
("-6.8048", "110.8405", "Kudus", "ID", "Asia/Jakarta"),
("-7.81667", "112.01667", "Kediri", "ID", "Asia/Jakarta"),
("-1.6", "103.61667", "Jambi City", "ID", "Asia/Jakarta"),
("-7.57897", "112.23109", "Diwek", "ID", "Asia/Jakarta"),
("-6.48167", "106.85417", "Cibinong", "ID", "Asia/Jakarta"),
("-7.73379", "113.69785", "Besuki", "ID", "Asia/Jakarta"),
("-1.26753", "116.82887", "Balikpapan", "ID", "Asia/Makassar"),
("-7.54972", "110.71639", "Ngemplak", "ID", "Asia/Jakarta"),
("53.53333", "-7.35", "An Muileann gCearr", "IE", "Europe/Dublin"),
("53.43333", "-7.95", "Athlone", "IE", "Europe/Dublin"),
("31.92923", "34.86563", "Ramla", "IL", "Asia/Jerusalem"),
("32.05971", "34.8732", "Ganei Tikva", "IL", "Asia/Jerusalem"),
("31.39547", "34.75699", "Rahat", "IL", "Asia/Jerusalem"),
("18.87813", "72.93924", "Uran", "IN", "Asia/Kolkata"),
("10.58806", "77.24779", "Udumalaippettai", "IN", "Asia/Kolkata"),
("9.82564", "78.25795", "Tiruppuvanam", "IN", "Asia/Kolkata"),
("25.49043", "85.94001", "Teghra", "IN", "Asia/Kolkata"),
("12.04161", "75.35927", "Talipparamba", "IN", "Asia/Kolkata"),
("26.11527", "86.59509", "Supaul", "IN", "Asia/Kolkata"),
("34.08565", "74.80555", "Srinagar", "IN", "Asia/Kolkata"),
("25.92493", "73.66633", "Sojat", "IN", "Asia/Kolkata"),
("14.62072", "74.83554", "Sirsi", "IN", "Asia/Kolkata"),
("25.13915", "73.06784", "Sheoganj", "IN", "Asia/Kolkata"),
("11.50526", "77.23826", "Sathyamangalam", "IN", "Asia/Kolkata"),
("21.46527", "83.97573", "Sambalpur", "IN", "Asia/Kolkata"),
("25.87498", "86.59611", "Saharsa", "IN", "Asia/Kolkata"),
("12.95629", "78.27539", "Robertsonpet", "IN", "Asia/Kolkata"),
("26.44931", "91.61356", "Rangia", "IN", "Asia/Kolkata"),
("33.37526", "74.3092", "Rajaori", "IN", "Asia/Kolkata"),
("24.81757", "84.63445", "Rafiganj", "IN", "Asia/Kolkata"),
("18.51957", "73.85535", "Pune", "IN", "Asia/Kolkata"),
("11.93381", "79.82979", "Puducherry", "IN", "Asia/Kolkata"),
("28.71271", "77.656", "Pilkhua", "IN", "Asia/Kolkata"),
("10.12268", "77.54372", "Periyakulam", "IN", "Asia/Kolkata"),
("31.28092", "74.85849", "Patti", "IN", "Asia/Kolkata"),
("20.88098", "75.11937", "Parola", "IN", "Asia/Kolkata"),
("23.07492", "88.28637", "Pandua", "IN", "Asia/Kolkata"),
("18.18158", "76.03889", "Osmanabad", "IN", "Asia/Kolkata"),
("25.6439", "77.9129", "Narwar", "IN", "Asia/Kolkata"),
("30.81383", "75.16878", "Moga", "IN", "Asia/Kolkata"),
("28.98002", "77.70636", "Meerut", "IN", "Asia/Kolkata"),
("11.12018", "76.11996", "Manjeri", "IN", "Asia/Kolkata"),
("30.21121", "74.4818", "Malaut", "IN", "Asia/Kolkata"),
("25.92127", "86.79271", "Madhipura", "IN", "Asia/Kolkata"),
("24.05979", "77.40858", "Leteri", "IN", "Asia/Kolkata"),
("21.34222", "71.30633", "Kundla", "IN", "Asia/Kolkata"),
("22.75218", "72.68533", "Kheda", "IN", "Asia/Kolkata"),
("23.1959", "86.51499", "Kenda", "IN", "Asia/Kolkata"),
("29.21399", "78.95693", "Kashipur", "IN", "Asia/Kolkata"),
("11.00599", "77.5609", "Kangayam", "IN", "Asia/Kolkata"),
("22.88783", "84.13864", "Jashpurnagar", "IN", "Asia/Kolkata"),
("26.2649", "81.54855", "Jais", "IN", "Asia/Kolkata"),
("16.06213", "76.0586", "Hungund", "IN", "Asia/Kolkata"),
("29.22254", "79.5286", "Haldwani", "IN", "Asia/Kolkata"),
("26.76628", "83.36889", "Gorakhpur", "IN", "Asia/Kolkata"),
("12.25282", "79.41727", "Gingee", "IN", "Asia/Kolkata"),
("21.53889", "71.57737", "Gariadhar", "IN", "Asia/Kolkata"),
("15.73628", "75.96976", "Gajendragarh", "IN", "Asia/Kolkata"),
("17.54907", "82.85749", "Elamanchili", "IN", "Asia/Kolkata"),
("19.21667", "73.08333", "Dombivli", "IN", "Asia/Kolkata"),
("22.19303", "88.18466", "Diamond Harbour", "IN", "Asia/Kolkata"),
("12.1277", "78.15794", "Dharmapuri", "IN", "Asia/Kolkata"),
("25.75728", "75.37991", "Deoli", "IN", "Asia/Kolkata"),
("14.46693", "75.92694", "Davangere", "IN", "Asia/Kolkata"),
("25.66795", "85.83636", "Dalsingh Sarai", "IN", "Asia/Kolkata"),
("15.5439", "73.7553", "Calangute", "IN", "Asia/Kolkata"),
("27.9247", "78.40102", "Chharra", "IN", "Asia/Kolkata"),
("32.55531", "76.12647", "Chamba", "IN", "Asia/Kolkata"),
("20.88197", "85.83334", "Bhuban", "IN", "Asia/Kolkata"),
("19.30157", "72.85107", "Bhayandar", "IN", "Asia/Kolkata"),
("15.45144", "78.14797", "Betamcherla", "IN", "Asia/Kolkata"),
("26.32293", "91.00632", "Barpeta", "IN", "Asia/Kolkata"),
("28.92694", "78.23456", "Bachhraon", "IN", "Asia/Kolkata"),
("21.59983", "71.21169", "Amreli", "IN", "Asia/Kolkata"),
("10.10649", "76.35484", "Alwaye", "IN", "Asia/Kolkata"),
("24.41288", "76.56719", "Aklera", "IN", "Asia/Kolkata"),
("23.49668", "86.68363", "Adra", "IN", "Asia/Kolkata"),
("22.4711", "88.1453", "Pujali", "IN", "Asia/Kolkata"),
("22.10194", "85.37752", "Barbil", "IN", "Asia/Kolkata"),
("17.34769", "78.55757", "Lal Bahadur Nagar", "IN", "Asia/Kolkata"),
("23.18", "88.58", "Aistala", "IN", "Asia/Kolkata"),
("9.57046", "76.32756", "Kalavoor", "IN", "Asia/Kolkata"),
("32.61603", "44.02488", "Karbala", "IQ", "Asia/Baghdad"),
("35.6803", "51.0193", "Shahre Jadide Andisheh", "IR", "Asia/Tehran"),
("36.64852", "51.49621", "Nowshahr", "IR", "Asia/Tehran"),
("33.14447", "47.3799", "Darreh Shahr", "IR", "Asia/Tehran"),
("33.86419", "48.26258", "Aleshtar", "IR", "Asia/Tehran"),
("32.65246", "51.67462", "Isfahan", "IR", "Asia/Tehran"),
("38.07789", "13.44275", "Villabate", "IT", "Europe/Rome"),
("36.92574", "14.72443", "Ragusa", "IT", "Europe/Rome"),
("37.51803", "15.00913", "Misterbianco", "IT", "Europe/Rome"),
("37.49223", "15.07041", "Catania", "IT", "Europe/Rome"),
("37.31065", "13.57661", "Agrigento", "IT", "Europe/Rome"),
("43.78956", "7.60872", "Ventimiglia", "IT", "Europe/Rome"),
("44.89784", "8.86374", "Tortona", "IT", "Europe/Rome"),
("40.87329", "14.43865", "Somma Vesuviana", "IT", "Europe/Rome"),
("40.72586", "8.55552", "Sassari", "IT", "Europe/Rome"),
("45.39402", "9.29109", "San Giuliano Milanese", "IT", "Europe/Rome"),
("42.67164", "14.01481", "Roseto degli Abruzzi", "IT", "Europe/Rome"),
("45.78071", "12.84052", "Portogruaro", "IT", "Europe/Rome"),
("43.1122", "12.38878", "Perugia", "IT", "Europe/Rome"),
("45.44694", "8.62118", "Novara", "IT", "Europe/Rome"),
("45.50369", "11.412", "Montecchio Maggiore-Alte Ceccato", "IT", "Europe/Rome"),
("40.55851", "17.80774", "Mesagne", "IT", "Europe/Rome"),
("45.79377", "8.88104", "Malnate", "IT", "Europe/Rome"),
("42.22718", "14.39024", "Lanciano", "IT", "Europe/Rome"),
("45.53069", "9.40531", "Gorgonzola", "IT", "Europe/Rome"),
("40.53123", "17.58522", "Francavilla Fontana", "IT", "Europe/Rome"),
("43.62558", "13.39954", "Falconara Marittima", "IT", "Europe/Rome"),
("45.9836", "12.70038", "Cordenons", "IT", "Europe/Rome"),
("44.31771", "9.32241", "Chiavari", "IT", "Europe/Rome"),
("44.59445", "11.04979", "Castelfranco Emilia", "IT", "Europe/Rome"),
("41.55947", "14.66737", "Campobasso", "IT", "Europe/Rome"),
("41.24264", "16.50104", "Bisceglie", "IT", "Europe/Rome"),
("41.72063", "12.6723", "Ariccia", "IT", "Europe/Rome"),
("40.92298", "14.30935", "Afragola", "IT", "Europe/Rome"),
("40.87363", "14.34085", "Volla", "IT", "Europe/Rome"),
("18.00747", "-76.78319", "New Kingston", "JM", "America/Jamaica"),
("35.8", "137.23333", "Gero", "JP", "Asia/Tokyo"),
("34.61667", "135.6", "Yao", "JP", "Asia/Tokyo"),
("34.75856", "136.13108", "Ueno-ebisumachi", "JP", "Asia/Tokyo"),
("34.81667", "137.4", "Toyokawa", "JP", "Asia/Tokyo"),
("34.4833", "136.84186", "Toba", "JP", "Asia/Tokyo"),
("36.65", "138.31667", "Suzaka", "JP", "Asia/Tokyo"),
("34.9", "137.5", "Shinshiro", "JP", "Asia/Tokyo"),
("35.06667", "135.21667", "Sasayama", "JP", "Asia/Tokyo"),
("36", "139.55722", "Okegawa", "JP", "Asia/Tokyo"),
("36.53333", "136.61667", "Nonoichi", "JP", "Asia/Tokyo"),
("36.75965", "137.36215", "Namerikawa", "JP", "Asia/Tokyo"),
("35", "136.51667", "Komono", "JP", "Asia/Tokyo"),
("33.4425", "129.96972", "Karatsu", "JP", "Asia/Tokyo"),
("35.30889", "139.55028", "Kamakura", "JP", "Asia/Tokyo"),
("34.25", "135.31667", "Iwade", "JP", "Asia/Tokyo"),
("35.82756", "137.95378", "Ina", "JP", "Asia/Tokyo"),
("33.3213", "130.94098", "Hita", "JP", "Asia/Tokyo"),
("36.24624", "139.07204", "Fujioka", "JP", "Asia/Tokyo"),
("36.33011", "138.89585", "Annaka", "JP", "Asia/Tokyo"),
("35.815", "139.6853", "Shimotoda", "JP", "Asia/Tokyo"),
("39.46667", "141.95", "Yamada", "JP", "Asia/Tokyo"),
("37.56667", "140.11667", "Inawashiro", "JP", "Asia/Tokyo"),
("43.82634", "144.09638", "Motomachi", "JP", "Asia/Tokyo"),
("44.35056", "142.45778", "Nayoro", "JP", "Asia/Tokyo"),
("41.77583", "140.73667", "Hakodate", "JP", "Asia/Tokyo"),
("35.48199", "137.02166", "Minokamo", "JP", "Asia/Tokyo"),
("0.03813", "36.36339", "Nyahururu", "KE", "Africa/Nairobi"),
("3.11988", "35.59642", "Lodwar", "KE", "Africa/Nairobi"),
("0.46005", "34.11169", "Busia", "KE", "Africa/Nairobi"),
("40.93333", "73", "Jalal-Abad", "KG", "Asia/Bishkek"),
("13.65805", "102.56365", "Paoy Paet", "KH", "Asia/Phnom_Penh"),
("36.82167", "128.63083", "Eisen", "KR", "Asia/Seoul"),
("37.1759", "128.9889", "T’aebaek", "KR", "Asia/Seoul"),
("36.20389", "127.08472", "Nonsan", "KR", "Asia/Seoul"),
("37.65639", "126.835", "Goyang-si", "KR", "Asia/Seoul"),
("36.6009", "126.665", "Hongseong", "KR", "Asia/Seoul"),
("34.8825", "128.62667", "Sinhyeon", "KR", "Asia/Seoul"),
("47.83333", "59.6", "Shalqar", "KZ", "Asia/Aqtobe"),
("47.46657", "84.87144", "Zaysan", "KZ", "Asia/Almaty"),
("44.85278", "65.50917", "Kyzylorda", "KZ", "Asia/Qyzylorda"),
("43.41949", "77.0202", "Otegen Batyra", "KZ", "Asia/Almaty"),
("6.84019", "79.87116", "Dehiwala-Mount Lavinia", "LK", "Asia/Colombo"),
("6.9909", "79.883", "Hendala", "LK", "Asia/Colombo"),
("7.57944", "-8.53778", "New Yekepa", "LR", "Africa/Monrovia"),
("55.25", "24.75", "Ukmerge", "LT", "Europe/Vilnius"),
("54.39635", "24.04142", "Alytus", "LT", "Europe/Vilnius"),
("30.75545", "20.22625", "Ajdabiya", "LY", "Africa/Tripoli"),
("24.96334", "10.18003", "Ghat", "LY", "Africa/Tripoli"),
("33.92866", "-6.90656", "Temara", "MA", "Africa/Casablanca"),
("33.42585", "-6.00137", "Oulmes", "MA", "Africa/Casablanca"),
("34.31", "-2.16", "Jerada", "MA", "Africa/Casablanca"),
("33.43443", "-5.22126", "Azrou", "MA", "Africa/Casablanca"),
("48.15659", "28.28489", "Soroca", "MD", "Europe/Chisinau"),
("42.28639", "18.84", "Budva", "ME", "Europe/Podgorica"),
("-22.9", "44.53333", "Sakaraha", "MG", "Indian/Antananarivo"),
("-21.15", "46.58333", "Ikalamavony", "MG", "Indian/Antananarivo"),
("-19.65", "47.31667", "Antanifotsy", "MG", "Indian/Antananarivo"),
("-17.83333", "48.41667", "Ambatondrazaka", "MG", "Indian/Antananarivo"),
("42", "21.32778", "Saraj", "MK", "Europe/Skopje"),
("41.92361", "20.91361", "Bogovinje", "MK", "Europe/Skopje"),
("12.74409", "-8.07257", "Kati", "ML", "Africa/Bamako"),
("14.0823", "98.19151", "Dawei", "MM", "Asia/Yangon"),
("16.68911", "98.50893", "Myawadi", "MM", "Asia/Yangon"),
("17.30858", "97.01124", "Kyaikto", "MM", "Asia/Yangon"),
("47.90771", "106.88324", "Ulan Bator", "MN", "Asia/Ulaanbaatar"),
("14.67751", "-60.94228", "Le Robert", "MQ", "America/Martinique"),
("35.89972", "14.51472", "Valletta", "MT", "Europe/Malta"),
("-13.7804", "34.4587", "Salima", "MW", "Africa/Blantyre"),
("16.75973", "-93.11308", "Tuxtla", "MX", "America/Mexico_City"),
("19.8173", "-97.35992", "Teziutlan", "MX", "America/Mexico_City"),
("21.28306", "-89.66123", "Progreso", "MX", "America/Merida"),
("17.06542", "-96.72365", "Oaxaca", "MX", "America/Mexico_City"),
("25.87972", "-97.50417", "Heroica Matamoros", "MX", "America/Matamoros"),
("19.32932", "-98.1664", "Contla", "MX", "America/Mexico_City"),
("17.94979", "-94.91386", "Acayucan", "MX", "America/Mexico_City"),
("19.32889", "-99.32556", "San Lorenzo Acopilco", "MX", "America/Mexico_City"),
("20.22816", "-103.5687", "Zacoalco de Torres", "MX", "America/Mexico_City"),
("20.74122", "-100.44843", "Santa Rosa Jauregui", "MX", "America/Mexico_City"),
("20.21322", "-100.88023", "Salvatierra", "MX", "America/Mexico_City"),
("19.64745", "-102.04897", "Paracho de Verduzco", "MX", "America/Mexico_City"),
("20.28527", "-103.42897", "Jocotepec", "MX", "America/Mexico_City"),
("21.01858", "-101.2591", "Guanajuato", "MX", "America/Mexico_City"),
("22.49396", "-105.36369", "Acaponeta", "MX", "America/Mazatlan"),
("19.04222", "-98.11889", "Casa Blanca", "MX", "America/Mexico_City"),
("1.6561", "103.6032", "Kulai", "MY", "Asia/Kuala_Lumpur"),
("5.90702", "116.10146", "Donggongon", "MY", "Asia/Kuching"),
("4.88441", "101.96857", "Gua Musang", "MY", "Asia/Kuala_Lumpur"),
("5.4709", "100.24529", "Batu Feringgi", "MY", "Asia/Kuala_Lumpur"),
("4.02219", "101.02083", "Teluk Intan", "MY", "Asia/Kuala_Lumpur"),
("1.6", "103.81667", "Ulu Tiram", "MY", "Asia/Kuala_Lumpur"),
("2.2139", "102.3278", "Kampung Ayer Molek", "MY", "Asia/Kuala_Lumpur"),
("-23.85972", "35.34722", "Maxixe", "MZ", "Africa/Maputo"),
("-21.98333", "16.91667", "Okahandja", "NA", "Africa/Windhoek"),
("13.70727", "9.15013", "Mirriah", "NE", "Africa/Niamey"),
("4.92675", "6.26764", "Yenagoa", "NG", "Africa/Lagos"),
("6.8485", "3.64633", "Shagamu", "NG", "Africa/Lagos"),
("7.6", "4.18333", "Olupona", "NG", "Africa/Lagos"),
("6.15038", "6.83042", "Nkpor", "NG", "Africa/Lagos"),
("6.45407", "3.39467", "Lagos", "NG", "Africa/Lagos"),
("9.58126", "8.2926", "Kafanchan", "NG", "Africa/Lagos"),
("7.62789", "4.74161", "Ilesa", "NG", "Africa/Lagos"),
("7.50251", "5.06258", "Igbara-Odo", "NG", "Africa/Lagos"),
("11.86064", "9.0027", "Gaya", "NG", "Africa/Lagos"),
("7.65649", "4.92235", "Efon-Alaaye", "NG", "Africa/Lagos"),
("10.61285", "12.19458", "Biu", "NG", "Africa/Lagos"),
("12.74482", "4.52514", "Argungu", "NG", "Africa/Lagos"),
("13.48082", "-86.58208", "Somoto", "NI", "America/Managua"),
("11.84962", "-86.19903", "Jinotepe", "NI", "America/Managua"),
("52.09", "5.23333", "Zeist", "NL", "Europe/Amsterdam"),
("51.65333", "5.2875", "Vught", "NL", "Europe/Amsterdam"),
("51.44889", "5.51978", "Tongelre", "NL", "Europe/Amsterdam"),
("51.95838", "4.47124", "Schiebroek", "NL", "Europe/Amsterdam"),
("52.31333", "6.92917", "Oldenzaal", "NL", "Europe/Amsterdam"),
("52.26083", "7.00417", "Losser", "NL", "Europe/Amsterdam"),
("53.16167", "6.76111", "Hoogezand", "NL", "Europe/Amsterdam"),
("52.57583", "6.61944", "Hardenberg", "NL", "Europe/Amsterdam"),
("52.71083", "5.74861", "Emmeloord", "NL", "Europe/Amsterdam"),
("51.955", "5.22778", "Culemborg", "NL", "Europe/Amsterdam"),
("52.14", "5.58472", "Barneveld", "NL", "Europe/Amsterdam"),
("68.79833", "16.54165", "Harstad", "NO", "Europe/Oslo"),
("-44.39672", "171.25364", "Timaru", "NZ", "Pacific/Auckland"),
("-38.65333", "178.00417", "Gisborne", "NZ", "Pacific/Auckland"),
("8.88988", "-79.62603", "Veracruz", "PA", "America/Panama"),
("9.15093", "-79.62098", "Chilibre", "PA", "America/Panama"),
("-3.74912", "-73.25383", "Iquitos", "PE", "America/Lima"),
("-16.25", "-69.08333", "Yunguyo", "PE", "America/Lima"),
("-15.21194", "-75.11028", "Minas de Marcona", "PE", "America/Lima"),
("-11.94306", "-76.70944", "Chosica", "PE", "America/Lima"),
("-5.85746", "144.23058", "Mount Hagen", "PG", "Pacific/Port_Moresby"),
("6.33444", "124.95278", "Tupi", "PH", "Asia/Manila"),
("10.7375", "122.9666", "Talisay", "PH", "Asia/Manila"),
("12.97389", "123.99333", "Sorsogon", "PH", "Asia/Manila"),
("9.3337", "122.8637", "Santa Catalina", "PH", "Asia/Manila"),
("12.35275", "121.06761", "San Jose", "PH", "Asia/Manila"),
("6.95194", "121.96361", "Recodo", "PH", "Asia/Manila"),
("14.66", "120.56528", "Pilar", "PH", "Asia/Manila"),
("10.20898", "123.758", "Naga", "PH", "Asia/Manila"),
("12.37169", "123.62494", "Masbate", "PH", "Asia/Manila"),
("16.0438", "120.4861", "Manaoag", "PH", "Asia/Manila"),
("10.13361", "124.84472", "Maasin", "PH", "Asia/Manila"),
("16.455", "120.5875", "La Trinidad", "PH", "Asia/Manila"),
("9.6531", "124.3697", "Jagna", "PH", "Asia/Manila"),
("14.8361", "120.97844", "Guyong", "PH", "Asia/Manila"),
("8.56697", "123.33471", "Dipolog", "PH", "Asia/Manila"),
("10.31672", "123.89071", "Cebu City", "PH", "Asia/Manila"),
("14.14989", "121.3152", "Calauan", "PH", "Asia/Manila"),
("15.72892", "120.57224", "Burgos", "PH", "Asia/Manila"),
("14.95472", "120.89694", "Baliuag", "PH", "Asia/Manila"),
("14.62578", "121.12251", "Antipolo", "PH", "Asia/Manila"),
("27.52948", "68.75915", "Khairpur Mir’s", "PK", "Asia/Karachi"),
("26.9423", "68.11759", "Tharu Shah", "PK", "Asia/Karachi"),
("31.82539", "72.54064", "Sillanwali", "PK", "Asia/Karachi"),
("31.71667", "73.38333", "Sangla Hill", "PK", "Asia/Karachi"),
("30.29184", "71.67164", "Qadirpur Ran", "PK", "Asia/Karachi"),
("31.96258", "73.97117", "Naushahra Virkan", "PK", "Asia/Karachi"),
("32.57756", "71.52847", "Mianwali", "PK", "Asia/Karachi"),
("27.55898", "68.21204", "Larkana", "PK", "Asia/Karachi"),
("30.46907", "70.96699", "Kot Addu", "PK", "Asia/Karachi"),
("30.76468", "74.12286", "Kanganpur", "PK", "Asia/Karachi"),
("25.95533", "68.88871", "Jhol", "PK", "Asia/Karachi"),
("29.69221", "72.54566", "Hasilpur", "PK", "Asia/Karachi"),
("32.17629", "75.06583", "Fazilpur", "PK", "Asia/Karachi"),
("32.87533", "71.57118", "Daud Khel", "PK", "Asia/Karachi"),
("25.80565", "68.49143", "Bhit Shah", "PK", "Asia/Karachi"),
("29.38242", "70.91106", "Alipur", "PK", "Asia/Karachi"),
("51.14942", "15.00835", "Zgorzelec", "PL", "Europe/Warsaw"),
("54.58048", "16.86194", "Ustka", "PL", "Europe/Warsaw"),
("50.5107", "18.30056", "Strzelce Opolskie", "PL", "Europe/Warsaw"),
("54.60528", "18.34717", "Reda", "PL", "Europe/Warsaw"),
("50.20528", "19.27498", "Jaworzno", "PL", "Europe/Warsaw"),
("50.86079", "17.4674", "Brzeg", "PL", "Europe/Warsaw"),
("18.42745", "-67.15407", "Aguadilla", "PR", "America/Puerto_Rico"),
("18.03496", "-66.8499", "Yauco", "PR", "America/Puerto_Rico"),
("31.78336", "35.23388", "East Jerusalem", "PS", "Asia/Hebron"),
("38.72706", "-9.24671", "Carnaxide", "PT", "Europe/Lisbon"),
("37.08819", "-8.2503", "Albufeira", "PT", "Europe/Lisbon"),
("41.20485", "-8.33147", "Paredes", "PT", "Europe/Lisbon"),
("41.1053", "-7.32097", "Custoias", "PT", "Europe/Lisbon"),
("37.74615", "-25.66689", "Ponta Delgada", "PT", "Atlantic/Azores"),
("-20.88231", "55.4504", "Saint-Denis", "RE", "Indian/Reunion"),
("44.43579", "26.01649", "Sector 6", "RO", "Europe/Bucharest"),
("44.22639", "22.53083", "Negotin", "RS", "Europe/Belgrade"),
("44.97639", "19.61222", "Sremska Mitrovica", "RS", "Europe/Belgrade"),
("53.53395", "33.72798", "Zhukovka", "RU", "Europe/Moscow"),
("46.7055", "38.2739", "Yeysk", "RU", "Europe/Moscow"),
("44.98901", "38.94324", "Yablonovskiy", "RU", "Europe/Moscow"),
("56.03361", "35.96944", "Volokolamsk", "RU", "Europe/Moscow"),
("57.97472", "33.2525", "Valday", "RU", "Europe/Moscow"),
("56.85836", "35.90057", "Tver", "RU", "Europe/Moscow"),
("55.62047", "37.49338", "Tyoply Stan", "RU", "Europe/Moscow"),
("54.90083", "38.07083", "Stupino", "RU", "Europe/Moscow"),
("55.63711", "37.38115", "Solntsevo", "RU", "Europe/Moscow"),
("59.80917", "30.38167", "Shushary", "RU", "Europe/Moscow"),
("64.5635", "39.8302", "Severodvinsk", "RU", "Europe/Moscow"),
("51.78771", "56.36091", "Saraktash", "RU", "Asia/Yekaterinburg"),
("53.95278", "32.86389", "Roslavl’", "RU", "Europe/Moscow"),
("51.40944", "46.04833", "Privolzhskiy", "RU", "Europe/Saratov"),
("61.78491", "34.34691", "Petrozavodsk", "RU", "Europe/Moscow"),
("53.37596", "51.3452", "Otradnyy", "RU", "Europe/Samara"),
("54.48147", "53.47103", "Oktyabr’skiy", "RU", "Asia/Yekaterinburg"),
("43.96222", "43.63417", "Novopavlovsk", "RU", "Europe/Moscow"),
("53.53041", "43.67663", "Nizhniy Lomov", "RU", "Europe/Moscow"),
("55.38752", "36.73307", "Naro-Fominsk", "RU", "Europe/Moscow"),
("50.06", "43.2379", "Mikhaylovka", "RU", "Europe/Volgograd"),
("55.64776", "38.02486", "Malakhovka", "RU", "Europe/Moscow"),
("55.85", "37.56667", "Likhobory", "RU", "Europe/Moscow"),
("51.4781", "57.3552", "Kuvandyk", "RU", "Asia/Yekaterinburg"),
("44.92934", "37.99117", "Krymsk", "RU", "Europe/Moscow"),
("54.03876", "43.91385", "Kovylkino", "RU", "Europe/Moscow"),
("60.02427", "30.28491", "Kolomyagi", "RU", "Europe/Moscow"),
("53.93361", "37.92792", "Kireyevsk", "RU", "Europe/Moscow"),
("54.84444", "38.16694", "Kashira", "RU", "Europe/Moscow"),
("58.7002", "59.4839", "Kachkanar", "RU", "Asia/Yekaterinburg"),
("43.35071", "46.10925", "Gudermes", "RU", "Europe/Moscow"),
("57.30185", "39.85331", "Gavrilov-Yam", "RU", "Europe/Moscow"),
("53.59782", "34.33825", "Dyat’kovo", "RU", "Europe/Moscow"),
("58.1908", "40.17171", "Danilov", "RU", "Europe/Moscow"),
("42.819", "47.1192", "Buynaksk", "RU", "Europe/Moscow"),
("53.77166", "38.12408", "Bogoroditsk", "RU", "Europe/Moscow"),
("54.39304", "53.26023", "Bavly", "RU", "Europe/Moscow"),
("55.39485", "43.83992", "Arzamas", "RU", "Europe/Moscow"),
("54.8421", "46.5813", "Alatyr’", "RU", "Europe/Moscow"),
("58.63667", "59.80222", "Lesnoy", "RU", "Asia/Yekaterinburg"),
("55.8736", "85.4265", "Yashkino", "RU", "Asia/Novokuznetsk"),
("58.04254", "65.27258", "Tavda", "RU", "Asia/Yekaterinburg"),
("55.54028", "89.20083", "Sharypovo", "RU", "Asia/Krasnoyarsk"),
("53.30972", "83.62389", "Novosilikatnyy", "RU", "Asia/Barnaul"),
("58.23583", "92.48278", "Lesosibirsk", "RU", "Asia/Krasnoyarsk"),
("56.11281", "69.49015", "Ishim", "RU", "Asia/Yekaterinburg"),
("56.9083", "60.8019", "Beryozovsky", "RU", "Asia/Yekaterinburg"),
("55.75556", "60.70278", "Ozersk", "RU", "Asia/Yekaterinburg"),
("51.82721", "107.60627", "Ulan-Ude", "RU", "Asia/Irkutsk"),
("45.47885", "133.42825", "Lesozavodsk", "RU", "Asia/Vladivostok"),
("65.93381", "111.4834", "Aykhal", "RU", "Asia/Yakutsk"),
("53.14657", "140.72287", "Nikolayevsk-on-Amure", "RU", "Asia/Vladivostok"),
("60.97944", "76.92421", "Izluchinsk", "RU", "Asia/Yekaterinburg"),
("-1.9487", "30.4347", "Rwamagana", "RW", "Africa/Kigali"),
("27.0174", "49.62251", "Al Jubayl", "SA", "Asia/Riyadh"),
("11.8659", "34.3869", "Ar Ruseris", "SD", "Africa/Khartoum"),
("61.72744", "17.10558", "Hudiksvall", "SE", "Europe/Stockholm"),
("59.33333", "18.28333", "Boo", "SE", "Europe/Stockholm"),
("48.8449", "17.22635", "Skalica", "SK", "Europe/Bratislava"),
("48.43174", "17.8031", "Hlohovec", "SK", "Europe/Bratislava"),
("8.48714", "-13.2356", "Freetown", "SL", "Africa/Freetown"),
("-0.35817", "42.54536", "Kismayo", "SO", "Africa/Mogadishu"),
("9.89206", "43.38531", "Baki", "SO", "Africa/Mogadishu"),
("13.73417", "-89.71472", "Sonzacate", "SV", "America/El_Salvador"),
("13.70167", "-89.10944", "Ilopango", "SV", "America/El_Salvador"),
("34.5624", "38.28402", "Tadmur", "SY", "Asia/Damascus"),
("35.95664", "36.7138", "Binnish", "SY", "Asia/Damascus"),
("12.18441", "18.69303", "Mongo", "TD", "Africa/Ndjamena"),
("15.46063", "99.89166", "Thap Than", "TH", "Asia/Bangkok"),
("8.43333", "99.96667", "Nakhon Si Thammarat", "TH", "Asia/Bangkok"),
("13.51825", "99.95469", "Damnoen Saduak", "TH", "Asia/Bangkok"),
("15.79408", "104.1451", "Yasothon", "TH", "Asia/Bangkok"),
("6.25947", "102.05461", "Tak Bai", "TH", "Asia/Bangkok"),
("16.0567", "103.65309", "Roi Et", "TH", "Asia/Bangkok"),
("13.44581", "101.18445", "Phanat Nikhom", "TH", "Asia/Bangkok"),
("13.8196", "100.04427", "Nakhon Pathom", "TH", "Asia/Bangkok"),
("14.64056", "104.64992", "Kantharalak", "TH", "Asia/Bangkok"),
("15.58552", "102.42587", "Bua Yai", "TH", "Asia/Bangkok"),
("14.37395", "100.48528", "Bang Ban", "TH", "Asia/Bangkok"),
("38.55632", "69.01354", "Vahdat", "TJ", "Asia/Dushanbe"),
("-8.99167", "125.21972", "Maliana", "TL", "Asia/Dili"),
("36.08497", "9.37082", "Siliana", "TN", "Africa/Tunis"),
("35.72917", "10.58082", "Msaken", "TN", "Africa/Tunis"),
("36.46917", "10.78222", "Beni Khiar", "TN", "Africa/Tunis"),
("37.16911", "10.03478", "El Alia", "TN", "Africa/Tunis"),
("38.13708", "41.00817", "Silvan", "TR", "Europe/Istanbul"),
("39.22493", "42.85693", "Patnos", "TR", "Europe/Istanbul"),
("37.31309", "40.74357", "Mardin", "TR", "Europe/Istanbul"),
("37.58105", "29.26639", "Serinhisar", "TR", "Europe/Istanbul"),
("37.05944", "37.3825", "Gaziantep", "TR", "Europe/Istanbul"),
("39.59611", "27.02444", "Edremit", "TR", "Europe/Istanbul"),
("39.12074", "27.18052", "Bergama", "TR", "Europe/Istanbul"),
("38.37255", "34.02537", "Aksaray", "TR", "Europe/Istanbul"),
("40.98894", "28.67582", "Yakuplu", "TR", "Europe/Istanbul"),
("40.1675", "34.37389", "Sungurlu", "TR", "Europe/Istanbul"),
("40.37528", "28.88222", "Mudanya", "TR", "Europe/Istanbul"),
("10.66668", "-61.51889", "Port of Spain", "TT", "America/Port_of_Spain"),
("23.5654", "119.58627", "Magong", "TW", "Asia/Taipei"),
("-2.68333", "33", "Usagara", "TZ", "Africa/Dar_es_Salaam"),
("-4.06667", "37.73333", "Same", "TZ", "Africa/Dar_es_Salaam"),
("-6.25", "38.66667", "Mvomero", "TZ", "Africa/Dar_es_Salaam"),
("-4.83", "29.65806", "Mwandiga", "TZ", "Africa/Dar_es_Salaam"),
("-6.8", "39.25", "Magomeni", "TZ", "Africa/Dar_es_Salaam"),
("-7.60361", "37.00438", "Kidodi", "TZ", "Africa/Dar_es_Salaam"),
("-7.76667", "35.7", "Iringa", "TZ", "Africa/Dar_es_Salaam"),
("-5.41667", "38.01667", "Chanika", "TZ", "Africa/Dar_es_Salaam"),
("-10.33333", "39.28333", "Nyangao", "TZ", "Africa/Dar_es_Salaam"),
("49.07866", "30.96755", "Zvenihorodka", "UA", "Europe/Kyiv"),
("47.56494", "31.33078", "Voznesensk", "UA", "Europe/Kyiv"),
("49.41029", "38.15035", "Svatove", "UA", "Europe/Zaporozhye"),
("50.18545", "27.06365", "Shepetivka", "UA", "Europe/Kyiv"),
("47.48444", "36.25361", "Polohy", "UA", "Europe/Zaporozhye"),
("46.75451", "33.34864", "Nova Kakhovka", "UA", "Europe/Kyiv"),
("50.75932", "25.34244", "Lutsk", "UA", "Europe/Kyiv"),
("49.65186", "26.97253", "Krasyliv", "UA", "Europe/Kyiv"),
("46.65581", "32.6178", "Kherson", "UA", "Europe/Kyiv"),
("51.67822", "33.9162", "Hlukhiv", "UA", "Europe/Kyiv"),
("45.99194", "29.41824", "Artsyz", "UA", "Europe/Kyiv"),
("2.41669", "30.98551", "Paidha", "UG", "Africa/Kampala"),
("3.27833", "32.88667", "Kitgum", "UG", "Africa/Kampala"),
("3.02013", "30.91105", "Arua", "UG", "Africa/Kampala"),
("33.45122", "-86.99666", "Hueytown", "US", "America/Chicago"),
("33.44872", "-86.78777", "Vestavia Hills", "US", "America/Chicago"),
("35.25064", "-91.73625", "Searcy", "US", "America/Chicago"),
("26.68451", "-80.66756", "Belle Glade", "US", "America/New_York"),
("28.54944", "-81.77285", "Clermont", "US", "America/New_York"),
("28.90054", "-81.26367", "Deltona", "US", "America/New_York"),
("29.65163", "-82.32483", "Gainesville", "US", "America/New_York"),
("25.67927", "-80.31727", "Kendall", "US", "America/New_York"),
("28.15112", "-82.46148", "Lutz", "US", "America/New_York"),
("26.2173", "-80.22588", "North Lauderdale", "US", "America/New_York"),
("30.17746", "-81.38758", "Palm Valley", "US", "America/New_York"),
("26.91756", "-82.07842", "Punta Gorda Isles", "US", "America/New_York"),
("27.71809", "-82.35176", "Sun City Center", "US", "America/New_York"),
("27.09978", "-82.45426", "Venice", "US", "America/New_York"),
("34.06635", "-84.67837", "Acworth", "US", "America/New_York"),
("32.54044", "-82.90375", "Dublin", "US", "America/New_York"),
("33.08014", "-83.2321", "Milledgeville", "US", "America/New_York"),
("33.54428", "-84.23381", "Stockbridge", "US", "America/New_York"),
("38.58894", "-89.99038", "Fairview Heights", "US", "America/Chicago"),
("39.78504", "-85.76942", "Greenfield", "US", "America/Indiana/Indianapolis"),
("38.06084", "-97.92977", "Hutchinson", "US", "America/Chicago"),
("39.08367", "-84.50855", "Covington", "US", "America/New_York"),
("36.61033", "-88.31476", "Murray", "US", "America/Chicago"),
("29.84576", "-90.10674", "Estelle", "US", "America/Chicago"),
("32.52515", "-93.75018", "Shreveport", "US", "America/Chicago"),
("38.96372", "-76.99081", "Chillum", "US", "America/New_York"),
("38.70734", "-77.02303", "Fort Washington", "US", "America/New_York"),
("39.33427", "-76.43941", "Middle River", "US", "America/New_York"),
("39.32011", "-76.51552", "Rosedale", "US", "America/New_York"),
("39.32288", "-76.72803", "Woodlawn", "US", "America/New_York"),
("39.09112", "-94.41551", "Independence", "US", "America/Chicago"),
("37.95143", "-91.77127", "Rolla", "US", "America/Chicago"),
("33.41012", "-91.06177", "Greenville", "US", "America/Chicago"),
("34.25807", "-88.70464", "Tupelo", "US", "America/Chicago"),
("35.05266", "-78.87836", "Fayetteville", "US", "America/New_York"),
("34.25628", "-78.04471", "Leland", "US", "America/New_York"),
("35.88264", "-80.08199", "Thomasville", "US", "America/New_York"),
("39.71734", "-74.96933", "Sicklerville", "US", "America/New_York"),
("39.43534", "-84.20299", "Lebanon", "US", "America/New_York"),
("34.77453", "-96.67834", "Ada", "US", "America/Chicago"),
("35.74788", "-95.36969", "Muskogee", "US", "America/Chicago"),
("39.96097", "-75.60804", "West Chester", "US", "America/New_York"),
("33.98154", "-81.23621", "Lexington", "US", "America/New_York"),
("36.02506", "-86.77917", "Brentwood Estates", "US", "America/Chicago"),
("35.61452", "-88.81395", "Jackson", "US", "America/Chicago"),
("32.44874", "-99.73314", "Abilene", "US", "America/Chicago"),
("30.16688", "-96.39774", "Brenham", "US", "America/Chicago"),
("31.12406", "-97.90308", "Copperas Cove", "US", "America/Chicago"),
("29.53885", "-95.44744", "Fresno", "US", "America/Chicago"),
("30.5427", "-97.54667", "Hutto", "US", "America/Chicago"),
("32.5007", "-94.74049", "Longview", "US", "America/Chicago"),
("31.76212", "-95.63079", "Palestine", "US", "America/Chicago"),
("26.18924", "-98.15529", "San Juan", "US", "America/Chicago"),
("32.35126", "-95.30106", "Tyler", "US", "America/Chicago"),
("37.52487", "-77.55777", "Bon Air", "US", "America/New_York"),
("38.91817", "-78.19444", "Front Royal", "US", "America/New_York"),
("37.60876", "-77.37331", "Mechanicsville", "US", "America/New_York"),
("39.00622", "-77.4286", "Sterling", "US", "America/New_York"),
("39.45621", "-77.96389", "Martinsburg", "US", "America/New_York"),
("41.27621", "-72.86843", "East Haven", "US", "America/New_York"),
("41.14676", "-73.49484", "New Canaan", "US", "America/New_York"),
("41.55815", "-73.0515", "Waterbury", "US", "America/New_York"),
("41.6764", "-91.58045", "Coralville", "US", "America/Chicago"),
("41.57721", "-93.71133", "West Des Moines", "US", "America/Chicago"),
("41.15376", "-87.88754", "Bourbonnais", "US", "America/Chicago"),
("42.24113", "-88.3162", "Crystal Lake", "US", "America/Chicago"),
("41.72059", "-87.70172", "Evergreen Park", "US", "America/Chicago"),
("42.16808", "-88.42814", "Huntley", "US", "America/Chicago"),
("41.8542", "-87.66561", "Lower West Side", "US", "America/Chicago"),
("41.80753", "-87.65644", "New City", "US", "America/Chicago"),
("40.56754", "-89.64066", "Pekin", "US", "America/Chicago"),
("41.84364", "-87.71255", "South Lawndale", "US", "America/Chicago"),
("41.85059", "-87.882", "Westchester", "US", "America/Chicago"),
("41.75338", "-86.11084", "Granger", "US", "America/Indiana/Indianapolis"),
("41.47892", "-87.45476", "Schererville", "US", "America/Chicago"),
("42.35843", "-71.05977", "Boston", "US", "America/New_York"),
("42.58342", "-71.8023", "Fitchburg", "US", "America/New_York"),
("42.4251", "-71.06616", "Malden", "US", "America/New_York"),
("42.52787", "-70.92866", "Peabody", "US", "America/New_York"),
("41.9001", "-71.08977", "Taunton", "US", "America/New_York"),
("43.91452", "-69.96533", "Brunswick", "US", "America/New_York"),
("42.30865", "-83.48216", "Canton", "US", "America/Detroit"),
("46.09273", "-88.64235", "Iron River", "US", "America/Menominee"),
("42.97086", "-82.42491", "Port Huron", "US", "America/Detroit"),
("42.7392", "-84.62081", "Waverly", "US", "America/Detroit"),
("45.0408", "-93.263", "Columbia Heights", "US", "America/Chicago"),
("45.16024", "-93.08883", "Lino Lakes", "US", "America/Chicago"),
("44.73941", "-93.12577", "Rosemount", "US", "America/Chicago"),
("47.92526", "-97.03285", "Grand Forks", "US", "America/Chicago"),
("42.93369", "-72.27814", "Keene", "US", "America/New_York"),
("40.94065", "-73.99681", "Dumont", "US", "America/New_York"),
("40.72816", "-74.07764", "Jersey City", "US", "America/New_York"),
("40.82232", "-74.15987", "Nutley", "US", "America/New_York"),
("40.65538", "-74.38987", "Scotch Plains", "US", "America/New_York"),
("40.5576", "-74.28459", "Woodbridge", "US", "America/New_York"),
("40.57788", "-73.95958", "Brighton Beach", "US", "America/New_York"),
("40.67705", "-73.89125", "Cypress Hills", "US", "America/New_York"),
("40.60538", "-73.75513", "Far Rockaway", "US", "America/New_York"),
("40.72371", "-73.95097", "Greenpoint", "US", "America/New_York"),
("40.64621", "-73.97069", "Kensington", "US", "America/New_York"),
("40.68066", "-73.47429", "Massapequa", "US", "America/New_York"),
("41.50343", "-74.01042", "Newburgh", "US", "America/New_York"),
("40.63316", "-74.13653", "Port Richmond", "US", "America/New_York"),
("41.0051", "-73.78458", "Scarsdale", "US", "America/New_York"),
("43.1009", "-75.23266", "Utica", "US", "America/New_York"),
("40.93121", "-73.89875", "Yonkers", "US", "America/New_York"),
("41.55838", "-81.56929", "Collinwood", "US", "America/New_York"),
("41.48199", "-81.79819", "Lakewood", "US", "America/New_York"),
("41.24255", "-82.61573", "Norwalk", "US", "America/New_York"),
("41.66394", "-83.55521", "Toledo", "US", "America/New_York"),
("40.2737", "-76.88442", "Harrisburg", "US", "America/New_York"),
("40.24537", "-75.64963", "Pottstown", "US", "America/New_York"),
("41.54566", "-71.29144", "Middletown", "US", "America/New_York"),
("43.61062", "-72.97261", "Rutland", "US", "America/New_York"),
("44.27804", "-88.27205", "Kaukauna", "US", "America/Chicago"),
("42.55308", "-87.93341", "Pleasant Prairie", "US", "America/Chicago"),
("41.16704", "-73.20483", "Bridgeport", "US", "America/New_York"),
("33.35283", "-111.78903", "Gilbert", "US", "America/Phoenix"),
("33.50921", "-111.89903", "Scottsdale", "US", "America/Phoenix"),
("38.17492", "-122.2608", "American Canyon", "US", "America/Los_Angeles"),
("33.92946", "-116.97725", "Beaumont", "US", "America/Los_Angeles"),
("34.21639", "-119.0376", "Camarillo", "US", "America/Los_Angeles"),
("34.09668", "-117.71978", "Claremont", "US", "America/Los_Angeles"),
("38.54491", "-121.74052", "Davis", "US", "America/Los_Angeles"),
("33.03699", "-117.29198", "Encinitas", "US", "America/Los_Angeles"),
("34.14251", "-118.25508", "Glendale", "US", "America/Los_Angeles"),
("33.7207", "-116.21677", "Indio", "US", "America/Los_Angeles"),
("33.52253", "-117.70755", "Laguna Niguel", "US", "America/Los_Angeles"),
("34.63915", "-120.45794", "Lompoc", "US", "America/Los_Angeles"),
("32.9156", "-117.14392", "Mira Mesa", "US", "America/Los_Angeles"),
("33.93113", "-117.54866", "Norco", "US", "America/Los_Angeles"),
("33.72255", "-116.37697", "Palm Desert", "US", "America/Los_Angeles"),
("36.06523", "-119.01677", "Porterville", "US", "America/Los_Angeles"),
("37.73604", "-120.93549", "Riverbank", "US", "America/Los_Angeles"),
("34.09611", "-118.10583", "San Gabriel", "US", "America/Los_Angeles"),
("34.95303", "-120.43572", "Santa Maria", "US", "America/Los_Angeles"),
("33.95015", "-118.03917", "South Whittier", "US", "America/Los_Angeles"),
("33.76446", "-117.79394", "North Tustin", "US", "America/Los_Angeles"),
("36.91023", "-121.75689", "Watsonville", "US", "America/Los_Angeles"),
("39.72943", "-104.83192", "Aurora", "US", "America/Denver"),
("39.57582", "-105.11221", "Ken Caryl", "US", "America/Denver"),
("32.42067", "-104.22884", "Carlsbad", "US", "America/Denver"),
("36.20829", "-115.98391", "Pahrump", "US", "America/Los_Angeles"),
("31.84568", "-102.36764", "Odessa", "US", "America/Chicago"),
("40.58654", "-122.39168", "Redding", "US", "America/Los_Angeles"),
("43.54072", "-116.56346", "Nampa", "US", "America/Boise"),
("45.49428", "-122.86705", "Aloha", "US", "America/Los_Angeles"),
("44.99012", "-123.02621", "Keizer", "US", "America/Los_Angeles"),
("45.53929", "-122.38731", "Troutdale", "US", "America/Los_Angeles"),
("40.65995", "-111.99633", "Kearns", "US", "America/Denver"),
("40.34912", "-111.90466", "Saratoga Springs", "US", "America/Denver"),
("47.76232", "-122.2054", "Bothell", "US", "America/Los_Angeles"),
("47.38093", "-122.23484", "Kent", "US", "America/Los_Angeles"),
("47.64995", "-117.23991", "Opportunity", "US", "America/Los_Angeles"),
("46.32374", "-120.00865", "Sunnyside", "US", "America/Los_Angeles"),
("20.88953", "-156.47432", "Kahului", "US", "Pacific/Honolulu"),
("40.81", "-73.9625", "Morningside Heights", "US", "America/New_York"),
("43.16547", "-77.70066", "Gates-North Gates", "US", "America/New_York"),
("47.4943", "-122.24092", "Bryn Mawr-Skyway", "US", "America/Los_Angeles"),
("47.80527", "-122.24064", "Bothell West", "US", "America/Los_Angeles"),
("37.71715", "-122.40433", "Visitacion Valley", "US", "America/Los_Angeles"),
("-33.38056", "-56.52361", "Durazno", "UY", "America/Montevideo"),
("41.29444", "69.67639", "Parkent", "UZ", "Asia/Tashkent"),
("40.11583", "67.84222", "Jizzax", "UZ", "Asia/Samarkand"),
("40.78206", "72.34424", "Andijon", "UZ", "Asia/Tashkent"),
("9.91861", "-68.30472", "Tinaquillo", "VE", "America/Caracas"),
("10.22677", "-67.33122", "La Victoria", "VE", "America/Caracas"),
("8.35122", "-62.64102", "Ciudad Guayana", "VE", "America/Caracas"),
("8.62261", "-70.20749", "Barinas", "VE", "America/Caracas"),
("10.29085", "105.75635", "Sa Dec", "VN", "Asia/Ho_Chi_Minh"),
("-17.73648", "168.31366", "Port-Vila", "VU", "Pacific/Efate"),
("42.62833", "20.89389", "Glogovac", "XK", "Europe/Belgrade"),
("14.53767", "46.83187", "Ataq", "YE", "Asia/Aden"),
("-27.76952", "30.79165", "Vryheid", "ZA", "Africa/Johannesburg"),
("-26.93366", "29.24152", "Standerton", "ZA", "Africa/Johannesburg"),
("-24.19436", "29.00974", "Mokopane", "ZA", "Africa/Johannesburg"),
("12.12278", "-61.62498", "Grenville", "GD", "America/Grenada"),
)
def coordinate(self, center: Optional[float] = None, radius: Union[float, int] = 0.001) -> Decimal:
"""
Optionally center the coord and pick a point within radius.
"""
if center is None:
return Decimal(str(self.generator.random.randint(-180000000, 180000000) / 1000000)).quantize(
Decimal(".000001"),
)
else:
center = float(center)
radius = float(radius)
geo = self.generator.random.uniform(center - radius, center + radius)
return Decimal(str(geo)).quantize(Decimal(".000001"))
def latitude(self) -> Decimal:
# Latitude has a range of -90 to 90, so divide by two.
return self.coordinate() / 2
def longitude(self) -> Decimal:
return self.coordinate()
def latlng(self) -> Tuple[Decimal, Decimal]:
return (self.latitude(), self.longitude())
def local_latlng(
self,
country_code: str = "US",
coords_only: bool = False,
) -> Optional[Tuple[str, ...]]:
"""Returns a location known to exist on land in a country specified by `country_code`.
Defaults to 'en_US'. See the `land_coords` list for available locations/countries.
"""
results = [loc for loc in self.land_coords if loc[3] == country_code]
if results:
place: PlaceType = self.random_element(results)
return (place[0], place[1]) if coords_only else place
return None
def location_on_land(self, coords_only: bool = False) -> Tuple[str, ...]:
"""Returns a random tuple specifying a coordinate set guaranteed to exist on land.
Format is `(latitude, longitude, place name, two-letter country code, timezone)`
Pass `coords_only` to return coordinates without metadata.
"""
place: PlaceType = self.random_element(self.land_coords)
return (place[0], place[1]) if coords_only else place
| Provider |
python | doocs__leetcode | solution/2600-2699/2655.Find Maximal Uncovered Ranges/Solution.py | {
"start": 0,
"end": 402
} | class ____:
def findMaximalUncoveredRanges(
self, n: int, ranges: List[List[int]]
) -> List[List[int]]:
ranges.sort()
last = -1
ans = []
for l, r in ranges:
if last + 1 < l:
ans.append([last + 1, l - 1])
last = max(last, r)
if last + 1 < n:
ans.append([last + 1, n - 1])
return ans
| Solution |
python | pypa__setuptools | setuptools/tests/config/test_apply_pyprojecttoml.py | {
"start": 25979,
"end": 26734
} | class ____:
def test_version(self, tmp_path, monkeypatch, capsys):
# See pypa/setuptools#4047
# This test can be removed once the CLI interface of setup.py is removed
monkeypatch.chdir(tmp_path)
toml_config = """
[project]
name = "test"
version = "42.0"
"""
pyproject = Path(tmp_path, "pyproject.toml")
pyproject.write_text(cleandoc(toml_config), encoding="utf-8")
opts = {"script_args": ["--version"]}
dist = pyprojecttoml.apply_configuration(Distribution(opts), pyproject)
dist.parse_command_line() # <-- there should be no exception here.
captured = capsys.readouterr()
assert "42.0" in captured.out
| TestInteropCommandLineParsing |
python | pytorch__pytorch | tools/linter/adapters/shellcheck_linter.py | {
"start": 342,
"end": 2982
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def run_command(
args: list[str],
) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(
args,
capture_output=True,
)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def check_files(
files: list[str],
) -> list[LintMessage]:
try:
proc = run_command(
["shellcheck", "--external-sources", "--format=json1"] + files
)
except OSError as err:
return [
LintMessage(
path=None,
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description=(f"Failed due to {err.__class__.__name__}:\n{err}"),
)
]
stdout = str(proc.stdout, "utf-8").strip()
results = json.loads(stdout)["comments"]
return [
LintMessage(
path=result["file"],
name=f"SC{result['code']}",
description=result["message"],
line=result["line"],
char=result["column"],
code=LINTER_CODE,
severity=LintSeverity.ERROR,
original=None,
replacement=None,
)
for result in results
]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="shellcheck runner",
fromfile_prefix_chars="@",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
if shutil.which("shellcheck") is None:
err_msg = LintMessage(
path="<none>",
line=None,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="command-failed",
original=None,
replacement=None,
description="shellcheck is not installed, did you forget to run `lintrunner init`?",
)
print(json.dumps(err_msg._asdict()), flush=True)
sys.exit(0)
args = parser.parse_args()
lint_messages = check_files(args.filenames)
for lint_message in lint_messages:
print(json.dumps(lint_message._asdict()), flush=True)
| LintMessage |
python | kubernetes-client__python | kubernetes/client/models/v1_allocated_device_status.py | {
"start": 383,
"end": 10367
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'conditions': 'list[V1Condition]',
'data': 'object',
'device': 'str',
'driver': 'str',
'network_data': 'V1NetworkDeviceData',
'pool': 'str',
'share_id': 'str'
}
attribute_map = {
'conditions': 'conditions',
'data': 'data',
'device': 'device',
'driver': 'driver',
'network_data': 'networkData',
'pool': 'pool',
'share_id': 'shareID'
}
def __init__(self, conditions=None, data=None, device=None, driver=None, network_data=None, pool=None, share_id=None, local_vars_configuration=None): # noqa: E501
"""V1AllocatedDeviceStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._conditions = None
self._data = None
self._device = None
self._driver = None
self._network_data = None
self._pool = None
self._share_id = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
if data is not None:
self.data = data
self.device = device
self.driver = driver
if network_data is not None:
self.network_data = network_data
self.pool = pool
if share_id is not None:
self.share_id = share_id
@property
def conditions(self):
"""Gets the conditions of this V1AllocatedDeviceStatus. # noqa: E501
Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True. Must not contain more than 8 entries. # noqa: E501
:return: The conditions of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: list[V1Condition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1AllocatedDeviceStatus.
Conditions contains the latest observation of the device's state. If the device has been configured according to the class and claim config references, the `Ready` condition should be True. Must not contain more than 8 entries. # noqa: E501
:param conditions: The conditions of this V1AllocatedDeviceStatus. # noqa: E501
:type: list[V1Condition]
"""
self._conditions = conditions
@property
def data(self):
"""Gets the data of this V1AllocatedDeviceStatus. # noqa: E501
Data contains arbitrary driver-specific data. The length of the raw data must be smaller or equal to 10 Ki. # noqa: E501
:return: The data of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this V1AllocatedDeviceStatus.
Data contains arbitrary driver-specific data. The length of the raw data must be smaller or equal to 10 Ki. # noqa: E501
:param data: The data of this V1AllocatedDeviceStatus. # noqa: E501
:type: object
"""
self._data = data
@property
def device(self):
"""Gets the device of this V1AllocatedDeviceStatus. # noqa: E501
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:return: The device of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._device
@device.setter
def device(self, device):
"""Sets the device of this V1AllocatedDeviceStatus.
Device references one device instance via its name in the driver's resource pool. It must be a DNS label. # noqa: E501
:param device: The device of this V1AllocatedDeviceStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and device is None: # noqa: E501
raise ValueError("Invalid value for `device`, must not be `None`") # noqa: E501
self._device = device
@property
def driver(self):
"""Gets the driver of this V1AllocatedDeviceStatus. # noqa: E501
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:return: The driver of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._driver
@driver.setter
def driver(self, driver):
"""Sets the driver of this V1AllocatedDeviceStatus.
Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node. Must be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. # noqa: E501
:param driver: The driver of this V1AllocatedDeviceStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver is None: # noqa: E501
raise ValueError("Invalid value for `driver`, must not be `None`") # noqa: E501
self._driver = driver
@property
def network_data(self):
"""Gets the network_data of this V1AllocatedDeviceStatus. # noqa: E501
:return: The network_data of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: V1NetworkDeviceData
"""
return self._network_data
@network_data.setter
def network_data(self, network_data):
"""Sets the network_data of this V1AllocatedDeviceStatus.
:param network_data: The network_data of this V1AllocatedDeviceStatus. # noqa: E501
:type: V1NetworkDeviceData
"""
self._network_data = network_data
@property
def pool(self):
"""Gets the pool of this V1AllocatedDeviceStatus. # noqa: E501
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:return: The pool of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._pool
@pool.setter
def pool(self, pool):
"""Sets the pool of this V1AllocatedDeviceStatus.
This name together with the driver name and the device name field identify which device was allocated (`<driver name>/<pool name>/<device name>`). Must not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes. # noqa: E501
:param pool: The pool of this V1AllocatedDeviceStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pool is None: # noqa: E501
raise ValueError("Invalid value for `pool`, must not be `None`") # noqa: E501
self._pool = pool
@property
def share_id(self):
"""Gets the share_id of this V1AllocatedDeviceStatus. # noqa: E501
ShareID uniquely identifies an individual allocation share of the device. # noqa: E501
:return: The share_id of this V1AllocatedDeviceStatus. # noqa: E501
:rtype: str
"""
return self._share_id
@share_id.setter
def share_id(self, share_id):
"""Sets the share_id of this V1AllocatedDeviceStatus.
ShareID uniquely identifies an individual allocation share of the device. # noqa: E501
:param share_id: The share_id of this V1AllocatedDeviceStatus. # noqa: E501
:type: str
"""
self._share_id = share_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1AllocatedDeviceStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1AllocatedDeviceStatus):
return True
return self.to_dict() != other.to_dict()
| V1AllocatedDeviceStatus |
python | scrapy__scrapy | tests/test_utils_url.py | {
"start": 8049,
"end": 18112
} | class ____:
@pytest.mark.parametrize(
"url",
[
"http://www.example.com/index.html",
"http://www.example.com/index.html?somekey=somevalue",
],
)
def test_noop(self, url: str) -> None:
assert strip_url(url) == url
def test_fragments(self):
assert (
strip_url(
"http://www.example.com/index.html?somekey=somevalue#section",
strip_fragment=False,
)
== "http://www.example.com/index.html?somekey=somevalue#section"
)
@pytest.mark.parametrize(
("url", "origin", "expected"),
[
("http://www.example.com/", False, "http://www.example.com/"),
("http://www.example.com", False, "http://www.example.com"),
("http://www.example.com", True, "http://www.example.com/"),
],
)
def test_path(self, url: str, origin: bool, expected: str) -> None:
assert strip_url(url, origin_only=origin) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username@www.example.com/index.html?somekey=somevalue#section",
"http://www.example.com/index.html?somekey=somevalue",
),
(
"https://username:@www.example.com/index.html?somekey=somevalue#section",
"https://www.example.com/index.html?somekey=somevalue",
),
(
"ftp://username:password@www.example.com/index.html?somekey=somevalue#section",
"ftp://www.example.com/index.html?somekey=somevalue",
),
# user: "username@", password: none
(
"http://username%40@www.example.com/index.html?somekey=somevalue#section",
"http://www.example.com/index.html?somekey=somevalue",
),
# user: "username:pass", password: ""
(
"https://username%3Apass:@www.example.com/index.html?somekey=somevalue#section",
"https://www.example.com/index.html?somekey=somevalue",
),
# user: "me", password: "user@domain.com"
(
"ftp://me:user%40domain.com@www.example.com/index.html?somekey=somevalue#section",
"ftp://www.example.com/index.html?somekey=somevalue",
),
],
)
def test_credentials(self, url: str, expected: str) -> None:
assert strip_url(url, strip_credentials=True) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com:80/index.html?somekey=somevalue#section",
"http://www.example.com/index.html?somekey=somevalue",
),
(
"http://username:password@www.example.com:8080/index.html#section",
"http://www.example.com:8080/index.html",
),
(
"http://username:password@www.example.com:443/index.html?somekey=somevalue&someotherkey=sov#section",
"http://www.example.com:443/index.html?somekey=somevalue&someotherkey=sov",
),
(
"https://username:password@www.example.com:443/index.html",
"https://www.example.com/index.html",
),
(
"https://username:password@www.example.com:442/index.html",
"https://www.example.com:442/index.html",
),
(
"https://username:password@www.example.com:80/index.html",
"https://www.example.com:80/index.html",
),
(
"ftp://username:password@www.example.com:21/file.txt",
"ftp://www.example.com/file.txt",
),
(
"ftp://username:password@www.example.com:221/file.txt",
"ftp://www.example.com:221/file.txt",
),
],
)
def test_default_ports_creds_off(self, url: str, expected: str) -> None:
assert strip_url(url) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com:80/index.html",
"http://username:password@www.example.com/index.html",
),
(
"http://username:password@www.example.com:8080/index.html",
"http://username:password@www.example.com:8080/index.html",
),
(
"http://username:password@www.example.com:443/index.html",
"http://username:password@www.example.com:443/index.html",
),
(
"https://username:password@www.example.com:443/index.html",
"https://username:password@www.example.com/index.html",
),
(
"https://username:password@www.example.com:442/index.html",
"https://username:password@www.example.com:442/index.html",
),
(
"https://username:password@www.example.com:80/index.html",
"https://username:password@www.example.com:80/index.html",
),
(
"ftp://username:password@www.example.com:21/file.txt",
"ftp://username:password@www.example.com/file.txt",
),
(
"ftp://username:password@www.example.com:221/file.txt",
"ftp://username:password@www.example.com:221/file.txt",
),
],
)
def test_default_ports(self, url: str, expected: str) -> None:
assert (
strip_url(url, strip_default_port=True, strip_credentials=False) == expected
)
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com:80/index.html?somekey=somevalue&someotherkey=sov#section",
"http://username:password@www.example.com:80/index.html?somekey=somevalue&someotherkey=sov",
),
(
"http://username:password@www.example.com:8080/index.html?somekey=somevalue&someotherkey=sov#section",
"http://username:password@www.example.com:8080/index.html?somekey=somevalue&someotherkey=sov",
),
(
"http://username:password@www.example.com:443/index.html",
"http://username:password@www.example.com:443/index.html",
),
(
"https://username:password@www.example.com:443/index.html",
"https://username:password@www.example.com:443/index.html",
),
(
"https://username:password@www.example.com:442/index.html",
"https://username:password@www.example.com:442/index.html",
),
(
"https://username:password@www.example.com:80/index.html",
"https://username:password@www.example.com:80/index.html",
),
(
"ftp://username:password@www.example.com:21/file.txt",
"ftp://username:password@www.example.com:21/file.txt",
),
(
"ftp://username:password@www.example.com:221/file.txt",
"ftp://username:password@www.example.com:221/file.txt",
),
],
)
def test_default_ports_keep(self, url: str, expected: str) -> None:
assert (
strip_url(url, strip_default_port=False, strip_credentials=False)
== expected
)
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com/index.html",
"http://www.example.com/",
),
(
"http://username:password@www.example.com:80/foo/bar?query=value#somefrag",
"http://www.example.com/",
),
(
"http://username:password@www.example.com:8008/foo/bar?query=value#somefrag",
"http://www.example.com:8008/",
),
(
"https://username:password@www.example.com:443/index.html",
"https://www.example.com/",
),
],
)
def test_origin_only(self, url: str, expected: str) -> None:
assert strip_url(url, origin_only=True) == expected
@pytest.mark.parametrize(
("path", "expected"),
[
# https://en.wikipedia.org/wiki/Path_(computing)#Representations_of_paths_by_operating_system_and_shell
# Unix-like OS, Microsoft Windows / cmd.exe
("/home/user/docs/Letter.txt", True),
("./inthisdir", True),
("../../greatgrandparent", True),
("~/.rcinfo", True),
(r"C:\user\docs\Letter.txt", True),
("/user/docs/Letter.txt", True),
(r"C:\Letter.txt", True),
(r"\\Server01\user\docs\Letter.txt", True),
(r"\\?\UNC\Server01\user\docs\Letter.txt", True),
(r"\\?\C:\user\docs\Letter.txt", True),
(r"C:\user\docs\somefile.ext:alternate_stream_name", True),
(r"https://example.com", False),
],
)
def test__is_filesystem_path(path: str, expected: bool) -> None:
assert _is_filesystem_path(path) == expected
@pytest.mark.parametrize(
"obj_name",
[
"_unquotepath",
"_safe_chars",
"parse_url",
*_public_w3lib_objects,
],
)
def test_deprecated_imports_from_w3lib(obj_name: str) -> None:
with warnings.catch_warnings(record=True) as warns:
obj_type = "attribute" if obj_name == "_safe_chars" else "function"
message = f"The scrapy.utils.url.{obj_name} {obj_type} is deprecated, use w3lib.url.{obj_name} instead."
getattr(import_module("scrapy.utils.url"), obj_name)
assert isinstance(warns[0].message, Warning)
assert message in warns[0].message.args
| TestStripUrl |
python | apache__airflow | providers/github/src/airflow/providers/github/hooks/github.py | {
"start": 1017,
"end": 4051
} | class ____(BaseHook):
"""
Interact with GitHub.
Performs a connection to GitHub and retrieves client.
:param github_conn_id: Reference to :ref:`GitHub connection id <howto/connection:github>`.
"""
conn_name_attr = "github_conn_id"
default_conn_name = "github_default"
conn_type = "github"
hook_name = "GitHub"
def __init__(self, github_conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.github_conn_id = github_conn_id
self.client: GithubClient | None = None
self.get_conn()
def get_conn(self) -> GithubClient:
"""Initiate a new GitHub connection with token and hostname (for GitHub Enterprise)."""
if self.client is not None:
return self.client
conn = self.get_connection(self.github_conn_id)
access_token = conn.password
host = conn.host
extras = conn.extra_dejson or {}
if access_token:
auth: Auth.Auth = Auth.Token(access_token)
elif extras:
if key_path := extras.get("key_path"):
if not key_path.endswith(".pem"):
raise ValueError("Unrecognised key file: expected a .pem private key")
with open(key_path) as key_file:
private_key = key_file.read()
else:
raise ValueError("No key_path provided for GitHub App authentication.")
app_id = extras.get("app_id")
installation_id = extras.get("installation_id")
if not isinstance(installation_id, int):
raise ValueError("The provided installation_id should be integer.")
if not isinstance(app_id, (str | int)):
raise ValueError("The provided app_id should be integer or string.")
token_permissions = extras.get("token_permissions", None)
auth = Auth.AppAuth(app_id, private_key).get_installation_auth(installation_id, token_permissions)
else:
raise ValueError("No access token or authentication method provided.")
if not host:
self.client = GithubClient(auth=auth)
else:
self.client = GithubClient(auth=auth, base_url=host)
return self.client
@classmethod
def get_ui_field_behaviour(cls) -> dict:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "login", "extra"],
"relabeling": {"host": "GitHub Enterprise URL (Optional)", "password": "GitHub Access Token"},
"placeholders": {"host": "https://{hostname}/api/v3 (for GitHub Enterprise)"},
}
def test_connection(self) -> tuple[bool, str]:
"""Test GitHub connection."""
try:
if TYPE_CHECKING:
assert self.client
self.client.get_user().id
return True, "Successfully connected to GitHub."
except Exception as e:
return False, str(e)
| GithubHook |
python | pypa__pip | tests/lib/server.py | {
"start": 558,
"end": 619
} | class ____(BaseWSGIServer):
mock: Mock = Mock()
| _MockServer |
python | scipy__scipy | benchmarks/benchmarks/signal.py | {
"start": 571,
"end": 1166
} | class ____(Benchmark):
def setup(self):
rng = np.random.default_rng(5678)
# Create some long arrays for computation
x = rng.standard_normal(2**20)
y = rng.standard_normal(2**20)
self.x = x
self.y = y
def time_welch(self):
signal.welch(self.x)
def time_csd(self):
signal.csd(self.x, self.y)
def time_periodogram(self):
signal.periodogram(self.x)
def time_spectrogram(self):
signal.spectrogram(self.x)
def time_coherence(self):
signal.coherence(self.x, self.y)
| CalculateWindowedFFT |
python | kamyu104__LeetCode-Solutions | Python/final-value-of-variable-after-performing-operations.py | {
"start": 29,
"end": 253
} | class ____(object):
def finalValueAfterOperations(self, operations):
"""
:type operations: List[str]
:rtype: int
"""
return sum(1 if '+' == op[1] else -1 for op in operations)
| Solution |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/output_parsers/openai_functions.py | {
"start": 425,
"end": 3644
} | class ____(AgentOutputParser):
"""Parses a message into agent action/finish.
Is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def parse_ai_message(message: BaseMessage) -> AgentAction | AgentFinish:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
except JSONDecodeError as e:
msg = (
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg) from e
# A hack here:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
@override
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> AgentAction | AgentFinish:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg) # noqa: TRY004
message = result[0].message
return self.parse_ai_message(message)
@override
def parse(self, text: str) -> AgentAction | AgentFinish:
msg = "Can only parse messages"
raise ValueError(msg)
| OpenAIFunctionsAgentOutputParser |
python | falconry__falcon | tests/test_recipes.py | {
"start": 7040,
"end": 10947
} | class ____:
@pytest.fixture(scope='class', autouse=True)
def msgspec(self):
return pytest.importorskip(
'msgspec', reason='this recipe requires msgspec [not found]'
)
def test_basic_media_handlers(self, asgi, util):
class MediaResource:
def on_post(self, req, resp):
resp.content_type = falcon.MEDIA_TEXT
resp.text = str(req.get_media())
async def on_post_async(self, req, resp):
resp.content_type = falcon.MEDIA_TEXT
resp.text = str(await req.get_media())
json_recipe = util.load_module('examples/recipes/msgspec_json_handler.py')
msgpack_recipe = util.load_module('examples/recipes/msgspec_msgpack_handler.py')
app = util.create_app(asgi)
client = falcon.testing.TestClient(app)
msgspec_handlers = {
falcon.MEDIA_JSON: json_recipe.json_handler,
falcon.MEDIA_MSGPACK: msgpack_recipe.msgpack_handler,
}
app.req_options.media_handlers.update(msgspec_handlers)
app.resp_options.media_handlers.update(msgspec_handlers)
suffix = 'async' if asgi else None
app.add_route('/media', MediaResource(), suffix=suffix)
resp0 = client.simulate_post(
'/media', body=b'Hello: world', content_type=falcon.MEDIA_JSON
)
assert resp0.status_code == 400
resp1 = client.simulate_post('/media', json=[1, 3, 3, 7])
assert resp1.status_code == 200
assert resp1.text == '[1, 3, 3, 7]'
resp2 = client.simulate_post(
'/media', body=b'\x94\x01\x03\x03\x07', content_type=falcon.MEDIA_MSGPACK
)
assert resp2.status_code == 200
assert resp2.text == '[1, 3, 3, 7]'
resp3 = client.simulate_get('/', headers={'Accept': falcon.MEDIA_JSON})
assert resp3.status_code == 404
assert resp3.json == {'title': '404 Not Found'}
resp4 = client.simulate_get('/', headers={'Accept': falcon.MEDIA_MSGPACK})
assert resp4.status_code == 404
assert resp4.content == b'\x81\xa5title\xad404 Not Found'
def test_validation_middleware(self, util, msgspec):
mw_recipe = util.load_module('examples/recipes/msgspec_media_validation.py')
class Metadata(msgspec.Struct):
name: str
class Resource:
POST_SCHEMA = Metadata
def on_post(self, req, resp, metadata):
resp.media = msgspec.to_builtins(metadata)
app = falcon.App(middleware=[mw_recipe.MsgspecMiddleware()])
app.add_route('/meta', Resource())
resp = falcon.testing.simulate_post(app, '/meta', json={'name': 'falcon'})
assert resp.json == {'name': 'falcon'}
def test_main_app(self, util):
main_recipe = util.load_module('examples/recipes/msgspec_main.py')
client = falcon.testing.TestClient(main_recipe.application)
resp1 = client.simulate_post('/notes', json={'text': 'Test note'})
assert resp1.status_code == 201
created = resp1.json
noteid = created['noteid']
assert resp1.headers.get('Location') == f'/notes/{noteid}'
resp2 = client.simulate_post('/notes', json={'note': 'Another'})
assert resp2.status_code == 422
resp3 = client.simulate_get('/notes')
assert resp3.status_code == 200
assert resp3.json == {noteid: created}
resp4 = client.simulate_get(f'/notes/{noteid}')
assert resp4.status_code == 200
assert resp4.json == created
resp5 = client.simulate_delete(f'/notes/{noteid}')
assert resp5.status_code == 204
resp6 = client.simulate_get(f'/notes/{noteid}')
assert resp6.status_code == 404
resp7 = client.simulate_get('/notes')
assert resp7.status_code == 200
assert resp7.json == {}
| TestMsgspec |
python | ray-project__ray | python/ray/serve/tests/unit/test_router.py | {
"start": 27314,
"end": 40970
} | class ____:
@pytest.mark.asyncio
async def test_num_router_requests(self):
tags = {
"deployment": "a",
"application": "b",
"route": "/alice",
"handle": "random_handle",
"actor_id": "random_actor",
}
metrics_manager = RouterMetricsManager(
DeploymentID(name="a", app_name="b"),
"random_handle",
"random_actor",
DeploymentHandleSource.UNKNOWN,
Mock(),
FakeCounter(
tag_keys=("deployment", "route", "application", "handle", "actor_id")
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
assert metrics_manager.num_router_requests.get_count(tags) is None
n = random.randint(1, 10)
for _ in range(n):
metrics_manager.inc_num_total_requests(route="/alice")
await asyncio.sleep(RAY_SERVE_METRICS_EXPORT_INTERVAL_MS * 2 / 1000)
assert metrics_manager.num_router_requests.get_count(tags) == n
@pytest.mark.asyncio
async def test_num_queued_requests_gauge(self):
tags = {
"deployment": "a",
"application": "b",
"handle": "random_handle",
"actor_id": "random_actor",
}
metrics_manager = RouterMetricsManager(
DeploymentID(name="a", app_name="b"),
"random_handle",
"random_actor",
DeploymentHandleSource.UNKNOWN,
Mock(),
FakeCounter(
tag_keys=("deployment", "route", "application", "handle", "actor_id")
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
assert metrics_manager.num_queued_requests_gauge.get_value(tags) == 0
n, m = random.randint(0, 10), random.randint(0, 5)
for _ in range(n):
metrics_manager.inc_num_queued_requests()
await asyncio.sleep(RAY_SERVE_METRICS_EXPORT_INTERVAL_MS * 2 / 1000)
assert metrics_manager.num_queued_requests_gauge.get_value(tags) == n
for _ in range(m):
metrics_manager.dec_num_queued_requests()
await asyncio.sleep(RAY_SERVE_METRICS_EXPORT_INTERVAL_MS * 2 / 1000)
assert metrics_manager.num_queued_requests_gauge.get_value(tags) == n - m
@pytest.mark.asyncio
async def test_track_requests_sent_to_replicas(self):
d_id = DeploymentID(name="a", app_name="b")
metrics_manager = RouterMetricsManager(
d_id,
"random",
"random_actor",
DeploymentHandleSource.UNKNOWN,
Mock(),
FakeCounter(
tag_keys=("deployment", "route", "application", "handle", "actor_id")
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
# r1: number requests -> 0, removed from list of running replicas -> prune
# r2: number requests -> 0, remains on list of running replicas -> don't prune
# r3: number requests > 0, removed from list of running replicas -> don't prune
# r4: number requests > 0, remains on list of running replicas -> don't prune
replica_ids = [
ReplicaID(unique_id=f"test-replica-{i}", deployment_id=d_id)
for i in range(1, 5)
]
r1, r2, r3, r4 = replica_ids
# ri has i requests
for i in range(4):
for _ in range(i + 1):
metrics_manager.inc_num_running_requests_for_replica(replica_ids[i])
await asyncio.sleep(RAY_SERVE_METRICS_EXPORT_INTERVAL_MS * 2 / 1000)
# All 4 replicas should have a positive number of requests
for i, r in enumerate(replica_ids):
assert metrics_manager.num_requests_sent_to_replicas[r] == i + 1
assert (
metrics_manager.num_running_requests_gauge.get_value(
{
"deployment": "a",
"application": "b",
"handle": "random",
"actor_id": "random_actor",
}
)
== 10
)
# Requests at r1 and r2 drop to 0
for _ in range(1):
metrics_manager.dec_num_running_requests_for_replica(r1)
for _ in range(2):
metrics_manager.dec_num_running_requests_for_replica(r2)
await asyncio.sleep(RAY_SERVE_METRICS_EXPORT_INTERVAL_MS * 2 / 1000)
assert metrics_manager.num_requests_sent_to_replicas[r1] == 0
assert metrics_manager.num_requests_sent_to_replicas[r2] == 0
# 3 requests finished processing
assert (
metrics_manager.num_running_requests_gauge.get_value(
{
"deployment": "a",
"application": "b",
"handle": "random",
"actor_id": "random_actor",
}
)
== 7
)
# Running replicas reduces to [r2, r4]
metrics_manager._update_running_replicas(
[
running_replica_info(r2),
running_replica_info(r4),
]
)
await asyncio.sleep(RAY_SERVE_METRICS_EXPORT_INTERVAL_MS * 2 / 1000)
# Only r1 should be pruned, the rest should still be tracked.
assert r1 not in metrics_manager.num_requests_sent_to_replicas
assert r2 in metrics_manager.num_requests_sent_to_replicas
assert r3 in metrics_manager.num_requests_sent_to_replicas
assert r4 in metrics_manager.num_requests_sent_to_replicas
@pytest.mark.asyncio
async def test_should_send_scaled_to_zero_optimized_push(self):
metrics_manager = RouterMetricsManager(
DeploymentID(name="a", app_name="b"),
"random",
"random_actor",
DeploymentHandleSource.UNKNOWN,
Mock(),
FakeCounter(
tag_keys=("deployment", "route", "application", "handle", "actor_id")
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
# Not an autoscaling deployment, should not push metrics
assert not metrics_manager.should_send_scaled_to_zero_optimized_push(0)
# No queued requests at the handle, should not push metrics
metrics_manager._deployment_config = DeploymentConfig(
autoscaling_config=AutoscalingConfig()
)
assert not metrics_manager.should_send_scaled_to_zero_optimized_push(0)
# Current number of replicas is non-zero, should not push metrics
metrics_manager.inc_num_queued_requests()
assert not metrics_manager.should_send_scaled_to_zero_optimized_push(1)
# All 3 conditions satisfied, should push metrics
assert metrics_manager.should_send_scaled_to_zero_optimized_push(0)
@pytest.mark.asyncio
@patch(
"ray.serve._private.router.RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE", "1"
)
async def test_push_autoscaling_metrics_to_controller(self):
timer = MockTimer()
start = random.randint(50, 100)
timer.reset(start)
deployment_id = DeploymentID(name="a", app_name="b")
handle_id = "random"
self_actor_id = "abc"
mock_controller_handle = Mock()
with patch("time.time", new=timer.time):
metrics_manager = RouterMetricsManager(
deployment_id,
handle_id,
self_actor_id,
DeploymentHandleSource.PROXY,
mock_controller_handle,
FakeCounter(
tag_keys=(
"deployment",
"route",
"application",
"handle",
"actor_id",
)
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
metrics_manager._deployment_config = DeploymentConfig(
autoscaling_config=AutoscalingConfig()
)
# Set up some requests
n = random.randint(0, 5)
replica_ids = [
ReplicaID(get_random_string(), DeploymentID("d", "a")) for _ in range(3)
]
running_requests = defaultdict(int)
for _ in range(n):
metrics_manager.inc_num_queued_requests()
for _ in range(20):
r = random.choice(replica_ids)
running_requests[r] += 1
metrics_manager.inc_num_running_requests_for_replica(r)
# Check metrics are pushed correctly
metrics_manager.push_autoscaling_metrics_to_controller()
handle_metric_report = metrics_manager._get_metrics_report()
mock_controller_handle.record_autoscaling_metrics_from_handle.remote.assert_called_with(
handle_metric_report
)
@pytest.mark.skipif(
not RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE,
reason="Tests handle metrics behavior.",
)
@pytest.mark.asyncio
@patch(
"ray.serve._private.router.RAY_SERVE_HANDLE_AUTOSCALING_METRIC_RECORD_INTERVAL_S",
0.01,
)
async def test_memory_cleared(self):
deployment_id = DeploymentID(name="a", app_name="b")
metrics_manager = RouterMetricsManager(
deployment_id,
"some_handle",
"some_actor",
DeploymentHandleSource.PROXY,
Mock(),
FakeCounter(
tag_keys=(
"deployment",
"route",
"application",
"handle",
"actor_id",
)
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
metrics_manager.update_deployment_config(
deployment_config=DeploymentConfig(
autoscaling_config=AutoscalingConfig(look_back_period_s=0.01)
),
curr_num_replicas=0,
)
r1 = ReplicaID("r1", deployment_id)
r2 = ReplicaID("r2", deployment_id)
r3 = ReplicaID("r3", deployment_id)
def check_database(expected: Set[ReplicaID]):
assert set(metrics_manager.metrics_store.data) == expected
return True
# r1: 1
metrics_manager.inc_num_running_requests_for_replica(r1)
await async_wait_for_condition(
check_database, expected={r1, QUEUED_REQUESTS_KEY}
)
# r1: 1, r2: 0
metrics_manager.inc_num_running_requests_for_replica(r2)
await async_wait_for_condition(
check_database, expected={r1, r2, QUEUED_REQUESTS_KEY}
)
metrics_manager.dec_num_running_requests_for_replica(r2)
# r1: 1, r2: 0, r3: 0
metrics_manager.inc_num_running_requests_for_replica(r3)
await async_wait_for_condition(
check_database, expected={r1, r2, r3, QUEUED_REQUESTS_KEY}
)
metrics_manager.dec_num_running_requests_for_replica(r3)
# update running replicas {r2}
metrics_manager._update_running_replicas([running_replica_info(r2)])
await async_wait_for_condition(
check_database, expected={r1, r2, QUEUED_REQUESTS_KEY}
)
@pytest.mark.asyncio
@patch(
"ray.serve._private.router.RAY_SERVE_COLLECT_AUTOSCALING_METRICS_ON_HANDLE", "1"
)
@patch("ray.serve._private.router.MetricsPusher")
async def test_update_deployment_config(self, metrics_pusher_mock):
metrics_manager = RouterMetricsManager(
DeploymentID(name="a", app_name="b"),
"random",
"random_actor",
DeploymentHandleSource.UNKNOWN,
Mock(),
FakeCounter(
tag_keys=("deployment", "route", "application", "handle", "actor_id")
),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
FakeGauge(tag_keys=("deployment", "application", "handle", "actor_id")),
event_loop=asyncio.get_event_loop(),
)
# Without autoscaling config, do nothing
metrics_manager.update_deployment_config(DeploymentConfig(), 0)
metrics_manager.metrics_pusher.register_or_update_task.assert_not_called()
# With autoscaling config, register or update task should be called
metrics_manager.update_deployment_config(
DeploymentConfig(autoscaling_config=AutoscalingConfig()), 0
)
metrics_manager.metrics_pusher.register_or_update_task.assert_called()
| TestRouterMetricsManager |
python | huggingface__transformers | tests/models/depth_pro/test_modeling_depth_pro.py | {
"start": 7535,
"end": 12435
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as DepthPro does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (DepthProModel, DepthProForDepthEstimation) if is_torch_available() else ()
pipeline_model_mapping = (
{
"depth-estimation": DepthProForDepthEstimation,
"image-feature-extraction": DepthProModel,
}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = DepthProModelTester(self)
self.config_tester = ConfigTester(self, config_class=DepthProConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Inductor error: name 'OpaqueUnaryFn_log2' is not defined")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
@unittest.skip(reason="DepthPro does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_depth_estimation(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)
def test_for_fov(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_fov(*config_and_inputs)
def test_training(self):
for model_class in self.all_model_classes:
if model_class.__name__ == "DepthProForDepthEstimation":
continue
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class.__name__ in MODEL_MAPPING_NAMES.values():
continue
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
for model_class in self.all_model_classes:
if model_class.__name__ == "DepthProForDepthEstimation":
continue
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing:
continue
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
# this started when switched from normal initialization to kaiming_normal initialization
# maybe because the magnitude of offset values from ViT-encoders increases when followed by many convolution layers
def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
@slow
def test_model_from_pretrained(self):
model_path = "apple/DepthPro-hf"
model = DepthProModel.from_pretrained(model_path)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
@slow
| DepthProModelTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/interfaces.py | {
"start": 49336,
"end": 52228
} | class ____:
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = (
"parent_property",
"is_class_level",
"parent",
"key",
"strategy_key",
"strategy_opts",
)
_strategy_keys: ClassVar[List[_StrategyKey]]
def __init__(
self, parent: MapperProperty[Any], strategy_key: _StrategyKey
):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper: Mapper[Any]) -> None:
pass
def setup_query(
self,
compile_state: _ORMCompileState,
query_entity: _MapperEntity,
path: _AbstractEntityRegistry,
loadopt: Optional[_LoadElement],
adapter: Optional[ORMAdapter],
**kwargs: Any,
) -> None:
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(
self,
context: _ORMCompileState,
query_entity: _MapperEntity,
path: _AbstractEntityRegistry,
loadopt: Optional[_LoadElement],
mapper: Mapper[Any],
result: Result[Unpack[TupleAny]],
adapter: Optional[ORMAdapter],
populators: _PopulatorDict,
) -> None:
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self) -> str:
return str(self.parent_property)
| LoaderStrategy |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.