language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/2590. Design a Todo List/2590.py | {
"start": 60,
"end": 130
} | class ____:
taskDescription: str
dueDate: int
tags: list[str]
| Task |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 33224,
"end": 34087
} | class ____:
"""
Provides information about the backward section of an exported
joint forward-backward graph.
For a particular fx GraphModule, this class contains information on:
(1) A mapping from each gradient (backwards output) to the parameter
it corresponds to (forward input)
(2) A mapping from each gradient (backwards output) to the user input
it corresponds to (forward input)
(3) Which of the forward outputs corresponds to the loss, that we backprop on.
Each string name is the `node.name` of the corresponding node in the fx graph.
"""
gradients_to_parameters: dict[str, str]
gradients_to_user_inputs: dict[str, str]
loss_output: str
GraphOutputName = NewType("GraphOutputName", str)
GraphInputName = NewType("GraphInputName", str)
FQN = NewType("FQN", str)
@dataclass
| BackwardSignature |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataform.py | {
"start": 40035,
"end": 43089
} | class ____(GoogleCloudBaseOperator):
"""
Removes file in specified workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace where directory located.
:param filepath: Required. The full path including name of the file, relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.filepath = filepath
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.remove_file(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
filepath=self.filepath,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| DataformRemoveFileOperator |
python | doocs__leetcode | solution/0300-0399/0380.Insert Delete GetRandom O(1)/Solution.py | {
"start": 0,
"end": 747
} | class ____:
def __init__(self):
self.d = {}
self.q = []
def insert(self, val: int) -> bool:
if val in self.d:
return False
self.d[val] = len(self.q)
self.q.append(val)
return True
def remove(self, val: int) -> bool:
if val not in self.d:
return False
i = self.d[val]
self.d[self.q[-1]] = i
self.q[i] = self.q[-1]
self.q.pop()
self.d.pop(val)
return True
def getRandom(self) -> int:
return choice(self.q)
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| RandomizedSet |
python | lazyprogrammer__machine_learning_examples | tf2.0/rl_trader.py | {
"start": 6523,
"end": 11148
} | class ____(object):
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = ReplayBuffer(state_size, action_size, size=500)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.model = mlp(state_size, action_size)
def update_replay_memory(self, state, action, reward, next_state, done):
self.memory.store(state, action, reward, next_state, done)
def act(self, state):
if np.random.rand() <= self.epsilon:
return np.random.choice(self.action_size)
act_values = self.model.predict(state, verbose=0)
return np.argmax(act_values[0]) # returns action
@tf.function
def replay(self, batch_size=32):
# first check if replay buffer contains enough data
if self.memory.size < batch_size:
return
# sample a batch of data from the replay memory
minibatch = self.memory.sample_batch(batch_size)
states = minibatch['s']
actions = minibatch['a']
rewards = minibatch['r']
next_states = minibatch['s2']
done = minibatch['d']
# Calculate the tentative target: Q(s',a)
target = rewards + (1 - done) * self.gamma * np.amax(self.model.predict(next_states, verbose=0), axis=1)
# With the Keras API, the target (usually) must have the same
# shape as the predictions.
# However, we only need to update the network for the actions
# which were actually taken.
# We can accomplish this by setting the target to be equal to
# the prediction for all values.
# Then, only change the targets for the actions taken.
# Q(s,a)
target_full = self.model.predict(states, verbose=0)
target_full[np.arange(batch_size), actions] = target
# Run one training step
self.model.train_on_batch(states, target_full)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
def play_one_episode(agent, env, is_train):
# note: after transforming states are already 1xD
state = env.reset()
state = scaler.transform([state])
done = False
while not done:
action = agent.act(state)
next_state, reward, done, info = env.step(action)
next_state = scaler.transform([next_state])
if is_train == 'train':
agent.update_replay_memory(state, action, reward, next_state, done)
agent.replay(batch_size)
state = next_state
return info['cur_val']
if __name__ == '__main__':
# config
models_folder = 'rl_trader_models'
rewards_folder = 'rl_trader_rewards'
model_file = 'dqn.weights.h5'
num_episodes = 2000
batch_size = 32
initial_investment = 20000
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', type=str, required=True,
help='either "train" or "test"')
args = parser.parse_args()
maybe_make_dir(models_folder)
maybe_make_dir(rewards_folder)
data = get_data()
n_timesteps, n_stocks = data.shape
n_train = n_timesteps // 2
train_data = data[:n_train]
test_data = data[n_train:]
env = MultiStockEnv(train_data, initial_investment)
state_size = env.state_dim
action_size = len(env.action_space)
agent = DQNAgent(state_size, action_size)
scaler = get_scaler(env)
# store the final value of the portfolio (end of episode)
portfolio_value = []
if args.mode == 'test':
# then load the previous scaler
with open(f'{models_folder}/scaler.pkl', 'rb') as f:
scaler = pickle.load(f)
# remake the env with test data
env = MultiStockEnv(test_data, initial_investment)
# make sure epsilon is not 1!
# no need to run multiple episodes if epsilon = 0, it's deterministic
agent.epsilon = 0.01
# load trained weights
agent.load(f'{models_folder}/{model_file}')
# play the game num_episodes times
for e in range(num_episodes):
t0 = datetime.now()
val = play_one_episode(agent, env, args.mode)
dt = datetime.now() - t0
print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}")
portfolio_value.append(val) # append episode end portfolio value
# save the weights when we are done
if args.mode == 'train':
# save the DQN
agent.save(f'{models_folder}/{model_file}')
# save the scaler
with open(f'{models_folder}/scaler.pkl', 'wb') as f:
pickle.dump(scaler, f)
# save portfolio value for each episode
np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value) | DQNAgent |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 12666,
"end": 13046
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
text_document: TextDocumentIdentifier
@staticmethod
def from_json_rpc_parameters(
parameters: json_rpc.Parameters,
) -> "DidCloseTextDocumentParameters":
return _parse_parameters(parameters, target=DidCloseTextDocumentParameters)
@dataclasses.dataclass(frozen=True)
| DidCloseTextDocumentParameters |
python | kamyu104__LeetCode-Solutions | Python/find-edges-in-shortest-paths.py | {
"start": 227,
"end": 1350
} | class ____(object):
def findAnswer(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[bool]
"""
INF = float("inf")
def dijkstra(start):
best = [INF]*len(adj)
best[start] = 0
min_heap = [(0, start)]
while min_heap:
curr, u = heapq.heappop(min_heap)
if curr > best[u]:
continue
for v, w in adj[u]:
if best[v] <= curr+w:
continue
best[v] = curr+w
heapq.heappush(min_heap, (best[v], v))
return best
adj = [[] for _ in xrange(n)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, w))
dist1 = dijkstra(0)
dist2 = dijkstra(n-1)
return [(dist1[u] != INF != dist2[v] and dist1[u]+w+dist2[v] == dist1[n-1]) or
(dist2[u] != INF != dist1[v] and dist2[u]+w+dist1[v] == dist2[0])
for i, (u, v, w) in enumerate(edges)]
| Solution |
python | Textualize__textual | docs/examples/events/custom01.py | {
"start": 940,
"end": 1423
} | class ____(App):
def compose(self) -> ComposeResult:
yield ColorButton(Color.parse("#008080"))
yield ColorButton(Color.parse("#808000"))
yield ColorButton(Color.parse("#E9967A"))
yield ColorButton(Color.parse("#121212"))
def on_color_button_selected(self, message: ColorButton.Selected) -> None:
self.screen.styles.animate("background", message.color, duration=0.5)
if __name__ == "__main__":
app = ColorApp()
app.run()
| ColorApp |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 30202,
"end": 30749
} | class ____(PrefectFilterBaseModel):
"""Filter by `TaskRun.state_type`."""
any_: Optional[list[schemas.states.StateType]] = Field(
default=None, description="A list of task run state types to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.TaskRun.state_type.in_(self.any_))
return filters
| TaskRunFilterStateType |
python | scrapy__scrapy | tests/test_downloader_handlers.py | {
"start": 1022,
"end": 1198
} | class ____:
lazy = False
def __init__(self, crawler):
raise NotConfigured
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
| OffDH |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 19450,
"end": 19803
} | class ____(AdaptivePool):
"""Adaptive one-dimensional downsampling using maximum for the target shape."""
def __init__(self, target_shape: int | Sequence[int]):
"""**Arguments:**
- `target_shape`: The target output shape.
"""
super().__init__(target_shape, num_spatial_dims=1, operation=jnp.max)
| AdaptiveMaxPool1d |
python | sympy__sympy | sympy/stats/random_matrix_models.py | {
"start": 9522,
"end": 9682
} | class ____(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(2))
| CircularUnitaryEnsembleModel |
python | keon__algorithms | algorithms/linkedlist/partition.py | {
"start": 542,
"end": 1784
} | class ____():
def __init__(self, val=None):
self.val = int(val)
self.next = None
def print_linked_list(head):
string = ""
while head.next:
string += str(head.val) + " -> "
head = head.next
string += str(head.val)
print(string)
def partition(head, x):
left = None
right = None
prev = None
current = head
while current:
if int(current.val) >= x:
if not right:
right = current
else:
if not left:
left = current
else:
prev.next = current.next
left.next = current
left = current
left.next = right
if prev and prev.next is None:
break
# cache previous value in case it needs to be pointed elsewhere
prev = current
current = current.next
def test():
a = Node("3")
b = Node("5")
c = Node("8")
d = Node("5")
e = Node("10")
f = Node("2")
g = Node("1")
a.next = b
b.next = c
c.next = d
d.next = e
e.next = f
f.next = g
print_linked_list(a)
partition(a, 5)
print_linked_list(a)
if __name__ == '__main__':
test()
| Node |
python | MongoEngine__mongoengine | mongoengine/queryset/visitor.py | {
"start": 3502,
"end": 4868
} | class ____(QNode):
"""Represents the combination of several conditions by a given
logical operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def __repr__(self):
op = " & " if self.operation is self.AND else " | "
return "(%s)" % op.join([repr(node) for node in self.children])
def __bool__(self):
return bool(self.children)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
warn_empty_is_deprecated()
return not bool(self.children)
def __eq__(self, other):
return (
self.__class__ == other.__class__
and self.operation == other.operation
and self.children == other.children
)
| QCombination |
python | great-expectations__great_expectations | tests/integration/fluent/test_databricks_datasource.py | {
"start": 320,
"end": 2846
} | class ____:
@parameterize_batch_for_data_sources(
data_source_configs=[
DatabricksDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_unquoted_lower(self, batch_for_datasource):
"""Test Databricks with unquoted lower case table name"""
_run_checkpoint_test(batch_for_datasource, "databricks")
@parameterize_batch_for_data_sources(
data_source_configs=[
DatabricksDatasourceTestConfig(table_name=f"`{TEST_TABLE_NAME.lower()}`"),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_quoted_lower(self, batch_for_datasource):
"""Test Databricks with quoted lower case table name"""
_run_checkpoint_test(batch_for_datasource, "databricks")
@parameterize_batch_for_data_sources(
data_source_configs=[
DatabricksDatasourceTestConfig(table_name=TEST_TABLE_NAME.upper()),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_unquoted_upper(self, batch_for_datasource):
"""Test Databricks with unquoted upper case table name"""
_run_checkpoint_test(batch_for_datasource, "databricks")
@parameterize_batch_for_data_sources(
data_source_configs=[
DatabricksDatasourceTestConfig(table_name=f"`{TEST_TABLE_NAME.upper()}`"),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_quoted_upper(self, batch_for_datasource):
"""Test Databricks with quoted upper case table name"""
_run_checkpoint_test(batch_for_datasource, "databricks")
@parameterize_batch_for_data_sources(
data_source_configs=[
DatabricksDatasourceTestConfig(table_name=f"`{TEST_TABLE_NAME.title()}`"),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_quoted_mixed(self, batch_for_datasource):
"""Test Databricks with quoted mixed case table name"""
_run_checkpoint_test(batch_for_datasource, "databricks")
@parameterize_batch_for_data_sources(
data_source_configs=[
DatabricksDatasourceTestConfig(table_name=TEST_TABLE_NAME.title()),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_unquoted_mixed(self, batch_for_datasource):
"""Test Databricks with unquoted mixed case table name"""
_run_checkpoint_test(batch_for_datasource, "databricks")
| TestDatabricksTableIdentifiers |
python | kamyu104__LeetCode-Solutions | Python/generalized-abbreviation.py | {
"start": 35,
"end": 799
} | class ____(object):
def generateAbbreviations(self, word):
"""
:type word: str
:rtype: List[str]
"""
def generateAbbreviationsHelper(word, i, cur, res):
if i == len(word):
res.append("".join(cur))
return
cur.append(word[i])
generateAbbreviationsHelper(word, i + 1, cur, res)
cur.pop()
if not cur or not cur[-1][-1].isdigit():
for l in xrange(1, len(word) - i + 1):
cur.append(str(l))
generateAbbreviationsHelper(word, i + l, cur, res)
cur.pop()
res, cur = [], []
generateAbbreviationsHelper(word, 0, cur, res)
return res
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/scripting.py | {
"start": 51875,
"end": 56252
} | class ____(RegexLexer):
"""
For `Hybris <http://www.hybris-lang.org>`_ source code.
.. versionadded:: 1.4
"""
name = 'Hybris'
aliases = ['hybris', 'hy']
filenames = ['*.hy', '*.hyb']
mimetypes = ['text/x-hybris', 'application/x-hybris']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:function|method|operator\s+)+?)'
r'([a-zA-Z_]\w*)'
r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w.]*', Name.Decorator),
(r'(break|case|catch|next|default|do|else|finally|for|foreach|of|'
r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword),
(r'(extends|private|protected|public|static|throws|function|method|'
r'operator)\b', Keyword.Declaration),
(r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|'
r'__INC_PATH__)\b', Keyword.Constant),
(r'(class|struct)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(import|include)(\s+)',
bygroups(Keyword.Namespace, Text), 'import'),
(words((
'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold',
'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32',
'sha2', 'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos',
'cosh', 'exp', 'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin',
'sinh', 'sqrt', 'tan', 'tanh', 'isint', 'isfloat', 'ischar', 'isstring',
'isarray', 'ismap', 'isalias', 'typeof', 'sizeof', 'toint', 'tostring',
'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval', 'var_names',
'var_values', 'user_functions', 'dyn_functions', 'methods', 'call',
'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks',
'usleep', 'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink',
'dllcall', 'dllcall_argv', 'dllclose', 'env', 'exec', 'fork', 'getpid',
'wait', 'popen', 'pclose', 'exit', 'kill', 'pthread_create',
'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill',
'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind',
'listen', 'accept', 'getsockname', 'getpeername', 'settimeout', 'connect',
'server', 'recv', 'send', 'close', 'print', 'println', 'printf', 'input',
'readline', 'serial_open', 'serial_fcntl', 'serial_get_attr',
'serial_get_ispeed', 'serial_get_ospeed', 'serial_set_attr',
'serial_set_ispeed', 'serial_set_ospeed', 'serial_write', 'serial_read',
'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell',
'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir',
'pcre_replace', 'size', 'pop', 'unmap', 'has', 'keys', 'values',
'length', 'find', 'substr', 'replace', 'split', 'trim', 'remove',
'contains', 'join'), suffix=r'\b'),
Name.Builtin),
(words((
'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process',
'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket',
'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'),
Keyword.Type),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
(r'(\.)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_$]\w*', Name),
(r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
| HybrisLexer |
python | kamyu104__LeetCode-Solutions | Python/two-sum-less-than-k.py | {
"start": 33,
"end": 471
} | class ____(object):
def twoSumLessThanK(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
A.sort()
result = -1
left, right = 0, len(A)-1
while left < right:
if A[left]+A[right] >= K:
right -= 1
else:
result = max(result, A[left]+A[right])
left += 1
return result
| Solution |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 26217,
"end": 30807
} | class ____(ContractLogicError):
"""
An error defined in a smart contract.
"""
def __init__(
self,
abi: "ErrorABI",
inputs: dict[str, Any],
txn: Optional[FailedTxn] = None,
trace: _TRACE_ARG = None,
contract_address: Optional["AddressType"] = None,
base_err: Optional[Exception] = None,
source_traceback: _SOURCE_TRACEBACK_ARG = None,
):
self.abi = abi
self.inputs = inputs
if inputs:
message = ", ".join(sorted([f"{k}={v}" for k, v in inputs.items()]))
else:
# Name of the custom error is all custom info.
message = TransactionError.DEFAULT_MESSAGE
super().__init__(
message,
base_err=base_err,
contract_address=contract_address,
source_traceback=source_traceback,
trace=trace,
txn=txn,
)
@property
def name(self) -> str:
"""
The name of the error.
"""
return self.abi.name
def __repr__(self) -> str:
name = self.__class__.__name__ # Custom error name
calldata = ", ".join(sorted([f"{k}={v}" for k, v in self.inputs.items()])) or ""
return f"{name}({calldata})"
def _get_ape_traceback_from_tx(txn: FailedTxn) -> Optional["SourceTraceback"]:
from ape.api.transactions import ReceiptAPI
try:
receipt: ReceiptAPI = txn if isinstance(txn, ReceiptAPI) else txn.receipt # type: ignore
except Exception:
# Receipt not real enough, maybe was a re-played call.
return None
if not receipt:
return None
try:
ape_traceback = receipt.source_traceback
except (ApeException, NotImplementedError):
return None
if ape_traceback is None or not len(ape_traceback):
return None
return ape_traceback
def _get_custom_python_traceback(
err: TransactionError,
ape_traceback: "SourceTraceback",
project: Optional["ProjectManager"] = None,
) -> Optional[TracebackType]:
# Manipulate python traceback to show lines from contract.
# Help received from Jinja lib:
# https://github.com/pallets/jinja/blob/main/src/jinja2/debug.py#L142
if project is None:
from ape.utils.basemodel import ManagerAccessMixin as access
project = access.local_project
if not (base_path := getattr(project, "path", None)):
# TODO: Add support for manifest-projects.
return None
_, exc_value, tb = sys.exc_info()
depth = None
idx = len(ape_traceback) - 1
frames = []
while tb is not None:
if not tb.tb_frame.f_code.co_filename.startswith(str(base_path)):
# Ignore frames outside the project.
# This allows both contract code an scripts to appear.
tb = tb.tb_next
continue
frames.append(tb)
tb = tb.tb_next
while (depth is None or depth > 1) and idx >= 0:
exec_item = ape_traceback[idx]
if depth is not None and exec_item.depth >= depth:
# Wait for decreasing depth.
idx -= 1
continue
depth = exec_item.depth
# NOTE: Use the last lineno executed as "the line number".
lineno = exec_item.begin_lineno if exec_item.end_lineno is None else exec_item.end_lineno
if lineno is None:
idx -= 1
continue
if exec_item.source_path is None:
# File is not local. Create a temporary file in its place.
# This is necessary for tracebacks to work in Python.
temp_file = tempfile.NamedTemporaryFile(prefix="unknown_contract_")
filename = temp_file.name
else:
filename = str(exec_item.source_path)
# Raise an exception at the correct line number.
py_code: CodeType = compile(
"\n" * (lineno - 1) + "raise __ape_exception__", filename, "exec"
)
py_code = py_code.replace(co_name=exec_item.closure.name)
# Execute the new code to get a new (fake) tb with contract source info.
try:
exec(py_code, {"__ape_exception__": err}, {})
except BaseException:
real_tb = sys.exc_info()[2]
fake_tb = getattr(real_tb, "tb_next", None)
if isinstance(fake_tb, TracebackType):
frames.append(fake_tb)
idx -= 1
if not frames:
return None
tb_next = None
for tb in frames:
tb.tb_next = tb_next
tb_next = tb
return frames[-1]
| CustomError |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_return_annotation_extends.py | {
"start": 390,
"end": 719
} | class ____:
pass
def test1_alarm1() -> Test1_C1:
return _test_source()
def test1_alarm2() -> Test1_C2:
return _test_source()
def test1_alarm3() -> Test1_C3:
return _test_source()
def test1_alarm4() -> Test1_C4:
return _test_source()
def test1_noalarm1() -> Test1_C5:
return _test_source()
| Test1_C5 |
python | streamlit__streamlit | lib/tests/streamlit/elements/echo_test.py | {
"start": 750,
"end": 1565
} | class ____(DeltaGeneratorTestCase):
@parameterized.expand(
[
("code_location default", lambda: st.echo(), 0, 1),
("code_location above", lambda: st.echo("above"), 0, 1),
("code_location below", lambda: st.echo("below"), 1, 0),
]
)
def test_echo(self, _, echo, echo_index, output_index):
# The empty lines below are part of the test. Do not remove them.
with echo():
st.write("Hello")
"hi"
def foo(x):
y = x + 10
st.write(y)
class MyClass:
def do_x(self):
pass
def do_y(self):
pass
echo_str = """st.write("Hello")
"hi"
def foo(x):
y = x + 10
st.write(y)
| EchoTest |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 58938,
"end": 59425
} | class ____:
# reference values were computed with mpmath with 50 digits of precision
# from mpmath import mp
# mp.dps = 50
# mp.mpf(0.5) * (mp.erf((x - c)/mp.sqrt(2)) + mp.erf((x + c)/mp.sqrt(2)))
@pytest.mark.parametrize('x, c, ref', [(1e-4, 1e-8, 7.978845594730578e-05),
(1e-4, 1e-4, 7.97884555483635e-05)])
def test_cdf(self, x, c, ref):
assert_allclose(stats.foldnorm.cdf(x, c), ref, rtol=1e-15)
| TestFoldNorm |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/tasks.py | {
"start": 1738,
"end": 5905
} | class ____(GoogleCloudBaseOperator):
"""
Creates a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueCreateOperator`
:param location: The location name in which the queue will be created.
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"task_queue",
"project_id",
"location",
"queue_name",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
task_queue: Queue,
project_id: str = PROVIDE_PROJECT_ID,
queue_name: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.task_queue = task_queue
self.project_id = project_id
self.queue_name = queue_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
queue = hook.create_queue(
location=self.location,
task_queue=self.task_queue,
project_id=self.project_id,
queue_name=self.queue_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
if self.queue_name is None:
raise RuntimeError("The queue name should be set here!")
queue = hook.get_queue(
location=self.location,
project_id=self.project_id,
queue_name=self.queue_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
| CloudTasksQueueCreateOperator |
python | ApeWorX__ape | src/ape/utils/basemodel.py | {
"start": 7654,
"end": 8053
} | class ____(ManagerAccessMixin, ABC):
"""
Abstract class that has manager access.
"""
def _get_alt(name: str) -> Optional[str]:
alt = None
if ("-" not in name and "_" not in name) or ("-" in name and "_" in name):
alt = None
elif "-" in name:
alt = name.replace("-", "_")
elif "_" in name:
alt = name.replace("_", "-")
return alt
| BaseInterface |
python | pytorch__pytorch | test/inductor/test_minifier_utils.py | {
"start": 234,
"end": 3247
} | class ____(TestCase):
def test_invalid_output(self):
class SimpleModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(2, 2)
def forward(self, x):
# return a graph module
return self.linear
model = SimpleModel()
# Here we obtained a graph with invalid output by symbolic_trace for simplicity,
# it can also obtained from running functorch.compile.minifier on an exported graph.
traced = torch.fx.symbolic_trace(model)
for strict in [True, False]:
gm = export_for_aoti_minifier(traced, (torch.randn(2, 2),), strict=strict)
self.assertTrue(gm is None)
def test_non_exportable(self):
class SimpleModel(torch.nn.Module):
def forward(self, x):
return x.sum()
model = SimpleModel()
# Force export failure by providing an input with in-compatible shapes
inputs = (torch.randn(2), torch.randn(2))
for strict in [True, False]:
gm = export_for_aoti_minifier(
model, inputs, strict=strict, skip_export_error=True
)
print(gm)
self.assertTrue(gm is None)
with self.assertRaises(AOTIMinifierError):
export_for_aoti_minifier(
model, inputs, strict=strict, skip_export_error=False
)
def test_convert_module_to_string(self):
class M(torch.nn.Module):
def forward(self, x, flag):
flag = flag.item()
def true_fn(x):
return x.clone()
return torch.cond(flag > 0, true_fn, true_fn, [x])
inputs = (
torch.rand(28, 28),
torch.tensor(1),
)
model = M()
gm = torch.export.export(model, inputs, strict=False).module(check_guards=False)
# TODO: make NNModuleToString.convert() generate string for nested submodules.
model_string = get_module_string(gm)
self.assertExpectedInline(
model_string.strip(),
"""\
# from torch.nn import *
# class Repro(torch.nn.Module):
# def __init__(self) -> None:
# super().__init__()
# self.true_graph_0 = <lambda>()
# self.false_graph_0 = <lambda>()
# def forward(self, x, flag):
# x, flag, = fx_pytree.tree_flatten_spec(([x, flag], {}), self._in_spec)
# item = torch.ops.aten.item.default(flag); flag = None
# gt = item > 0; item = None
# true_graph_0 = self.true_graph_0
# false_graph_0 = self.false_graph_0
# cond = torch.ops.higher_order.cond(gt, true_graph_0, false_graph_0, (x,)); gt = true_graph_0 = false_graph_0 = x = None
# getitem = cond[0]; cond = None
# return pytree.tree_unflatten((getitem,), self._out_spec)""",
)
if __name__ == "__main__":
run_tests()
| MinifierUtilsTests |
python | pypa__setuptools | setuptools/tests/test_build_meta.py | {
"start": 2406,
"end": 6328
} | class ____(BuildBackendBase):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
(self.backend_name, _, self.backend_obj) = self.backend_name.partition(':')
def __call__(self, name, *args, **kw) -> Any:
"""Handles arbitrary function invocations on the build backend."""
os.chdir(self.cwd)
os.environ.update(self.env)
mod = importlib.import_module(self.backend_name)
if self.backend_obj:
backend = getattr(mod, self.backend_obj)
else:
backend = mod
return getattr(backend, name)(*args, **kw)
defns = [
{ # simple setup.py script
'setup.py': DALS(
"""
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello'],
setup_requires=['six'],
)
"""
),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
},
{ # setup.py that relies on __name__
'setup.py': DALS(
"""
assert __name__ == '__main__'
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello'],
setup_requires=['six'],
)
"""
),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
},
{ # setup.py script that runs arbitrary code
'setup.py': DALS(
"""
variable = True
def function():
return variable
assert variable
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['hello'],
setup_requires=['six'],
)
"""
),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
},
{ # setup.py script that constructs temp files to be included in the distribution
'setup.py': DALS(
"""
# Some packages construct files on the fly, include them in the package,
# and immediately remove them after `setup()` (e.g. pybind11==2.9.1).
# Therefore, we cannot use `distutils.core.run_setup(..., stop_after=...)`
# to obtain a distribution object first, and then run the distutils
# commands later, because these files will be removed in the meantime.
with open('world.py', 'w', encoding="utf-8") as f:
f.write('x = 42')
try:
__import__('setuptools').setup(
name='foo',
version='0.0.0',
py_modules=['world'],
setup_requires=['six'],
)
finally:
# Some packages will clean temporary files
__import__('os').unlink('world.py')
"""
),
},
{ # setup.cfg only
'setup.cfg': DALS(
"""
[metadata]
name = foo
version = 0.0.0
[options]
py_modules=hello
setup_requires=six
"""
),
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
},
{ # setup.cfg and setup.py
'setup.cfg': DALS(
"""
[metadata]
name = foo
version = 0.0.0
[options]
py_modules=hello
setup_requires=six
"""
),
'setup.py': "__import__('setuptools').setup()",
'hello.py': DALS(
"""
def run():
print('hello')
"""
),
},
]
| BuildBackendCaller |
python | keras-team__keras | keras/src/metrics/probabilistic_metrics_test.py | {
"start": 7372,
"end": 8939
} | class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
metrics.SparseCategoricalCrossentropy(name="scce", dtype="int32")
)
def test_unweighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
y_true = np.array([1, 2])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
result = scce_obj(y_true, y_pred)
self.assertAllClose(result, 1.176, atol=1e-3)
def test_unweighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
y_true = np.array([1, 2])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
result = scce_obj(y_true, logits)
self.assertAllClose(result, 3.5011, atol=1e-3)
def test_weighted(self):
scce_obj = metrics.SparseCategoricalCrossentropy()
y_true = np.array([1, 2])
y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
sample_weight = np.array([1.5, 2.0])
result = scce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(result, 1.338, atol=1e-3)
def test_weighted_from_logits(self):
scce_obj = metrics.SparseCategoricalCrossentropy(from_logits=True)
y_true = np.array([1, 2])
logits = np.array([[1, 9, 0], [1, 8, 1]], dtype=np.float32)
sample_weight = np.array([1.5, 2.0])
result = scce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAllClose(result, 4.0012, atol=1e-3)
| SparseCategoricalCrossentropyTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/losses_test.py | {
"start": 56885,
"end": 57504
} | class ____(test.TestCase):
def testNoCollectLossesBatch2(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
self.assertFalse(util.get_losses())
losses.absolute_difference(logits, labels, loss_collection=None)
losses.log_loss(logits, labels, loss_collection=None)
losses.mean_squared_error(logits, labels, loss_collection=None)
losses.sigmoid_cross_entropy(logits, labels, loss_collection=None)
losses.softmax_cross_entropy(logits, labels, loss_collection=None)
self.assertFalse(util.get_losses())
| AddLossTest |
python | scipy__scipy | scipy/interpolate/_cubic.py | {
"start": 5947,
"end": 13908
} | class ____(CubicHermiteSpline):
r"""PCHIP shape-preserving interpolator (C1 smooth).
``x`` and ``y`` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray, shape (npoints, )
A 1-D array of monotonically increasing real values. ``x`` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray, shape (..., npoints, ...)
An N-D array of real values. ``y``'s length along the interpolation
axis must be equal to the length of ``x``. Use the ``axis``
parameter to select the interpolation axis.
axis : int, optional
Axis in the ``y`` array corresponding to the x-coordinate values. Defaults
to ``axis=0``.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
See Also
--------
CubicHermiteSpline : Piecewise-cubic interpolator.
Akima1DInterpolator : Akima 1D interpolator.
CubicSpline : Cubic spline data interpolator.
PPoly : Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
are the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and J. Butland,
A method for constructing local
monotone piecewise cubic interpolants,
SIAM J. Sci. Comput., 5(2), 300-304 (1984).
:doi:`10.1137/0905021`.
.. [2] C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
"""
# PchipInterpolator is not generic in scipy-stubs
__class_getitem__ = None
def __init__(self, x, y, axis=0, extrapolate=None):
xp = array_namespace(x, y)
x, _, y, axis, _ = prepare_input(x, y, axis, xp=xp)
if xp.isdtype(y.dtype, "complex floating"):
msg = ("`PchipInterpolator` only works with real values for `y`. "
"If you are trying to use the real components of the passed array, "
"use `np.real` on the array before passing to `PchipInterpolator`.")
raise ValueError(msg)
xv = xp.reshape(x, (x.shape[0],) + (1,)*(y.ndim-1))
dk = self._find_derivatives(xv, y, xp=xp)
super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _edge_case(h0, h1, m0, m1, xp):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = xp.sign(d) != xp.sign(m0)
mask2 = (xp.sign(m0) != xp.sign(m1)) & (xp.abs(d) > 3.*xp.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y, xp):
# Determine the derivatives at the points y_k, d_k, by using
# PCHIP algorithm is:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = xp.zeros_like(y)
dk[0] = mk
dk[1] = mk
return xp.reshape(dk, y_shape)
smk = xp.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore', invalid='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1], xp=xp)
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2], xp=xp)
return xp.reshape(dk, y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `scipy.interpolate.PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
See Also
--------
PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
Examples
--------
We can interpolate 2D observed data using pchip interpolation:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import pchip_interpolate
>>> x_observed = np.linspace(0.0, 10.0, 11)
>>> y_observed = np.sin(x_observed)
>>> x = np.linspace(min(x_observed), max(x_observed), num=100)
>>> y = pchip_interpolate(x_observed, y_observed, x)
>>> plt.plot(x_observed, y_observed, "o", label="observation")
>>> plt.plot(x, y, label="pchip interpolation")
>>> plt.legend()
>>> plt.show()
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
@xp_capabilities(cpu_only=True, xfail_backends=[
("dask.array", "lacks nd fancy indexing"),
("jax.numpy", "immutable arrays"),
("array_api_strict", "fancy indexing __setitem__"),
])
| PchipInterpolator |
python | PyCQA__pylint | tests/functional/d/dataclass/dataclass_typecheck.py | {
"start": 2228,
"end": 2701
} | class ____(metaclass=OBJ.attr1): # [invalid-metaclass]
pass
{}[OBJ.attr0] = 1
{}[OBJ.attr1] = 1
{}[OBJ.attr5] = 1 # [unhashable-member]
for k, v in OBJ.attr5: # TODO: Should be a dict-iter-missing-items error
print(k, v)
__name__ = OBJ.attr0
__name__ = OBJ.attr1 # TODO: Should be a non-str-assignment-to-dunder-name error
__name__ = OBJ.attr2
print(isinstance(1, OBJ.attr0))
print(isinstance(1, OBJ.attr1)) # [isinstance-second-argument-not-valid-type]
| Test2 |
python | pandas-dev__pandas | pandas/compat/numpy/function.py | {
"start": 1490,
"end": 11756
} | class ____:
def __init__(
self,
defaults,
fname=None,
method: str | None = None,
max_fname_arg_count=None,
) -> None:
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(
self,
args,
kwargs,
fname=None,
max_fname_arg_count=None,
method: str | None = None,
) -> None:
if not args and not kwargs:
return None
fname = self.fname if fname is None else fname
max_fname_arg_count = (
self.max_fname_arg_count
if max_fname_arg_count is None
else max_fname_arg_count
)
method = self.method if method is None else method
if method == "args":
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == "kwargs":
validate_kwargs(fname, kwargs, self.defaults)
elif method == "both":
validate_args_and_kwargs(
fname, args, kwargs, max_fname_arg_count, self.defaults
)
else:
raise ValueError(f"invalid validation method '{method}'")
ARGMINMAX_DEFAULTS = {"out": None}
validate_argmin = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmin", method="both", max_fname_arg_count=1
)
validate_argmax = CompatValidator(
ARGMINMAX_DEFAULTS, fname="argmax", method="both", max_fname_arg_count=1
)
def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]:
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
"""
If 'Series.argmin' is called via the 'numpy' library, the third parameter
in its signature is 'out', which takes either an ndarray or 'None', so
check if the 'skipna' parameter is either an instance of ndarray or is
None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool:
"""
If 'Series.argmax' is called via the 'numpy' library, the third parameter
in its signature is 'out', which takes either an ndarray or 'None', so
check if the 'skipna' parameter is either an instance of ndarray or is
None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS: dict[str, int | str | None] = {}
ARGSORT_DEFAULTS["axis"] = -1
ARGSORT_DEFAULTS["kind"] = "quicksort"
ARGSORT_DEFAULTS["order"] = None
ARGSORT_DEFAULTS["kind"] = None
ARGSORT_DEFAULTS["stable"] = None
validate_argsort = CompatValidator(
ARGSORT_DEFAULTS, fname="argsort", max_fname_arg_count=0, method="both"
)
# two different signatures of argsort, this second validation for when the
# `kind` param is supported
ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {}
ARGSORT_DEFAULTS_KIND["axis"] = -1
ARGSORT_DEFAULTS_KIND["order"] = None
ARGSORT_DEFAULTS_KIND["stable"] = None
validate_argsort_kind = CompatValidator(
ARGSORT_DEFAULTS_KIND, fname="argsort", max_fname_arg_count=0, method="both"
)
def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool:
"""
If 'Categorical.argsort' is called via the 'numpy' library, the first
parameter in its signature is 'axis', which takes either an integer or
'None', so check if the 'ascending' parameter has either integer type or is
None, since 'ascending' itself should be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
ascending = cast(bool, ascending)
return ascending
CLIP_DEFAULTS: dict[str, Any] = {"out": None}
validate_clip = CompatValidator(
CLIP_DEFAULTS, fname="clip", method="both", max_fname_arg_count=3
)
@overload
def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: ...
@overload
def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: ...
def validate_clip_with_axis(
axis: ndarray | AxisNoneT, args, kwargs
) -> AxisNoneT | None:
"""
If 'NDFrame.clip' is called via the numpy library, the third parameter in
its signature is 'out', which can takes an ndarray, so check if the 'axis'
parameter is an instance of ndarray, since 'axis' itself should either be
an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
# error: Incompatible types in assignment (expression has type "None",
# variable has type "Union[ndarray[Any, Any], str, int]")
axis = None # type: ignore[assignment]
validate_clip(args, kwargs)
# error: Incompatible return value type (got "Union[ndarray[Any, Any],
# str, int]", expected "Union[str, int, None]")
return axis # type: ignore[return-value]
CUM_FUNC_DEFAULTS: dict[str, Any] = {}
CUM_FUNC_DEFAULTS["dtype"] = None
CUM_FUNC_DEFAULTS["out"] = None
validate_cum_func = CompatValidator(
CUM_FUNC_DEFAULTS, method="both", max_fname_arg_count=1
)
validate_cumsum = CompatValidator(
CUM_FUNC_DEFAULTS, fname="cumsum", method="both", max_fname_arg_count=1
)
def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
"""
If this function is called via the 'numpy' library, the third parameter in
its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so
check if the 'skipna' parameter is a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
elif isinstance(skipna, np.bool_):
skipna = bool(skipna)
validate_cum_func(args, kwargs, fname=name)
return skipna
ALLANY_DEFAULTS: dict[str, bool | None] = {}
ALLANY_DEFAULTS["dtype"] = None
ALLANY_DEFAULTS["out"] = None
ALLANY_DEFAULTS["keepdims"] = False
ALLANY_DEFAULTS["axis"] = None
validate_all = CompatValidator(
ALLANY_DEFAULTS, fname="all", method="both", max_fname_arg_count=1
)
validate_any = CompatValidator(
ALLANY_DEFAULTS, fname="any", method="both", max_fname_arg_count=1
)
LOGICAL_FUNC_DEFAULTS = {"out": None, "keepdims": False}
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method="kwargs")
MINMAX_DEFAULTS = {"axis": None, "dtype": None, "out": None, "keepdims": False}
validate_min = CompatValidator(
MINMAX_DEFAULTS, fname="min", method="both", max_fname_arg_count=1
)
validate_max = CompatValidator(
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
)
REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
validate_repeat = CompatValidator(
REPEAT_DEFAULTS, fname="repeat", method="both", max_fname_arg_count=1
)
ROUND_DEFAULTS: dict[str, Any] = {"out": None}
validate_round = CompatValidator(
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
)
STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
STAT_FUNC_DEFAULTS["dtype"] = None
STAT_FUNC_DEFAULTS["out"] = None
SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
SUM_DEFAULTS["axis"] = None
SUM_DEFAULTS["keepdims"] = False
SUM_DEFAULTS["initial"] = None
PROD_DEFAULTS = SUM_DEFAULTS.copy()
MEAN_DEFAULTS = SUM_DEFAULTS.copy()
MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy()
MEDIAN_DEFAULTS["overwrite_input"] = False
MEDIAN_DEFAULTS["keepdims"] = False
STAT_FUNC_DEFAULTS["keepdims"] = False
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method="kwargs")
validate_sum = CompatValidator(
SUM_DEFAULTS, fname="sum", method="both", max_fname_arg_count=1
)
validate_prod = CompatValidator(
PROD_DEFAULTS, fname="prod", method="both", max_fname_arg_count=1
)
validate_mean = CompatValidator(
MEAN_DEFAULTS, fname="mean", method="both", max_fname_arg_count=1
)
validate_median = CompatValidator(
MEDIAN_DEFAULTS, fname="median", method="both", max_fname_arg_count=1
)
STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {}
STAT_DDOF_FUNC_DEFAULTS["dtype"] = None
STAT_DDOF_FUNC_DEFAULTS["out"] = None
STAT_DDOF_FUNC_DEFAULTS["keepdims"] = False
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method="kwargs")
TAKE_DEFAULTS: dict[str, str | None] = {}
TAKE_DEFAULTS["out"] = None
TAKE_DEFAULTS["mode"] = "raise"
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
TRANSPOSE_DEFAULTS = {"axes": None}
validate_transpose = CompatValidator(
TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
)
def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:
"""
'args' and 'kwargs' should be empty, except for allowed kwargs because all
of their necessary parameters are explicitly listed in the function
signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall(
"numpy operations are not valid with groupby. "
f"Use .groupby(...).{name}() instead"
)
def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None:
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is zero
or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
ndim : int, default 1
Raises
------
ValueError
"""
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError(f"`axis` must be fewer than the number of dimensions ({ndim})")
_validation_funcs = {
"median": validate_median,
"mean": validate_mean,
"min": validate_min,
"max": validate_max,
"sum": validate_sum,
"prod": validate_prod,
}
def validate_func(fname, args, kwargs) -> None:
if fname not in _validation_funcs:
return validate_stat_func(args, kwargs, fname=fname)
validation_func = _validation_funcs[fname]
return validation_func(args, kwargs)
| CompatValidator |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/workers/cloud_run_v2.py | {
"start": 20719,
"end": 34082
} | class ____(
BaseWorker[
CloudRunWorkerJobV2Configuration,
CloudRunWorkerV2Variables,
CloudRunWorkerV2Result,
]
):
"""
The Cloud Run worker V2.
"""
type = "cloud-run-v2"
job_configuration = CloudRunWorkerJobV2Configuration
job_configuration_variables = CloudRunWorkerV2Variables
_description = "Execute flow runs within containers on Google Cloud Run (V2 API). Requires a Google Cloud Platform account." # noqa
_display_name = "Google Cloud Run V2"
_documentation_url = "https://docs.prefect.io/integrations/prefect-gcp"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/4SpnOBvMYkHp6z939MDKP6/549a91bc1ce9afd4fb12c68db7b68106/social-icon-google-cloud-1200-630.png?h=250" # noqa
async def run(
self,
flow_run: "FlowRun",
configuration: CloudRunWorkerJobV2Configuration,
task_status: Optional[TaskStatus] = None,
) -> CloudRunWorkerV2Result:
"""
Runs the flow run on Cloud Run and waits for it to complete.
Args:
flow_run: The flow run to run.
configuration: The configuration for the job.
task_status: The task status to update.
Returns:
The result of the job.
"""
logger = self.get_flow_run_logger(flow_run)
with self._get_client(configuration=configuration) as cr_client:
await run_sync_in_worker_thread(
self._create_job_and_wait_for_registration,
configuration=configuration,
cr_client=cr_client,
logger=logger,
)
execution = await run_sync_in_worker_thread(
self._begin_job_execution,
configuration=configuration,
cr_client=cr_client,
logger=logger,
)
if task_status:
task_status.started(configuration.job_name)
result = await run_sync_in_worker_thread(
self._watch_job_execution_and_get_result,
configuration=configuration,
cr_client=cr_client,
execution=execution,
logger=logger,
)
return result
@staticmethod
def _get_client(
configuration: CloudRunWorkerJobV2Configuration,
) -> ResourceWarning:
"""
Get the base client needed for interacting with GCP Cloud Run V2 API.
Returns:
Resource: The base client needed for interacting with GCP Cloud Run V2 API.
"""
api_endpoint = "https://run.googleapis.com"
gcp_creds = configuration.credentials.get_credentials_from_service_account()
options = ClientOptions(api_endpoint=api_endpoint)
return (
discovery.build(
"run",
"v2",
client_options=options,
credentials=gcp_creds,
num_retries=3, # Set to 3 in case of intermittent/connection issues
)
.projects()
.locations()
)
def _create_job_and_wait_for_registration(
self,
configuration: CloudRunWorkerJobV2Configuration,
cr_client: Resource,
logger: PrefectLogAdapter,
):
"""
Creates the Cloud Run job and waits for it to register.
Args:
configuration: The configuration for the job.
cr_client: The Cloud Run client.
logger: The logger to use.
"""
try:
logger.info(f"Creating Cloud Run JobV2 {configuration.job_name}")
JobV2.create(
cr_client=cr_client,
project=configuration.project,
location=configuration.region,
job_id=configuration.job_name,
body=configuration.job_body,
)
except HttpError as exc:
self._create_job_error(
exc=exc,
configuration=configuration,
)
try:
self._wait_for_job_creation(
cr_client=cr_client,
configuration=configuration,
logger=logger,
)
except Exception as exc:
logger.critical(
f"Failed to create Cloud Run JobV2 {configuration.job_name}.\n{exc}"
)
if not configuration.keep_job:
try:
JobV2.delete(
cr_client=cr_client,
project=configuration.project,
location=configuration.region,
job_name=configuration.job_name,
)
except Exception as exc2:
logger.critical(
f"Failed to delete Cloud Run JobV2 {configuration.job_name}."
f"\n{exc2}"
)
raise
@staticmethod
def _wait_for_job_creation(
cr_client: Resource,
configuration: CloudRunWorkerJobV2Configuration,
logger: PrefectLogAdapter,
poll_interval: int = 5,
):
"""
Waits for the Cloud Run job to be created.
Args:
cr_client: The Cloud Run client.
configuration: The configuration for the job.
logger: The logger to use.
poll_interval: The interval to poll the Cloud Run job, defaults to 5
seconds.
"""
job = JobV2.get(
cr_client=cr_client,
project=configuration.project,
location=configuration.region,
job_name=configuration.job_name,
)
while not job.is_ready():
if not (ready_condition := job.get_ready_condition()):
ready_condition = "waiting for condition update"
logger.info(f"Current Job Condition: {ready_condition}")
job = JobV2.get(
cr_client=cr_client,
project=configuration.project,
location=configuration.region,
job_name=configuration.job_name,
)
time.sleep(poll_interval)
@staticmethod
def _create_job_error(
exc: HttpError,
configuration: CloudRunWorkerJobV2Configuration,
):
"""
Creates a formatted error message for the Cloud Run V2 API errors
"""
# noinspection PyUnresolvedReferences
if exc.status_code == 404:
raise RuntimeError(
f"Failed to find resources at {exc.uri}. Confirm that region"
f" '{configuration.region}' is the correct region for your Cloud"
f" Run Job and that {configuration.project} is the correct GCP "
f" project. If your project ID is not correct, you are using a "
f"Credentials block with permissions for the wrong project."
) from exc
raise exc
def _begin_job_execution(
self,
cr_client: Resource,
configuration: CloudRunWorkerJobV2Configuration,
logger: PrefectLogAdapter,
) -> ExecutionV2:
"""
Begins the Cloud Run job execution.
Args:
cr_client: The Cloud Run client.
configuration: The configuration for the job.
logger: The logger to use.
Returns:
The Cloud Run job execution.
"""
try:
logger.info(
f"Submitting Cloud Run Job V2 {configuration.job_name} for execution..."
)
submission = JobV2.run(
cr_client=cr_client,
project=configuration.project,
location=configuration.region,
job_name=configuration.job_name,
)
job_execution = ExecutionV2.get(
cr_client=cr_client,
execution_id=submission["metadata"]["name"],
)
command = (
" ".join(configuration.command)
if configuration.command
else "default container command"
)
logger.info(
f"Cloud Run Job V2 {configuration.job_name} submitted for execution "
f"with command: {command}"
)
return job_execution
except Exception as exc:
self._job_run_submission_error(
exc=exc,
configuration=configuration,
)
raise
def _watch_job_execution_and_get_result(
self,
cr_client: Resource,
configuration: CloudRunWorkerJobV2Configuration,
execution: ExecutionV2,
logger: PrefectLogAdapter,
poll_interval: int = 5,
) -> CloudRunWorkerV2Result:
"""
Watch the job execution and get the result.
Args:
cr_client (Resource): The base client needed for interacting with GCP
Cloud Run V2 API.
configuration (CloudRunWorkerJobV2Configuration): The configuration for
the job.
execution (ExecutionV2): The execution to watch.
logger (PrefectLogAdapter): The logger to use.
poll_interval (int): The number of seconds to wait between polls.
Defaults to 5 seconds.
Returns:
The result of the job.
"""
try:
execution = self._watch_job_execution(
cr_client=cr_client,
configuration=configuration,
execution=execution,
poll_interval=poll_interval,
)
except Exception as exc:
logger.critical(
f"Encountered an exception while waiting for job run completion - {exc}"
)
raise
if execution.succeeded():
status_code = 0
logger.info(f"Cloud Run Job V2 {configuration.job_name} succeeded")
else:
status_code = 1
error_mg = execution.condition_after_completion().get("message")
logger.error(
f"Cloud Run Job V2 {configuration.job_name} failed - {error_mg}"
)
logger.info(f"Job run logs can be found on GCP at: {execution.logUri}")
if not configuration.keep_job:
logger.info(
f"Deleting completed Cloud Run Job {configuration.job_name!r} from "
"Google Cloud Run..."
)
try:
JobV2.delete(
cr_client=cr_client,
project=configuration.project,
location=configuration.region,
job_name=configuration.job_name,
)
except Exception as exc:
logger.critical(
"Received an exception while deleting the Cloud Run Job V2 "
f"- {configuration.job_name} - {exc}"
)
return CloudRunWorkerV2Result(
identifier=configuration.job_name,
status_code=status_code,
)
# noinspection DuplicatedCode
@staticmethod
def _watch_job_execution(
cr_client: Resource,
configuration: CloudRunWorkerJobV2Configuration,
execution: ExecutionV2,
poll_interval: int,
) -> ExecutionV2:
"""
Update execution status until it is no longer running.
Args:
cr_client (Resource): The base client needed for interacting with GCP
Cloud Run V2 API.
configuration (CloudRunWorkerJobV2Configuration): The configuration for
the job.
execution (ExecutionV2): The execution to watch.
poll_interval (int): The number of seconds to wait between polls.
Returns:
The execution.
"""
while execution.is_running():
execution = ExecutionV2.get(
cr_client=cr_client,
execution_id=execution.name,
)
time.sleep(poll_interval)
return execution
@staticmethod
def _job_run_submission_error(
exc: Exception,
configuration: CloudRunWorkerJobV2Configuration,
):
"""
Creates a formatted error message for the Cloud Run V2 API errors
Args:
exc: The exception to format.
configuration: The configuration for the job.
"""
# noinspection PyUnresolvedReferences
if exc.status_code == 404:
pat1 = r"The requested URL [^ ]+ was not found on this server"
if re.findall(pat1, str(exc)):
# noinspection PyUnresolvedReferences
raise RuntimeError(
f"Failed to find resources at {exc.uri}. "
f"Confirm that region '{configuration.region}' is "
f"the correct region for your Cloud Run Job "
f"and that '{configuration.project}' is the "
f"correct GCP project. If your project ID is not "
f"correct, you are using a Credentials "
f"block with permissions for the wrong project."
) from exc
else:
raise exc
| CloudRunWorkerV2 |
python | google__pytype | pytype/rewrite/tests/test_utils.py | {
"start": 355,
"end": 481
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.ctx = context.Context(src='')
| ContextfulTestBase |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 32900,
"end": 33139
} | class ____(UnspecializedNNModuleSource):
def guard_source(self) -> GuardSource:
return _GUARD_SOURCE_UNSPECIALIZED_BUILTIN_NN_MODULE[self.base.guard_source()]
@dataclasses.dataclass(frozen=True)
| UnspecializedBuiltinNNModuleSource |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 536,
"end": 663
} | class ____:
"Used to test self-referential repr() calls"
def __repr__(self):
return repr(self.value)
| ReprWrapper |
python | huggingface__transformers | src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py | {
"start": 852,
"end": 20938
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Wav2Vec2ConformerModel`]. It is used to
instantiate an Wav2Vec2Conformer model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Conformer
[facebook/wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*):
Vocabulary size of the Wav2Vec2Conformer model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`Wav2Vec2ConformerModel`]. Vocabulary size of the
model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward
method of [`Wav2Vec2ConformerModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for activations inside the fully connected layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
final_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the final projection layer of [`Wav2Vec2ConformerForCTC`].
layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more
details.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the feature encoder.
feat_extract_activation (`str, `optional`, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for quantized feature encoder states.
conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
length of *conv_kernel* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
[SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://huggingface.co/papers/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2),:
The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks''
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over
the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
mask_feature_min_masks (`int`, *optional*, defaults to 0),:
The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
step, irrespectively of `mask_feature_prob`. Only relevant if
''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
num_codevectors_per_group (`int`, *optional*, defaults to 320):
Number of entries in each quantization codebook (group).
num_codevector_groups (`int`, *optional*, defaults to 2):
Number of codevector groups for product codevector quantization.
contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
The temperature *kappa* in the contrastive loss.
feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the output of the feature encoder that's used by the quantizer.
num_negatives (`int`, *optional*, defaults to 100):
Number of negative samples for the contrastive loss.
codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the quantized feature vectors.
proj_codevector_dim (`int`, *optional*, defaults to 256):
Dimensionality of the final projection of both the quantized and the transformer features.
diversity_loss_weight (`int`, *optional*, defaults to 0.1):
The weight of the codebook diversity loss component.
ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
instance of [`Wav2Vec2ConformerForCTC`].
ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
of [`Wav2Vec2ConformerForCTC`].
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
instance of [`Wav2Vec2ConformerForSequenceClassification`].
classifier_proj_size (`int`, *optional*, defaults to 256):
Dimensionality of the projection before token mean-pooling for classification.
tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
*XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
*XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
xvector_output_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
add_adapter (`bool`, *optional*, defaults to `False`):
Whether a convolutional network should be stacked on top of the Wav2Vec2Conformer Encoder. Can be very
useful for warm-starting Wav2Vec2Conformer for SpeechEncoderDecoder models.
adapter_kernel_size (`int`, *optional*, defaults to 3):
Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adapter_stride (`int`, *optional*, defaults to 2):
Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
num_adapter_layers (`int`, *optional*, defaults to 3):
Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
True`.
output_hidden_size (`int`, *optional*):
Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant
if `add_adapter is True`.
position_embeddings_type (`str`, *optional*, defaults to `"relative"`):
Can be specified to `relative` or `rotary` for relative or rotary position embeddings respectively. If left
`None` no relative position embedding is applied.
rotary_embedding_base (`int`, *optional*, defaults to 10000):
If `"rotary"` position embeddings are used, defines the size of the embedding base.
max_source_positions (`int`, *optional*, defaults to 5000):
if `"relative"` position embeddings are used, defines the maximum source input positions.
conv_depthwise_kernel_size (`int`, *optional*, defaults to 31):
Kernel size of convolutional depthwise 1D layer in Conformer blocks.
conformer_conv_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all convolutional layers in Conformer blocks.
Example:
```python
>>> from transformers import Wav2Vec2ConformerConfig, Wav2Vec2ConformerModel
>>> # Initializing a Wav2Vec2Conformer facebook/wav2vec2-conformer-rel-pos-large style configuration
>>> configuration = Wav2Vec2ConformerConfig()
>>> # Initializing a model (with random weights) from the facebook/wav2vec2-conformer-rel-pos-large style configuration
>>> model = Wav2Vec2ConformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "wav2vec2-conformer"
def __init__(
self,
vocab_size=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout=0.1,
activation_dropout=0.1,
attention_dropout=0.1,
feat_proj_dropout=0.0,
feat_quantizer_dropout=0.0,
final_dropout=0.1,
layerdrop=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
feat_extract_norm="group",
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
mask_feature_min_masks=0,
num_codevectors_per_group=320,
num_codevector_groups=2,
contrastive_logits_temperature=0.1,
num_negatives=100,
codevector_dim=256,
proj_codevector_dim=256,
diversity_loss_weight=0.1,
ctc_loss_reduction="sum",
ctc_zero_infinity=False,
use_weighted_layer_sum=False,
classifier_proj_size=256,
tdnn_dim=(512, 512, 512, 512, 1500),
tdnn_kernel=(5, 3, 3, 1, 1),
tdnn_dilation=(1, 2, 3, 1, 1),
xvector_output_dim=512,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
add_adapter=False,
adapter_kernel_size=3,
adapter_stride=2,
num_adapter_layers=3,
output_hidden_size=None,
position_embeddings_type="relative",
rotary_embedding_base=10000,
max_source_positions=5000,
conv_depthwise_kernel_size=31,
conformer_conv_dropout=0.1,
**kwargs,
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.use_weighted_layer_sum = use_weighted_layer_sum
self.max_source_positions = max_source_positions
self.position_embeddings_type = position_embeddings_type
self.rotary_embedding_base = rotary_embedding_base
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
)
# Conformer-block related
self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
self.conformer_conv_dropout = conformer_conv_dropout
# fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
self.num_codevectors_per_group = num_codevectors_per_group
self.num_codevector_groups = num_codevector_groups
self.contrastive_logits_temperature = contrastive_logits_temperature
self.feat_quantizer_dropout = feat_quantizer_dropout
self.num_negatives = num_negatives
self.codevector_dim = codevector_dim
self.proj_codevector_dim = proj_codevector_dim
self.diversity_loss_weight = diversity_loss_weight
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# adapter
self.add_adapter = add_adapter
self.adapter_kernel_size = adapter_kernel_size
self.adapter_stride = adapter_stride
self.num_adapter_layers = num_adapter_layers
self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim)
self.tdnn_kernel = list(tdnn_kernel)
self.tdnn_dilation = list(tdnn_dilation)
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
__all__ = ["Wav2Vec2ConformerConfig"]
| Wav2Vec2ConformerConfig |
python | aimacode__aima-python | deep_learning4e.py | {
"start": 14771,
"end": 18651
} | class ____:
"""
Simple perceptron neural network.
"""
def __init__(self, dataset, l_rate=0.01, epochs=1000, batch_size=10, optimizer=stochastic_gradient_descent,
loss=mean_squared_error_loss, verbose=False, plot=False):
self.dataset = dataset
self.l_rate = l_rate
self.epochs = epochs
self.batch_size = batch_size
self.optimizer = optimizer
self.loss = loss
self.verbose = verbose
self.plot = plot
input_size = len(dataset.inputs)
output_size = len(dataset.values[dataset.target])
# initialize the network, add dense layer
self.raw_net = [InputLayer(input_size), DenseLayer(input_size, output_size)]
def fit(self, X, y):
self.learned_net = self.optimizer(self.dataset, self.raw_net, loss=self.loss, epochs=self.epochs,
l_rate=self.l_rate, batch_size=self.batch_size, verbose=self.verbose)
return self
def predict(self, example):
layer_out = self.learned_net[1].forward(np.array(example).reshape((-1, 1)))
return layer_out.index(max(layer_out))
def keras_dataset_loader(dataset, max_length=500):
"""
Helper function to load keras datasets.
:param dataset: keras data set type
:param max_length: max length of each input sequence
"""
# init dataset
(X_train, y_train), (X_val, y_val) = dataset
if max_length > 0:
X_train = sequence.pad_sequences(X_train, maxlen=max_length)
X_val = sequence.pad_sequences(X_val, maxlen=max_length)
return (X_train[10:], y_train[10:]), (X_val, y_val), (X_train[:10], y_train[:10])
def SimpleRNNLearner(train_data, val_data, epochs=2, verbose=False):
"""
RNN example for text sentimental analysis.
:param train_data: a tuple of (training data, targets)
Training data: ndarray taking training examples, while each example is coded by embedding
Targets: ndarray taking targets of each example. Each target is mapped to an integer
:param val_data: a tuple of (validation data, targets)
:param epochs: number of epochs
:param verbose: verbosity mode
:return: a keras model
"""
total_inputs = 5000
input_length = 500
# init data
X_train, y_train = train_data
X_val, y_val = val_data
# init a the sequential network (embedding layer, rnn layer, dense layer)
model = Sequential()
model.add(Embedding(total_inputs, 32, input_length=input_length))
model.add(SimpleRNN(units=128))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# train the model
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=epochs, batch_size=128, verbose=verbose)
return model
def AutoencoderLearner(inputs, encoding_size, epochs=200, verbose=False):
"""
Simple example of linear auto encoder learning producing the input itself.
:param inputs: a batch of input data in np.ndarray type
:param encoding_size: int, the size of encoding layer
:param epochs: number of epochs
:param verbose: verbosity mode
:return: a keras model
"""
# init data
input_size = len(inputs[0])
# init model
model = Sequential()
model.add(Dense(encoding_size, input_dim=input_size, activation='relu', kernel_initializer='random_uniform',
bias_initializer='ones'))
model.add(Dense(input_size, activation='relu', kernel_initializer='random_uniform', bias_initializer='ones'))
# update model with sgd
sgd = optimizers.SGD(lr=0.01)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
# train the model
model.fit(inputs, inputs, epochs=epochs, batch_size=10, verbose=verbose)
return model
| PerceptronLearner |
python | allegroai__clearml | clearml/backend_api/services/v2_20/workers.py | {
"start": 59551,
"end": 61691
} | class ____(Response):
"""
Response of workers.get_metric_keys endpoint.
:param categories: List of unique metric categories found in the statistics of the requested workers.
:type categories: Sequence[MetricsCategory]
"""
_service = "workers"
_action = "get_metric_keys"
_version = "2.20"
_schema = {
"definitions": {
"metrics_category": {
"properties": {
"metric_keys": {
"description": "The names of the metrics in the category.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Name of the metrics category.",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"categories": {
"description": "List of unique metric categories found in the statistics of the requested workers.",
"items": {"$ref": "#/definitions/metrics_category"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, categories: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetMetricKeysResponse, self).__init__(**kwargs)
self.categories = categories
@schema_property("categories")
def categories(self) -> Optional[List[Any]]:
return self._property_categories
@categories.setter
def categories(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_categories = None
return
self.assert_isinstance(value, "categories", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricsCategory.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "categories", MetricsCategory, is_array=True)
self._property_categories = value
| GetMetricKeysResponse |
python | pennersr__django-allauth | allauth/socialaccount/providers/dwolla/views.py | {
"start": 757,
"end": 1627
} | class ____(OAuth2Adapter):
"""Dwolla Views Adapter"""
scope_delimiter = "|"
provider_id = "dwolla"
access_token_url = TOKEN_URL
authorize_url = AUTH_URL
def complete_login(self, request, app, token, response, **kwargs):
resp = (
get_adapter()
.get_requests_session()
.get(
response["_links"]["account"]["href"],
headers={
"authorization": "Bearer %s" % token.token,
"accept": "application/vnd.dwolla.v1.hal+json",
},
)
)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(DwollaOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(DwollaOAuth2Adapter)
| DwollaOAuth2Adapter |
python | PrefectHQ__prefect | src/prefect/cli/transfer/_migratable_resources/base.py | {
"start": 680,
"end": 957
} | class ____(Protocol):
@property
def source_id(self) -> uuid.UUID: ...
@property
def destination_id(self) -> uuid.UUID | None: ...
async def get_dependencies(self) -> list["MigratableProtocol"]: ...
async def migrate(self) -> None: ...
| MigratableProtocol |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-playgrounds/llama_index/tools/playgrounds/subgraph_connector/base.py | {
"start": 158,
"end": 2667
} | class ____(GraphQLToolSpec):
"""
Connects to subgraphs on The Graph's decentralized network via the Playgrounds API.
Attributes:
spec_functions (list): List of functions that specify the tool's capabilities.
url (str): The endpoint URL for the GraphQL requests.
headers (dict): Headers used for the GraphQL requests.
"""
spec_functions = ["graphql_request"]
def __init__(self, identifier: str, api_key: str, use_deployment_id: bool = False):
"""
Initialize the connector.
Args:
identifier (str): Subgraph identifier or Deployment ID.
api_key (str): API key for the Playgrounds API.
use_deployment_id (bool): Flag to indicate if the identifier is a deployment ID. Default is False.
"""
endpoint = "deployments" if use_deployment_id else "subgraphs"
self.url = (
f"https://api.playgrounds.network/v1/proxy/{endpoint}/id/{identifier}"
)
self.headers = {
"Content-Type": "application/json",
"Playgrounds-Api-Key": api_key,
}
def graphql_request(
self,
query: str,
variables: Optional[dict] = None,
operation_name: Optional[str] = None,
) -> Union[dict, str]:
"""
Make a GraphQL query.
Args:
query (str): The GraphQL query string to execute.
variables (dict, optional): Variables for the GraphQL query. Default is None.
operation_name (str, optional): Name of the operation, if multiple operations are present in the query. Default is None.
Returns:
dict: The response from the GraphQL server if successful.
str: Error message if the request fails.
"""
payload = {"query": query.strip()}
if variables:
payload["variables"] = variables
if operation_name:
payload["operationName"] = operation_name
try:
response = requests.post(self.url, headers=self.headers, json=payload)
# Check if the request was successful
response.raise_for_status()
# Return the JSON response
return response.json()
except requests.RequestException as e:
# Handle request errors
return str(e)
except ValueError as e:
# Handle JSON decoding errors
return f"Error decoding JSON: {e}"
| PlaygroundsSubgraphConnectorToolSpec |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/explorer.py | {
"start": 1814,
"end": 1877
} | class ____:
Size = 1
Type = 2
Date = 3
| DirViewColumns |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 73563,
"end": 74154
} | class ____:
"""Common methods for `BlendedGenericTransform` and `BlendedAffine2D`."""
def __eq__(self, other):
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_separately(self, transform):
return (self._x.contains_branch(transform),
self._y.contains_branch(transform))
__str__ = _make_str_method("_x", "_y")
| _BlendedMixin |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 1333,
"end": 1547
} | class ____(MemberType, enum.Enum):
"""this is enum class"""
x = 'x'
def say_hello(self):
"""docstring"""
@classmethod
def say_goodbye(cls):
"""docstring"""
| EnumClassWithDataType |
python | doocs__leetcode | solution/0300-0399/0321.Create Maximum Number/Solution.py | {
"start": 0,
"end": 1692
} | class ____:
def maxNumber(self, nums1: List[int], nums2: List[int], k: int) -> List[int]:
def f(nums: List[int], k: int) -> List[int]:
n = len(nums)
stk = [0] * k
top = -1
remain = n - k
for x in nums:
while top >= 0 and stk[top] < x and remain > 0:
top -= 1
remain -= 1
if top + 1 < k:
top += 1
stk[top] = x
else:
remain -= 1
return stk
def compare(nums1: List[int], nums2: List[int], i: int, j: int) -> bool:
if i >= len(nums1):
return False
if j >= len(nums2):
return True
if nums1[i] > nums2[j]:
return True
if nums1[i] < nums2[j]:
return False
return compare(nums1, nums2, i + 1, j + 1)
def merge(nums1: List[int], nums2: List[int]) -> List[int]:
m, n = len(nums1), len(nums2)
i = j = 0
ans = [0] * (m + n)
for k in range(m + n):
if compare(nums1, nums2, i, j):
ans[k] = nums1[i]
i += 1
else:
ans[k] = nums2[j]
j += 1
return ans
m, n = len(nums1), len(nums2)
l, r = max(0, k - n), min(k, m)
ans = [0] * k
for x in range(l, r + 1):
arr1 = f(nums1, x)
arr2 = f(nums2, k - x)
arr = merge(arr1, arr2)
if ans < arr:
ans = arr
return ans
| Solution |
python | Textualize__textual | tests/test_style_importance.py | {
"start": 162,
"end": 3501
} | class ____(App[None]):
CSS = """
Container {
border: round green !important;
outline: round green !important;
align: right bottom !important;
content-align: right bottom !important;
offset: 17 23 !important;
overflow: hidden hidden !important;
padding: 10 20 30 40 !important;
scrollbar-size: 23 42 !important;
}
Container.more-specific {
border: solid red;
outline: solid red;
align: center middle;
content-align: center middle;
offset: 0 0;
overflow: scroll scroll;
padding: 1 2 3 4;
scrollbar-size: 1 2;
}
"""
def compose(self) -> ComposeResult:
yield Container(classes="more-specific")
async def test_border_importance():
"""Border without sides should support !important"""
async with StyleApp().run_test() as pilot:
border = pilot.app.query_one(Container).styles.border
desired = ("round", Color.parse("green"))
assert border.top == desired
assert border.left == desired
assert border.bottom == desired
assert border.right == desired
async def test_outline_importance():
"""Outline without sides should support !important"""
async with StyleApp().run_test() as pilot:
outline = pilot.app.query_one(Container).styles.outline
desired = ("round", Color.parse("green"))
assert outline.top == desired
assert outline.left == desired
assert outline.bottom == desired
assert outline.right == desired
async def test_align_importance():
"""Align without direction should support !important"""
async with StyleApp().run_test() as pilot:
assert pilot.app.query_one(Container).styles.align == ("right", "bottom")
async def test_content_align_importance():
"""Content align without direction should support !important"""
async with StyleApp().run_test() as pilot:
assert pilot.app.query_one(Container).styles.content_align == (
"right",
"bottom",
)
async def test_offset_importance():
"""Offset without direction should support !important"""
async with StyleApp().run_test() as pilot:
assert pilot.app.query_one(Container).styles.offset == ScalarOffset.from_offset(
(17, 23)
)
async def test_overflow_importance():
"""Overflow without direction should support !important"""
async with StyleApp().run_test() as pilot:
assert pilot.app.query_one(Container).styles.overflow_x == "hidden"
assert pilot.app.query_one(Container).styles.overflow_y == "hidden"
async def test_padding_importance():
"""Padding without sides should support !important"""
async with StyleApp().run_test() as pilot:
padding = pilot.app.query_one(Container).styles.padding
assert padding.top == 10
assert padding.left == 40
assert padding.bottom == 30
assert padding.right == 20
async def test_scrollbar_size_importance():
"""Scrollbar size without direction should support !important"""
async with StyleApp().run_test() as pilot:
assert pilot.app.query_one(Container).styles.scrollbar_size_horizontal == 23
assert pilot.app.query_one(Container).styles.scrollbar_size_vertical == 42
| StyleApp |
python | tensorflow__tensorflow | third_party/xla/xla/backends/cpu/testlib/kernel_runner_test.py | {
"start": 944,
"end": 3096
} | class ____(absltest.TestCase):
def test_llvm_ir_kernel_runner(self):
ir = """
%struct.XLA_CPU_KernelCallFrame = type { ptr, ptr, i64, ptr }
%struct.XLA_CPU_KernelArg = type { ptr, i64 }
; c = a + b (per thread)
define ptr @LlvmAddI32(ptr noundef %call_frame_ptr) {
%args_gep = getelementptr inbounds %struct.XLA_CPU_KernelCallFrame,
ptr %call_frame_ptr, i32 0, i32 3
%args_ptr = load ptr, ptr %args_gep, align 8
%arg1_gep = getelementptr inbounds %struct.XLA_CPU_KernelArg, ptr %args_ptr, i64 1
%arg2_gep = getelementptr inbounds %struct.XLA_CPU_KernelArg, ptr %args_ptr, i64 2
%arg0_ptr = load ptr, ptr %args_ptr, align 8
%arg1_ptr = load ptr, ptr %arg1_gep, align 8
%arg2_ptr = load ptr, ptr %arg2_gep, align 8
%thread_gep = getelementptr inbounds %struct.XLA_CPU_KernelCallFrame, ptr %call_frame_ptr, i32 0, i32 1
%thread_ptr = load ptr, ptr %thread_gep, align 8
%thread_idx = load i64, ptr %thread_ptr, align 8
%a_ptr = getelementptr inbounds i32, ptr %arg0_ptr, i64 %thread_idx
%a = load i32, ptr %a_ptr, align 4
%b_ptr = getelementptr inbounds i32, ptr %arg1_ptr, i64 %thread_idx
%b = load i32, ptr %b_ptr, align 4
%c = add nsw i32 %a, %b
%result_ptr = getelementptr inbounds i32, ptr %arg2_ptr, i64 %thread_idx
store i32 %c, ptr %result_ptr, align 4
ret ptr null
}
"""
llvm_emitter = cpu_testlib.LlvmTestKernelEmitter(
ir, "LlvmAddI32", (4, 1, 1)
)
kernel_definition = llvm_emitter.emit_kernel_definition()
runner = cpu_testlib.KernelRunner.create(
kernel_definition,
cpu_testlib.JitCompiler(base_testlib.HloModuleConfig()),
)
a = create_literal(np.array([1, 2, 3, 4], dtype=np.int32))
b = create_literal(np.array([5, 6, 7, 8], dtype=np.int32))
c = create_literal(np.array([0, 0, 0, 0], dtype=np.int32))
runner.call([a, b, c])
np.testing.assert_array_equal(np.asarray(c), np.asarray(a) + np.asarray(b))
| LLvmKernelRunnerTest |
python | wandb__wandb | wandb/sdk/lib/service/service_process.py | {
"start": 923,
"end": 3128
} | class ____:
"""A handle to a process running the internal service."""
def __init__(
self,
*,
connection_token: service_token.ServiceToken,
process: subprocess.Popen,
) -> None:
self._token = connection_token
self._process = process
@property
def token(self) -> service_token.ServiceToken:
"""A token for connecting to the process."""
return self._token
def join(self) -> int:
"""Wait for the process to end and return its exit code."""
return self._process.wait()
def _launch_server(settings: Settings) -> ServiceProcess:
"""Launch server and set ports."""
if platform.system() == "Windows":
creationflags: int = subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined]
start_new_session = False
else:
creationflags = 0
start_new_session = True
pid = str(os.getpid())
with tempfile.TemporaryDirectory() as tmpdir:
port_file = pathlib.Path(tmpdir, f"port-{pid}.txt")
service_args: list[str] = []
try:
core_path = get_core_path()
except WandbCoreNotAvailableError as e:
get_sentry().reraise(e)
service_args.extend([core_path])
if not error_reporting_enabled():
service_args.append("--no-observability")
if core_debug(default="False"):
service_args.extend(["--log-level", "-4"])
if dcgm_profiling_enabled():
service_args.append("--enable-dcgm-profiling")
service_args.extend(["--port-filename", str(port_file)])
service_args.extend(["--pid", pid])
if not ipc_support.SUPPORTS_UNIX:
service_args.append("--listen-on-localhost")
proc = subprocess.Popen(
service_args,
env=os.environ,
close_fds=True,
creationflags=creationflags,
start_new_session=start_new_session,
)
token = service_port_file.poll_for_token(
port_file,
proc,
timeout=settings.x_service_wait,
)
return ServiceProcess(connection_token=token, process=proc)
| ServiceProcess |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/gradient_boosting.py | {
"start": 464,
"end": 3219
} | class ____(object):
"""Super class of GradientBoostingClassifier and GradientBoostinRegressor.
Uses a collection of regression trees that trains on predicting the gradient
of the loss function.
Parameters:
-----------
n_estimators: int
The number of classification trees that are used.
learning_rate: float
The step length that will be taken when following the negative gradient during
training.
min_samples_split: int
The minimum number of samples needed to make a split when building a tree.
min_impurity: float
The minimum impurity required to split the tree further.
max_depth: int
The maximum depth of a tree.
regression: boolean
True or false depending on if we're doing regression or classification.
"""
def __init__(self, n_estimators, learning_rate, min_samples_split,
min_impurity, max_depth, regression):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.min_samples_split = min_samples_split
self.min_impurity = min_impurity
self.max_depth = max_depth
self.regression = regression
self.bar = progressbar.ProgressBar(widgets=bar_widgets)
# Square loss for regression
# Log loss for classification
self.loss = SquareLoss()
if not self.regression:
self.loss = CrossEntropy()
# Initialize regression trees
self.trees = []
for _ in range(n_estimators):
tree = RegressionTree(
min_samples_split=self.min_samples_split,
min_impurity=min_impurity,
max_depth=self.max_depth)
self.trees.append(tree)
def fit(self, X, y):
y_pred = np.full(np.shape(y), np.mean(y, axis=0))
for i in self.bar(range(self.n_estimators)):
gradient = self.loss.gradient(y, y_pred)
self.trees[i].fit(X, gradient)
update = self.trees[i].predict(X)
# Update y prediction
y_pred -= np.multiply(self.learning_rate, update)
def predict(self, X):
y_pred = np.array([])
# Make predictions
for tree in self.trees:
update = tree.predict(X)
update = np.multiply(self.learning_rate, update)
y_pred = -update if not y_pred.any() else y_pred - update
if not self.regression:
# Turn into probability distribution
y_pred = np.exp(y_pred) / np.expand_dims(np.sum(np.exp(y_pred), axis=1), axis=1)
# Set label to the value that maximizes probability
y_pred = np.argmax(y_pred, axis=1)
return y_pred
| GradientBoosting |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 3207,
"end": 4346
} | class ____(LimitedParamBenchmark):
params = [
[(3,10000,1000), (8,10000,1000), (16,10000,1000)],
[1, 2, np.inf],
BOX_SIZES, LEAF_SIZES,
]
param_names = ['(m, n, r)', 'p', 'boxsize', 'leafsize']
num_param_combinations = 21
@staticmethod
def do_setup(self, mnr, p, boxsize, leafsize):
m, n, r = mnr
rng = np.random.default_rng(1234)
self.data = rng.uniform(size=(n, m))
self.queries = rng.uniform(size=(r, m))
self.T = cKDTree(self.data, leafsize=leafsize, boxsize=boxsize)
def setup(self, mnr, p, boxsize, leafsize):
LimitedParamBenchmark.setup(self, mnr, p, boxsize, leafsize)
Query.do_setup(self, mnr, p, boxsize, leafsize)
def time_query(self, mnr, p, boxsize, leafsize):
"""
Querying kd-tree
dim | # points | # queries | KDTree | cKDTree | flat cKDTree
"""
self.T.query(self.queries, p=p)
# Retain old benchmark results (remove this if changing the benchmark)
time_query.version = (
"327bc0627d5387347e9cdcf4c52a550c813bb80a859eeb0f3e5bfe6650a8a1db"
)
| Query |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVar3.py | {
"start": 162,
"end": 1514
} | class ____(Generic[_T, AnyStr]):
# This should generate an error because _S
# isn't defined in this context.
my_var1: _S
my_var2: AnyStr
# This should generate an error because _T
# is already in use.
class InnerClass1(Generic[_T]): ...
# This should generate an error because AnyStr
# is already in use.
class InnerClass2(Generic[_S, AnyStr]):
my_var1: _S
# This should generate an error because _T
# is already in use in the outer class.
my_var2: _T
class InnerClass3:
# This should generate an error.
x: list[_T]
def f(self, x: _T, y: _S, z: _S) -> _T: ...
def g(self, x: AnyStr) -> None:
# This should generate an error.
y: list[_T]
def func1(a: _T) -> _T | None:
my_var1: _T
# This should generate an error
my_var2: _S
# This should generate an error because _T
# is already in use.
class InnerClass3(Generic[_T]): ...
# This should generate an error.
a: _S = 3
# This should generate an error.
b: list[_T] = []
# This should generate an error.
c: list[AnyStr] = []
T = TypeVar("T")
def foo() -> Callable[[T], T]:
def inner(v: T) -> T:
reveal_type(v, expected_text="T@foo")
return v
return inner
# This should generate an error.
list[T]()
| OuterClass |
python | kamyu104__LeetCode-Solutions | Python/maximum-value-of-an-ordered-triplet-i.py | {
"start": 37,
"end": 489
} | class ____(object):
def maximumTripletValue(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
NEG_INF = float("-inf")
result = 0
mx_diff = mx = NEG_INF
for x in nums:
if mx_diff != NEG_INF:
result = max(result, mx_diff*x)
if mx != NEG_INF:
mx_diff = max(mx_diff, mx-x)
mx = max(mx, x)
return result
| Solution |
python | apache__airflow | devel-common/src/sphinx_exts/substitution_extensions.py | {
"start": 4210,
"end": 5323
} | class ____(SphinxTransform):
"""
Add a custom ``|version-spacepad|`` replacement definition
Since this desired replacement text is all just whitespace, we can't use
the normal RST to define this, we instead of to create this definition
manually after docutils has parsed the source files.
"""
# Run as early as possible
default_priority = 1
def apply(self, **kwargs: Any) -> None:
substitution_defs = self.document.substitution_defs
version = substitution_defs["version"].astext()
pad = " " * len(version)
substitution_defs["version-spacepad"] = nodes.substitution_definition(version, pad)
...
def setup(app: Sphinx) -> dict:
"""Setup plugin"""
app.add_config_value("substitutions", [], "html")
directives.register_directive("code-block", SubstitutionCodeBlock)
app.add_role("subst-code", substitution_code_role)
app.add_post_transform(SubstitutionCodeBlockTransform)
app.add_post_transform(AddSpacepadSubstReference)
return {"parallel_write_safe": True, "parallel_read_safe": True}
| AddSpacepadSubstReference |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/multi_process_cluster.py | {
"start": 2061,
"end": 5941
} | class ____:
"""tf.data service cluster with local and remote workers.
Represents a cluster with a dispatcher, `num_local_workers` local workers, and
`num_remote_workers` remote workers. Remote workers run in separate processes.
This is useful to test reading from local in-process workers. For example:
```
cluster = multi_process_cluster.MultiProcessCluster(
num_local_workers=1, num_remote_workers=3)
num_elements = 10
dataset = self.make_distributed_range_dataset(
num_elements, cluster, target_workers="LOCAL")
self.assertDatasetProduces(dataset, list(range(num_elements)))
```
"""
def __init__(self,
num_local_workers,
num_remote_workers,
worker_tags=None,
worker_addresses=None,
deployment_mode=data_service_pb2.DEPLOYMENT_MODE_COLOCATED):
self._work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
self._deployment_mode = deployment_mode
self._start_dispatcher(worker_addresses)
self._start_local_workers(num_local_workers, worker_tags)
self._start_remote_workers(num_remote_workers, worker_tags)
def _start_dispatcher(self, worker_addresses, port=0):
if port == 0:
port = test_util.pick_unused_port()
self._dispatcher = server_lib.DispatchServer(
service_config_pb2.DispatcherConfig(
port=port,
protocol="grpc",
work_dir=self._work_dir,
fault_tolerant_mode=True,
worker_addresses=worker_addresses,
deployment_mode=self._deployment_mode),
start=True)
def _start_local_workers(self, num_workers, worker_tags=None):
self._local_workers = []
for _ in range(num_workers):
self.start_local_worker(worker_tags)
def _start_remote_workers(self, num_workers, worker_tags=None):
# List of (worker address, remote worker process) tuples.
self._remote_workers = []
for _ in range(num_workers):
self.start_remote_worker(worker_tags)
def start_local_worker(self, worker_tags=None):
worker = data_service_test_base.TestWorker(
self.dispatcher_address(),
_WORKER_SHUTDOWN_QUIET_PERIOD_MS,
port=test_util.pick_unused_port(),
worker_tags=worker_tags)
worker.start()
self._local_workers.append(worker)
def start_remote_worker(self, worker_tags=None):
"""Runs a tf.data service worker in a remote process."""
pipe_reader, pipe_writer = multi_process_lib.multiprocessing.Pipe(
duplex=False)
worker_process = _RemoteWorkerProcess(
self.dispatcher_address(),
port=test_util.pick_unused_port(),
worker_tags=worker_tags,
pipe_writer=pipe_writer)
worker_process.start()
worker_address = pipe_reader.recv()
self._remote_workers.append((worker_address, worker_process))
def restart_dispatcher(self):
port = int(self.dispatcher_address().split(":")[1])
self._dispatcher._stop()
self._start_dispatcher(
worker_addresses=(self.local_worker_addresses() +
self.remote_worker_addresses()),
port=port)
def restart_local_workers(self):
for worker in self._local_workers:
worker.restart()
def dispatcher_address(self):
return self._dispatcher._address
def local_worker_addresses(self):
return [worker.worker_address() for worker in self._local_workers]
def remote_worker_addresses(self):
return [worker_address for (worker_address, _) in self._remote_workers]
def _stop(self):
for worker in self._local_workers:
worker.stop()
for (_, worker_process) in self._remote_workers:
worker_process.kill()
self._dispatcher._stop()
def __del__(self):
self._stop()
def test_main():
"""Main function to be called within `__main__` of a test file."""
multi_process_lib.test_main()
| MultiProcessCluster |
python | django__django | tests/postgres_tests/models.py | {
"start": 1637,
"end": 1734
} | class ____(PostgreSQLModel):
field = ArrayField(models.FloatField(), size=3)
| WithSizeArrayModel |
python | pytorch__pytorch | test/mobile/model_test/torchvision_models.py | {
"start": 1293,
"end": 1824
} | class ____:
def getModule(self):
model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
model.eval()
example = torch.zeros(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
optimized_module = optimize_for_mobile(traced_script_module)
augment_model_with_bundled_inputs(
optimized_module,
[
(example,),
],
)
optimized_module(example)
return optimized_module
| Resnet18Module |
python | google__jax | tests/pallas/gpu_pallas_distributed_test.py | {
"start": 1270,
"end": 1877
} | class ____(jt_multiprocess.MultiProcessTest):
def setUp(self):
if (not jtu.test_device_matches(["cuda"]) or
not jtu.is_cuda_compute_capability_at_least("9.0")):
self.skipTest("Only works on GPU with capability >= sm90")
if not mgpu.supports_cross_device_collectives():
self.skipTest("NVSHMEM library unavailable.")
if jax.process_count() == 1:
self.skipTest("Test requires multiple processes.")
if os.environ.get("XLA_PYTHON_CLIENT_ALLOCATOR", "") == "platform":
self.skipTest("NVSHMEM doesn't work with the platform allocator.")
super().setUp()
| TestCase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-file/source_file/source.py | {
"start": 803,
"end": 8999
} | class ____(Source):
"""This source aims to provide support for readers of different file formats stored in various locations.
It is optionally using s3fs, gcfs or smart_open libraries to handle efficient streaming of very large files
(either compressed or not).
Supported examples of URL this can accept are as follows:
```
s3://my_bucket/my_key
s3://my_key:my_secret@my_bucket/my_key
gs://my_bucket/my_blob
azure://my_bucket/my_blob (not tested)
hdfs:///path/file (not tested)
hdfs://path/file (not tested)
webhdfs://host:port/path/file (not tested)
./local/path/file
~/local/path/file
local/path/file
./local/path/file.gz
file:///home/user/file
file:///home/user/file.bz2
[ssh|scp|sftp]://username@host//path/file
[ssh|scp|sftp]://username@host/path/file
[ssh|scp|sftp]://username:password@host/path/file
```
The source reader currently leverages `read_csv` but will be extended to readers of different formats for
more potential sources as described below:
https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html
- read_json
- read_html
- read_excel
- read_fwf
- read_feather
- read_parquet
- read_orc
- read_pickle
All the options of the readers are exposed to the configuration file of this connector so it is possible to
override header names, types, encoding, etc
Note that this implementation is handling `url` target as a single file at the moment.
We will expand the capabilities of this source to discover and load either glob of multiple files,
content of directories, etc in a latter iteration.
"""
client_class = Client
def _get_client(self, config: Mapping):
"""Construct client"""
client = self.client_class(**config)
return client
@staticmethod
def _validate_and_transform(config: Mapping[str, Any]):
if "reader_options" in config:
try:
config["reader_options"] = json.loads(config["reader_options"])
if not isinstance(config["reader_options"], dict):
message = (
"Field 'reader_options' is not a valid JSON object. "
"Please provide key-value pairs, See field description for examples."
)
raise AirbyteTracedException(message=message, internal_message=message, failure_type=FailureType.config_error)
except ValueError:
message = "Field 'reader_options' is not valid JSON object. https://www.json.org/"
raise AirbyteTracedException(message=message, internal_message=message, failure_type=FailureType.config_error)
else:
config["reader_options"] = {}
config["url"] = dropbox_force_download(config["url"])
parse_result = urlparse(config["url"])
if parse_result.netloc == "docs.google.com" and parse_result.path.lower().startswith("/spreadsheets/"):
message = f'Failed to load {config["url"]}: please use the Official Google Sheets Source connector'
raise AirbyteTracedException(message=message, internal_message=message, failure_type=FailureType.config_error)
return config
def spec(self, logger: logging.Logger) -> ConnectorSpecification:
"""Returns the json schema for the spec"""
spec = super().spec(logger)
# override cloud spec to remove local file support
if is_cloud_environment():
for i in range(len(spec.connectionSpecification["properties"]["provider"]["oneOf"])):
provider = spec.connectionSpecification["properties"]["provider"]["oneOf"][i]
if provider["properties"]["storage"]["const"] == LOCAL_STORAGE_NAME:
spec.connectionSpecification["properties"]["provider"]["oneOf"].pop(i)
return spec
def check(self, logger, config: Mapping) -> AirbyteConnectionStatus:
"""
Check involves verifying that the specified file is reachable with
our credentials.
"""
config = self._validate_and_transform(config)
client = self._get_client(config)
source_url = client.reader.full_url
try:
list(client.streams(empty_schema=True))
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except (TypeError, ValueError, AirbyteTracedException) as err:
reason = f"Failed to load {source_url}. Please check File Format and Reader Options are set correctly."
logger.error(f"{reason}\n{repr(err)}")
raise AirbyteTracedException(message=reason, internal_message=reason, failure_type=FailureType.config_error)
except Exception as err:
reason = f"Failed to load {source_url}. You could have provided an invalid URL, please verify it: {repr(err)}."
logger.error(reason)
return AirbyteConnectionStatus(status=Status.FAILED, message=reason)
def discover(self, logger: logging.Logger, config: Mapping) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration. For example, given valid credentials to a
Remote CSV File, returns an Airbyte catalog where each csv file is a stream, and each column is a field.
"""
config = self._validate_and_transform(config)
client = self._get_client(config)
name, full_url = client.stream_name, client.reader.full_url
logger.info(f"Discovering schema of {name} at {full_url}...")
try:
streams = list(client.streams())
except Exception as err:
reason = f"Failed to discover schemas of {name} at {full_url}: {repr(err)}\n{traceback.format_exc()}"
logger.error(reason)
raise err
return AirbyteCatalog(streams=streams)
def read(
self,
logger: logging.Logger,
config: Mapping[str, Any],
catalog: ConfiguredAirbyteCatalog,
state: MutableMapping[str, Any] = None,
) -> Iterator[AirbyteMessage]:
"""Returns a generator of the AirbyteMessages generated by reading the source with the given configuration, catalog, and state."""
config = self._validate_and_transform(config)
client = self._get_client(config)
fields = self.selected_fields(catalog, config)
name = client.stream_name
airbyte_stream = catalog.streams[0].stream
logger.info(f"Syncing stream: {name} ({client.reader.full_url})...")
yield stream_status_as_airbyte_message(airbyte_stream, AirbyteStreamStatus.STARTED)
record_counter = 0
try:
for row in client.read(fields=fields):
record = AirbyteRecordMessage(stream=name, data=row, emitted_at=int(datetime.now().timestamp()) * 1000)
record_counter += 1
if record_counter == 1:
logger.info(f"Marking stream {name} as RUNNING")
yield stream_status_as_airbyte_message(airbyte_stream, AirbyteStreamStatus.RUNNING)
yield AirbyteMessage(type=Type.RECORD, record=record)
logger.info(f"Marking stream {name} as STOPPED")
yield stream_status_as_airbyte_message(airbyte_stream, AirbyteStreamStatus.COMPLETE)
except Exception as err:
reason = f"Failed to read data of {name} at {client.reader.full_url}: {repr(err)}\n{traceback.format_exc()}"
logger.error(reason)
logger.exception(f"Encountered an exception while reading stream {name}")
logger.info(f"Marking stream {name} as STOPPED")
yield stream_status_as_airbyte_message(airbyte_stream, AirbyteStreamStatus.INCOMPLETE)
raise err
@staticmethod
def selected_fields(catalog: ConfiguredAirbyteCatalog, config: Mapping[str, Any]) -> Iterable:
for configured_stream in catalog.streams:
fields = configured_stream.stream.json_schema["properties"].keys()
yield from fields
| SourceFile |
python | apache__airflow | providers/pinecone/src/airflow/providers/pinecone/operators/pinecone.py | {
"start": 1261,
"end": 3312
} | class ____(BaseOperator):
"""
Ingest vector embeddings into Pinecone.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PineconeIngestOperator`
:param conn_id: The connection id to use when connecting to Pinecone.
:param index_name: Name of the Pinecone index.
:param input_vectors: Data to be ingested, in the form of a list of vectors, list of tuples,
or list of dictionaries.
:param namespace: The namespace to write to. If not specified, the default namespace is used.
:param batch_size: The number of vectors to upsert in each batch.
:param upsert_kwargs: .. seealso:: https://docs.pinecone.io/reference/upsert
"""
template_fields: Sequence[str] = ("index_name", "input_vectors", "namespace")
def __init__(
self,
*,
conn_id: str = PineconeHook.default_conn_name,
index_name: str,
input_vectors: list[Vector] | list[tuple] | list[dict],
namespace: str = "",
batch_size: int | None = None,
upsert_kwargs: dict | None = None,
**kwargs: Any,
) -> None:
self.upsert_kwargs = upsert_kwargs or {}
super().__init__(**kwargs)
self.conn_id = conn_id
self.index_name = index_name
self.namespace = namespace
self.batch_size = batch_size
self.input_vectors = input_vectors
@cached_property
def hook(self) -> PineconeHook:
"""Return an instance of the PineconeHook."""
return PineconeHook(conn_id=self.conn_id)
def execute(self, context: Context) -> None:
"""Ingest data into Pinecone using the PineconeHook."""
self.hook.upsert(
index_name=self.index_name,
vectors=self.input_vectors,
namespace=self.namespace,
batch_size=self.batch_size,
**self.upsert_kwargs,
)
self.log.info("Successfully ingested data into Pinecone index %s.", self.index_name)
| PineconeIngestOperator |
python | huggingface__transformers | src/transformers/models/data2vec/modular_data2vec_audio.py | {
"start": 3938,
"end": 4299
} | class ____(Wav2Vec2FeatureEncoder):
def __init__(self, config):
nn.Module.__init__(self)
self.conv_layers = nn.ModuleList(
[Data2VecAudioConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]
)
self.gradient_checkpointing = False
self._requires_grad = True
| Data2VecAudioFeatureEncoder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 3161,
"end": 3406
} | class ____(IncrementalShopifyStream):
data_field = "disputes"
filter_field = "since_id"
cursor_field = "id"
order_field = "id"
def path(self, **kwargs) -> str:
return f"shopify_payments/{self.data_field}.json"
| Disputes |
python | django__django | tests/test_client_regress/views.py | {
"start": 368,
"end": 5229
} | class ____(Exception):
pass
def no_template_view(request):
"A simple view that expects a GET request, and returns a rendered template"
return HttpResponse(
"No template used. Sample content: twice once twice. Content ends."
)
def staff_only_view(request):
"""
A view that can only be visited by staff. Non staff members get an
exception
"""
if request.user.is_staff:
return HttpResponse()
else:
raise CustomTestException()
@login_required
def get_view(request):
"A simple login protected view"
return HttpResponse("Hello world")
def request_data(request, template="base.html", data="sausage"):
"A simple view that returns the request data in the context"
return render(
request,
template,
{
"get-foo": request.GET.get("foo"),
"get-bar": request.GET.get("bar"),
"post-foo": request.POST.get("foo"),
"post-bar": request.POST.get("bar"),
"data": data,
},
)
def view_with_argument(request, name):
"""A view that takes a string argument
The purpose of this view is to check that if a space is provided in
the argument, the test framework unescapes the %20 before passing
the value to the view.
"""
if name == "Arthur Dent":
return HttpResponse("Hi, Arthur")
else:
return HttpResponse("Howdy, %s" % name)
def nested_view(request):
"""
A view that uses test client to call another view.
"""
c = Client()
c.get("/no_template_view/")
return render(request, "base.html", {"nested": "yes"})
@login_required
def login_protected_redirect_view(request):
"A view that redirects all requests to the GET view"
return HttpResponseRedirect("/get_view/")
def redirect_to_self_with_changing_query_view(request):
query = request.GET.copy()
query["counter"] += "0"
return HttpResponseRedirect(
"/redirect_to_self_with_changing_query_view/?%s" % urlencode(query)
)
def set_session_view(request):
"A view that sets a session variable"
request.session["session_var"] = "YES"
return HttpResponse("set_session")
def check_session_view(request):
"A view that reads a session variable"
return HttpResponse(request.session.get("session_var", "NO"))
def request_methods_view(request):
"A view that responds with the request method"
return HttpResponse("request method: %s" % request.method)
def return_unicode(request):
return render(request, "unicode.html")
def return_undecodable_binary(request):
return HttpResponse(
b"%PDF-1.4\r\n%\x93\x8c\x8b\x9e ReportLab Generated PDF document "
b"http://www.reportlab.com"
)
def return_json_response(request):
content_type = request.GET.get("content_type")
kwargs = {"content_type": content_type} if content_type else {}
return JsonResponse({"key": "value"}, **kwargs)
def return_json_response_latin1(request):
return HttpResponse(
b'{"a":"\xc5"}', content_type="application/json; charset=latin1"
)
def return_text_file(request):
"A view that parses and returns text as a file."
match = CONTENT_TYPE_RE.match(request.META["CONTENT_TYPE"])
if match:
charset = match[1]
else:
charset = settings.DEFAULT_CHARSET
return HttpResponse(
request.body, status=200, content_type="text/plain; charset=%s" % charset
)
def check_headers(request):
"A view that responds with value of the X-ARG-CHECK header"
return HttpResponse(
"HTTP_X_ARG_CHECK: %s" % request.META.get("HTTP_X_ARG_CHECK", "Undefined")
)
def body(request):
"A view that is requested with GET and accesses request.body. Refs #14753."
return HttpResponse(request.body)
def read_all(request):
"A view that is requested with accesses request.read()."
return HttpResponse(request.read())
def read_buffer(request):
"A view that is requested with accesses request.read(LARGE_BUFFER)."
return HttpResponse(request.read(99999))
def request_context_view(request):
# Special attribute that won't be present on a plain HttpRequest
request.special_path = request.path
return render(request, "request_context.html")
def render_template_multiple_times(request):
"""A view that renders a template multiple times."""
return HttpResponse(render_to_string("base.html") + render_to_string("base.html"))
def redirect_based_on_extra_headers_1_view(request):
if "HTTP_REDIRECT" in request.META:
return HttpResponseRedirect("/redirect_based_on_extra_headers_2/")
return HttpResponse()
def redirect_based_on_extra_headers_2_view(request):
if "HTTP_REDIRECT" in request.META:
return HttpResponseRedirect("/redirects/further/more/")
return HttpResponse()
| CustomTestException |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 4130,
"end": 4234
} | class ____(Protocol):
def load_module(self, fullname: str, /) -> types.ModuleType: ...
| _LoaderProtocol |
python | joke2k__faker | faker/providers/barcode/fr_CA/__init__.py | {
"start": 50,
"end": 217
} | class ____(BarcodeProvider):
"""Implement bank provider for ``fr_CA`` locale.
There is no difference from the ``en_CA`` implementation.
"""
pass
| Provider |
python | getsentry__sentry | src/sentry/api/serializers/models/plugin.py | {
"start": 3998,
"end": 5663
} | class ____(PluginSerializer):
def __init__(self, project=None):
self.project = project
def get_attrs(self, item_list, user, **kwargs):
return {
item: {
"config": [
serialize_field(self.project, item, c)
for c in item.get_config(
project=self.project, user=user, add_additional_fields=True
)
]
}
for item in item_list
}
def serialize(self, obj, attrs, user, **kwargs):
d = super().serialize(obj, attrs, user)
d["config"] = attrs.get("config")
return d
def serialize_field(project, plugin, field):
data = {
"name": str(field["name"]),
"label": str(field.get("label") or field["name"].title().replace("_", " ")),
"type": field.get("type", "text"),
"required": field.get("required", True),
"help": str(field["help"]) if field.get("help") else None,
"placeholder": str(field["placeholder"]) if field.get("placeholder") else None,
"choices": field.get("choices"),
"readonly": field.get("readonly", False),
"defaultValue": field.get("default"),
"value": None,
"isDeprecated": is_plugin_deprecated(plugin, project),
}
data["isHidden"] = data["isDeprecated"] or plugin.is_hidden()
if field.get("type") != "secret":
data["value"] = plugin.get_option(field["name"], project)
else:
data["hasSavedValue"] = bool(field.get("has_saved_value", False))
data["prefix"] = field.get("prefix", "")
return data
| PluginWithConfigSerializer |
python | ray-project__ray | python/ray/autoscaler/v2/utils.py | {
"start": 1644,
"end": 2595
} | class ____:
"""
A utility class for protobuf objects.
"""
@staticmethod
def to_dict(proto):
"""
Convert a protobuf object to a dict.
This is a slow conversion, and should only be used for debugging or
latency insensitve code.
Args:
proto: the protobuf object
Returns:
dict: the dict
"""
from ray._private.protobuf_compat import message_to_dict
return message_to_dict(
proto,
preserving_proto_field_name=True,
always_print_fields_with_no_presence=True,
)
@staticmethod
def to_dict_list(protos):
"""
Convert a list of protobuf objects to a list of dicts.
Args:
protos: the list of protobuf objects
Returns:
dict_list: the list of dicts
"""
return [ProtobufUtil.to_dict(proto) for proto in protos]
| ProtobufUtil |
python | kamyu104__LeetCode-Solutions | Python/find-the-maximum-number-of-elements-in-subset.py | {
"start": 940,
"end": 1502
} | class ____(object):
def maximumLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
cnt = collections.Counter(nums)
result = 0
for x in cnt.iterkeys():
if x == 1:
result = max(result, cnt[x]-(1 if cnt[x]%2 == 0 else 0))
continue
l = 0
while x in cnt and cnt[x] >= 2:
l += 2
x *= x
l += 1 if x in cnt else -1
result = max(result, l)
return result
| Solution2 |
python | walkccc__LeetCode | solutions/53. Maximum Subarray/53-3.py | {
"start": 60,
"end": 271
} | class ____:
summ: int
# the sum of the subarray starting from the first number
maxSubarraySumLeft: int
# the sum of the subarray ending in the last number
maxSubarraySumRight: int
maxSubarraySum: int
| T |
python | PrefectHQ__prefect | src/prefect/server/database/orm_models.py | {
"start": 44441,
"end": 45131
} | class ____(Base):
__table_args__: Any = (
sa.Index(
"uq_automation_related_resource__automation_id__resource_id",
"automation_id",
"resource_id",
unique=True,
),
)
automation_id: Mapped[uuid.UUID] = mapped_column(
sa.ForeignKey("automation.id", ondelete="CASCADE")
)
resource_id: Mapped[Optional[str]] = mapped_column(index=True)
automation_owned_by_resource: Mapped[bool] = mapped_column(
default=False, server_default="0"
)
automation: Mapped["Automation"] = relationship(
"Automation", back_populates="related_resources", lazy="raise"
)
| AutomationRelatedResource |
python | ray-project__ray | python/ray/data/expressions.py | {
"start": 18108,
"end": 19381
} | class ____(Expr):
"""Expression that represents a constant scalar value.
This expression type represents a literal value that will be broadcast
to all rows when evaluated. The value can be any Python object.
Args:
value: The constant value to represent
Example:
>>> from ray.data.expressions import lit
>>> import numpy as np
>>> # Create a literal value
>>> five = lit(5) # Creates LiteralExpr(value=5)
>>> name = lit("John") # Creates LiteralExpr(value="John")
>>> numpy_val = lit(np.int32(42)) # Creates LiteralExpr with numpy type
"""
value: Any
data_type: DataType = field(init=False)
def __post_init__(self):
# Infer the type from the value using DataType.infer_dtype
inferred_dtype = DataType.infer_dtype(self.value)
# Use object.__setattr__ since the dataclass is frozen
object.__setattr__(self, "data_type", inferred_dtype)
def structurally_equals(self, other: Any) -> bool:
return (
isinstance(other, LiteralExpr)
and self.value == other.value
and type(self.value) is type(other.value)
)
@DeveloperAPI(stability="alpha")
@dataclass(frozen=True, eq=False, repr=False)
| LiteralExpr |
python | pytorch__pytorch | test/test_tensorboard.py | {
"start": 13778,
"end": 15259
} | class ____(BaseTestCase):
def test_embedding(self):
w = self.createSummaryWriter()
all_features = torch.tensor([[1.0, 2.0, 3.0], [5.0, 4.0, 1.0], [3.0, 7.0, 7.0]])
all_labels = torch.tensor([33.0, 44.0, 55.0])
all_images = torch.zeros(3, 3, 5, 5)
w.add_embedding(
all_features, metadata=all_labels, label_img=all_images, global_step=2
)
dataset_label = ["test"] * 2 + ["train"] * 2
all_labels = list(zip(all_labels, dataset_label))
w.add_embedding(
all_features,
metadata=all_labels,
label_img=all_images,
metadata_header=["digit", "dataset"],
global_step=2,
)
# assert...
def test_embedding_64(self):
w = self.createSummaryWriter()
all_features = torch.tensor([[1.0, 2.0, 3.0], [5.0, 4.0, 1.0], [3.0, 7.0, 7.0]])
all_labels = torch.tensor([33.0, 44.0, 55.0])
all_images = torch.zeros((3, 3, 5, 5), dtype=torch.float64)
w.add_embedding(
all_features, metadata=all_labels, label_img=all_images, global_step=2
)
dataset_label = ["test"] * 2 + ["train"] * 2
all_labels = list(zip(all_labels, dataset_label))
w.add_embedding(
all_features,
metadata=all_labels,
label_img=all_images,
metadata_header=["digit", "dataset"],
global_step=2,
)
| TestTensorBoardEmbedding |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super10.py | {
"start": 63,
"end": 115
} | class ____:
def clone(self):
return self
| A |
python | pytest-dev__pytest | testing/python/collect.py | {
"start": 29029,
"end": 36301
} | class ____:
def test_pytest_pycollect_module(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
class MyModule(pytest.Module):
pass
def pytest_pycollect_makemodule(module_path, parent):
if module_path.name == "test_xyz.py":
return MyModule.from_parent(path=module_path, parent=parent)
"""
)
pytester.makepyfile("def test_some(): pass")
pytester.makepyfile(test_xyz="def test_func(): pass")
result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*<Module*test_pytest*", "*<MyModule*xyz*"])
def test_customized_pymakemodule_issue205_subdir(self, pytester: Pytester) -> None:
b = pytester.path.joinpath("a", "b")
b.mkdir(parents=True)
b.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(wrapper=True)
def pytest_pycollect_makemodule():
mod = yield
mod.obj.hello = "world"
return mod
"""
),
encoding="utf-8",
)
b.joinpath("test_module.py").write_text(
textwrap.dedent(
"""\
def test_hello():
assert hello == "world"
"""
),
encoding="utf-8",
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_customized_pymakeitem(self, pytester: Pytester) -> None:
b = pytester.path.joinpath("a", "b")
b.mkdir(parents=True)
b.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.hookimpl(wrapper=True)
def pytest_pycollect_makeitem():
result = yield
if result:
for func in result:
func._some123 = "world"
return result
"""
),
encoding="utf-8",
)
b.joinpath("test_module.py").write_text(
textwrap.dedent(
"""\
import pytest
@pytest.fixture()
def obj(request):
return request.node._some123
def test_hello(obj):
assert obj == "world"
"""
),
encoding="utf-8",
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
def test_pytest_pycollect_makeitem(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
class MyFunction(pytest.Function):
pass
def pytest_pycollect_makeitem(collector, name, obj):
if name == "some":
return MyFunction.from_parent(name=name, parent=collector)
"""
)
pytester.makepyfile("def some(): pass")
result = pytester.runpytest("--collect-only")
result.stdout.fnmatch_lines(["*MyFunction*some*"])
def test_issue2369_collect_module_fileext(self, pytester: Pytester) -> None:
"""Ensure we can collect files with weird file extensions as Python
modules (#2369)"""
# Implement a little meta path finder to import files containing
# Python source code whose file extension is ".narf".
pytester.makeconftest(
"""
import sys
import os.path
from importlib.util import spec_from_loader
from importlib.machinery import SourceFileLoader
from _pytest.python import Module
class MetaPathFinder:
def find_spec(self, fullname, path, target=None):
if os.path.exists(fullname + ".narf"):
return spec_from_loader(
fullname,
SourceFileLoader(fullname, fullname + ".narf"),
)
sys.meta_path.append(MetaPathFinder())
def pytest_collect_file(file_path, parent):
if file_path.suffix == ".narf":
return Module.from_parent(path=file_path, parent=parent)
"""
)
pytester.makefile(
".narf",
"""\
def test_something():
assert 1 + 1 == 2""",
)
# Use runpytest_subprocess, since we're futzing with sys.meta_path.
result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_early_ignored_attributes(self, pytester: Pytester) -> None:
"""Builtin attributes should be ignored early on, even if
configuration would otherwise allow them.
This tests a performance optimization, not correctness, really,
although it tests PytestCollectionWarning is not raised, while
it would have been raised otherwise.
"""
pytester.makeini(
"""
[pytest]
python_classes=*
python_functions=*
"""
)
pytester.makepyfile(
"""
class TestEmpty:
pass
test_empty = TestEmpty()
def test_real():
pass
"""
)
items, rec = pytester.inline_genitems()
assert rec.ret == 0
assert len(items) == 1
def test_setup_only_available_in_subdir(pytester: Pytester) -> None:
sub1 = pytester.mkpydir("sub1")
sub2 = pytester.mkpydir("sub2")
sub1.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.path.stem == "test_in_sub1"
def pytest_runtest_call(item):
assert item.path.stem == "test_in_sub1"
def pytest_runtest_teardown(item):
assert item.path.stem == "test_in_sub1"
"""
),
encoding="utf-8",
)
sub2.joinpath("conftest.py").write_text(
textwrap.dedent(
"""\
import pytest
def pytest_runtest_setup(item):
assert item.path.stem == "test_in_sub2"
def pytest_runtest_call(item):
assert item.path.stem == "test_in_sub2"
def pytest_runtest_teardown(item):
assert item.path.stem == "test_in_sub2"
"""
),
encoding="utf-8",
)
sub1.joinpath("test_in_sub1.py").write_text("def test_1(): pass", encoding="utf-8")
sub2.joinpath("test_in_sub2.py").write_text("def test_2(): pass", encoding="utf-8")
result = pytester.runpytest("-v", "-s")
result.assert_outcomes(passed=2)
def test_modulecol_roundtrip(pytester: Pytester) -> None:
modcol = pytester.getmodulecol("pass", withinit=False)
trail = modcol.nodeid
newcol = modcol.session.perform_collect([trail], genitems=0)[0]
assert modcol.name == newcol.name
| TestConftestCustomization |
python | fsspec__filesystem_spec | fsspec/generic.py | {
"start": 5457,
"end": 13482
} | class ____(AsyncFileSystem):
"""Wrapper over all other FS types
<experimental!>
This implementation is a single unified interface to be able to run FS operations
over generic URLs, and dispatch to the specific implementations using the URL
protocol prefix.
Note: instances of this FS are always async, even if you never use it with any async
backend.
"""
protocol = "generic" # there is no real reason to ever use a protocol with this FS
def __init__(self, default_method="default", storage_options=None, **kwargs):
"""
Parameters
----------
default_method: str (optional)
Defines how to configure backend FS instances. Options are:
- "default": instantiate like FSClass(), with no
extra arguments; this is the default instance of that FS, and can be
configured via the config system
- "generic": takes instances from the `_generic_fs` dict in this module,
which you must populate before use. Keys are by protocol
- "options": expects storage_options, a dict mapping protocol to
kwargs to use when constructing the filesystem
- "current": takes the most recently instantiated version of each FS
"""
self.method = default_method
self.st_opts = storage_options
super().__init__(**kwargs)
def _parent(self, path):
fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
return fs.unstrip_protocol(fs._parent(path))
def _strip_protocol(self, path):
# normalization only
fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
return fs.unstrip_protocol(fs._strip_protocol(path))
async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
if fs.async_impl:
out = await fs._find(
path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
)
else:
out = fs.find(
path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
)
result = {}
for k, v in out.items():
v = v.copy() # don't corrupt target FS dircache
name = fs.unstrip_protocol(k)
v["name"] = name
result[name] = v
if detail:
return result
return list(result)
async def _info(self, url, **kwargs):
fs = _resolve_fs(url, self.method)
if fs.async_impl:
out = await fs._info(url, **kwargs)
else:
out = fs.info(url, **kwargs)
out = out.copy() # don't edit originals
out["name"] = fs.unstrip_protocol(out["name"])
return out
async def _ls(
self,
url,
detail=True,
**kwargs,
):
fs = _resolve_fs(url, self.method)
if fs.async_impl:
out = await fs._ls(url, detail=True, **kwargs)
else:
out = fs.ls(url, detail=True, **kwargs)
out = [o.copy() for o in out] # don't edit originals
for o in out:
o["name"] = fs.unstrip_protocol(o["name"])
if detail:
return out
else:
return [o["name"] for o in out]
async def _cat_file(
self,
url,
**kwargs,
):
fs = _resolve_fs(url, self.method)
if fs.async_impl:
return await fs._cat_file(url, **kwargs)
else:
return fs.cat_file(url, **kwargs)
async def _pipe_file(
self,
path,
value,
**kwargs,
):
fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
if fs.async_impl:
return await fs._pipe_file(path, value, **kwargs)
else:
return fs.pipe_file(path, value, **kwargs)
async def _rm(self, url, **kwargs):
urls = url
if isinstance(urls, str):
urls = [urls]
fs = _resolve_fs(urls[0], self.method)
if fs.async_impl:
await fs._rm(urls, **kwargs)
else:
fs.rm(url, **kwargs)
async def _makedirs(self, path, exist_ok=False):
logger.debug("Make dir %s", path)
fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
if fs.async_impl:
await fs._makedirs(path, exist_ok=exist_ok)
else:
fs.makedirs(path, exist_ok=exist_ok)
def rsync(self, source, destination, **kwargs):
"""Sync files between two directory trees
See `func:rsync` for more details.
"""
rsync(source, destination, fs=self, **kwargs)
async def _cp_file(
self,
url,
url2,
blocksize=2**20,
callback=DEFAULT_CALLBACK,
tempdir: str | None = None,
**kwargs,
):
fs = _resolve_fs(url, self.method)
fs2 = _resolve_fs(url2, self.method)
if fs is fs2:
# pure remote
if fs.async_impl:
return await fs._copy(url, url2, **kwargs)
else:
return fs.copy(url, url2, **kwargs)
await copy_file_op(fs, [url], fs2, [url2], tempdir, 1, on_error="raise")
async def _make_many_dirs(self, urls, exist_ok=True):
fs = _resolve_fs(urls[0], self.method)
if fs.async_impl:
coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls]
await _run_coros_in_chunks(coros)
else:
for u in urls:
fs.makedirs(u, exist_ok=exist_ok)
make_many_dirs = sync_wrapper(_make_many_dirs)
async def _copy(
self,
path1: list[str],
path2: list[str],
recursive: bool = False,
on_error: str = "ignore",
maxdepth: int | None = None,
batch_size: int | None = None,
tempdir: str | None = None,
**kwargs,
):
# TODO: special case for one FS being local, which can use get/put
# TODO: special case for one being memFS, which can use cat/pipe
if recursive:
raise NotImplementedError("Please use fsspec.generic.rsync")
path1 = [path1] if isinstance(path1, str) else path1
path2 = [path2] if isinstance(path2, str) else path2
fs = _resolve_fs(path1, self.method)
fs2 = _resolve_fs(path2, self.method)
if fs is fs2:
if fs.async_impl:
return await fs._copy(path1, path2, **kwargs)
else:
return fs.copy(path1, path2, **kwargs)
await copy_file_op(
fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error
)
async def copy_file_op(
fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore"
):
import tempfile
tempdir = tempdir or tempfile.mkdtemp()
try:
coros = [
_copy_file_op(
fs1,
u1,
fs2,
u2,
os.path.join(tempdir, uuid.uuid4().hex),
)
for u1, u2 in zip(url1, url2)
]
out = await _run_coros_in_chunks(
coros, batch_size=batch_size, return_exceptions=True
)
finally:
shutil.rmtree(tempdir)
if on_error == "return":
return out
elif on_error == "raise":
for o in out:
if isinstance(o, Exception):
raise o
async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"):
if fs1.async_impl:
await fs1._get_file(url1, local)
else:
fs1.get_file(url1, local)
if fs2.async_impl:
await fs2._put_file(local, url2)
else:
fs2.put_file(local, url2)
os.unlink(local)
logger.debug("Copy %s -> %s; done", url1, url2)
async def maybe_await(cor):
if inspect.iscoroutine(cor):
return await cor
else:
return cor
| GenericFileSystem |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/executors/test_local_kubernetes_executor.py | {
"start": 1314,
"end": 5385
} | class ____:
def test_supports_pickling(self):
assert not LocalKubernetesExecutor.supports_pickling
def test_supports_sentry(self):
assert not LocalKubernetesExecutor.supports_sentry
def test_is_local_default_value(self):
assert not LocalKubernetesExecutor.is_local
def test_is_production_default_value(self):
assert LocalKubernetesExecutor.is_production
def test_serve_logs_default_value(self):
assert LocalKubernetesExecutor.serve_logs
def test_cli_commands_vended(self):
assert LocalKubernetesExecutor.get_cli_commands()
def test_queued_tasks(self):
local_executor_mock = mock.MagicMock()
k8s_executor_mock = mock.MagicMock()
local_kubernetes_executor = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
local_queued_tasks = {("dag_id", "task_id", "2020-08-30", 1): "queued_command"}
k8s_queued_tasks = {("dag_id_2", "task_id_2", "2020-08-30", 2): "queued_command"}
local_executor_mock.queued_tasks = local_queued_tasks
k8s_executor_mock.queued_tasks = k8s_queued_tasks
expected_queued_tasks = {**local_queued_tasks, **k8s_queued_tasks}
assert local_kubernetes_executor.queued_tasks == expected_queued_tasks
assert len(local_kubernetes_executor.queued_tasks) == 2
def test_running(self):
local_executor_mock = mock.MagicMock()
k8s_executor_mock = mock.MagicMock()
local_kubernetes_executor = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
local_running_tasks = {("dag_id", "task_id", "2020-08-30", 1)}
k8s_running_tasks = {}
local_executor_mock.running = local_running_tasks
k8s_executor_mock.running = k8s_running_tasks
assert local_kubernetes_executor.running == local_running_tasks.union(k8s_running_tasks)
assert len(local_kubernetes_executor.running) == 1
def test_slots_available(self):
local_executor = LocalExecutor()
k8s_executor_mock = mock.MagicMock()
local_kubernetes_executor = LocalKubernetesExecutor(local_executor, k8s_executor_mock)
# Should be equal to Local Executor default parallelism.
assert local_kubernetes_executor.slots_available == conf.getint("core", "PARALLELISM")
def test_kubernetes_executor_knows_its_queue(self):
local_executor_mock = mock.MagicMock()
k8s_executor_mock = mock.MagicMock()
LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
assert k8s_executor_mock.kubernetes_queue == conf.get("local_kubernetes_executor", "kubernetes_queue")
def test_log_is_fetched_from_k8s_executor_only_for_k8s_queue(self):
local_executor_mock = mock.MagicMock()
k8s_executor_mock = mock.MagicMock()
LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
local_k8s_exec = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
simple_task_instance = mock.MagicMock()
simple_task_instance.queue = conf.get("local_kubernetes_executor", "kubernetes_queue")
local_k8s_exec.get_task_log(ti=simple_task_instance, try_number=3)
k8s_executor_mock.get_task_log.assert_called_once_with(ti=simple_task_instance, try_number=3)
k8s_executor_mock.reset_mock()
simple_task_instance.queue = "test-queue"
messages, logs = local_k8s_exec.get_task_log(ti=simple_task_instance, try_number=3)
k8s_executor_mock.get_task_log.assert_not_called()
assert logs == []
assert messages == []
def test_send_callback(self):
local_executor_mock = mock.MagicMock()
k8s_executor_mock = mock.MagicMock()
local_k8s_exec = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
local_k8s_exec.callback_sink = mock.MagicMock()
callback = CallbackRequest(full_filepath="fake")
local_k8s_exec.send_callback(callback)
local_k8s_exec.callback_sink.send.assert_called_once_with(callback)
| TestLocalKubernetesExecutor |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_set_column08.py | {
"start": 315,
"end": 1407
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("set_column08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write("A1", "Foo", bold)
worksheet.write("B1", "Bar", italic)
worksheet.write_column("A2", data[0])
worksheet.write_column("B2", data[1])
worksheet.write_column("C2", data[2])
worksheet.set_row(12, None, None, {"hidden": True})
worksheet.set_column("F:F", None, None, {"hidden": True})
worksheet.insert_image("E12", self.image_dir + "logo.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pennersr__django-allauth | allauth/headless/tokens/strategies/jwt/strategy.py | {
"start": 444,
"end": 3332
} | class ____(AbstractTokenStrategy):
def get_session_token(self, request: HttpRequest) -> Optional[str]:
ret = super().get_session_token(request)
if ret:
return ret
payload = self._get_access_token(request)
if not payload:
return None
return internal.session_key_from_sid(payload["sid"])
def _get_access_token(self, request: HttpRequest):
access_token = get_authorization_credential(
request, app_settings.JWT_AUTHORIZATION_HEADER_SCHEME
)
if access_token is None:
return None
user_payload = internal.validate_access_token(access_token)
if user_payload is None:
return None
return user_payload[1]
def create_session_token(self, request: HttpRequest) -> str:
assert request.user.is_authenticated # nosec
if not request.session.session_key:
request.session.save()
key = request.session.session_key
# We did save
assert isinstance(key, str) # nosec
return key
def create_access_token_payload(
self, request: HttpRequest
) -> Optional[Dict[str, Any]]:
ret = super().create_access_token_payload(request)
if ret is not None:
ret["refresh_token"] = internal.create_refresh_token(
request.user, request.session
)
return ret
def lookup_session(self, session_token: str) -> Optional[SessionBase]:
return sessionkit.lookup_session(session_token)
def create_access_token(self, request: HttpRequest) -> Optional[str]:
claims = self.get_claims(request.user)
return internal.create_access_token(request.user, request.session, claims)
def get_claims(self, user) -> Dict[str, Any]:
"""
Returns additional claims to be included in the access token. Note that
the following claims are reserved and will be automatically set by allauth regardless of what you return:
- ``iat``
- ``exp``
- ``sid``
- ``jti``
- ``token_use``
- ``sub``
"""
return {}
def refresh_token(self, refresh_token: str) -> Optional[Tuple[str, str]]:
user_session_payload = internal.validate_refresh_token(refresh_token)
if user_session_payload is None:
return None
user, session, payload = user_session_payload
access_token = internal.create_access_token(
user, session, self.get_claims(user)
)
if app_settings.JWT_ROTATE_REFRESH_TOKEN:
internal.invalidate_refresh_token(session, payload)
next_refresh_token = internal.create_refresh_token(user, session)
else:
next_refresh_token = refresh_token
session.save()
return access_token, next_refresh_token
| JWTTokenStrategy |
python | pennersr__django-allauth | allauth/headless/mfa/response.py | {
"start": 3049,
"end": 3227
} | class ____(APIResponse):
def __init__(self, request, request_options):
super().__init__(request, data={"request_options": request_options})
| WebAuthnRequestOptionsResponse |
python | ray-project__ray | rllib/policy/tf_policy.py | {
"start": 1386,
"end": 48691
} | class ____(Policy):
"""An agent policy and loss implemented in TensorFlow.
Do not sub-class this class directly (neither should you sub-class
DynamicTFPolicy), but rather use
rllib.policy.tf_policy_template.build_tf_policy
to generate your custom tf (graph-mode or eager) Policy classes.
Extending this class enables RLlib to perform TensorFlow specific
optimizations on the policy, e.g., parallelization across gpus or
fusing multiple graphs together in the multi-agent setting.
Input tensors are typically shaped like [BATCH_SIZE, ...].
.. testcode::
:skipif: True
from ray.rllib.policy import TFPolicy
class TFPolicySubclass(TFPolicy):
...
sess, obs_input, sampled_action, loss, loss_inputs = ...
policy = TFPolicySubclass(
sess, obs_input, sampled_action, loss, loss_inputs)
print(policy.compute_actions([1, 0, 2]))
print(policy.postprocess_trajectory(SampleBatch({...})))
.. testoutput::
(array([0, 1, 1]), [], {})
SampleBatch({"action": ..., "advantages": ..., ...})
"""
# In order to create tf_policies from checkpoints, this class needs to separate
# variables into their own scopes. Normally, we would do this in the model
# catalog, but since Policy.from_state() can be called anywhere, we need to
# keep track of it here to not break the from_state API.
tf_var_creation_scope_counter = 0
@staticmethod
def next_tf_var_scope_name():
# Tracks multiple instances that are spawned from this policy via .from_state()
TFPolicy.tf_var_creation_scope_counter += 1
return f"var_scope_{TFPolicy.tf_var_creation_scope_counter}"
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
sess: "tf1.Session",
obs_input: TensorType,
sampled_action: TensorType,
loss: Union[TensorType, List[TensorType]],
loss_inputs: List[Tuple[str, TensorType]],
model: Optional[ModelV2] = None,
sampled_action_logp: Optional[TensorType] = None,
action_input: Optional[TensorType] = None,
log_likelihood: Optional[TensorType] = None,
dist_inputs: Optional[TensorType] = None,
dist_class: Optional[type] = None,
state_inputs: Optional[List[TensorType]] = None,
state_outputs: Optional[List[TensorType]] = None,
prev_action_input: Optional[TensorType] = None,
prev_reward_input: Optional[TensorType] = None,
seq_lens: Optional[TensorType] = None,
max_seq_len: int = 20,
batch_divisibility_req: int = 1,
update_ops: List[TensorType] = None,
explore: Optional[TensorType] = None,
timestep: Optional[TensorType] = None,
):
"""Initializes a Policy object.
Args:
observation_space: Observation space of the policy.
action_space: Action space of the policy.
config: Policy-specific configuration data.
sess: The TensorFlow session to use.
obs_input: Input placeholder for observations, of shape
[BATCH_SIZE, obs...].
sampled_action: Tensor for sampling an action, of shape
[BATCH_SIZE, action...]
loss: Scalar policy loss output tensor or a list thereof
(in case there is more than one loss).
loss_inputs: A (name, placeholder) tuple for each loss input
argument. Each placeholder name must
correspond to a SampleBatch column key returned by
postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
These keys will be read from postprocessed sample batches and
fed into the specified placeholders during loss computation.
model: The optional ModelV2 to use for calculating actions and
losses. If not None, TFPolicy will provide functionality for
getting variables, calling the model's custom loss (if
provided), and importing weights into the model.
sampled_action_logp: log probability of the sampled action.
action_input: Input placeholder for actions for
logp/log-likelihood calculations.
log_likelihood: Tensor to calculate the log_likelihood (given
action_input and obs_input).
dist_class: An optional ActionDistribution class to use for
generating a dist object from distribution inputs.
dist_inputs: Tensor to calculate the distribution
inputs/parameters.
state_inputs: List of RNN state input Tensors.
state_outputs: List of RNN state output Tensors.
prev_action_input: placeholder for previous actions.
prev_reward_input: placeholder for previous rewards.
seq_lens: Placeholder for RNN sequence lengths, of shape
[NUM_SEQUENCES].
Note that NUM_SEQUENCES << BATCH_SIZE. See
policy/rnn_sequencing.py for more information.
max_seq_len: Max sequence length for LSTM training.
batch_divisibility_req: pad all agent experiences batches to
multiples of this value. This only has an effect if not using
a LSTM model.
update_ops: override the batchnorm update ops
to run when applying gradients. Otherwise we run all update
ops found in the current variable scope.
explore: Placeholder for `explore` parameter into call to
Exploration.get_exploration_action. Explicitly set this to
False for not creating any Exploration component.
timestep: Placeholder for the global sampling timestep.
"""
self.framework = "tf"
super().__init__(observation_space, action_space, config)
# Get devices to build the graph on.
num_gpus = self._get_num_gpus_for_policy()
gpu_ids = get_gpu_devices()
logger.info(f"Found {len(gpu_ids)} visible cuda devices.")
# Place on one or more CPU(s) when either:
# - Fake GPU mode.
# - num_gpus=0 (either set by user or we are in local_mode=True).
# - no GPUs available.
if config["_fake_gpus"] or num_gpus == 0 or not gpu_ids:
self.devices = ["/cpu:0" for _ in range(int(math.ceil(num_gpus)) or 1)]
# Place on one or more actual GPU(s), when:
# - num_gpus > 0 (set by user) AND
# - local_mode=False AND
# - actual GPUs available AND
# - non-fake GPU mode.
else:
# We are a remote worker (WORKER_MODE=1):
# GPUs should be assigned to us by ray.
if ray._private.worker._mode() == ray._private.worker.WORKER_MODE:
gpu_ids = ray.get_gpu_ids()
if len(gpu_ids) < num_gpus:
raise ValueError(
"TFPolicy was not able to find enough GPU IDs! Found "
f"{gpu_ids}, but num_gpus={num_gpus}."
)
self.devices = [f"/gpu:{i}" for i, _ in enumerate(gpu_ids) if i < num_gpus]
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_compute_actions = False
self.view_requirements[SampleBatch.INFOS].used_for_training = False
# Optionally add `infos` to the output dataset
if self.config["output_config"].get("store_infos", False):
self.view_requirements[SampleBatch.INFOS].used_for_training = True
assert model is None or isinstance(model, (ModelV2, tf.keras.Model)), (
"Model classes for TFPolicy other than `ModelV2|tf.keras.Model` "
"not allowed! You passed in {}.".format(model)
)
self.model = model
# Auto-update model's inference view requirements, if recurrent.
if self.model is not None:
self._update_model_view_requirements_from_init_state()
# If `explore` is explicitly set to False, don't create an exploration
# component.
self.exploration = self._create_exploration() if explore is not False else None
self._sess = sess
self._obs_input = obs_input
self._prev_action_input = prev_action_input
self._prev_reward_input = prev_reward_input
self._sampled_action = sampled_action
self._is_training = self._get_is_training_placeholder()
self._is_exploring = (
explore
if explore is not None
else tf1.placeholder_with_default(True, (), name="is_exploring")
)
self._sampled_action_logp = sampled_action_logp
self._sampled_action_prob = (
tf.math.exp(self._sampled_action_logp)
if self._sampled_action_logp is not None
else None
)
self._action_input = action_input # For logp calculations.
self._dist_inputs = dist_inputs
self.dist_class = dist_class
self._cached_extra_action_out = None
self._state_inputs = state_inputs or []
self._state_outputs = state_outputs or []
self._seq_lens = seq_lens
self._max_seq_len = max_seq_len
if self._state_inputs and self._seq_lens is None:
raise ValueError(
"seq_lens tensor must be given if state inputs are defined"
)
self._batch_divisibility_req = batch_divisibility_req
self._update_ops = update_ops
self._apply_op = None
self._stats_fetches = {}
self._timestep = (
timestep
if timestep is not None
else tf1.placeholder_with_default(
tf.zeros((), dtype=tf.int64), (), name="timestep"
)
)
self._optimizers: List[LocalOptimizer] = []
# Backward compatibility and for some code shared with tf-eager Policy.
self._optimizer = None
self._grads_and_vars: Union[ModelGradients, List[ModelGradients]] = []
self._grads: Union[ModelGradients, List[ModelGradients]] = []
# Policy tf-variables (weights), whose values to get/set via
# get_weights/set_weights.
self._variables = None
# Local optimizer(s)' tf-variables (e.g. state vars for Adam).
# Will be stored alongside `self._variables` when checkpointing.
self._optimizer_variables: Optional[TensorFlowVariables] = None
# The loss tf-op(s). Number of losses must match number of optimizers.
self._losses = []
# Backward compatibility (in case custom child TFPolicies access this
# property).
self._loss = None
# A batch dict passed into loss function as input.
self._loss_input_dict = {}
losses = force_list(loss)
if len(losses) > 0:
self._initialize_loss(losses, loss_inputs)
# The log-likelihood calculator op.
self._log_likelihood = log_likelihood
if (
self._log_likelihood is None
and self._dist_inputs is not None
and self.dist_class is not None
):
self._log_likelihood = self.dist_class(self._dist_inputs, self.model).logp(
self._action_input
)
    @override(Policy)
    def compute_actions_from_input_dict(
        self,
        input_dict: Union[SampleBatch, Dict[str, TensorType]],
        explore: bool = None,
        timestep: Optional[int] = None,
        episode=None,
        **kwargs,
    ) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
        """Computes actions for the given input dict in one tf session run.

        Args:
            input_dict: SampleBatch (or plain dict) containing at least
                `SampleBatch.OBS`.
            explore: Whether to apply exploration; defaults to
                `self.config["explore"]` if None.
            timestep: Global timestep to feed; defaults to
                `self.global_timestep` if None.
            episode: Unused here; kept for API compatibility.

        Returns:
            The resolved fetches of `_build_compute_actions()`:
            (actions, state-outs, extra-fetches dict).
        """
        explore = explore if explore is not None else self.config["explore"]
        timestep = timestep if timestep is not None else self.global_timestep
        # Switch off is_training flag in our batch.
        if isinstance(input_dict, SampleBatch):
            input_dict.set_training(False)
        else:
            # Deprecated dict input.
            input_dict["is_training"] = False
        builder = _TFRunBuilder(self.get_session(), "compute_actions_from_input_dict")
        obs_batch = input_dict[SampleBatch.OBS]
        to_fetch = self._build_compute_actions(
            builder, input_dict=input_dict, explore=explore, timestep=timestep
        )
        # Execute session run to get action (and other fetches).
        fetched = builder.get(to_fetch)
        # Update our global timestep by the batch size.
        self.global_timestep += (
            len(obs_batch)
            if isinstance(obs_batch, list)
            else len(input_dict)
            if isinstance(input_dict, SampleBatch)
            else obs_batch.shape[0]
        )
        return fetched
    @override(Policy)
    def compute_actions(
        self,
        obs_batch: Union[List[TensorType], TensorType],
        state_batches: Optional[List[TensorType]] = None,
        prev_action_batch: Union[List[TensorType], TensorType] = None,
        prev_reward_batch: Union[List[TensorType], TensorType] = None,
        info_batch: Optional[Dict[str, list]] = None,
        episodes=None,
        explore: Optional[bool] = None,
        timestep: Optional[int] = None,
        **kwargs,
    ):
        """Computes actions from separate obs/state/prev-a/r batches.

        Assembles an input dict from the individual args, runs the graph once
        via `_build_compute_actions()`, and advances `self.global_timestep`
        by the batch size. `info_batch` and `episodes` are unused here.
        """
        explore = explore if explore is not None else self.config["explore"]
        timestep = timestep if timestep is not None else self.global_timestep
        builder = _TFRunBuilder(self.get_session(), "compute_actions")
        input_dict = {SampleBatch.OBS: obs_batch, "is_training": False}
        if state_batches:
            for i, s in enumerate(state_batches):
                input_dict[f"state_in_{i}"] = s
        if prev_action_batch is not None:
            input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
        if prev_reward_batch is not None:
            input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
        to_fetch = self._build_compute_actions(
            builder, input_dict=input_dict, explore=explore, timestep=timestep
        )
        # Execute session run to get action (and other fetches).
        fetched = builder.get(to_fetch)
        # Update our global timestep by the batch size.
        self.global_timestep += (
            len(obs_batch)
            if isinstance(obs_batch, list)
            else tree.flatten(obs_batch)[0].shape[0]
        )
        return fetched
    @override(Policy)
    def compute_log_likelihoods(
        self,
        actions: Union[List[TensorType], TensorType],
        obs_batch: Union[List[TensorType], TensorType],
        state_batches: Optional[List[TensorType]] = None,
        prev_action_batch: Optional[Union[List[TensorType], TensorType]] = None,
        prev_reward_batch: Optional[Union[List[TensorType], TensorType]] = None,
        actions_normalized: bool = True,
        **kwargs,
    ) -> TensorType:
        """Computes the log-likelihood of `actions`, given observations.

        Feeds actions, obs, RNN states and prev-a/r into the pre-built
        `self._log_likelihood` op and fetches its value in one session run.

        Raises:
            ValueError: If no `self._log_likelihood` op was built, or if the
                number of `state_batches` does not match `self._state_inputs`.
        """
        if self._log_likelihood is None:
            raise ValueError(
                "Cannot compute log-prob/likelihood w/o a self._log_likelihood op!"
            )
        # Exploration hook before each forward pass.
        self.exploration.before_compute_actions(
            explore=False, tf_sess=self.get_session()
        )
        builder = _TFRunBuilder(self.get_session(), "compute_log_likelihoods")
        # Normalize actions if necessary.
        if actions_normalized is False and self.config["normalize_actions"]:
            actions = normalize_action(actions, self.action_space_struct)
        # Feed actions (for which we want logp values) into graph.
        builder.add_feed_dict({self._action_input: actions})
        # Feed observations.
        builder.add_feed_dict({self._obs_input: obs_batch})
        # Internal states.
        state_batches = state_batches or []
        if len(self._state_inputs) != len(state_batches):
            raise ValueError(
                "Must pass in RNN state batches for placeholders {}, got {}".format(
                    self._state_inputs, state_batches
                )
            )
        builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
        if state_batches:
            # All sequences are treated as length-1 here.
            builder.add_feed_dict({self._seq_lens: np.ones(len(obs_batch))})
        # Prev-a and r.
        if self._prev_action_input is not None and prev_action_batch is not None:
            builder.add_feed_dict({self._prev_action_input: prev_action_batch})
        if self._prev_reward_input is not None and prev_reward_batch is not None:
            builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
        # Fetch the log_likelihoods output and return.
        fetches = builder.add_fetches([self._log_likelihood])
        return builder.get(fetches)[0]
    @override(Policy)
    def learn_on_batch(self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
        """Runs one combined gradient-computation + apply step on the batch.

        Also triggers the `on_learn_on_batch` callback and augments the
        returned stats dict with custom metrics and grad-update counters.
        Requires the loss to have been initialized.
        """
        assert self.loss_initialized()
        # Switch on is_training flag in our batch.
        postprocessed_batch.set_training(True)
        builder = _TFRunBuilder(self.get_session(), "learn_on_batch")
        # Callback handling.
        learn_stats = {}
        self.callbacks.on_learn_on_batch(
            policy=self, train_batch=postprocessed_batch, result=learn_stats
        )
        fetches = self._build_learn_on_batch(builder, postprocessed_batch)
        stats = builder.get(fetches)
        self.num_grad_updates += 1
        stats.update(
            {
                "custom_metrics": learn_stats,
                NUM_AGENT_STEPS_TRAINED: postprocessed_batch.count,
                NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates,
                # -1, b/c we have to measure this diff before we do the update above.
                DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: (
                    self.num_grad_updates
                    - 1
                    - (postprocessed_batch.num_grad_updates or 0)
                ),
            }
        )
        return stats
    @override(Policy)
    def compute_gradients(
        self, postprocessed_batch: SampleBatch
    ) -> Tuple[ModelGradients, Dict[str, TensorType]]:
        """Computes (but does not apply) gradients for the given batch.

        Requires an initialized loss. Returns the fetched gradient values and
        the grad-info/stats dict built by `_build_compute_gradients()`.
        """
        assert self.loss_initialized()
        # Switch on is_training flag in our batch.
        postprocessed_batch.set_training(True)
        builder = _TFRunBuilder(self.get_session(), "compute_gradients")
        fetches = self._build_compute_gradients(builder, postprocessed_batch)
        return builder.get(fetches)
@staticmethod
def _tf1_from_state_helper(state: PolicyState) -> "Policy":
"""Recovers a TFPolicy from a state object.
The `state` of an instantiated TFPolicy can be retrieved by calling its
`get_state` method. Is meant to be used by the Policy.from_state() method to
aid with tracking variable creation.
Args:
state: The state to recover a new TFPolicy instance from.
Returns:
A new TFPolicy instance.
"""
serialized_pol_spec: Optional[dict] = state.get("policy_spec")
if serialized_pol_spec is None:
raise ValueError(
"No `policy_spec` key was found in given `state`! "
"Cannot create new Policy."
)
pol_spec = PolicySpec.deserialize(serialized_pol_spec)
with tf1.variable_scope(TFPolicy.next_tf_var_scope_name()):
# Create the new policy.
new_policy = pol_spec.policy_class(
# Note(jungong) : we are intentionally not using keyward arguments here
# because some policies name the observation space parameter obs_space,
# and some others name it observation_space.
pol_spec.observation_space,
pol_spec.action_space,
pol_spec.config,
)
# Set the new policy's state (weights, optimizer vars, exploration state,
# etc..).
new_policy.set_state(state)
# Return the new policy.
return new_policy
@override(Policy)
def apply_gradients(self, gradients: ModelGradients) -> None:
assert self.loss_initialized()
builder = _TFRunBuilder(self.get_session(), "apply_gradients")
fetches = self._build_apply_gradients(builder, gradients)
builder.get(fetches)
@override(Policy)
def get_weights(self) -> Union[Dict[str, TensorType], List[TensorType]]:
return self._variables.get_weights()
@override(Policy)
def set_weights(self, weights) -> None:
return self._variables.set_weights(weights)
@override(Policy)
def get_exploration_state(self) -> Dict[str, TensorType]:
return self.exploration.get_state(sess=self.get_session())
    @Deprecated(new="get_exploration_state", error=True)
    def get_exploration_info(self) -> Dict[str, TensorType]:
        # Deprecated alias; `error=True` makes the decorator raise on use.
        return self.get_exploration_state()
@override(Policy)
def is_recurrent(self) -> bool:
return len(self._state_inputs) > 0
@override(Policy)
def num_state_tensors(self) -> int:
return len(self._state_inputs)
@override(Policy)
def get_state(self) -> PolicyState:
# For tf Policies, return Policy weights and optimizer var values.
state = super().get_state()
if len(self._optimizer_variables.variables) > 0:
state["_optimizer_variables"] = self.get_session().run(
self._optimizer_variables.variables
)
# Add exploration state.
state["_exploration_state"] = self.exploration.get_state(self.get_session())
return state
    @override(Policy)
    def set_state(self, state: PolicyState) -> None:
        """Restores a policy state previously returned by `get_state()`.

        Restores optimizer variables and exploration state first, then the
        global timestep, and finally (via `super().set_state()`) the NN
        weights and connectors.
        """
        # Set optimizer vars first.
        optimizer_vars = state.get("_optimizer_variables", None)
        if optimizer_vars is not None:
            # NOTE(review): assumes the loss (and thus
            # `self._optimizer_variables`) is initialized whenever optimizer
            # vars are present in `state` -- confirm with callers.
            self._optimizer_variables.set_weights(optimizer_vars)
        # Set exploration's state.
        if hasattr(self, "exploration") and "_exploration_state" in state:
            self.exploration.set_state(
                state=state["_exploration_state"], sess=self.get_session()
            )
        # Restore global timestep.
        self.global_timestep = state["global_timestep"]
        # Then the Policy's (NN) weights and connectors.
        super().set_state(state)
    @override(Policy)
    def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
        """Export tensorflow graph to export_dir for serving.

        Args:
            export_dir: Directory/path to save the model to.
            onnx: If set, export to ONNX via `tf2onnx` instead of saving the
                underlying keras base model in tf SavedModel format.
        """
        if onnx:
            try:
                import tf2onnx
            except ImportError as e:
                raise RuntimeError(
                    "Converting a TensorFlow model to ONNX requires "
                    "`tf2onnx` to be installed. Install with "
                    "`pip install tf2onnx`."
                ) from e
            with self.get_session().graph.as_default():
                signature_def_map = self._build_signature_def()
                sd = signature_def_map[
                    tf1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY  # noqa: E501
                ]
                inputs = [v.name for k, v in sd.inputs.items()]
                outputs = [v.name for k, v in sd.outputs.items()]
                from tf2onnx import tf_loader
                # Freeze the live session's graph so it can be converted.
                frozen_graph_def = tf_loader.freeze_session(
                    self.get_session(), input_names=inputs, output_names=outputs
                )
            # Convert the frozen graph in a separate, clean session/graph.
            with tf1.Session(graph=tf.Graph()) as session:
                tf.import_graph_def(frozen_graph_def, name="")
                g = tf2onnx.tfonnx.process_tf_graph(
                    session.graph,
                    input_names=inputs,
                    output_names=outputs,
                    inputs_as_nchw=inputs,
                )
                model_proto = g.make_model("onnx_model")
                tf2onnx.utils.save_onnx_model(
                    export_dir, "model", feed_dict={}, model_proto=model_proto
                )
        # Save the tf.keras.Model (architecture and weights, so it can be retrieved
        # w/o access to the original (custom) Model or Policy code).
        elif (
            hasattr(self, "model")
            and hasattr(self.model, "base_model")
            and isinstance(self.model.base_model, tf.keras.Model)
        ):
            with self.get_session().graph.as_default():
                try:
                    self.model.base_model.save(filepath=export_dir, save_format="tf")
                except Exception:
                    logger.warning(ERR_MSG_TF_POLICY_CANNOT_SAVE_KERAS_MODEL)
        else:
            logger.warning(ERR_MSG_TF_POLICY_CANNOT_SAVE_KERAS_MODEL)
    @override(Policy)
    def import_model_from_h5(self, import_file: str) -> None:
        """Imports weights into tf model.

        Args:
            import_file: Path of the h5 file to read the weights from.

        Raises:
            NotImplementedError: If this policy has no `self.model`.
        """
        if self.model is None:
            raise NotImplementedError("No `self.model` to import into!")
        # Make sure the session is the right one (see issue #7046).
        with self.get_session().graph.as_default():
            with self.get_session().as_default():
                return self.model.import_from_h5(import_file)
@override(Policy)
def get_session(self) -> Optional["tf1.Session"]:
"""Returns a reference to the TF session for this policy."""
return self._sess
def variables(self):
"""Return the list of all savable variables for this policy."""
if self.model is None:
raise NotImplementedError("No `self.model` to get variables for!")
elif isinstance(self.model, tf.keras.Model):
return self.model.variables
else:
return self.model.variables()
def get_placeholder(self, name) -> "tf1.placeholder":
"""Returns the given action or loss input placeholder by name.
If the loss has not been initialized and a loss input placeholder is
requested, an error is raised.
Args:
name: The name of the placeholder to return. One of
SampleBatch.CUR_OBS|PREV_ACTION/REWARD or a valid key from
`self._loss_input_dict`.
Returns:
tf1.placeholder: The placeholder under the given str key.
"""
if name == SampleBatch.CUR_OBS:
return self._obs_input
elif name == SampleBatch.PREV_ACTIONS:
return self._prev_action_input
elif name == SampleBatch.PREV_REWARDS:
return self._prev_reward_input
assert self._loss_input_dict, (
"You need to populate `self._loss_input_dict` before "
"`get_placeholder()` can be called"
)
return self._loss_input_dict[name]
def loss_initialized(self) -> bool:
"""Returns whether the loss term(s) have been initialized."""
return len(self._losses) > 0
    def _initialize_loss(
        self, losses: List[TensorType], loss_inputs: List[Tuple[str, TensorType]]
    ) -> None:
        """Initializes the loss op from given loss tensor and placeholders.

        Also creates the optimizer(s), gradient ops, apply op, and runs the
        global variable initializer, so must be called exactly once per graph.

        Args:
            loss (List[TensorType]): The list of loss ops returned by some
                loss function.
            loss_inputs (List[Tuple[str, TensorType]]): The list of Tuples:
                (name, tf1.placeholders) needed for calculating the loss.
        """
        self._loss_input_dict = dict(loss_inputs)
        # Same inputs, minus RNN state/seq-len entries (used for padding
        # batches to same-size sequences).
        self._loss_input_dict_no_rnn = {
            k: v
            for k, v in self._loss_input_dict.items()
            if (v not in self._state_inputs and v != self._seq_lens)
        }
        for i, ph in enumerate(self._state_inputs):
            self._loss_input_dict["state_in_{}".format(i)] = ph
        # Give the Model a chance to add custom loss terms and metrics.
        if self.model and not isinstance(self.model, tf.keras.Model):
            self._losses = force_list(
                self.model.custom_loss(losses, self._loss_input_dict)
            )
            self._stats_fetches.update({"model": self.model.metrics()})
        else:
            self._losses = losses
        # Backward compatibility.
        self._loss = self._losses[0] if self._losses is not None else None
        if not self._optimizers:
            self._optimizers = force_list(self.optimizer())
            # Backward compatibility.
            self._optimizer = self._optimizers[0] if self._optimizers else None
        # Supporting more than one loss/optimizer.
        if self.config["_tf_policy_handles_more_than_one_loss"]:
            self._grads_and_vars = []
            self._grads = []
            for group in self.gradients(self._optimizers, self._losses):
                # Drop entries whose gradient is None (non-trainable vars).
                g_and_v = [(g, v) for (g, v) in group if g is not None]
                self._grads_and_vars.append(g_and_v)
                self._grads.append([g for (g, _) in g_and_v])
        # Only one optimizer and and loss term.
        else:
            self._grads_and_vars = [
                (g, v)
                for (g, v) in self.gradients(self._optimizer, self._loss)
                if g is not None
            ]
            self._grads = [g for (g, _) in self._grads_and_vars]
        if self.model:
            self._variables = TensorFlowVariables(
                [], self.get_session(), self.variables()
            )
        # Gather update ops for any batch norm layers.
        if len(self.devices) <= 1:
            if not self._update_ops:
                self._update_ops = tf1.get_collection(
                    tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name
                )
            if self._update_ops:
                logger.info(
                    "Update ops to run on apply gradient: {}".format(self._update_ops)
                )
            # Make gradient application depend on e.g. batch-norm updates.
            with tf1.control_dependencies(self._update_ops):
                self._apply_op = self.build_apply_op(
                    optimizer=self._optimizers
                    if self.config["_tf_policy_handles_more_than_one_loss"]
                    else self._optimizer,
                    grads_and_vars=self._grads_and_vars,
                )
        if log_once("loss_used"):
            logger.debug(
                "These tensors were used in the loss functions:"
                f"\n{summarize(self._loss_input_dict)}\n"
            )
        # Initialize all variables only after the full graph (incl. optimizer
        # slot variables) has been built.
        self.get_session().run(tf1.global_variables_initializer())
        # TensorFlowVariables holding a flat list of all our optimizers'
        # variables.
        self._optimizer_variables = TensorFlowVariables(
            [v for o in self._optimizers for v in o.variables()], self.get_session()
        )
def copy(self, existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> "TFPolicy":
"""Creates a copy of self using existing input placeholders.
Optional: Only required to work with the multi-GPU optimizer.
Args:
existing_inputs (List[Tuple[str, tf1.placeholder]]): Dict mapping
names (str) to tf1.placeholders to re-use (share) with the
returned copy of self.
Returns:
TFPolicy: A copy of self.
"""
raise NotImplementedError
def extra_compute_action_feed_dict(self) -> Dict[TensorType, TensorType]:
"""Extra dict to pass to the compute actions session run.
Returns:
Dict[TensorType, TensorType]: A feed dict to be added to the
feed_dict passed to the compute_actions session.run() call.
"""
return {}
def extra_compute_action_fetches(self) -> Dict[str, TensorType]:
# Cache graph fetches for action computation for better
# performance.
# This function is called every time the static graph is run
# to compute actions.
if not self._cached_extra_action_out:
self._cached_extra_action_out = self.extra_action_out_fn()
return self._cached_extra_action_out
def extra_action_out_fn(self) -> Dict[str, TensorType]:
"""Extra values to fetch and return from compute_actions().
By default we return action probability/log-likelihood info
and action distribution inputs (if present).
Returns:
Dict[str, TensorType]: An extra fetch-dict to be passed to and
returned from the compute_actions() call.
"""
extra_fetches = {}
# Action-logp and action-prob.
if self._sampled_action_logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob
extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp
# Action-dist inputs.
if self._dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs
return extra_fetches
def extra_compute_grad_feed_dict(self) -> Dict[TensorType, TensorType]:
"""Extra dict to pass to the compute gradients session run.
Returns:
Dict[TensorType, TensorType]: Extra feed_dict to be passed to the
compute_gradients Session.run() call.
"""
return {} # e.g, kl_coeff
def extra_compute_grad_fetches(self) -> Dict[str, any]:
"""Extra values to fetch and return from compute_gradients().
Returns:
Dict[str, any]: Extra fetch dict to be added to the fetch dict
of the compute_gradients Session.run() call.
"""
return {LEARNER_STATS_KEY: {}} # e.g, stats, td error, etc.
def optimizer(self) -> "tf.keras.optimizers.Optimizer":
"""TF optimizer to use for policy optimization.
Returns:
tf.keras.optimizers.Optimizer: The local optimizer to use for this
Policy's Model.
"""
if hasattr(self, "config") and "lr" in self.config:
return tf1.train.AdamOptimizer(learning_rate=self.config["lr"])
else:
return tf1.train.AdamOptimizer()
def gradients(
self,
optimizer: Union[LocalOptimizer, List[LocalOptimizer]],
loss: Union[TensorType, List[TensorType]],
) -> Union[List[ModelGradients], List[List[ModelGradients]]]:
"""Override this for a custom gradient computation behavior.
Args:
optimizer (Union[LocalOptimizer, List[LocalOptimizer]]): A single
LocalOptimizer of a list thereof to use for gradient
calculations. If more than one optimizer given, the number of
optimizers must match the number of losses provided.
loss (Union[TensorType, List[TensorType]]): A single loss term
or a list thereof to use for gradient calculations.
If more than one loss given, the number of loss terms must
match the number of optimizers provided.
Returns:
Union[List[ModelGradients], List[List[ModelGradients]]]: List of
ModelGradients (grads and vars OR just grads) OR List of List
of ModelGradients in case we have more than one
optimizer/loss.
"""
optimizers = force_list(optimizer)
losses = force_list(loss)
# We have more than one optimizers and loss terms.
if self.config["_tf_policy_handles_more_than_one_loss"]:
grads = []
for optim, loss_ in zip(optimizers, losses):
grads.append(optim.compute_gradients(loss_))
# We have only one optimizer and one loss term.
else:
return optimizers[0].compute_gradients(losses[0])
    def build_apply_op(
        self,
        optimizer: Union[LocalOptimizer, List[LocalOptimizer]],
        grads_and_vars: Union[ModelGradients, List[ModelGradients]],
    ) -> "tf.Operation":
        """Override this for a custom gradient apply computation behavior.

        Args:
            optimizer (Union[LocalOptimizer, List[LocalOptimizer]]): The local
                tf optimizer to use for applying the grads and vars.
            grads_and_vars (Union[ModelGradients, List[ModelGradients]]): List
                of tuples with grad values and the grad-value's corresponding
                tf.variable in it.

        Returns:
            tf.Operation: The tf op that applies all computed gradients
                (`grads_and_vars`) to the model(s) via the given optimizer(s).
        """
        optimizers = force_list(optimizer)
        # We have more than one optimizers and loss terms.
        if self.config["_tf_policy_handles_more_than_one_loss"]:
            ops = []
            for i, optim in enumerate(optimizers):
                # Specify global_step (e.g. for TD3 which needs to count the
                # num updates that have happened).
                # Note: `grads_and_vars[i]` must line up with `optimizers[i]`.
                ops.append(
                    optim.apply_gradients(
                        grads_and_vars[i],
                        global_step=tf1.train.get_or_create_global_step(),
                    )
                )
            return tf.group(ops)
        # We have only one optimizer and one loss term.
        else:
            return optimizers[0].apply_gradients(
                grads_and_vars, global_step=tf1.train.get_or_create_global_step()
            )
def _get_is_training_placeholder(self):
"""Get the placeholder for _is_training, i.e., for batch norm layers.
This can be called safely before __init__ has run.
"""
if not hasattr(self, "_is_training"):
self._is_training = tf1.placeholder_with_default(
False, (), name="is_training"
)
return self._is_training
def _debug_vars(self):
if log_once("grad_vars"):
if self.config["_tf_policy_handles_more_than_one_loss"]:
for group in self._grads_and_vars:
for _, v in group:
logger.info("Optimizing variable {}".format(v))
else:
for _, v in self._grads_and_vars:
logger.info("Optimizing variable {}".format(v))
def _extra_input_signature_def(self):
"""Extra input signatures to add when exporting tf model.
Inferred from extra_compute_action_feed_dict()
"""
feed_dict = self.extra_compute_action_feed_dict()
return {
k.name: tf1.saved_model.utils.build_tensor_info(k) for k in feed_dict.keys()
}
def _extra_output_signature_def(self):
"""Extra output signatures to add when exporting tf model.
Inferred from extra_compute_action_fetches()
"""
fetches = self.extra_compute_action_fetches()
return {
k: tf1.saved_model.utils.build_tensor_info(fetches[k])
for k in fetches.keys()
}
    def _build_signature_def(self):
        """Build signature def map for tensorflow SavedModelBuilder.

        Collects tensor-info entries for all graph inputs (obs, seq-lens,
        prev-a/r, is_training, timestep, RNN state-ins) and outputs (sampled
        actions, RNN state-outs, extra action fetches), wrapped in a single
        PREDICT-method signature def under the default serving key.
        """
        # build input signatures
        input_signature = self._extra_input_signature_def()
        input_signature["observations"] = tf1.saved_model.utils.build_tensor_info(
            self._obs_input
        )
        if self._seq_lens is not None:
            input_signature[
                SampleBatch.SEQ_LENS
            ] = tf1.saved_model.utils.build_tensor_info(self._seq_lens)
        if self._prev_action_input is not None:
            input_signature["prev_action"] = tf1.saved_model.utils.build_tensor_info(
                self._prev_action_input
            )
        if self._prev_reward_input is not None:
            input_signature["prev_reward"] = tf1.saved_model.utils.build_tensor_info(
                self._prev_reward_input
            )
        input_signature["is_training"] = tf1.saved_model.utils.build_tensor_info(
            self._is_training
        )
        if self._timestep is not None:
            input_signature["timestep"] = tf1.saved_model.utils.build_tensor_info(
                self._timestep
            )
        for state_input in self._state_inputs:
            input_signature[state_input.name] = tf1.saved_model.utils.build_tensor_info(
                state_input
            )
        # build output signatures
        output_signature = self._extra_output_signature_def()
        # Flatten possibly nested action structs into indexed outputs.
        for i, a in enumerate(tf.nest.flatten(self._sampled_action)):
            output_signature[
                "actions_{}".format(i)
            ] = tf1.saved_model.utils.build_tensor_info(a)
        for state_output in self._state_outputs:
            output_signature[
                state_output.name
            ] = tf1.saved_model.utils.build_tensor_info(state_output)
        signature_def = tf1.saved_model.signature_def_utils.build_signature_def(
            input_signature,
            output_signature,
            tf1.saved_model.signature_constants.PREDICT_METHOD_NAME,
        )
        signature_def_key = (
            tf1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        )
        signature_def_map = {signature_def_key: signature_def}
        return signature_def_map
    def _build_compute_actions(
        self,
        builder,
        *,
        input_dict=None,
        obs_batch=None,
        state_batches=None,
        prev_action_batch=None,
        prev_reward_batch=None,
        episodes=None,
        explore=None,
        timestep=None,
    ):
        """Registers all feeds/fetches for one compute-actions session run.

        Returns:
            Tuple of fetch handles: (sampled action, state-outs list,
            extra-action-fetch dict); resolve them via `builder.get()`.
        """
        explore = explore if explore is not None else self.config["explore"]
        timestep = timestep if timestep is not None else self.global_timestep
        # Call the exploration before_compute_actions hook.
        self.exploration.before_compute_actions(
            timestep=timestep, explore=explore, tf_sess=self.get_session()
        )
        builder.add_feed_dict(self.extra_compute_action_feed_dict())
        # `input_dict` given: Simply build what's in that dict.
        # NOTE(review): assumes `input_dict` is not None whenever
        # `self._input_dict` exists -- confirm with all callers.
        if hasattr(self, "_input_dict"):
            for key, value in input_dict.items():
                if key in self._input_dict:
                    # Handle complex/nested spaces as well.
                    tree.map_structure(
                        lambda k, v: builder.add_feed_dict({k: v}),
                        self._input_dict[key],
                        value,
                    )
        # For policies that inherit directly from TFPolicy.
        else:
            builder.add_feed_dict({self._obs_input: input_dict[SampleBatch.OBS]})
            if SampleBatch.PREV_ACTIONS in input_dict:
                builder.add_feed_dict(
                    {self._prev_action_input: input_dict[SampleBatch.PREV_ACTIONS]}
                )
            if SampleBatch.PREV_REWARDS in input_dict:
                builder.add_feed_dict(
                    {self._prev_reward_input: input_dict[SampleBatch.PREV_REWARDS]}
                )
            state_batches = []
            i = 0
            while "state_in_{}".format(i) in input_dict:
                state_batches.append(input_dict["state_in_{}".format(i)])
                i += 1
            builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
            # Default seq-lens of all 1s if states given but no seq-lens.
            if "state_in_0" in input_dict and SampleBatch.SEQ_LENS not in input_dict:
                builder.add_feed_dict(
                    {self._seq_lens: np.ones(len(input_dict["state_in_0"]))}
                )
        builder.add_feed_dict({self._is_exploring: explore})
        if timestep is not None:
            builder.add_feed_dict({self._timestep: timestep})
        # Determine, what exactly to fetch from the graph.
        to_fetch = (
            [self._sampled_action]
            + self._state_outputs
            + [self.extra_compute_action_fetches()]
        )
        # Add the ops to fetch for the upcoming session call.
        fetches = builder.add_fetches(to_fetch)
        return fetches[0], fetches[1:-1], fetches[-1]
def _build_compute_gradients(self, builder, postprocessed_batch):
self._debug_vars()
builder.add_feed_dict(self.extra_compute_grad_feed_dict())
builder.add_feed_dict(
self._get_loss_inputs_dict(postprocessed_batch, shuffle=False)
)
fetches = builder.add_fetches([self._grads, self._get_grad_and_stats_fetches()])
return fetches[0], fetches[1]
def _build_apply_gradients(self, builder, gradients):
if len(gradients) != len(self._grads):
raise ValueError(
"Unexpected number of gradients to apply, got {} for {}".format(
gradients, self._grads
)
)
builder.add_feed_dict({self._is_training: True})
builder.add_feed_dict(dict(zip(self._grads, gradients)))
fetches = builder.add_fetches([self._apply_op])
return fetches[0]
def _build_learn_on_batch(self, builder, postprocessed_batch):
self._debug_vars()
builder.add_feed_dict(self.extra_compute_grad_feed_dict())
builder.add_feed_dict(
self._get_loss_inputs_dict(postprocessed_batch, shuffle=False)
)
fetches = builder.add_fetches(
[
self._apply_op,
self._get_grad_and_stats_fetches(),
]
)
return fetches[1]
def _get_grad_and_stats_fetches(self):
fetches = self.extra_compute_grad_fetches()
if LEARNER_STATS_KEY not in fetches:
raise ValueError("Grad fetches should contain 'stats': {...} entry")
if self._stats_fetches:
fetches[LEARNER_STATS_KEY] = dict(
self._stats_fetches, **fetches[LEARNER_STATS_KEY]
)
return fetches
    def _get_loss_inputs_dict(self, train_batch: SampleBatch, shuffle: bool):
        """Return a feed dict from a batch.

        Args:
            train_batch: batch of data to derive inputs from.
            shuffle: whether to shuffle batch sequences. Shuffle may
                be done in-place. This only makes sense if you're further
                applying minibatch SGD after getting the outputs.

        Returns:
            Feed dict of data.
        """
        # Get batch ready for RNNs, if applicable.
        if not isinstance(train_batch, SampleBatch) or not train_batch.zero_padded:
            pad_batch_to_sequences_of_same_size(
                train_batch,
                max_seq_len=self._max_seq_len,
                shuffle=shuffle,
                batch_divisibility_req=self._batch_divisibility_req,
                feature_keys=list(self._loss_input_dict_no_rnn.keys()),
                view_requirements=self.view_requirements,
            )
        # Mark the batch as "is_training" so the Model can use this
        # information.
        train_batch.set_training(True)
        # Build the feed dict from the batch.
        feed_dict = {}
        for key, placeholders in self._loss_input_dict.items():
            # `map_structure` is used purely for its side effect of filling
            # `feed_dict` (handles nested/complex placeholder structures).
            a = tree.map_structure(
                lambda ph, v: feed_dict.__setitem__(ph, v),
                placeholders,
                train_batch[key],
            )
            del a
        state_keys = ["state_in_{}".format(i) for i in range(len(self._state_inputs))]
        for key in state_keys:
            feed_dict[self._loss_input_dict[key]] = train_batch[key]
        if state_keys:
            feed_dict[self._seq_lens] = train_batch[SampleBatch.SEQ_LENS]
        return feed_dict
| TFPolicy |
python | pandas-dev__pandas | pandas/tests/dtypes/test_inference.py | {
"start": 66809,
"end": 72738
} | class ____:
def test_is_scalar_builtin_scalars(self):
assert is_scalar(None)
assert is_scalar(True)
assert is_scalar(False)
assert is_scalar(Fraction())
assert is_scalar(0.0)
assert is_scalar(1)
assert is_scalar(complex(2))
assert is_scalar(float("NaN"))
assert is_scalar(np.nan)
assert is_scalar("foobar")
assert is_scalar(b"foobar")
assert is_scalar(datetime(2014, 1, 1))
assert is_scalar(date(2014, 1, 1))
assert is_scalar(time(12, 0))
assert is_scalar(timedelta(hours=1))
assert is_scalar(pd.NaT)
assert is_scalar(pd.NA)
def test_is_scalar_builtin_nonscalars(self):
assert not is_scalar({})
assert not is_scalar([])
assert not is_scalar([1])
assert not is_scalar(())
assert not is_scalar((1,))
assert not is_scalar(slice(None))
assert not is_scalar(Ellipsis)
def test_is_scalar_numpy_array_scalars(self):
assert is_scalar(np.int64(1))
assert is_scalar(np.float64(1.0))
assert is_scalar(np.int32(1))
assert is_scalar(np.complex64(2))
assert is_scalar(np.object_("foobar"))
assert is_scalar(np.str_("foobar"))
assert is_scalar(np.bytes_(b"foobar"))
assert is_scalar(np.datetime64("2014-01-01"))
assert is_scalar(np.timedelta64(1, "h"))
@pytest.mark.parametrize(
"zerodim",
[
1,
"foobar",
np.datetime64("2014-01-01"),
np.timedelta64(1, "h"),
np.datetime64("NaT"),
],
)
def test_is_scalar_numpy_zerodim_arrays(self, zerodim):
zerodim = np.array(zerodim)
assert not is_scalar(zerodim)
assert is_scalar(lib.item_from_zerodim(zerodim))
@pytest.mark.parametrize("arr", [np.array([]), np.array([[]])])
def test_is_scalar_numpy_arrays(self, arr):
assert not is_scalar(arr)
assert not is_scalar(MockNumpyLikeArray(arr))
def test_is_scalar_pandas_scalars(self):
assert is_scalar(Timestamp("2014-01-01"))
assert is_scalar(Timedelta(hours=1))
assert is_scalar(Period("2014-01-01"))
assert is_scalar(Interval(left=0, right=1))
assert is_scalar(DateOffset(days=1))
assert is_scalar(pd.offsets.Minute(3))
def test_is_scalar_pandas_containers(self):
assert not is_scalar(Series(dtype=object))
assert not is_scalar(Series([1]))
assert not is_scalar(DataFrame())
assert not is_scalar(DataFrame([[1]]))
assert not is_scalar(Index([]))
assert not is_scalar(Index([1]))
assert not is_scalar(Categorical([]))
assert not is_scalar(DatetimeIndex([])._data)
assert not is_scalar(TimedeltaIndex([])._data)
assert not is_scalar(DatetimeIndex([])._data.to_period("D"))
assert not is_scalar(pd.array([1, 2, 3]))
def test_is_scalar_number(self):
# Number() is not recognized by PyNumber_Check, so by extension
# is not recognized by is_scalar, but instances of non-abstract
# subclasses are.
class Numeric(Number):
def __init__(self, value) -> None:
self.value = value
def __int__(self) -> int:
return self.value
num = Numeric(1)
assert is_scalar(num)
@pytest.mark.parametrize("unit", ["ms", "us", "ns"])
def test_datetimeindex_from_empty_datetime64_array(unit):
idx = DatetimeIndex(np.array([], dtype=f"datetime64[{unit}]"))
assert len(idx) == 0
def test_nan_to_nat_conversions():
df = DataFrame(
{"A": np.asarray(range(10), dtype="float64"), "B": Timestamp("20010101")}
)
df.iloc[3:6, :] = np.nan
result = df.loc[4, "B"]
assert result is pd.NaT
s = df["B"].copy()
s[8:9] = np.nan
assert s[8] is pd.NaT
@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
@pytest.mark.parametrize("spmatrix", ["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def test_is_scipy_sparse(spmatrix):
sparse = pytest.importorskip("scipy.sparse")
klass = getattr(sparse, spmatrix + "_matrix")
assert is_scipy_sparse(klass([[0, 1]]))
assert not is_scipy_sparse(np.array([1]))
def test_ensure_int32():
values = np.arange(10, dtype=np.int32)
result = ensure_int32(values)
assert result.dtype == np.int32
values = np.arange(10, dtype=np.int64)
result = ensure_int32(values)
assert result.dtype == np.int32
@pytest.mark.parametrize(
"right,result",
[
(0, np.uint8),
(-1, np.int16),
(300, np.uint16),
# For floats, we just upcast directly to float64 instead of trying to
# find a smaller floating dtype
(300.0, np.uint16), # for integer floats, we convert them to ints
(300.1, np.float64),
(np.int16(300), np.int16 if np_version_gt2 else np.uint16),
],
)
def test_find_result_type_uint_int(right, result):
left_dtype = np.dtype("uint8")
assert find_result_type(left_dtype, right) == result
@pytest.mark.parametrize(
    "right,result",
    [
        (0, np.int8),
        (-1, np.int8),
        (300, np.int16),
        # Floats are upcast straight to float64 rather than searching for a
        # narrower floating dtype
        (300.0, np.int16),  # integer-valued floats are treated as ints
        (300.1, np.float64),
        (np.int16(300), np.int16),
    ],
)
def test_find_result_type_int_int(right, result):
    # Combining an int8 left dtype with the given scalar yields `result`.
    assert find_result_type(np.dtype("int8"), right) == result
@pytest.mark.parametrize(
    "right,result",
    [
        (300.0, np.float64),
        (np.float32(300), np.float32),
    ],
)
def test_find_result_type_floats(right, result):
    # Combining a float16 left dtype with the given scalar yields `result`.
    assert find_result_type(np.dtype("float16"), right) == result
| TestIsScalar |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 11027,
"end": 11559
} | class ____(In):
def get_prep_lookup(self):
values = super().get_prep_lookup()
if hasattr(values, "resolve_expression"):
return values
# In.process_rhs() expects values to be hashable, so convert lists
# to tuples.
prepared_values = []
for value in values:
if hasattr(value, "resolve_expression"):
prepared_values.append(value)
else:
prepared_values.append(tuple(value))
return prepared_values
| ArrayInLookup |
python | great-expectations__great_expectations | tests/integration/test_utils/data_source_config/spark_filesystem_csv.py | {
"start": 2173,
"end": 4560
} | class ____(
BatchTestSetup[SparkFilesystemCsvDatasourceTestConfig, CSVAsset]
):
    def __init__(
        self,
        config: SparkFilesystemCsvDatasourceTestConfig,
        data: pd.DataFrame,
        base_dir: pathlib.Path,
        context: AbstractDataContext,
    ) -> None:
        """Store the base directory and delegate the rest to the parent setup."""
        super().__init__(config=config, data=data, context=context)
        # Root directory under which setup() writes the CSV file.
        self._base_dir = base_dir
    @property
    def _spark_session(self) -> "pyspark.SparkSession":
        # Spark session with get-or-create semantics (per the helper's name),
        # so repeated access does not spawn extra sessions.
        return SparkDFExecutionEngine.get_or_create_spark_session()
@property
def _spark_schema(self) -> Union["pyspark_types.StructType", None]:
from great_expectations.compatibility.pyspark import types as pyspark_types
column_types = self.config.column_types or {}
struct_fields = [
pyspark_types.StructField(column_name, column_type())
for column_name, column_type in column_types.items()
]
return pyspark_types.StructType(struct_fields) if struct_fields else None
@property
def _spark_data(self) -> "pyspark.DataFrame":
if self._spark_schema:
return self._spark_session.createDataFrame(self.data, schema=self._spark_schema)
else:
return self._spark_session.createDataFrame(self.data)
@override
def make_asset(self) -> CSVAsset:
infer_schema = self._spark_schema is None
return self.context.data_sources.add_spark_filesystem(
name=self._random_resource_name(), base_directory=self._base_dir
).add_csv_asset(
name=self._random_resource_name(),
spark_schema=self._spark_schema,
header=True,
infer_schema=infer_schema,
**self.config.read_options,
)
@override
def make_batch(self) -> Batch:
return (
self.make_asset()
.add_batch_definition_path(name=self._random_resource_name(), path=self.csv_path)
.get_batch()
)
@override
def setup(self) -> None:
file_path = self._base_dir / self.csv_path
self._spark_data.write.format("csv").option("header", True).options(
**self.config.write_options
).save(str(file_path))
@override
def teardown(self) -> None: ...
    @property
    def csv_path(self) -> pathlib.Path:
        # Path relative to _base_dir; used both when writing (setup) and when
        # defining the batch path (make_batch).
        return pathlib.Path("data.csv")
| SparkFilesystemCsvBatchTestSetup |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/alloy_db.py | {
"start": 1487,
"end": 52567
} | class ____(GoogleBaseHook):
"""Google Alloy DB Hook."""
    def __init__(self, *args, **kwargs):
        """Pass all arguments through to GoogleBaseHook; the client is lazy."""
        super().__init__(*args, **kwargs)
        # Created on first use by get_alloy_db_admin_client().
        self._client: alloydb_v1.AlloyDBAdminClient | None = None
def get_alloy_db_admin_client(self) -> alloydb_v1.AlloyDBAdminClient:
"""Retrieve AlloyDB client."""
if not self._client:
self._client = alloydb_v1.AlloyDBAdminClient(
credentials=self.get_credentials(),
client_info=CLIENT_INFO,
)
return self._client
def wait_for_operation(self, timeout: float | None, operation: Operation) -> proto.Message:
"""Wait for long-lasting operation to complete."""
self.log.info("Waiting for operation to complete...")
_timeout: int | None = int(timeout) if timeout else None
try:
return operation.result(timeout=_timeout)
except Exception:
error = operation.exception(timeout=_timeout)
raise AirflowException(error)
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self,
cluster_id: str,
cluster: alloydb_v1.Cluster | dict,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create an Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.CreateClusterRequest
:param cluster_id: Required. ID of the cluster to create.
:param cluster: Required. Cluster to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Cluster
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.create_cluster(
request={
"parent": client.common_location_path(project_id, location),
"cluster_id": cluster_id,
"cluster": cluster,
"request_id": request_id,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_secondary_cluster(
self,
cluster_id: str,
cluster: alloydb_v1.Cluster | dict,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a secondary Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.CreateClusterRequest
:param cluster_id: Required. ID of the cluster to create.
:param cluster: Required. Cluster to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Cluster
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.create_secondary_cluster(
request={
"parent": client.common_location_path(project_id, location),
"cluster_id": cluster_id,
"cluster": cluster,
"request_id": request_id,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@tenacity.retry(
stop=tenacity.stop_after_attempt(5),
wait=tenacity.wait_exponential(multiplier=1, max=10),
retry=tenacity.retry_if_exception_type(ValueError),
)
@GoogleBaseHook.fallback_to_default_project_id
def get_cluster(
self,
cluster_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> alloydb_v1.Cluster:
"""
Retrieve an Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.GetClusterRequest
:param cluster_id: Required. ID of the cluster.
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.get_cluster(
request={"name": client.cluster_path(project_id, location, cluster_id)},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_cluster(
self,
cluster_id: str,
cluster: alloydb_v1.Cluster | dict,
location: str,
update_mask: FieldMask | dict | None = None,
project_id: str = PROVIDE_PROJECT_ID,
allow_missing: bool = False,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Update an Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.UpdateClusterRequest
:param cluster_id: Required. ID of the cluster to update.
:param cluster: Required. Cluster to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Cluster
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
Cluster resource by the update.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param allow_missing: Optional. If set to true, update succeeds even if cluster is not found.
In that case, a new cluster is created and update_mask is ignored.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
_cluster = deepcopy(cluster) if isinstance(cluster, dict) else alloydb_v1.Cluster.to_dict(cluster)
_cluster["name"] = client.cluster_path(project_id, location, cluster_id)
return client.update_cluster(
request={
"update_mask": update_mask,
"cluster": _cluster,
"request_id": request_id,
"validate_only": validate_only,
"allow_missing": allow_missing,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
self,
cluster_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
etag: str | None = None,
validate_only: bool = False,
force: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete an Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.DeleteClusterRequest
:param cluster_id: Required. ID of the cluster to delete.
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param etag: Optional. The current etag of the Cluster. If an etag is provided and does not match the
current etag of the Cluster, deletion will be blocked and an ABORTED error will be returned.
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param force: Optional. Whether to cascade delete child instances for given cluster.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.delete_cluster(
request={
"name": client.cluster_path(project_id, location, cluster_id),
"request_id": request_id,
"etag": etag,
"validate_only": validate_only,
"force": force,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
cluster_id: str,
instance_id: str,
instance: alloydb_v1.Instance | dict,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create an instance in a given Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.CreateInstanceRequest
:param cluster_id: Required. ID of the cluster for creating an instance in.
:param instance_id: Required. ID of the instance to create.
:param instance: Required. Instance to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.create_instance(
request={
"parent": client.cluster_path(project_id, location, cluster_id),
"instance_id": instance_id,
"instance": instance,
"request_id": request_id,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_secondary_instance(
self,
cluster_id: str,
instance_id: str,
instance: alloydb_v1.Instance | dict,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a secondary instance in a given Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.CreateSecondaryInstanceRequest
:param cluster_id: Required. ID of the cluster for creating an instance in.
:param instance_id: Required. ID of the instance to create.
:param instance: Required. Instance to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.create_secondary_instance(
request={
"parent": client.cluster_path(project_id, location, cluster_id),
"instance_id": instance_id,
"instance": instance,
"request_id": request_id,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@tenacity.retry(
stop=tenacity.stop_after_attempt(5),
wait=tenacity.wait_exponential(multiplier=1, max=10),
retry=tenacity.retry_if_exception_type(ValueError),
)
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
cluster_id: str,
instance_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> alloydb_v1.Instance:
"""
Retrieve an Alloy DB instance.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.GetInstanceRequest
:param cluster_id: Required. ID of the cluster.
:param instance_id: Required. ID of the instance.
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.get_instance(
request={"name": client.instance_path(project_id, location, cluster_id, instance_id)},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
cluster_id: str,
instance_id: str,
instance: alloydb_v1.Instance | dict,
location: str,
update_mask: FieldMask | dict | None = None,
project_id: str = PROVIDE_PROJECT_ID,
allow_missing: bool = False,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Update an Alloy DB instance.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.UpdateInstanceRequest
:param cluster_id: Required. ID of the cluster.
:param instance_id: Required. ID of the cluster to update.
:param instance: Required. Cluster to update. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
Instance resource by the update.
:param request_id: Optional. The ID of an existing request object.:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param allow_missing: Optional. If set to true, update succeeds even if cluster is not found.
In that case, a new cluster is created and update_mask is ignored.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
_instance = (
deepcopy(instance) if isinstance(instance, dict) else alloydb_v1.Instance.to_dict(instance)
)
_instance["name"] = client.instance_path(project_id, location, cluster_id, instance_id)
return client.update_instance(
request={
"update_mask": update_mask,
"instance": _instance,
"request_id": request_id,
"validate_only": validate_only,
"allow_missing": allow_missing,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(
self,
instance_id: str,
cluster_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
etag: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Delete an Alloy DB instance.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.DeleteInstanceRequest
:param instance_id: Required. ID of the instance to delete.
:param cluster_id: Required. ID of the cluster.
:param location: Required. The ID of the Google Cloud region where the instance is located.
:param project_id: Optional. The ID of the Google Cloud project where the instance is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param etag: Optional. The current etag of the Instance. If an etag is provided and does not match the
current etag of the Instance, deletion will be blocked and an ABORTED error will be returned.
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the delete request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.delete_instance(
request={
"name": client.instance_path(project_id, location, cluster_id, instance_id),
"request_id": request_id,
"etag": etag,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_user(
self,
user_id: str,
user: alloydb_v1.User | dict,
cluster_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
validate_only: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> alloydb_v1.User:
"""
Create a user in a given Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.CreateUserRequest
:param user_id: Required. ID of the user to create.
:param user: Required. The user to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.User
:param cluster_id: Required. ID of the cluster for creating a user in.
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_only: Optional. If set, performs request validation, but does not actually execute
the create request.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.create_user(
request={
"parent": client.cluster_path(project_id, location, cluster_id),
"user_id": user_id,
"user": user,
"request_id": request_id,
"validate_only": validate_only,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@tenacity.retry(
stop=tenacity.stop_after_attempt(5),
wait=tenacity.wait_exponential(multiplier=1, max=10),
retry=tenacity.retry_if_exception_type(ValueError),
)
@GoogleBaseHook.fallback_to_default_project_id
def get_user(
self,
user_id: str,
cluster_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> alloydb_v1.User:
"""
Get a user in a given Alloy DB cluster.
.. seealso::
For more details see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.GetUserRequest
:param user_id: Required. ID of the user to create.
:param cluster_id: Required. ID of the cluster for creating a user in.
:param location: Required. The ID of the Google Cloud region where the cluster is located.
:param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
:param retry: Optional. Designation of what errors, if any, should be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
"""
client = self.get_alloy_db_admin_client()
return client.get_user(
request={
"name": client.user_path(project_id, location, cluster_id, user_id),
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
    @GoogleBaseHook.fallback_to_default_project_id
    def update_user(
        self,
        cluster_id: str,
        user_id: str,
        user: alloydb_v1.User | dict,
        location: str,
        update_mask: FieldMask | dict | None = None,
        allow_missing: bool = False,
        project_id: str = PROVIDE_PROJECT_ID,
        request_id: str | None = None,
        validate_only: bool = False,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ) -> alloydb_v1.User:
        """
        Update an Alloy DB user.
        .. seealso::
            For more details see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.UpdateUserRequest
        :param cluster_id: Required. ID of the cluster.
        :param user_id: Required. ID of the user to update.
        :param user: Required. User to update. For more details please see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.User
        :param location: Required. The ID of the Google Cloud region where the cluster is located.
        :param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
            User resource by the update.
        :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
            so that if you must retry your request, the server ignores the request if it has already been
            completed. The server guarantees that for at least 60 minutes since the first request.
            For example, consider a situation where you make an initial request and the request times out.
            If you make the request again with the same request ID, the server can check if the original operation
            with the same request ID was received, and if so, ignores the second request.
            This prevents clients from accidentally creating duplicate commitments.
            The request ID must be a valid UUID with the exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
        :param validate_only: Optional. If set, performs request validation, but does not actually execute
            the update request.
        :param allow_missing: Optional. If set to true, update succeeds even if the user is not found.
            In that case, a new user is created and update_mask is ignored.
        :param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
        :param retry: Optional. Designation of what errors, if any, should be retried.
        :param timeout: Optional. The timeout for this request.
        :param metadata: Optional. Strings which should be sent along with the request as metadata.
        """
        client = self.get_alloy_db_admin_client()
        # Work on a copy so the caller's object/dict is not mutated, then inject
        # the fully-qualified resource name the API requires.
        _user = deepcopy(user) if isinstance(user, dict) else alloydb_v1.User.to_dict(user)
        _user["name"] = client.user_path(project_id, location, cluster_id, user_id)
        return client.update_user(
            request={
                "update_mask": update_mask,
                "user": _user,
                "request_id": request_id,
                "validate_only": validate_only,
                "allow_missing": allow_missing,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def delete_user(
        self,
        user_id: str,
        cluster_id: str,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        request_id: str | None = None,
        validate_only: bool = False,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Delete an Alloy DB user.
        .. seealso::
            For more details see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.DeleteUserRequest
        :param user_id: Required. ID of the user to delete.
        :param cluster_id: Required. ID of the cluster.
        :param location: Required. The ID of the Google Cloud region where the cluster is located.
        :param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
        :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
            so that if you must retry your request, the server ignores the request if it has already been
            completed. The server guarantees that for at least 60 minutes since the first request.
            For example, consider a situation where you make an initial request and the request times out.
            If you make the request again with the same request ID, the server can check if the original operation
            with the same request ID was received, and if so, ignores the second request.
            This prevents clients from accidentally creating duplicate commitments.
            The request ID must be a valid UUID with the exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
        :param validate_only: Optional. If set, performs request validation, but does not actually execute
            the delete request.
        :param retry: Optional. Designation of what errors, if any, should be retried.
        :param timeout: Optional. The timeout for this request.
        :param metadata: Optional. Strings which should be sent along with the request as metadata.
        """
        client = self.get_alloy_db_admin_client()
        return client.delete_user(
            request={
                "name": client.user_path(project_id, location, cluster_id, user_id),
                "request_id": request_id,
                "validate_only": validate_only,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def create_backup(
        self,
        backup_id: str,
        backup: alloydb_v1.Backup | dict,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        request_id: str | None = None,
        validate_only: bool = False,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ) -> Operation:
        """
        Create a backup in a given Alloy DB cluster.
        .. seealso::
            For more details see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.CreateBackupRequest
        :param backup_id: Required. ID of the backup to create.
        :param backup: Required. The backup to create. For more details please see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Backup
        :param location: Required. The ID of the Google Cloud region where the cluster is located.
        :param project_id: Optional. The ID of the Google Cloud project where the cluster is located.
        :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
            so that if you must retry your request, the server ignores the request if it has already been
            completed. The server guarantees that for at least 60 minutes since the first request.
            For example, consider a situation where you make an initial request and the request times out.
            If you make the request again with the same request ID, the server can check if the original operation
            with the same request ID was received, and if so, ignores the second request.
            This prevents clients from accidentally creating duplicate commitments.
            The request ID must be a valid UUID with the exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
        :param validate_only: Optional. If set, performs request validation, but does not actually execute
            the create request.
        :param retry: Optional. Designation of what errors, if any, should be retried.
        :param timeout: Optional. The timeout for this request.
        :param metadata: Optional. Strings which should be sent along with the request as metadata.
        """
        client = self.get_alloy_db_admin_client()
        # Unlike users, backups are parented on the location (regional resource),
        # not on a cluster path; the cluster is referenced inside `backup` itself.
        return client.create_backup(
            request={
                "parent": client.common_location_path(project_id, location),
                "backup_id": backup_id,
                "backup": backup,
                "request_id": request_id,
                "validate_only": validate_only,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @tenacity.retry(
        stop=tenacity.stop_after_attempt(5),
        wait=tenacity.wait_exponential(multiplier=1, max=10),
        retry=tenacity.retry_if_exception_type(ValueError),
    )
    @GoogleBaseHook.fallback_to_default_project_id
    def get_backup(
        self,
        backup_id: str,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ) -> alloydb_v1.Backup:
        """
        Get a backup in a given Alloy DB cluster.
        .. seealso::
            For more details see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.GetBackupRequest
        :param backup_id: Required. ID of the backup to retrieve.
        :param location: Required. The ID of the Google Cloud region where the backup is located.
        :param project_id: Optional. The ID of the Google Cloud project where the backup is located.
        :param retry: Optional. Designation of what errors, if any, should be retried.
        :param timeout: Optional. The timeout for this request.
        :param metadata: Optional. Strings which should be sent along with the request as metadata.
        """
        # NOTE(review): retried on ValueError with backoff, mirroring get_user --
        # presumably to absorb transient lookups right after backup creation.
        client = self.get_alloy_db_admin_client()
        return client.get_backup(
            request={
                "name": client.backup_path(project_id, location, backup_id),
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def update_backup(
        self,
        backup_id: str,
        backup: alloydb_v1.Backup | dict,
        location: str,
        update_mask: FieldMask | dict | None = None,
        allow_missing: bool = False,
        project_id: str = PROVIDE_PROJECT_ID,
        request_id: str | None = None,
        validate_only: bool = False,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ) -> Operation:
        """
        Update an Alloy DB backup.
        .. seealso::
            For more details see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.UpdateBackupRequest
        :param backup_id: Required. ID of the backup to update.
        :param backup: Required. Backup to update. For more details please see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Backup
        :param location: Required. The ID of the Google Cloud region where the backup is located.
        :param update_mask: Optional. Field mask is used to specify the fields to be overwritten in the
            Backup resource by the update.
        :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
            so that if you must retry your request, the server ignores the request if it has already been
            completed. The server guarantees that for at least 60 minutes since the first request.
            For example, consider a situation where you make an initial request and the request times out.
            If you make the request again with the same request ID, the server can check if the original operation
            with the same request ID was received, and if so, ignores the second request.
            This prevents clients from accidentally creating duplicate commitments.
            The request ID must be a valid UUID with the exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
        :param validate_only: Optional. If set, performs request validation, but does not actually execute
            the update request.
        :param allow_missing: Optional. If set to true, update succeeds even if the backup is not found.
            In that case, a new backup is created and update_mask is ignored.
        :param project_id: Optional. The ID of the Google Cloud project where the backup is located.
        :param retry: Optional. Designation of what errors, if any, should be retried.
        :param timeout: Optional. The timeout for this request.
        :param metadata: Optional. Strings which should be sent along with the request as metadata.
        """
        client = self.get_alloy_db_admin_client()
        # Copy before mutating so the caller's object/dict is left untouched.
        _backup = deepcopy(backup) if isinstance(backup, dict) else alloydb_v1.Backup.to_dict(backup)
        _backup["name"] = client.backup_path(project_id, location, backup_id)
        return client.update_backup(
            request={
                "update_mask": update_mask,
                "backup": _backup,
                "request_id": request_id,
                "validate_only": validate_only,
                "allow_missing": allow_missing,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    @GoogleBaseHook.fallback_to_default_project_id
    def delete_backup(
        self,
        backup_id: str,
        location: str,
        project_id: str = PROVIDE_PROJECT_ID,
        request_id: str | None = None,
        validate_only: bool = False,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
    ):
        """
        Delete an Alloy DB backup.
        .. seealso::
            For more details see API documentation:
            https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.DeleteBackupRequest
        :param backup_id: Required. ID of the backup to delete.
        :param location: Required. The ID of the Google Cloud region where the backup is located.
        :param project_id: Optional. The ID of the Google Cloud project where the backup is located.
        :param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
            so that if you must retry your request, the server ignores the request if it has already been
            completed. The server guarantees that for at least 60 minutes since the first request.
            For example, consider a situation where you make an initial request and the request times out.
            If you make the request again with the same request ID, the server can check if the original operation
            with the same request ID was received, and if so, ignores the second request.
            This prevents clients from accidentally creating duplicate commitments.
            The request ID must be a valid UUID with the exception that zero UUID is not supported
            (00000000-0000-0000-0000-000000000000).
        :param validate_only: Optional. If set, performs request validation, but does not actually execute
            the delete request.
        :param retry: Optional. Designation of what errors, if any, should be retried.
        :param timeout: Optional. The timeout for this request.
        :param metadata: Optional. Strings which should be sent along with the request as metadata.
        """
        client = self.get_alloy_db_admin_client()
        return client.delete_backup(
            request={
                "name": client.backup_path(project_id, location, backup_id),
                "request_id": request_id,
                "validate_only": validate_only,
            },
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
| AlloyDbHook |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dad-jokes/llama_index/readers/dad_jokes/base.py | {
"start": 166,
"end": 732
} | class ____(BaseReader):
"""
Dad jokes reader.
Reads a random dad joke.
"""
def _get_random_dad_joke(self):
response = requests.get(
"https://icanhazdadjoke.com/", headers={"Accept": "application/json"}
)
response.raise_for_status()
json_data = response.json()
return json_data["joke"]
def load_data(self) -> List[Document]:
"""
Return a random dad joke.
Args:
None.
"""
return [Document(text=self._get_random_dad_joke())]
| DadJokesReader |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 36854,
"end": 39326
} | class ____(object):
"""
Pending deallocations of a context (or device since we are using the primary
context). The capacity defaults to being unset (_SizeNotSet) but can be
modified later once the driver is initialized and the total memory capacity
known.
"""
    def __init__(self, capacity=_SizeNotSet):
        # capacity: total device memory in bytes; defaults to the _SizeNotSet
        # sentinel until the driver can report the real figure.
        self._cons = deque()        # queued (dtor, handle, size) triples, FIFO
        self._disable_count = 0     # nesting depth of active disable() contexts
        self._size = 0              # running total of pending bytes
        self.memory_capacity = capacity
    @property
    def _max_pending_bytes(self):
        """Byte threshold above which queued deallocations are flushed."""
        return int(self.memory_capacity * config.CUDA_DEALLOCS_RATIO)
    def add_item(self, dtor, handle, size=_SizeNotSet):
        """
        Add a pending deallocation.
        The *dtor* arg is the destructor function that takes an argument,
        *handle*. It is used as ``dtor(handle)``. The *size* arg is the
        byte size of the resource added. It is an optional argument. Some
        resources (e.g. CUModule) has an unknown memory footprint on the device.
        """
        _logger.info('add pending dealloc: %s %s bytes', dtor.__name__, size)
        self._cons.append((dtor, handle, size))
        self._size += int(size)
        # Flush eagerly once either the count cap or the byte cap is exceeded,
        # so pending frees cannot pile up unboundedly.
        if (len(self._cons) > config.CUDA_DEALLOCS_COUNT or
                self._size > self._max_pending_bytes):
            self.clear()
    def clear(self):
        """
        Flush any pending deallocations unless it is disabled.
        Do nothing if disabled.
        """
        if not self.is_disabled:
            # Drain in FIFO order, invoking each stored destructor.
            while self._cons:
                [dtor, handle, size] = self._cons.popleft()
                _logger.info('dealloc: %s %s bytes', dtor.__name__, size)
                dtor(handle)
            self._size = 0
    @contextlib.contextmanager
    def disable(self):
        """
        Context manager to temporarily disable flushing pending deallocation.
        This can be nested.
        """
        # A counter (not a flag) so nested disable() contexts compose correctly.
        self._disable_count += 1
        try:
            yield
        finally:
            self._disable_count -= 1
            assert self._disable_count >= 0
    @property
    def is_disabled(self):
        """True while at least one ``disable()`` context is active."""
        return self._disable_count > 0
    def __len__(self):
        """
        Returns number of pending deallocations.
        """
        # One entry per queued (dtor, handle, size) triple.
        return len(self._cons)
MemoryInfo = namedtuple("MemoryInfo", "free,total")
"""Free and total memory for a device.
.. py:attribute:: free
Free device memory in bytes.
.. py:attribute:: total
Total device memory in bytes.
"""
| _PendingDeallocs |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {
"start": 57660,
"end": 59973
} | class ____:
    def setup_method(self):
        # Start every test from a clean slate: no dag runs, no rendered fields.
        clear_db_runs()
        clear_rendered_ti_fields()
    def teardown_method(self):
        # Mirror setup_method so state never leaks into the next test.
        clear_db_runs()
        clear_rendered_ti_fields()
    @pytest.mark.parametrize(
        "payload",
        [
            # string value
            {"field1": "string_value", "field2": "another_string"},
            # dictionary value
            {"field1": {"nested_key": "nested_value"}},
            # string lists value
            {"field1": ["123"], "field2": ["a", "b", "c"]},
            # list of JSON values
            {"field1": [1, "string", 3.14, True, None, {"nested": "dict"}]},
            # nested dictionary with mixed types in lists
            {
                "field1": {"nested_dict": {"key1": 123, "key2": "value"}},
                "field2": [3.14, {"sub_key": "sub_value"}, [1, 2]],
            },
        ],
    )
    def test_ti_put_rtif_success(self, client, session, create_task_instance, payload):
        """PUTting rendered fields for a running TI stores them verbatim as one RTIF row."""
        ti = create_task_instance(
            task_id="test_ti_put_rtif_success",
            state=State.RUNNING,
            session=session,
        )
        session.commit()
        response = client.put(f"/execution/task-instances/{ti.id}/rtif", json=payload)
        assert response.status_code == 201
        assert response.json() == {"message": "Rendered task instance fields successfully set"}
        # Expire so the subsequent query reads from the DB, not the session cache.
        session.expire_all()
        rtifs = session.query(RenderedTaskInstanceFields).all()
        assert len(rtifs) == 1
        assert rtifs[0].dag_id == "dag"
        assert rtifs[0].run_id == "test"
        assert rtifs[0].task_id == "test_ti_put_rtif_success"
        assert rtifs[0].map_index == -1
        assert rtifs[0].rendered_fields == payload
    def test_ti_put_rtif_missing_ti(self, client, session, create_task_instance):
        """PUTting rendered fields for an unknown TI id returns 404."""
        create_task_instance(
            task_id="test_ti_put_rtif_missing_ti",
            state=State.RUNNING,
            session=session,
        )
        session.commit()
        payload = {"field1": "rendered_value1", "field2": "rendered_value2"}
        # A fresh uuid7 cannot match the TI created above.
        random_id = uuid6.uuid7()
        response = client.put(f"/execution/task-instances/{random_id}/rtif", json=payload)
        assert response.status_code == 404
        assert response.json()["detail"] == "Not Found"
| TestTIPutRTIF |
python | pypa__twine | twine/commands/check.py | {
"start": 1589,
"end": 5997
} | class ____(io.StringIO):
def write(self, text: str) -> int:
matched = _REPORT_RE.search(text)
if matched:
line = matched.group("line")
level_text = matched.group("level").capitalize()
message = matched.group("message").rstrip("\r\n")
text = f"line {line}: {level_text}: {message}\n"
return super().write(text)
def __str__(self) -> str:
return self.getvalue().strip()
def _parse_content_type(value: str) -> Tuple[str, Dict[str, str]]:
"""Implement logic of deprecated cgi.parse_header().
From https://docs.python.org/3.11/library/cgi.html#cgi.parse_header.
"""
msg = email.message.EmailMessage()
msg["content-type"] = value
return msg.get_content_type(), msg["content-type"].params
def _check_file(
    filename: str, render_warning_stream: _WarningStream
) -> Tuple[List[str], bool]:
    """Render-check one distribution's long_description.

    Returns the collected warning messages plus a flag that is False when the
    description fails to render.
    """
    warnings: List[str] = []
    package = package_file.PackageFile.from_filename(filename, comment=None)
    metadata = package.metadata_dictionary()
    description = metadata.get("description")
    declared_type = metadata.get("description_content_type")
    if declared_type is None:
        warnings.append(
            "`long_description_content_type` missing. defaulting to `text/x-rst`."
        )
        declared_type = "text/x-rst"
    content_type, params = _parse_content_type(declared_type)
    renderer = _RENDERERS.get(content_type, _RENDERERS[None])
    if not description or description.rstrip() == "UNKNOWN":
        warnings.append("`long_description` missing.")
        return warnings, True
    if renderer:
        rendered = renderer.render(description, stream=render_warning_stream, **params)
        if rendered is None:
            return warnings, False
    return warnings, True
def check(
    dists: List[str],
    strict: bool = False,
) -> bool:
    """Check that a distribution will render correctly on PyPI and display the results.

    This currently only validates ``long_description``, but more checks could be
    added.

    :param dists:
        The distribution files to check.
    :param strict:
        If ``True``, treat warnings as errors.
    :return:
        ``True`` if there are rendering errors, otherwise ``False``.
    """
    dists = commands._find_dists(dists)
    uploads, _, _ = commands._split_inputs(dists)
    if not uploads:  # Return early, if there are no files to check.
        logger.error("No files to check.")
        return False
    failure = False
    for filename in uploads:
        # BUG FIX: the f-string had no placeholder and printed a literal
        # "(unknown)" instead of the file actually being checked.
        print(f"Checking {filename}: ", end="")
        render_warning_stream = _WarningStream()
        warnings, is_ok = _check_file(filename, render_warning_stream)
        # Print the status and/or error
        if not is_ok:
            failure = True
            print("[red]FAILED[/red]")
            logger.error(
                "`long_description` has syntax errors in markup"
                " and would not be rendered on PyPI."
                f"\n{render_warning_stream}"
            )
        elif warnings:
            if strict:
                failure = True
                print("[red]FAILED due to warnings[/red]")
            else:
                print("[yellow]PASSED with warnings[/yellow]")
        else:
            print("[green]PASSED[/green]")
        # Print warnings after the status and/or error
        for message in warnings:
            logger.warning(message)
    return failure
def main(args: List[str]) -> bool:
    """Execute the ``check`` command.

    :param args:
        The command-line arguments.
    :return:
        The exit status of the ``check`` command.
    """
    parser = argparse.ArgumentParser(prog="twine check")
    dist_options = dict(
        nargs="+",
        metavar="dist",
        help="The distribution files to check, usually dist/*",
    )
    parser.add_argument("dists", **dist_options)
    strict_options = dict(
        action="store_true",
        default=False,
        required=False,
        help="Fail on warnings",
    )
    parser.add_argument("--strict", **strict_options)
    parsed = parser.parse_args(args)
    # Delegate to the check function with the parsed command-line values.
    return check(parsed.dists, strict=parsed.strict)
| _WarningStream |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/ghostwriter/test_ghostwriter_cli.py | {
"start": 6101,
"end": 6317
} | class ____:
@staticmethod
def to_json(json: Union[dict,list]) -> str:
return json.dumps(json)
@staticmethod
def from_json(json: str) -> Union[dict,list]:
return json.loads(json)
| MyClass |
python | pytorch__pytorch | torch/profiler/_memory_profiler.py | {
"start": 11780,
"end": 14826
} | class ____:
    def __init__(self, op_tree: OpTree) -> None:
        """Build a map from each observed Tensor to its byte size.

        First pass: collect sizes from op inputs and module/optimizer state.
        Second pass: overlay sizes recorded by the allocator, which win ties.
        """
        self._values: dict[TensorKey, int] = {}
        for node in op_tree.sorted_nodes:
            if node.typed[0] == _EventType.TorchOp:
                for t in self._flat_tensor_inputs(node.typed[1]):
                    self._update_values(t)
            elif node.typed[0] == _EventType.PyCall:
                typed_fields = node.typed[1]
                # A PyCall frame records either a module or an optimizer, never both.
                assert typed_fields.module is None or typed_fields.optimizer is None
                if typed_fields.module is not None:
                    for _, p, p_grad in typed_fields.module.parameters:
                        self._update_values(p)
                        self._update_values(p_grad)
                if typed_fields.optimizer is not None:
                    for p, p_grad, state in typed_fields.optimizer.parameters:
                        self._update_values(p)
                        self._update_values(p_grad)
                        for _, t in state:
                            self._update_values(t)
        allocations: dict[TensorKey, int] = {}
        for node in op_tree.sorted_nodes:
            if node.typed[0] == _EventType.Allocation:
                alloc_fields = node.typed[1]
                key = TensorKey.from_allocation(alloc_fields)
                if key:
                    new_size = abs(alloc_fields.alloc_size)
                    prior_size = allocations.setdefault(key, new_size)
                    # It is possible to resize Storage in PyTorch, however we
                    # key on data pointer so most resizes will be treated as a
                    # change in storage. The one corner case that cannot be
                    # handled is `realloc` which successfully resizes the
                    # storage. At time of writing this is not done anywhere in
                    # the core PyTorch codebase.
                    if prior_size != new_size:
                        delta = f"{prior_size} vs. {new_size}"
                        log.warning("Mismatch between allocation and free: %s", delta)
        # Allocator-reported sizes override inferred ones.
        self._values.update(allocations)
    def _update_values(self, t: Optional[_TensorMetadata]) -> None:
        """Record the byte footprint of *t*, keeping the max seen per key."""
        key = TensorKey.from_tensor(t)
        if key is not None and t is not None and t.layout == torch.strided:
            # Scalars are represented as zero dim Tensors
            # max(size * stride) approximates the addressable extent of the
            # strided view in elements (defaults cover the zero-dim case).
            n = max(
                i[0] * i[1] for i in zip(t.sizes or [1], t.strides or [1], strict=True)
            )
            num_bytes = n * _element_size(t.dtype)
            assert num_bytes >= 0, f"{num_bytes}"
            self._values[key] = max(self._values.get(key, 0), num_bytes)
    @staticmethod
    def _flat_tensor_inputs(op: _ExtraFields_TorchOp) -> Iterator[_TensorMetadata]:
        """Yield every tensor input of *op*, flattening one level of lists."""
        for i in op.inputs:
            if isinstance(i, _TensorMetadata):
                yield i
            elif isinstance(i, list):
                yield from i
    def __getitem__(self, key: TensorKey):
        """Return the recorded byte size for *key*; raises KeyError if unseen."""
        return self._values[key]
@dataclasses.dataclass()
| SizeMap |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 18189,
"end": 18280
} | class ____(IterableExportEventsStreamAdjustableRange):
data_field = "smsBounce"
| SmsBounce |
python | keon__algorithms | tests/test_tree.py | {
"start": 478,
"end": 1417
} | class ____(unittest.TestCase):
    def test_preorder(self):
        # Iterative and recursive variants must agree on root-left-right order.
        tree = create_tree()
        self.assertEqual([100, 50, 25, 75, 150, 125, 175], preorder(tree))
        self.assertEqual([100, 50, 25, 75, 150, 125, 175], preorder_rec(tree))
    def test_postorder(self):
        # Iterative and recursive variants must agree on left-right-root order.
        tree = create_tree()
        self.assertEqual([25, 75, 50, 125, 175, 150, 100], postorder(tree))
        self.assertEqual([25, 75, 50, 125, 175, 150, 100], postorder_rec(tree))
    def test_inorder(self):
        # In-order traversal of a BST yields the keys in sorted order.
        tree = create_tree()
        self.assertEqual([25, 50, 75, 100, 125, 150, 175], inorder(tree))
        self.assertEqual([25, 50, 75, 100, 125, 150, 175], inorder_rec(tree))
def create_tree():
    """Build the fixed 7-node BST shared by the traversal tests.

            100
           /    \\
          50    150
         /  \\   /  \\
        25  75 125 175
    """
    root = Node(100)
    left = Node(50)
    right = Node(150)
    left.left, left.right = Node(25), Node(75)
    right.left, right.right = Node(125), Node(175)
    root.left, root.right = left, right
    return root
| TestTraversal |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modeling_glm4v_moe.py | {
"start": 14522,
"end": 17930
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(self, config: Glm4vMoeTextConfig, layer_idx: Optional[int] = None):
        """Set up Q/K/V/O projections for one decoder attention layer.

        layer_idx is kept for KV-cache bookkeeping in ``forward``.
        """
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # Fall back to hidden_size // num_heads when head_dim is absent from config.
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        # Grouped-query attention: number of query heads sharing each KV head.
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        # Output projection carries no bias, unlike the (configurable) QKV biases.
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.rotary_fn = apply_rotary_pos_emb
        self.rope_parameters = config.rope_parameters
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Run one attention pass, applying multimodal RoPE and updating the KV cache."""
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)
        # Project and split into per-head views, then move heads before sequence.
        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        cos, sin = position_embeddings
        query_states, key_states = apply_multimodal_rotary_pos_emb(  # diff with Llama
            query_states, key_states, cos, sin, self.rope_parameters["mrope_section"]
        )
        if past_key_values is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
        # Dispatch to the configured kernel (eager/SDPA/flash) by implementation name.
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )
        # Merge heads back and project to the model dimension.
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
| Glm4vMoeTextAttention |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/codemods/test_codemods.py | {
"start": 1948,
"end": 4745
} | class ____(CodemodTest):
TRANSFORM = codemods.HypothesisFixPositionalKeywonlyArgs
    def test_substitution(self) -> None:
        # Positional args past the keyword-only boundary are rewritten to keywords.
        before = """
import hypothesis.strategies as st
st.floats(0, 1, False, False, 32)
st.fractions(0, 1, 9)
"""
        after = """
import hypothesis.strategies as st
st.floats(0, 1, allow_nan=False, allow_infinity=False, width=32)
st.fractions(0, 1, max_denominator=9)
"""
        self.assertCodemod(before=before, after=after)
    def test_noop_with_new_floats_kw(self) -> None:
        # Eight positional args matches the newer floats() signature; leave as-is.
        before = """
import hypothesis.strategies as st
st.floats(0, 1, False, False, True, 32, False, False) # allow_subnormal=True
"""
        self.assertCodemod(before=before, after=before)
    def test_noop_if_unsure(self) -> None:
        # The names may or may not be the hypothesis functions at runtime, so
        # the codemod must not touch these calls.
        before = """
import random
if random.getrandbits(1):
from hypothesis import target
from hypothesis.strategies import lists as sets
def fractions(*args):
pass
else:
from hypothesis import target
from hypothesis.strategies import fractions, sets
fractions(0, 1, 9)
sets(None, 1)
target(0, 'label')
"""
        after = before.replace("'label'", "label='label'")
        self.assertCodemod(before=before, after=after)
    def test_stateful_rule_noop(self):
        # `rule()(lambda self: None)` is a call with a positional argument, and
        # so we need an additional check that the "func" node is a Name rather than
        # itself being a Call, lest we rewrite the outer instead of the inner.
        # (this may be an upstream bug in metadata processing)
        before = """
from hypothesis.stateful import RuleBasedStateMachine, rule
class MultipleRulesSameFuncMachine(RuleBasedStateMachine):
rule1 = rule()(lambda self: None)
"""
        self.assertCodemod(before=before, after=before)
    def test_kwargs_noop(self):
        # **kwargs expansion carries no positional args, so nothing to rewrite.
        before = """
from hypothesis import target
kwargs = {"observation": 1, "label": "foobar"}
target(**kwargs)
"""
        self.assertCodemod(before=before, after=before)
    def test_noop_with_too_many_arguments_passed(self) -> None:
        # If there are too many arguments, we should leave this alone to raise
        # TypeError on older versions instead of deleting the additional args.
        before = """
import hypothesis.strategies as st
st.sets(st.integers(), 0, 1, True)
"""
        self.assertCodemod(before=before, after=before)
| TestFixPositionalKeywonlyArgs |
python | PyCQA__pylint | pylint/checkers/exceptions.py | {
"start": 8198,
"end": 9552
} | class ____(BaseVisitor):
"""Visit references (anything that is not an AST leaf)."""
    def visit_name(self, node: nodes.Name) -> None:
        """Flag `raise NotImplemented` and raising of overly-general exception names."""
        if node.name == "NotImplemented":
            self._checker.add_message(
                "notimplemented-raised", node=self._node, confidence=HIGH
            )
            return
        try:
            # Infer which exception classes this name can refer to.
            exceptions = [
                c
                for _, c in _annotated_unpack_infer(node)
                if isinstance(c, nodes.ClassDef)
            ]
        except astroid.InferenceError:
            # Cannot resolve the name -- stay silent rather than guess.
            return
        for exception in exceptions:
            if self._checker._is_overgeneral_exception(exception):
                self._checker.add_message(
                    "broad-exception-raised",
                    args=exception.name,
                    node=self._node,
                    confidence=INFERENCE,
                )
def visit_call(self, node: nodes.Call) -> None:
if isinstance(node.func, nodes.Name):
self.visit_name(node.func)
match node.args:
case [nodes.Const(value=str() as msg), _, *_]:
if "%" in msg or ("{" in msg and "}" in msg):
self._checker.add_message(
"raising-format-tuple", node=self._node, confidence=HIGH
)
| ExceptionRaiseRefVisitor |
python | ansible__ansible | lib/ansible/module_utils/six/__init__.py | {
"start": 19805,
"end": 35250
} | class ____(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
_assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
def assertNotRegex(self, *args, **kwargs):
return getattr(self, _assertNotRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
# This does exactly the same what the :func:`py3:functools.update_wrapper`
# function does on Python versions after 3.2. It sets the ``__wrapped__``
# attribute on ``wrapper`` object and it doesn't raise an error if any of
# the attributes mentioned in ``assigned`` and ``updated`` are missing on
# ``wrapped`` object.
def _update_wrapper(wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
continue
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
wrapper.__wrapped__ = wrapped
return wrapper
_update_wrapper.__doc__ = functools.update_wrapper.__doc__
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
return functools.partial(_update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
wraps.__doc__ = functools.wraps.__doc__
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
if sys.version_info[:2] >= (3, 7):
# This version introduced PEP 560 that requires a bit
# of extra care (we mimic what is done by __build_class__).
resolved_bases = types.resolve_bases(bases)
if resolved_bases is not bases:
d['__orig_bases__'] = bases
else:
resolved_bases = bases
return meta(name, resolved_bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, binary_type):
return s
if isinstance(s, text_type):
return s.encode(encoding, errors)
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
# Optimization: Fast return for the common case.
if type(s) is str:
return s
if PY2 and isinstance(s, text_type):
return s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
return s.decode(encoding, errors)
elif not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
"""
A class decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| Module_six_moves_urllib |
python | PyCQA__pylint | tests/functional/u/unused/unused_import_assigned_to.py | {
"start": 344,
"end": 373
} | class ____:
foo = foo.baz
| bar |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 481415,
"end": 481827
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("BypassForcePushAllowance", graphql_name="node")
"""The item at the end of the edge."""
| BypassForcePushAllowanceEdge |
python | pytest-dev__pytest | testing/test_pathlib.py | {
"start": 48446,
"end": 62990
} | class ____:
"""Test import_path support when importing from properly namespace packages."""
@pytest.fixture(autouse=True)
def setup_imports_tracking(self, monkeypatch: MonkeyPatch) -> None:
monkeypatch.setattr(sys, "pytest_namespace_packages_test", [], raising=False)
def setup_directories(
self, tmp_path: Path, monkeypatch: MonkeyPatch | None, pytester: Pytester
) -> tuple[Path, Path]:
# Use a code to guard against modules being imported more than once.
# This is a safeguard in case future changes break this invariant.
code = dedent(
"""
import sys
imported = getattr(sys, "pytest_namespace_packages_test", [])
assert __name__ not in imported, f"{__name__} already imported"
imported.append(__name__)
sys.pytest_namespace_packages_test = imported
"""
)
# Set up a namespace package "com.company", containing
# two subpackages, "app" and "calc".
(tmp_path / "src/dist1/com/company/app/core").mkdir(parents=True)
(tmp_path / "src/dist1/com/company/app/__init__.py").write_text(
code, encoding="UTF-8"
)
(tmp_path / "src/dist1/com/company/app/core/__init__.py").write_text(
code, encoding="UTF-8"
)
models_py = tmp_path / "src/dist1/com/company/app/core/models.py"
models_py.touch()
(tmp_path / "src/dist2/com/company/calc/algo").mkdir(parents=True)
(tmp_path / "src/dist2/com/company/calc/__init__.py").write_text(
code, encoding="UTF-8"
)
(tmp_path / "src/dist2/com/company/calc/algo/__init__.py").write_text(
code, encoding="UTF-8"
)
algorithms_py = tmp_path / "src/dist2/com/company/calc/algo/algorithms.py"
algorithms_py.write_text(code, encoding="UTF-8")
r = validate_namespace_package(
pytester,
[tmp_path / "src/dist1", tmp_path / "src/dist2"],
["com.company.app.core.models", "com.company.calc.algo.algorithms"],
)
assert r.ret == 0
if monkeypatch is not None:
monkeypatch.syspath_prepend(tmp_path / "src/dist1")
monkeypatch.syspath_prepend(tmp_path / "src/dist2")
return models_py, algorithms_py
@pytest.mark.parametrize("import_mode", ["prepend", "append", "importlib"])
def test_resolve_pkg_root_and_module_name_ns_multiple_levels(
self,
tmp_path: Path,
monkeypatch: MonkeyPatch,
pytester: Pytester,
import_mode: str,
) -> None:
models_py, algorithms_py = self.setup_directories(
tmp_path, monkeypatch, pytester
)
pkg_root, module_name = resolve_pkg_root_and_module_name(
models_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist1",
"com.company.app.core.models",
)
mod = import_path(
models_py, mode=import_mode, root=tmp_path, consider_namespace_packages=True
)
assert mod.__name__ == "com.company.app.core.models"
assert mod.__file__ == str(models_py)
# Ensure we do not import the same module again (#11475).
mod2 = import_path(
models_py, mode=import_mode, root=tmp_path, consider_namespace_packages=True
)
assert mod is mod2
pkg_root, module_name = resolve_pkg_root_and_module_name(
algorithms_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist2",
"com.company.calc.algo.algorithms",
)
mod = import_path(
algorithms_py,
mode=import_mode,
root=tmp_path,
consider_namespace_packages=True,
)
assert mod.__name__ == "com.company.calc.algo.algorithms"
assert mod.__file__ == str(algorithms_py)
# Ensure we do not import the same module again (#11475).
mod2 = import_path(
algorithms_py,
mode=import_mode,
root=tmp_path,
consider_namespace_packages=True,
)
assert mod is mod2
def test_ns_multiple_levels_import_rewrite_assertions(
self,
tmp_path: Path,
monkeypatch: MonkeyPatch,
pytester: Pytester,
) -> None:
"""Check assert rewriting with `--import-mode=importlib` (#12659)."""
self.setup_directories(tmp_path, monkeypatch, pytester)
code = dedent("""
def test():
assert "four lights" == "five lights"
""")
# A case is in a subdirectory with an `__init__.py` file.
test_py = tmp_path / "src/dist2/com/company/calc/algo/test_demo.py"
test_py.write_text(code, encoding="UTF-8")
pkg_root, module_name = resolve_pkg_root_and_module_name(
test_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist2",
"com.company.calc.algo.test_demo",
)
result = pytester.runpytest("--import-mode=importlib", test_py)
result.stdout.fnmatch_lines(
[
"E AssertionError: assert 'four lights' == 'five lights'",
"E *",
"E - five lights*",
"E + four lights",
]
)
def test_ns_multiple_levels_import_error(
self,
tmp_path: Path,
pytester: Pytester,
) -> None:
# Trigger condition 1: ns and file with the same name
file = pytester.path / "cow/moo/moo.py"
file.parent.mkdir(parents=True)
file.write_text("data=123", encoding="utf-8")
# Trigger condition 2: tests are located in ns
tests = pytester.path / "cow/moo/test_moo.py"
tests.write_text(
dedent(
"""
from cow.moo.moo import data
def test_moo():
print(data)
"""
),
encoding="utf-8",
)
result = pytester.runpytest("--import-mode=importlib")
assert result.ret == ExitCode.OK
@pytest.mark.parametrize("import_mode", ["prepend", "append", "importlib"])
def test_incorrect_namespace_package(
self,
tmp_path: Path,
monkeypatch: MonkeyPatch,
pytester: Pytester,
import_mode: str,
) -> None:
models_py, algorithms_py = self.setup_directories(
tmp_path, monkeypatch, pytester
)
# Namespace packages must not have an __init__.py at its top-level
# directory; if it does, it is no longer a namespace package, and we fall back
# to importing just the part of the package containing the __init__.py files.
(tmp_path / "src/dist1/com/__init__.py").touch()
# Because of the __init__ file, 'com' is no longer a namespace package:
# 'com.company.app' is importable as a normal module.
# 'com.company.calc' is no longer importable because 'com' is not a namespace package anymore.
r = validate_namespace_package(
pytester,
[tmp_path / "src/dist1", tmp_path / "src/dist2"],
["com.company.app.core.models", "com.company.calc.algo.algorithms"],
)
assert r.ret == 1
r.stderr.fnmatch_lines("*No module named 'com.company.calc*")
pkg_root, module_name = resolve_pkg_root_and_module_name(
models_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist1",
"com.company.app.core.models",
)
# dist2/com/company will contain a normal Python package.
pkg_root, module_name = resolve_pkg_root_and_module_name(
algorithms_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist2/com/company",
"calc.algo.algorithms",
)
def test_detect_meta_path(
self,
tmp_path: Path,
monkeypatch: MonkeyPatch,
pytester: Pytester,
) -> None:
"""
resolve_pkg_root_and_module_name() considers sys.meta_path when importing namespace packages.
Regression test for #12112.
"""
class CustomImporter(importlib.abc.MetaPathFinder):
"""
Imports the module name "com" as a namespace package.
This ensures our namespace detection considers sys.meta_path, which is important
to support all possible ways a module can be imported (for example editable installs).
"""
def find_spec(
self, name: str, path: Any = None, target: Any = None
) -> importlib.machinery.ModuleSpec | None:
if name == "com":
spec = importlib.machinery.ModuleSpec("com", loader=None)
spec.submodule_search_locations = [str(com_root_2), str(com_root_1)]
return spec
return None
# Setup directories without configuring sys.path.
models_py, _algorithms_py = self.setup_directories(
tmp_path, monkeypatch=None, pytester=pytester
)
com_root_1 = tmp_path / "src/dist1/com"
com_root_2 = tmp_path / "src/dist2/com"
# Because the namespace package is not setup correctly, we cannot resolve it as a namespace package.
pkg_root, module_name = resolve_pkg_root_and_module_name(
models_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist1/com/company",
"app.core.models",
)
# Insert our custom importer, which will recognize the "com" directory as a namespace package.
new_meta_path = [CustomImporter(), *sys.meta_path]
monkeypatch.setattr(sys, "meta_path", new_meta_path)
# Now we should be able to resolve the path as namespace package.
pkg_root, module_name = resolve_pkg_root_and_module_name(
models_py, consider_namespace_packages=True
)
assert (pkg_root, module_name) == (
tmp_path / "src/dist1",
"com.company.app.core.models",
)
@pytest.mark.parametrize("insert", [True, False])
def test_full_ns_packages_without_init_files(
self, pytester: Pytester, tmp_path: Path, monkeypatch: MonkeyPatch, insert: bool
) -> None:
(tmp_path / "src/dist1/ns/b/app/bar/test").mkdir(parents=True)
(tmp_path / "src/dist1/ns/b/app/bar/m.py").touch()
if insert:
# The presence of this __init__.py is not a problem, ns.b.app is still part of the namespace package.
(tmp_path / "src/dist1/ns/b/app/__init__.py").touch()
(tmp_path / "src/dist2/ns/a/core/foo/test").mkdir(parents=True)
(tmp_path / "src/dist2/ns/a/core/foo/m.py").touch()
# Validate the namespace package by importing it in a Python subprocess.
r = validate_namespace_package(
pytester,
[tmp_path / "src/dist1", tmp_path / "src/dist2"],
["ns.b.app.bar.m", "ns.a.core.foo.m"],
)
assert r.ret == 0
monkeypatch.syspath_prepend(tmp_path / "src/dist1")
monkeypatch.syspath_prepend(tmp_path / "src/dist2")
assert resolve_pkg_root_and_module_name(
tmp_path / "src/dist1/ns/b/app/bar/m.py", consider_namespace_packages=True
) == (tmp_path / "src/dist1", "ns.b.app.bar.m")
assert resolve_pkg_root_and_module_name(
tmp_path / "src/dist2/ns/a/core/foo/m.py", consider_namespace_packages=True
) == (tmp_path / "src/dist2", "ns.a.core.foo.m")
def test_ns_import_same_name_directory_12592(
tmp_path: Path, pytester: Pytester
) -> None:
"""Regression for `--import-mode=importlib` with directory parent and child with same name (#12592)."""
y_dir = tmp_path / "x/y/y"
y_dir.mkdir(parents=True)
test_y = tmp_path / "x/y/test_y.py"
test_y.write_text("def test(): pass", encoding="UTF-8")
result = pytester.runpytest("--import-mode=importlib", test_y)
assert result.ret == ExitCode.OK
def test_is_importable(pytester: Pytester) -> None:
pytester.syspathinsert()
path = pytester.path / "bar/foo.py"
path.parent.mkdir()
path.touch()
assert is_importable("bar.foo", path) is True
# Ensure that the module that can be imported points to the path we expect.
path = pytester.path / "some/other/path/bar/foo.py"
path.mkdir(parents=True, exist_ok=True)
assert is_importable("bar.foo", path) is False
# Paths containing "." cannot be imported.
path = pytester.path / "bar.x/__init__.py"
path.parent.mkdir()
path.touch()
assert is_importable("bar.x", path) is False
# Pass starting with "." denote relative imports and cannot be checked using is_importable.
path = pytester.path / ".bar.x/__init__.py"
path.parent.mkdir()
path.touch()
assert is_importable(".bar.x", path) is False
def test_compute_module_name(tmp_path: Path) -> None:
assert compute_module_name(tmp_path, tmp_path) is None
assert compute_module_name(Path(), Path()) is None
assert compute_module_name(tmp_path, tmp_path / "mod.py") == "mod"
assert compute_module_name(tmp_path, tmp_path / "src/app/bar") == "src.app.bar"
assert compute_module_name(tmp_path, tmp_path / "src/app/bar.py") == "src.app.bar"
assert (
compute_module_name(tmp_path, tmp_path / "src/app/bar/__init__.py")
== "src.app.bar"
)
def validate_namespace_package(
pytester: Pytester, paths: Sequence[Path], modules: Sequence[str]
) -> RunResult:
"""
Validate that a Python namespace package is set up correctly.
In a sub interpreter, add 'paths' to sys.path and attempt to import the given modules.
In this module many tests configure a set of files as a namespace package, this function
is used as sanity check that our files are configured correctly from the point of view of Python.
"""
lines = [
"import sys",
# Configure sys.path.
*[f"sys.path.append(r{str(x)!r})" for x in paths],
# Imports.
*[f"import {x}" for x in modules],
]
return pytester.runpython_c("\n".join(lines))
| TestNamespacePackages |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/properties.py | {
"start": 2665,
"end": 2930
} | class ____:
# pyre-ignore[10]: __classproperty__ is a made-up name to allow testing
@__classproperty__
def my_class_property(cls) -> str:
return ""
def test_issue_in_class_property():
_test_sink(ClassProperty.my_class_property)
| ClassProperty |
python | weaviate__weaviate-python-client | weaviate/collections/queries/fetch_object_by_id/sync.py | {
"start": 325,
"end": 478
} | class ____(
Generic[Properties, References],
_FetchObjectByIDQueryExecutor[ConnectionSync, Properties, References],
):
pass
| _FetchObjectByIDQuery |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.