id
int64
20
338k
vocab_size
int64
2
671
ast_levels
int64
4
32
nloc
int64
1
451
n_ast_nodes
int64
12
5.6k
n_identifiers
int64
1
186
n_ast_errors
int64
0
10
n_words
int64
2
2.17k
n_whitespaces
int64
2
13.8k
fun_name
stringlengths
2
73
commit_message
stringlengths
51
15.3k
url
stringlengths
31
59
code
stringlengths
51
31k
ast_errors
stringlengths
0
1.46k
token_counts
int64
6
3.32k
file_name
stringlengths
5
56
language
stringclasses
1 value
path
stringlengths
7
134
commit_id
stringlengths
40
40
repo
stringlengths
3
28
complexity
int64
1
153
310,816
33
16
7
76
8
0
36
93
set_myzone
Start depreciation of custom services in Advantage Air (#58777) Co-authored-by: J. Nick Koston <nick@koston.org>
https://github.com/home-assistant/core.git
async def set_myzone(self, **kwargs): _LOGGER.warning( "The advantage_air.set_myzone service has been deprecated and will be removed in a future version, please use the select.select_option service on the MyZone entity" ) await self.async_change( {self.ac_key: {"info": {"myZone": self._zone["number"]}}} )
41
climate.py
Python
homeassistant/components/advantage_air/climate.py
0cd3302ebc5951c9ecd00ab1e6cd9ae28173fab5
core
1
314,495
19
8
15
148
17
0
36
158
device_class
Adjust CoverEntity property type hints in components (#73943) * Adjust CoverEntity property type hints in components * Revert changes to rflink * Revert changes to wilight
https://github.com/home-assistant/core.git
def device_class(self) -> CoverDeviceClass: if isinstance(self.node, Awning): return CoverDeviceClass.AWNING if isinstance(self.node, Blind): return CoverDeviceClass.BLIND if isinstance(self.node, GarageDoor): return CoverDeviceClass.GARAGE if isinstance(self.node, Gate): return CoverDeviceClass.GATE if isinstance(self.node, RollerShutter): return CoverDeviceClass.SHUTTER if isinstance(self.node, Window): return CoverDeviceClass.WINDOW return CoverDeviceClass.WINDOW
96
cover.py
Python
homeassistant/components/velux/cover.py
10dc38e0ec27f7bef990ee431459342f9c3c52b4
core
7
127,340
21
12
13
92
13
0
27
107
get_object_refs_from_last_execute
[serve] Visualize Deployment Graph with Gradio (#27897)
https://github.com/ray-project/ray.git
async def get_object_refs_from_last_execute(self) -> Dict[str, Any]: cache = {} for node_uuid, value in self.cache_from_last_execute.items(): if isinstance(value, asyncio.Task): cache[node_uuid] = await value else: cache[node_uuid] = value return cache
57
dag_node.py
Python
python/ray/dag/dag_node.py
4c970cc88247f7cfa7351297b8b5050f2372742e
ray
3
101,280
55
15
20
269
28
0
81
258
read_image_batch
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
https://github.com/deepfakes/faceswap.git
def read_image_batch(filenames, with_metadata=False): logger.trace("Requested batch: '%s'", filenames) batch = [None for _ in range(len(filenames))] if with_metadata: meta = [None for _ in range(len(filenames))] with futures.ThreadPoolExecutor() as executor: images = {executor.submit(read_image, filename, raise_error=True, with_metadata=with_metadata): idx for idx, filename in enumerate(filenames)} for future in futures.as_completed(images): ret_idx = images[future] if with_metadata: batch[ret_idx], meta[ret_idx] = future.result() else: batch[ret_idx] = future.result() batch = np.array(batch) retval = (batch, meta) if with_metadata else batch logger.trace("Returning images: (filenames: %s, batch shape: %s, with_metadata: %s)", filenames, batch.shape, with_metadata) return retval
172
image.py
Python
lib/image.py
2beceffad9b15c1fd78f06b9b272563321c5a41e
faceswap
8
153,457
23
16
9
96
10
0
28
135
_dialect_is_microsoft_sql
FEAT-#979: Enable reading from SQL server. (#4279) Co-authored-by: eavidan <eran.avidan@intel.com> Co-authored-by: Devin Petersohn <devin-petersohn@users.noreply.github.com> Signed-off-by: mvashishtha <mahesh@ponder.io>
https://github.com/modin-project/modin.git
def _dialect_is_microsoft_sql(self): if self._dialect_is_microsoft_sql_cache is None: self._dialect_is_microsoft_sql_cache = False if self.lib == _SQLALCHEMY_LIB_NAME: from sqlalchemy import create_engine self._dialect_is_microsoft_sql_cache = create_engine( *self.args, **self.kwargs ).driver in ("pymssql", "pyodbc") return self._dialect_is_microsoft_sql_cache
57
db_conn.py
Python
modin/db_conn.py
2d40797b2b700d81d4db4a4cd023d563edf6431f
modin
3
47,565
32
11
17
235
15
0
66
209
test_next_dagrun_after_auto_align
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_next_dagrun_after_auto_align(self): dag = DAG( dag_id='test_scheduler_auto_align_1', start_date=timezone.datetime(2016, 1, 1, 10, 10, 0), schedule_interval="4 5 * * *", ) EmptyOperator(task_id='dummy', dag=dag, owner='airflow') next_info = dag.next_dagrun_info(None) assert next_info and next_info.logical_date == timezone.datetime(2016, 1, 2, 5, 4) dag = DAG( dag_id='test_scheduler_auto_align_2', start_date=timezone.datetime(2016, 1, 1, 10, 10, 0), schedule_interval="10 10 * * *", ) EmptyOperator(task_id='dummy', dag=dag, owner='airflow') next_info = dag.next_dagrun_info(None) assert next_info and next_info.logical_date == timezone.datetime(2016, 1, 1, 10, 10)
156
test_dag.py
Python
tests/models/test_dag.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
3
3,401
15
8
4
50
9
0
16
28
test_stream_unsupported_by_bulk
Source Salesforce: Deprecate API Type parameter (#9302) * use BULK for the first sync, REST for incremental sync * if stream contains compound data or/and base64 use always REST * fix get stream state from connector state * fix integration test * refactor catalog name * format code * refactor unit tests * refactor unit tests 2 * format code 2 * Set additionalProperties to true not to break test temporarily * fix unit test and remove unnecessary filtering fields * bump version * updated spec and def yaml Co-authored-by: auganbay <auganenu@gmail.com>
https://github.com/airbytehq/airbyte.git
def test_stream_unsupported_by_bulk(stream_config, stream_api, caplog): stream_name = "AcceptedEventRelation" stream = _generate_stream(stream_name, stream_config, stream_api) assert not isinstance(stream, BulkSalesforceStream)
31
unit_test.py
Python
airbyte-integrations/connectors/source-salesforce/unit_tests/unit_test.py
0a3713a5a52995dc0dc205d8edfd097bf625899f
airbyte
1
40,284
105
21
30
400
39
0
152
739
comp_data
Avoid stall/crash on duplicate index in comp_data (#2776) * Avoid stall/crash on duplicate index in comp_data * Don't fail on empty data; update release notes
https://github.com/mwaskom/seaborn.git
def comp_data(self): if not hasattr(self, "ax"): # Probably a good idea, but will need a bunch of tests updated # Most of these tests should just use the external interface # Then this can be re-enabled. # raise AttributeError("No Axes attached to plotter") return self.plot_data if not hasattr(self, "_comp_data"): comp_data = ( self.plot_data .copy(deep=False) .drop(["x", "y"], axis=1, errors="ignore") ) for var in "yx": if var not in self.variables: continue parts = [] grouped = self.plot_data[var].groupby(self.converters[var], sort=False) for converter, orig in grouped: with pd.option_context('mode.use_inf_as_null', True): orig = orig.dropna() if var in self.var_levels: # TODO this should happen in some centralized location # it is similar to GH2419, but more complicated because # supporting `order` in categorical plots is tricky orig = orig[orig.isin(self.var_levels[var])] comp = pd.to_numeric(converter.convert_units(orig)) if converter.get_scale() == "log": comp = np.log10(comp) parts.append(pd.Series(comp, orig.index, name=orig.name)) if parts: comp_col = pd.concat(parts) else: comp_col = pd.Series(dtype=float, name=var) comp_data.insert(0, var, comp_col) self._comp_data = comp_data return self._comp_data
243
_core.py
Python
seaborn/_core.py
8605afd2ab1c6474c50ba90e478f4dc2decc8c7a
seaborn
9
272,692
5
9
2
32
4
0
5
11
minimum
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def minimum(inputs, **kwargs): return Minimum(**kwargs)(inputs)
18
minimum.py
Python
keras/layers/merging/minimum.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
160,868
32
14
9
189
21
0
39
172
trace
ENH: Adding __array_ufunc__ capability to MaskedArrays. This enables any ufunc numpy operations that are called on a MaskedArray to use the masked version of that function automatically without needing to resort to np.ma.func() calls.
https://github.com/numpy/numpy.git
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): #!!!: implement out + test! m = self._mask if m is nomask: result = self.view(np.ndarray).trace(offset=offset, axis1=axis1, axis2=axis2, out=out) return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=-1, out=out) trace.__doc__ = ndarray.trace.__doc__
116
core.py
Python
numpy/ma/core.py
6d77c591c59b5678f14ae5af2127eebb7d2415bc
numpy
2
25,708
12
10
4
44
4
0
13
29
get_duplicates_items
Feature/gift cards post mvp (#7977) * Add giftCardBulkCreate mutation * Extend OrderFilter with giftCardUsed and giftCardBought fields * Allow exporting gift cards * Update the name of the email template for export * Add exportGiftCards muttaion * Add used gift card filter * Export only unused gift cards * Block mutations for expired gift cards (#8115) * Block mutations for expired gift cards * Block only resending and activating expired gift cards * Add celery schedule task for deactivate expired cards (#8100) * Add gift card section to invoice (#8148) * Add filtering on gift card events (#8090) * Add filtering on gift card events * Filter gift card events by orders instead of order_id * Update populatedb with gift card data (#8016) * Generate gift cards with events in populate db * Set product types kinds and add placeholder for gift card product * Add dedicated gift card product images * Change order of order emails (#8168) * Drop duplicated kind field from producttype in populatedb (#8224) * Change gift card display_code field to last_4 (#8445) * Change gift card display_code field to last_4 * Change last4 to last4CodeChars * Fix github test env action configuration * Drop filtering gift cards by tag * Fix export gift card tags test * Re-add gift card tags query (#8412) * Update populatedb with gift card data (#8016) * Generate gift cards with events in populate db * Set product types kinds and add placeholder for gift card product * Add dedicated gift card product images * Add giftCardTags model * Add giftCardTags query Co-authored-by: Iga Karbowiak <40886528+IKarbowiak@users.noreply.github.com> Co-authored-by: IKarbowiak <iga.karbowiak@mirumee.com> * Do not create EXPIRY_DATE_UPDATED gift card event when expiry date is not changed (#8882) Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>
https://github.com/saleor/saleor.git
def get_duplicates_items(first_list, second_list): if first_list and second_list: return set(first_list) & set(second_list) return []
26
__init__.py
Python
saleor/graphql/core/utils/__init__.py
f5a45de4a22fecacfcd5b2cd18c07e5cf95ce27c
saleor
3
130,222
43
12
13
128
11
0
49
120
recover_args
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def recover_args(flattened_args): assert ( len(flattened_args) % 2 == 0 ), "Flattened arguments need to be even-numbered. See `flatten_args`." args = [] kwargs = {} for name_index in range(0, len(flattened_args), 2): name, arg = flattened_args[name_index], flattened_args[name_index + 1] if name == DUMMY_TYPE: args.append(arg) else: kwargs[name] = arg return args, kwargs
80
signature.py
Python
python/ray/_private/signature.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
3
241,561
20
10
7
157
23
1
21
41
test_ddp_spawn_configure_ddp
Rename training plugin test files & names to strategy (#11303)
https://github.com/Lightning-AI/lightning.git
def test_ddp_spawn_configure_ddp(tmpdir): trainer = Trainer(default_root_dir=tmpdir, num_processes=2, strategy="ddp_spawn", fast_dev_run=True) model = BoringModelDDP() trainer.fit(model) trainer.validate(model, dataloaders=model.val_dataloader()) trainer.test(model, dataloaders=model.test_dataloader()) trainer.predict(model, dataloaders=model.predict_dataloader()) @pytest.mark.parametrize("trainer_fn", [TrainerFn.FITTING, "other"])
@pytest.mark.parametrize("trainer_fn", [TrainerFn.FITTING, "other"])
79
test_ddp_spawn_strategy.py
Python
tests/strategies/test_ddp_spawn_strategy.py
650c710efacd633fa283955145342bb64063c883
lightning
1
59,215
16
9
7
77
7
0
29
50
test_result_literal_null_is_distinguishable_from_none
Implement `prefect.result` types and utilities (#6973)
https://github.com/PrefectHQ/prefect.git
async def test_result_literal_null_is_distinguishable_from_none(): result = await ResultLiteral.create(None) assert result is not None serialized = result.json() assert serialized is not None assert serialized != "null" assert json.loads(serialized) is not None
45
test_result_literal.py
Python
tests/results/test_result_literal.py
a0c1df21a43c5045c8306d6f8ef912155d9a86f4
prefect
1
144,152
24
11
17
113
18
0
27
166
_replace_alarm_config_variables
[autoscaler] Add AWS Autoscaler CloudWatch Alarm support (#21523) These changes add a set of improvements to enable automatic creation and update of CloudWatch alarms when provisioning AWS Autoscaling clusters. Successful implementation of these improvements will allow AWS Autoscaler users to: Setup alarms against Ray CloudWatch metrics to get notified about increased load, service outage. Update their CloudWatch alarm JSON configuration files during Ray up execution time. Notes: This PR is a follow-up PR for #20266, which adds CloudWatch alarm support.
https://github.com/ray-project/ray.git
def _replace_alarm_config_variables(self) -> List[Dict[str, Any]]: data = self._load_config_file("alarm") param_data = [] for item in data: item_out = copy.deepcopy(item) self._replace_all_config_variables( item_out, self.node_id, self.cluster_name, self.provider_config["region"], ) param_data.append(item_out) return param_data
71
cloudwatch_helper.py
Python
python/ray/autoscaler/_private/aws/cloudwatch/cloudwatch_helper.py
8237c6228fe884f463ae68fbb744f98decad6a85
ray
2
120,519
204
18
5
748
71
0
309
610
convert_scatter_jax_to_tf
feat: support subset of scatters with no XLA Supports: - Scatters with unique indices e.g..at[:, ((3,4),(5,5)), 3].add - Contiguous scatters with non-unique indices of single depth e.g..at[:, ((2,2,2),)].add
https://github.com/google/jax.git
def convert_scatter_jax_to_tf(update_op, unsorted_segment_op=None): def error(msg): suffix = ("See source code for the precise conditions under which " "scatter_(update/add/multiply/min/max) ops can be converted without XLA.") return _xla_disabled_error("scatter_(update/add/multiply/min/max)", f"{msg} - {suffix}") def _sparse_scatter( operand, scatter_indices, updates, update_jaxpr, update_consts, dimension_numbers, indices_are_sorted: bool, unique_indices: bool, mode, _in_avals: Sequence[core.ShapedArray], _out_aval: core.ShapedArray): # Infer unique indices from lack of batch dimension unique_indices = unique_indices or (len(scatter_indices.shape) == 1) if unique_indices: suboperand = tf.gather_nd(operand, scatter_indices) updated_suboperand = update_op(suboperand, updates) # add a batch dim if none exist if len(scatter_indices.shape) == 1: scatter_indices = scatter_indices[None] updated_suboperand = updated_suboperand[None] y = tf.tensor_scatter_nd_update(operand, scatter_indices, updated_suboperand) else: if (scatter_indices.shape[-1] == 1) and (unsorted_segment_op != None): # If only indexing into the first dimension, it's a segment op operand_update = unsorted_segment_op(updates, tf.squeeze(scatter_indices, -1), operand.shape[0]) y = update_op(operand, operand_update) else: raise error("Scatter supports unique indices. 
Scatter also supports non-unique indices with indexing into only one dimension for (add, mul, min, max)") return y def sparse_scatter( operand, scatter_indices, updates, update_jaxpr, update_consts, dimension_numbers, indices_are_sorted: bool, unique_indices: bool, mode, _in_avals: Sequence[core.ShapedArray], _out_aval: core.ShapedArray): ud = dimension_numbers.update_window_dims wd = dimension_numbers.inserted_window_dims sd = dimension_numbers.scatter_dims_to_operand_dims dtype = operand.dtype # assume updates has same dtype as operand if dtype in [tf.bool, tf.complex64]: raise error(f"Scatter does not support operands of type {dtype}") if not (wd == sd): raise error("Complex scatters are not supported") if not (mode == lax.GatherScatterMode.PROMISE_IN_BOUNDS): raise error("Only scatter mode `PROMISE_IN_BOUNDS` is supported") # Shift axes to the front to match tf syntax, inverse afterwards fwd = partial(shift_axes_forward, axes=sd) inv = partial(fwd, inverse=True) # shift update value axes to the back, so batch are at the front updates_shifted = shift_axes_forward(updates, axes=ud, forward=False) return inv(_sparse_scatter( fwd(operand), scatter_indices, updates_shifted, update_jaxpr, update_consts, dimension_numbers, indices_are_sorted, unique_indices, mode, _in_avals, _out_aval, )) return sparse_scatter tf_impl_no_xla[lax.scatter_p] = convert_scatter_jax_to_tf(lambda x,y: y) # just replace with the update tf_impl_no_xla[lax.scatter_add_p] = convert_scatter_jax_to_tf(tf.add, tf.math.unsorted_segment_sum) tf_impl_no_xla[lax.scatter_mul_p] = convert_scatter_jax_to_tf(tf.multiply, tf.math.unsorted_segment_prod) tf_impl_no_xla[lax.scatter_min_p] = convert_scatter_jax_to_tf(tf.minimum, tf.math.unsorted_segment_min) tf_impl_no_xla[lax.scatter_max_p] = convert_scatter_jax_to_tf(tf.maximum, tf.math.unsorted_segment_max) tf_impl_no_xla[lax.sort_p] = _unimplemented("sort")
17
impl_no_xla.py
Python
jax/experimental/jax2tf/impl_no_xla.py
17f8638fd451e46fc387ef6f32a8db13b2a92e5a
jax
1
43,685
11
7
2
61
10
0
12
31
node_id
Map and Partial DAG authoring interface for Dynamic Task Mapping (#19965) * Make DAGNode a proper Abstract Base Class * Prevent mapping an already mapped Task/TaskGroup Also prevent calls like .partial(...).partial(...). It is uncertain whether these kinds of repeated partial/map calls have utility, so let's disable them entirely for now to simplify implementation. We can always add them if they are proven useful. Co-authored-by: Tzu-ping Chung <tp@astronomer.io>
https://github.com/apache/airflow.git
def node_id(self) -> str: raise NotImplementedError() task_group: Optional["TaskGroup"] start_date: Optional[pendulum.DateTime] end_date: Optional[pendulum.DateTime]
11
taskmixin.py
Python
airflow/models/taskmixin.py
e9226139c2727a4754d734f19ec625c4d23028b3
airflow
1
111,419
9
12
5
80
11
0
9
28
test_json_to_doc_attribute_consistency
Add Doc.from_json() (#10688) * Implement Doc.from_json: rough draft. * Implement Doc.from_json: first draft with tests. * Implement Doc.from_json: added documentation on website for Doc.to_json(), Doc.from_json(). * Implement Doc.from_json: formatting changes. * Implement Doc.to_json(): reverting unrelated formatting changes. * Implement Doc.to_json(): fixing entity and span conversion. Moving fixture and doc <-> json conversion tests into single file. * Implement Doc.from_json(): replaced entity/span converters with doc.char_span() calls. * Implement Doc.from_json(): handling sentence boundaries in spans. * Implementing Doc.from_json(): added parser-free sentence boundaries transfer. * Implementing Doc.from_json(): added parser-free sentence boundaries transfer. * Implementing Doc.from_json(): incorporated various PR feedback. * Renaming fixture for document without dependencies. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implementing Doc.from_json(): using two sent_starts instead of one. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implementing Doc.from_json(): doc_without_dependency_parser() -> doc_without_deps. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implementing Doc.from_json(): incorporating various PR feedback. Rebased on latest master. * Implementing Doc.from_json(): refactored Doc.from_json() to work with annotation IDs instead of their string representations. * Implement Doc.from_json(): reverting unwanted formatting/rebasing changes. * Implement Doc.from_json(): added check for char_span() calculation for entities. * Update spacy/tokens/doc.pyx Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): minor refactoring, additional check for token attribute consistency with corresponding test. * Implement Doc.from_json(): removed redundancy in annotation type key naming. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): Simplifying setting annotation values. 
Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement doc.from_json(): renaming annot_types to token_attrs. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): adjustments for renaming of annot_types to token_attrs. * Implement Doc.from_json(): removing default categories. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): simplifying lexeme initialization. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): simplifying lexeme initialization. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): refactoring to only have keys for present annotations. * Implement Doc.from_json(): fix check for tokens' HEAD attributes. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): refactoring Doc.from_json(). * Implement Doc.from_json(): fixing span_group retrieval. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): fixing span retrieval. * Implement Doc.from_json(): added schema for Doc JSON format. Minor refactoring in Doc.from_json(). * Implement Doc.from_json(): added comment regarding Token and Span extension support. * Implement Doc.from_json(): renaming inconsistent_props to partial_attrs.. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): adjusting error message. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): extending E1038 message. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): added params to E1038 raises. * Implement Doc.from_json(): combined attribute collection with partial attributes check. * Implement Doc.from_json(): added optional schema validation. * Implement Doc.from_json(): fixed optional fields in schema, tests. * Implement Doc.from_json(): removed redundant None check for DEP. * Implement Doc.from_json(): added passing of schema validatoin message to E1037.. 
* Implement Doc.from_json(): removing redundant error E1040. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): changing message for E1037. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): adjusted website docs and docstring of Doc.from_json(). * Update spacy/tests/doc/test_json_doc_conversion.py * Implement Doc.from_json(): docstring update. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): docstring update. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): website docs update. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): docstring formatting. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): docstring formatting. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): fixing Doc reference in website docs. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): reformatted website/docs/api/doc.md. * Implement Doc.from_json(): bumped IDs of new errors to avoid merge conflicts. * Implement Doc.from_json(): fixing bug in tests. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Implement Doc.from_json(): fix setting of sentence starts for docs without DEP. * Implement Doc.from_json(): add check for valid char spans when manually setting sentence boundaries. Refactor sentence boundary setting slightly. Move error message for lack of support for partial token annotations to errors.py. * Implement Doc.from_json(): simplify token sentence start manipulation. Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com> * Combine related error messages * Update spacy/tests/doc/test_json_doc_conversion.py Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
https://github.com/explosion/spaCy.git
def test_json_to_doc_attribute_consistency(doc): doc_json = doc.to_json() doc_json["tokens"][1].pop("morph") with pytest.raises(ValueError): Doc(doc.vocab).from_json(doc_json)
44
test_json_doc_conversion.py
Python
spacy/tests/doc/test_json_doc_conversion.py
8387ce4c01db48d92ac5638e18316c0f1fc8861e
spaCy
1
108,031
43
13
12
159
13
0
59
187
_get_font_family_and_reduced
Move towards making texmanager stateless. Previously, TexManager needed to call get_font_config at a specific place in the middle of processing to update some internal attributes before proceeding with TeX source generation. Instead, move towards making TexManager stateless (except for caching), i.e. the user facing API should be thought of as a bunch of independently callable functions `make_tex()`, `make_dvi()`, etc. (they will probably stay as methods on a "empty" TexManager object for a long time for backcompat, in fact).
https://github.com/matplotlib/matplotlib.git
def _get_font_family_and_reduced(cls): ff = rcParams['font.family'] ff_val = ff[0].lower() if len(ff) == 1 else None if len(ff) == 1 and ff_val in cls._font_families: return ff_val, False elif len(ff) == 1 and ff_val in cls._font_preambles: return cls._font_types[ff_val], True else: _log.info('font.family must be one of (%s) when text.usetex is ' 'True. serif will be used by default.', ', '.join(cls._font_families)) return 'serif', False
94
texmanager.py
Python
lib/matplotlib/texmanager.py
13147992b317c29c6e832ca7f6d05bf48aeb0718
matplotlib
6
293,342
9
9
3
45
8
0
9
23
toggle
Add switch platform to bosch_shc integration (#62315) * Add support for switch platform. * Add untested files to .coveragerc. * Differ between Light Switch and Smart Plug. Bumped to boschshcpy==0.2.27 * Removed duplicated code. Fixed suggestions from code review. * Fixed pylint errors * Fix pylint issue. * Add property statement * Fixed wrong attribute access * Apply suggestions from code review Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Move switch function to base class. Changes from code review. * Apply suggestions from code review Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Merged camera switch into SHCSwitch class * Type hint * Removed deprecated sensor entities in switch device. Added routing switch entity. * Apply suggestions from code review Co-authored-by: epenet <6771947+epenet@users.noreply.github.com> * Icon and EntityCategory as class attributes Co-authored-by: epenet <6771947+epenet@users.noreply.github.com>
https://github.com/home-assistant/core.git
def toggle(self, **kwargs) -> None: setattr(self._device, self.entity_description.on_key, not self.is_on)
28
switch.py
Python
homeassistant/components/bosch_shc/switch.py
0b7b1baf30182fd12e55ee9fe66fa3788fee71ba
core
1
338,161
86
14
31
550
43
0
146
349
load_accelerator_state
Megatron-LM integration (#667) * Megatron-LM integration * add code and resolve comment Co-Authored-By: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * add code * add code * fix many :bug: * add code * add code and reverting tracker processes * updating logging utilities, fixing Pipeline Parallelism and dataset/dataloader :bug: s 1. Fixing bugs related to Pipeline Parallelism 2. Fixing bugs related to dataloaders/datasets. 3. Fixing logging utilities so that all logging and tracking happens on last process when using Megatron. * addressing comments * resolving comments * update code * refactoring and adding code to support custom implementation of`AbstractTrainStep` class * minor change * Many fixes for supporting custom TrainStep and Megatron Indexed Datasets * Add code, :bug: fixes and a initial doc file with headings * fixing a big :bug: related to loading checkpoints * adding doc and an example * example test CI * docs * more docs * more doc changes * more doc changes * docs * more docs * doc fixing * trying if we can directly import megatronlm utils * doc fixing and throwing error if megatron isn't available. * resolving comments * fixes to bert and t5 and more docs Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
https://github.com/huggingface/accelerate.git
def load_accelerator_state(input_dir, models, optimizers, schedulers, process_index, scaler=None): # Model states for i, model in enumerate(models): weights_name = f"{MODEL_NAME}.bin" if i == 0 else f"{MODEL_NAME}_{i}.bin" input_model_file = os.path.join(input_dir, weights_name) models[i].load_state_dict(torch.load(input_model_file, map_location="cpu")) logger.info("All model weights loaded successfully") # Optimizer states for i, opt in enumerate(optimizers): optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" input_optimizer_file = os.path.join(input_dir, optimizer_name) optimizers[i].load_state_dict(torch.load(input_optimizer_file, map_location="cpu")) logger.info("All optimizer states loaded successfully") # Scheduler states for i, scheduler in enumerate(schedulers): scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" input_scheduler_file = os.path.join(input_dir, scheduler_name) scheduler.load_state_dict(torch.load(input_scheduler_file)) logger.info("All scheduler states loaded successfully") # GradScaler state if scaler is not None: input_scaler_file = os.path.join(input_dir, SCALER_NAME) scaler.load_state_dict(torch.load(input_scaler_file)) logger.info("GradScaler state loaded successfully") # Random states try: states = torch.load(os.path.join(input_dir, f"{RNG_STATE_NAME}_{process_index}.pkl")) random.setstate(states["random_state"]) np.random.set_state(states["numpy_random_seed"]) torch.set_rng_state(states["torch_manual_seed"]) torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) # ^^ safe to call this function even if cuda is not available if is_tpu_available(): xm.set_rng_state(states["xm_seed"]) logger.info("All random states loaded successfully") except: logger.info("Could not load random states")
305
checkpointing.py
Python
src/accelerate/checkpointing.py
e3ebf176b85a57f1d9f082b2aeda22df21e2d395
accelerate
10
56,454
36
13
30
232
17
0
82
433
test_to_block_document_anonymous_errors
Improve anonymous name generation and check client anonymous blocks
https://github.com/PrefectHQ/prefect.git
def test_to_block_document_anonymous_errors(self, block_type_x): # explicit false with pytest.raises( ValueError, match="(No name provided, either as an argument or on the block)", ): self.MyRegisteredBlock(x="x")._to_block_document( block_schema_id=uuid4(), block_type_id=block_type_x.id, is_anonymous=False, ) # none with no fallback with pytest.raises( ValueError, match="(No name provided, either as an argument or on the block)", ): self.MyRegisteredBlock(x="x")._to_block_document( block_schema_id=uuid4(), block_type_id=block_type_x.id, is_anonymous=None, ) # none with False fallback anon_block_4 = self.MyRegisteredBlock(x="x") anon_block_4._is_anonymous = False with pytest.raises( ValueError, match="(No name provided, either as an argument or on the block)", ): anon_block_4._to_block_document( block_schema_id=uuid4(), block_type_id=block_type_x.id, is_anonymous=None, )
139
test_core.py
Python
tests/blocks/test_core.py
539d90ed73c1332590d0345daf5e2b2ae4b49b92
prefect
1
21,650
64
18
36
327
36
0
86
514
from_requirement
Vendor in latest requirements lib and pip-shims in order to drop packaging and resolve differences in sourcing it.
https://github.com/pypa/pipenv.git
def from_requirement(cls, requirement, parent=None): name = requirement.normalized_name specifiers = requirement.ireq.specifier if not requirement.editable else "" markers = requirement.ireq.markers extras = requirement.ireq.extras is_pinned = is_pinned_requirement(requirement.ireq) is_constraint = bool(parent) _, finder = get_finder(sources=None) candidates = [] if not is_pinned and not requirement.editable: for r in requirement.find_all_matches(finder=finder): req = make_install_requirement( name, r.version, extras=extras, markers=markers, constraint=is_constraint, ) req.req.link = getattr(r, "location", getattr(r, "link", None)) req.parent = parent candidates.append(req) candidates = sorted( set(candidates), key=lambda k: parse(version_from_ireq(k)), ) else: candidates = [requirement.ireq] return cls( name=name, specifiers=specifiers, markers=markers, candidates=candidates, requirement=requirement, parent=parent, finder=finder, )
213
dependencies.py
Python
pipenv/vendor/requirementslib/models/dependencies.py
8a4d2eb130fd173466310f59df607ea59bfc44a5
pipenv
5
101,186
50
14
21
316
37
0
62
397
estimate_blur
lib.detected_face.Mask - Add source + target offset and coverage to set_sub_crop method
https://github.com/deepfakes/faceswap.git
def estimate_blur(cls, image, metadata=None): if metadata is not None: alignments = metadata["alignments"] det_face = DetectedFace() det_face.from_png_meta(alignments) aln_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"), image=image, centering="legacy", size=256, is_aligned=True) mask = det_face.mask["components"] mask.set_sub_crop(aln_face.pose.offset[mask.stored_centering], aln_face.pose.offset["legacy"], centering="legacy") mask = cv2.resize(mask.mask, (256, 256), interpolation=cv2.INTER_CUBIC)[..., None] image = np.minimum(aln_face.face, mask) if image.ndim == 3: image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) blur_map = cv2.Laplacian(image, cv2.CV_32F) score = np.var(blur_map) / np.sqrt(image.shape[0] * image.shape[1]) return score
204
sort.py
Python
tools/sort/sort.py
32950897376b48e0f08b46385602e4df902cf49e
faceswap
3
336,897
13
13
4
66
10
0
14
46
enable_gradient_checkpointing
[UNet2DConditionModel] add gradient checkpointing (#461) * add grad ckpt to downsample blocks * make it work * don't pass gradient_checkpointing to upsample block * add tests for UNet2DConditionModel * add test_gradient_checkpointing * add gradient_checkpointing for up and down blocks * add functions to enable and disable grad ckpt * remove the forward argument * better naming * make supports_gradient_checkpointing private
https://github.com/huggingface/diffusers.git
def enable_gradient_checkpointing(self): if not self._supports_gradient_checkpointing: raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.") self.apply(partial(self._set_gradient_checkpointing, value=True))
33
modeling_utils.py
Python
src/diffusers/modeling_utils.py
e7120bae955949355e50a4c2adef8353790fda0d
diffusers
2
260,258
24
10
6
56
8
0
24
50
sqeuclidean_row_norms
MAINT Create private `_pairwise_distances_reductions` submodule (#23724) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> Co-authored-by: Jérémie du Boisberranger <jeremiedbb@users.noreply.github.com> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
https://github.com/scikit-learn/scikit-learn.git
def sqeuclidean_row_norms(X, num_threads): if X.dtype == np.float64: return _sqeuclidean_row_norms64(X, num_threads) raise ValueError( f"Only 64bit float datasets are supported at this time, got: X.dtype={X.dtype}." )
30
_dispatcher.py
Python
sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py
325130f205f446fcdea333d40bd9197605e3bccc
scikit-learn
2
45,563
59
14
25
132
13
0
82
357
_validate_extra
Deprecate non-JSON conn.extra (#21816) Connection extra field is generally assumed to be JSON but we don't actually require it. Here we deprecate non-JSON extra so that in 3.0 we can require it. Further, we require that it not just be any json but must also parse as dict, because a string value such as '"hi"' or '[1,2,3]' is json, but a very bad practice.
https://github.com/apache/airflow.git
def _validate_extra(extra, conn_id) -> None: if extra is None: return None try: extra_parsed = json.loads(extra) if not isinstance(extra_parsed, dict): warnings.warn( "Encountered JSON value in `extra` which does not parse as a dictionary in " f"connection {conn_id!r}. From Airflow 3.0, the `extra` field must contain a JSON " "representation of a Python dict.", DeprecationWarning, stacklevel=3, ) except json.JSONDecodeError: warnings.warn( f"Encountered non-JSON in `extra` field for connection {conn_id!r}. Support for " "non-JSON `extra` will be removed in Airflow 3.0", DeprecationWarning, stacklevel=2, ) return None
74
connection.py
Python
airflow/models/connection.py
3aebb21c523c0eea0d4a1518d502ff95fd98011b
airflow
4
249,314
23
10
14
115
13
0
24
117
test_missing_parameter
Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style
https://github.com/matrix-org/synapse.git
def test_missing_parameter(self) -> None: channel = self.make_request( "POST", self.url, access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) self.assertEqual( "Missing integer query parameter 'before_ts'", channel.json_body["error"] )
71
test_media.py
Python
tests/rest/admin/test_media.py
2281427175e4c93a30c39607fb4ac23c2a1f399f
synapse
1
288,079
37
9
14
135
15
1
41
98
test_discovery_incl_nodeid
Move MQTT discovery hass.data globals to dataclass (#78706) * Add MQTT discovery hass.data globals to dataclass * isort * Additional rework * Add hass.data["mqtt_tags"] to dataclass * Follow-up comment * Corrections
https://github.com/home-assistant/core.git
async def test_discovery_incl_nodeid(hass, mqtt_mock_entry_no_yaml_config, caplog): await mqtt_mock_entry_no_yaml_config() async_fire_mqtt_message( hass, "homeassistant/binary_sensor/my_node_id/bla/config", '{ "name": "Beer", "state_topic": "test-topic" }', ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.beer") assert state is not None assert state.name == "Beer" assert ("binary_sensor", "my_node_id bla") in hass.data[ "mqtt" ].discovery_already_discovered @patch("homeassistant.components.mqtt.PLATFORMS", [Platform.BINARY_SENSOR])
@patch("homeassistant.components.mqtt.PLATFORMS", [Platform.BINARY_SENSOR])
65
test_discovery.py
Python
tests/components/mqtt/test_discovery.py
84b2c74746b694d217fe6d448a8dfff4bc2d7a9e
core
1
60,425
93
22
25
295
13
0
179
381
CleanseRawStrings
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def CleanseRawStrings(raw_lines): delimiter = None lines_without_raw_strings = [] for line in raw_lines: if delimiter: # Inside a raw string, look for the end end = line.find(delimiter) if end >= 0: # Found the end of the string, match leading space for this # line and resume copying the original lines, and also insert # a "" on the last line. leading_space = Match(r'^(\s*)\S', line) line = leading_space.group(1) + '""' + line[end + len(delimiter):] delimiter = None else: # Haven't found the end yet, append a blank line. line = '' else: # Look for beginning of a raw string. # See 2.14.15 [lex.string] for syntax. matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) if matched: delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) if end >= 0: # Raw string ended on same line line = (matched.group(1) + '""' + matched.group(3)[end + len(delimiter):]) delimiter = None else: # Start of a multi-line raw string line = matched.group(1) + '""' lines_without_raw_strings.append(line) # TODO(unknown): if delimiter is not None here, we might want to # emit a warning for unterminated string. return lines_without_raw_strings
167
cpp_lint.py
Python
code/deep/BJMMD/caffe/scripts/cpp_lint.py
cc4d0564756ca067516f71718a3d135996525909
transferlearning
6
135,997
30
12
17
105
18
0
31
128
_worker_health_check
[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938) Signed-off-by: Jun Gong <jungong@anyscale.com>
https://github.com/ray-project/ray.git
def _worker_health_check(self) -> List[int]: logger.info("Health checking all workers ...") remote_results = self.__worker_manager.foreach_actor( lambda w: w.sample_with_count(), healthy_only=False, ) return [ r.actor_id for r in remote_results if not r.ok and isinstance(r.get(), RayError) ]
65
worker_set.py
Python
rllib/evaluation/worker_set.py
e707ce4fb3717e3c05118c57f503dfbd03552ca9
ray
4
8,637
6
9
2
27
4
0
6
20
auto_window_size
Enable dataset window autosizing (#2721) * set windowed shuffle for large datasets * documentation * update to automatic windowing flag * address reviews * address reviews * update logging info and add auto_window flag passthrough * update tests to use flag passthrough * more descriptive test class name * todo to add link to windowing docs * local test handling for dask import * handle RayDataset import in local tests * bad type annotation * bad type annotation
https://github.com/ludwig-ai/ludwig.git
def auto_window_size(self): return int(self.object_store_size // 5)
13
test_ray.py
Python
tests/integration_tests/test_ray.py
0d19a48cff0958ed77926a0712cbdb6485d4034a
ludwig
1
38,306
85
13
28
176
23
0
130
382
test_diverse_beam_search
Black preview (#17217) * Black preview * Fixup too! * Fix check copies * Use the same version as the CI * Bump black
https://github.com/huggingface/transformers.git
def test_diverse_beam_search(self): article = bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. It's also his own middle name.", ], )
107
test_generation_utils.py
Python
tests/generation/test_generation_utils.py
afe5d42d8d1d80af911ed980c2936bfe887078f6
transformers
1
96,684
45
14
22
195
20
0
58
325
test_sessions_metrics_equal_num_keys
fix(metrics): Zero-fill response when there's no data [INGEST-941] (#32157) When there isn't any metrics data, the `groups` of the response is empty. However, the absence of data must be represented with an appropriate value. For example, `count_unique(user)` must return `0` when there aren't any users, instead of returning no data. The value representing the absence of data is `0` for sums and counts, and `None` for everything else (such as `p50`).
https://github.com/getsentry/sentry.git
def test_sessions_metrics_equal_num_keys(self): empty_groupbyes = ["project", "release", "environment", "session.status"] interval_days = "1d" for groupby in empty_groupbyes: with patch( "sentry.api.endpoints.organization_sessions.release_health", SessionsReleaseHealthBackend(), ): sessions_data = result_sorted(self.get_sessions_data(groupby, interval_days)) with patch( "sentry.release_health.metrics_sessions_v2.indexer.resolve", MockIndexer().resolve ), patch( "sentry.api.endpoints.organization_sessions.release_health", MetricsReleaseHealthBackend(), ): metrics_data = result_sorted(self.get_sessions_data(groupby, interval_days)) errors = compare_results( sessions=sessions_data, metrics=metrics_data, rollup=interval_days * 24 * 60 * 60, # days to seconds ) assert len(errors) == 0
114
test_metrics_sessions_v2.py
Python
tests/sentry/release_health/test_metrics_sessions_v2.py
cfdb7fdc1fef7f8a364bbfef050cdcfc66c82371
sentry
2
128,542
44
16
20
374
21
0
75
271
test_recurrent_unroll_and_filter
[RLlib] Add torch models (#29043) 1. converted class attributes to setters 2. use override decorator 3. SimpleModel should not have any T dimension, it can confuse folks. So I removed it. 4. I merged all the unittests under one class and separated them by methods names. It will be easier to use -k filter to run pytests later if we don't allow repetative method names. Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> Signed-off-by: Steven Morad <smorad@anyscale.com>
https://github.com/ray-project/ray.git
def test_recurrent_unroll_and_filter(self): inputs = TensorDict( { "in": torch.arange(B * T * 2).reshape(B, T, 2), "bork": torch.arange(5 * 4).reshape(5, 4), } ) states = TensorDict( { "in": torch.arange(B * 4).reshape(B, 4), "bork": torch.arange(5 * 4).reshape(5, 4), } ) outputs, out_states = SimpleRecurrentModel(ModelConfig()).unroll(inputs, states) desired = TensorDict({"out": torch.arange(B * T * 3).reshape(B, T, 3)}) desired_states = TensorDict({"out": torch.arange(B * 5).reshape(B, 5)}) for k in outputs.flatten().keys() | desired.flatten().keys(): check(outputs[k], desired[k]) for k in out_states.flatten().keys() | desired_states.flatten().keys(): check(out_states[k], desired_states[k])
235
test_torch_model.py
Python
rllib/models/tests/test_torch_model.py
b0945548e874642287b81514b71432a2330de1d3
ray
3
299,250
17
10
6
63
7
0
20
66
_async_validate_input
Sabnzbd config flow (#68138) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
https://github.com/home-assistant/core.git
async def _async_validate_input(self, user_input): errors = {} sab_api = await get_client(self.hass, user_input) if not sab_api: errors["base"] = "cannot_connect" return errors
35
config_flow.py
Python
homeassistant/components/sabnzbd/config_flow.py
3f5027834b2334eaf5eaab7884376b3cfb089a63
core
2
100,516
33
14
17
247
24
0
39
396
_crop_source_faces
bugfix: Preview Tool, ensure all config items are written
https://github.com/deepfakes/faceswap.git
def _crop_source_faces(self): logger.debug("Updating source faces") self._faces = {} for image in self.source: detected_face = image["detected_faces"][0] src_img = image["image"] detected_face.load_aligned(src_img, size=self._size, centering=self._centering) matrix = detected_face.aligned.matrix self._faces.setdefault("filenames", []).append(os.path.splitext(image["filename"])[0]) self._faces.setdefault("matrix", []).append(matrix) self._faces.setdefault("src", []).append(transform_image(src_img, matrix, self._size, self._padding)) self.update_source = False logger.debug("Updated source faces")
150
preview.py
Python
tools/preview/preview.py
71c20252c2e747f692289cdefe80ad0d5a456ea6
faceswap
2
258,754
65
12
65
357
47
0
92
380
fit
DOC Clarify splitting behavior of `fit_params` (#22379) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Tom Augspurger <tom.w.augspurger@gmail.com> Co-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, y=None, *, groups=None, **fit_params): estimator = self.estimator refit_metric = "score" if callable(self.scoring): scorers = self.scoring elif self.scoring is None or isinstance(self.scoring, str): scorers = check_scoring(self.estimator, self.scoring) else: scorers = _check_multimetric_scoring(self.estimator, self.scoring) self._check_refit_for_multimetric(scorers) refit_metric = self.refit X, y, groups = indexable(X, y, groups) fit_params = _check_fit_params(X, fit_params) cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator)) n_splits = cv_orig.get_n_splits(X, y, groups) base_estimator = clone(self.estimator) parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch) fit_and_score_kwargs = dict( scorer=scorers, fit_params=fit_params, return_train_score=self.return_train_score, return_n_test_samples=True, return_times=True, return_parameters=False, error_score=self.error_score, verbose=self.verbose, ) results = {} with parallel: all_candidate_params = [] all_out = [] all_more_results = defaultdict(list)
459
_search.py
Python
sklearn/model_selection/_search.py
bdb7db524a5cb584d975b3bb1823494bcd5eb92f
scikit-learn
12
37,626
33
10
5
119
12
0
39
53
corners_to_center_format
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
https://github.com/huggingface/transformers.git
def corners_to_center_format(x): x_transposed = x.T x0, y0, x1, y1 = x_transposed[0], x_transposed[1], x_transposed[2], x_transposed[3] b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] return np.stack(b, axis=-1) # Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes
81
feature_extraction_yolos.py
Python
src/transformers/models/yolos/feature_extraction_yolos.py
1ac698744c4dbdf1495d303246d08ffacdf4f5b8
transformers
1
145,261
25
8
5
83
12
1
32
51
test_usage_lib_cluster_metadata_generation_usage_disabled
[Usage Stats] Implement usage stats report "Turned off by default". (#22249) This is the second PR to implement usage stats on Ray. Please refer to the file usage_lib.py for more details. The full specification is here https://docs.google.com/document/d/1ZT-l9YbGHh-iWRUC91jS-ssQ5Qe2UQ43Lsoc1edCalc/edit#heading=h.17dss3b9evbj. This adds a dashboard module to enable usage stats. **Usage stats report is turned off by default** after this PR. We can control the report (enablement, report period, and URL. Note that URL is strictly for testing) using the env variable. ## NOTE This requires us to add `requests` to the default library. `requests` must be okay to be included because 1. it is extremely lightweight. It is implemented only with built-in libs. 2. It is really stable. The project basically claims they are "deprecated", meaning no new features will be added there. cc @edoakes @richardliaw for the approval For the HTTP request, I was alternatively considered httpx, but it was not as lightweight as `requests`. So I decided to implement async requests using the thread pool.
https://github.com/ray-project/ray.git
def test_usage_lib_cluster_metadata_generation_usage_disabled(shutdown_only): meta = ray_usage_lib._generate_cluster_metadata() assert "ray_version" in meta assert "python_version" in meta assert len(meta) == 2 @pytest.mark.skipif( sys.platform == "win32", reason="Test depends on runtime env feature not supported on Windows.", )
@pytest.mark.skipif( sys.platform == "win32", reason="Test depends on runtime env feature not supported on Windows.", )
28
test_usage_stats.py
Python
python/ray/tests/test_usage_stats.py
36a31cb6fde95d490c81c6de5d9f911b4cac8af2
ray
1
194,923
9
8
7
41
7
0
10
38
get_additional_agent_args
SeeKeR (#4447) * seeker * todo * readme updates; add test * small config changes * various updates * readme fix * model card * add arxiv link * surround spacy with try catch * more protected * more protection of imports * lint
https://github.com/facebookresearch/ParlAI.git
def get_additional_agent_args(cls) -> ParlaiParser: additional_agent_parser = SeekerAgent.get_additional_agent_args() GPT2ComboAgent.add_cmdline_args(additional_agent_parser) return additional_agent_parser
23
gpt2_seeker.py
Python
projects/seeker/agents/gpt2_seeker.py
7e453008fde751aff0cfd752662e19fe2adc7410
ParlAI
1
268,886
32
11
11
123
19
1
36
68
deserialize
Reorganize `advanced_activations.py` into smaller logically organized files hosted under an `activation` directory. PiperOrigin-RevId: 425465077
https://github.com/keras-team/keras.git
def deserialize(name, custom_objects=None): globs = globals() # only replace missing activations activation_globs = activation_layers.get_globals() for key, val in activation_globs.items(): if key not in globs: globs[key] = val return deserialize_keras_object( name, module_objects=globs, custom_objects=custom_objects, printable_module_name='activation function') @keras_export('keras.activations.get') @tf.__internal__.dispatch.add_dispatch_support
@keras_export('keras.activations.get') @tf.__internal__.dispatch.add_dispatch_support
62
activations.py
Python
keras/activations.py
c06d3d133469eb8adab5ee89a30dd8659865d303
keras
3
283,073
8
9
4
30
5
0
9
18
get_app_id
Logging : refactor log generation/collection (#1538) * Update only_send_file_size * Updating only_send_file_size feature * Updating only_send_file_size feature * Updating only_send_file_size feature * Updating settings_controller * Update settings_controller : help message * Data Collection : move boto3 as discord dependencies * Update dependencies * Refactoring log collection * Refactoring logging * Logging : add rolling clock * Logging : enable daemon for logging clock * Logging : fix linting and type * Logging : update linting * Logging : add comment * Logging : remove empty log files * Logging : adding PII filter * Logging removing : unlink * Logging : terminal.py * Logging : linting * Logging : linting * minor changes * Logging : update * Logging : linting * added userId * userId NA Co-authored-by: LBolte29 <lbolte@gmx.net>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_app_id() -> str: app_id = get_log_dir().stem return app_id
16
loggers.py
Python
gamestonk_terminal/loggers.py
56a9d76b805157e80aff7e8d8b0b5ca27875b107
OpenBBTerminal
1
33,479
19
13
5
72
11
1
23
53
_reorder_cache
add task_type_id to BERT to support ERNIE-2.0 and ERNIE-3.0 models (#18686) * add_ernie * remove Tokenizer in ernie * polish code * format code style * polish code * fix style * update doc * make fix-copies * change model name * change model name * fix dependency * add more copied from * rename ErnieLMHeadModel to ErnieForCausalLM do not expose ErnieLayer update doc * fix * make style * polish code * polish code * fix * fix * fix * fix * fix * final fix Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
https://github.com/huggingface/transformers.git
def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past @add_start_docstrings(, ERNIE_START_DOCSTRING)
@add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING)
40
modeling_ernie.py
Python
src/transformers/models/ernie/modeling_ernie.py
22f72185601d5167a747104b4aca102d0e92524c
transformers
3
213,070
13
9
4
42
4
0
14
46
setdefault
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>
https://github.com/aws/serverless-application-model.git
def setdefault(self, key, default): if key not in self: self[key] = default return self[key]
27
py27hash_fix.py
Python
samtranslator/utils/py27hash_fix.py
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
serverless-application-model
2
271,198
11
10
4
50
6
0
12
44
on_epoch_end
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def on_epoch_end(self): self._current_epoch += 1 if self._current_epoch < self._epochs: self._current_batch_size = self._linearly_increasing_batch_size()
29
data_adapter_test.py
Python
keras/engine/data_adapter_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
42,731
36
13
21
135
16
0
48
167
print_warning_if_setup_changed
Replace generation of docker volumes to be done from python (#23985) The pre-commit to generate docker volumes in docker compose file is now written in Python and it also uses the newer "volume:" syntax to define the volumes mounted in the docker-compose.
https://github.com/apache/airflow.git
def print_warning_if_setup_changed() -> bool: try: package_hash = get_package_setup_metadata_hash() except ModuleNotFoundError as e: if "importlib_metadata" in e.msg: return False sources_hash = get_installation_sources_config_metadata_hash() if sources_hash != package_hash: installation_sources = get_installation_airflow_sources() if installation_sources is not None: breeze_sources = installation_sources / "dev" / "breeze" warn_dependencies_changed() set_forced_answer_for_upgrade_check() ask_to_reinstall_breeze(breeze_sources) set_forced_answer(None) return True return False
74
path_utils.py
Python
dev/breeze/src/airflow_breeze/utils/path_utils.py
882535a8a2699af7d1d079ecebd8c31aa7fbaba9
airflow
5
247,541
36
11
17
139
13
0
38
175
test_blacklisted_ip_specific
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>
https://github.com/matrix-org/synapse.git
def test_blacklisted_ip_specific(self) -> None: self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")] channel = self.make_request( "GET", "preview_url?url=http://example.com", shorthand=False ) # No requests made. self.assertEqual(len(self.reactor.tcpClients), 0) self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, { "errcode": "M_UNKNOWN", "error": "DNS resolution failure during URL preview generation", }, )
81
test_url_preview.py
Python
tests/rest/media/v1/test_url_preview.py
32c828d0f760492711a98b11376e229d795fd1b3
synapse
1
260,403
172
15
77
766
65
1
284
716
glm_dataset
TST tight tests for GLMs (#23619) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
https://github.com/scikit-learn/scikit-learn.git
def glm_dataset(global_random_seed, request): data_type, model = request.param # Make larger dim more than double as big as the smaller one. # This helps when constructing singular matrices like (X, X). if data_type == "long": n_samples, n_features = 12, 4 else: n_samples, n_features = 4, 12 k = min(n_samples, n_features) rng = np.random.RandomState(global_random_seed) X = make_low_rank_matrix( n_samples=n_samples, n_features=n_features, effective_rank=k, tail_strength=0.1, random_state=rng, ) X[:, -1] = 1 # last columns acts as intercept U, s, Vt = linalg.svd(X, full_matrices=False) assert np.all(s > 1e-3) # to be sure assert np.max(s) / np.min(s) < 100 # condition number of X if data_type == "long": coef_unpenalized = rng.uniform(low=1, high=3, size=n_features) coef_unpenalized *= rng.choice([-1, 1], size=n_features) raw_prediction = X @ coef_unpenalized else: raw_prediction = rng.uniform(low=-3, high=3, size=n_samples) # minimum norm solution min ||w||_2 such that raw_prediction = X w: # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True) sw = np.full(shape=n_samples, fill_value=1 / n_samples) y = linear_loss.base_loss.link.inverse(raw_prediction) # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with # optimizer. Note that the problem is well conditioned such that we get accurate # results. 
l2_reg_strength = 1 fun = partial( linear_loss.loss, X=X[:, :-1], y=y, sample_weight=sw, l2_reg_strength=l2_reg_strength, ) grad = partial( linear_loss.gradient, X=X[:, :-1], y=y, sample_weight=sw, l2_reg_strength=l2_reg_strength, ) coef_penalized_with_intercept = _special_minimize( fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14 ) linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False) fun = partial( linear_loss.loss, X=X[:, :-1], y=y, sample_weight=sw, l2_reg_strength=l2_reg_strength, ) grad = partial( linear_loss.gradient, X=X[:, :-1], y=y, sample_weight=sw, l2_reg_strength=l2_reg_strength, ) coef_penalized_without_intercept = _special_minimize( fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14 ) # To be sure assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm( coef_unpenalized ) return ( model, X, y, coef_unpenalized, coef_penalized_with_intercept, coef_penalized_without_intercept, l2_reg_strength, ) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [False, True])
495
test_glm.py
Python
sklearn/linear_model/_glm/tests/test_glm.py
9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f
scikit-learn
3
180,769
58
20
25
308
17
0
85
378
_get_dataset_features_info
Add a flagging callback to save json files to a hugging face dataset (#1821) * work on saving flags in JSON format * explained what I did more clearly * final updates + added test case * reviews to flagging.py for HuggingFaceDatasetJSONSaver * formatted imports * used uuid for random ids * used uuid for random + function to get dataset infos * reformmated flagging.py * fix examples test * formatting * async examples * working on mix * comment out failing test * fixed interface problem * final updates to HuggingFaceDatasetJSONSaver flagging.py * final updates to HuggingFaceDatasetJSONSaver flagging.py * formatting * some tweaks * tweaks * tweaks * omar's fixes * added back test.init * restored test init Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
https://github.com/gradio-app/gradio.git
def _get_dataset_features_info(is_new, components):
    """Describe the flagging dataset for the given components.

    Returns a tuple ``(infos, file_preview_types, headers)`` where ``infos``
    is the HF dataset_infos structure, ``file_preview_types`` maps component
    classes to their preview type name, and ``headers`` lists the column
    headers (only populated when ``is_new`` is truthy).
    """
    infos = {"flagged": {"features": {}}}
    # Components whose flagged values also get a separate file-preview column.
    file_preview_types = {gr.Audio: "Audio", gr.Image: "Image"}
    headers = []

    # Headers and dataset_infos are only generated for a brand-new dataset.
    if is_new:
        for component in components:
            label = component.label
            headers.append(label)
            infos["flagged"]["features"][label] = {
                "dtype": "string",
                "_type": "Value",
            }
            if not isinstance(component, tuple(file_preview_types)):
                continue
            # Add the companion "<label> file" column with its preview type.
            headers.append(label + " file")
            for preview_cls, preview_type in file_preview_types.items():
                if isinstance(component, preview_cls):
                    infos["flagged"]["features"][label + " file"] = {
                        "_type": preview_type
                    }
                    break
        headers.append("flag")
        infos["flagged"]["features"]["flag"] = {
            "dtype": "string",
            "_type": "Value",
        }

    return infos, file_preview_types, headers
172
flagging.py
Python
gradio/flagging.py
9c4dc6c183b418eb82d6b062eeb0feef9cd52740
gradio
6
292,710
4
6
2
19
3
0
4
18
media_image_hash
Correctly handle missing mpd albumart (#66771) Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
https://github.com/home-assistant/core.git
def media_image_hash(self):
    """Return the media-image hash cached on ``self._media_image_hash``."""
    return self._media_image_hash
10
media_player.py
Python
homeassistant/components/mpd/media_player.py
facf22c2ddccbf9b205a2d8b26da457330b53ba6
core
1
82,298
13
13
27
69
14
0
16
51
test_processors
Enabled isort workflow (#7200) * Ran isort * Enabled isort workflow Co-authored-by: Vinit Kumar <mail@vinitkumar.me>
https://github.com/django-cms/django-cms.git
def test_processors(self):
    """Exercise plugin rendering processors.

    NOTE(review): the record's metadata reports ~13 source lines for this
    test, but only the setup below is present here — the body appears
    truncated in this snippet; do not assume the test ends at this point.
    """
    # Imported lazily inside the test rather than at module level.
    from djangocms_text_ckeditor.cms_plugins import TextPlugin
    from cms.plugin_pool import plugin_pool

    # First CMSPlugin in the test database, resolved to its concrete instance.
    instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]
    load_from_string = self.load_template_from_string
169
test_rendering.py
Python
cms/tests/test_rendering.py
a3110e1ff24085373898c7d2a85f628abeb8518d
django-cms
1
118,873
32
12
9
122
17
0
39
83
get_current_version
[STLT-24] 🔄 (CircleCI RC Pipeline) Add release candidate pipeline (#4425)
https://github.com/streamlit/streamlit.git
def get_current_version():
    """Return the release version string parsed from ``lib/setup.py``.

    Scans setup.py for a line matching ``VERSION = "<version>"  # PEP-440``
    and returns the captured version text.

    Raises:
        Exception: if no line in setup.py matches the version pattern.
    """
    filename = os.path.join(BASE_DIR, "lib/setup.py")
    regex = r"(?P<pre>.*VERSION = \")(.*)(?P<post>\" # PEP-440$)"
    pattern = re.compile(regex)
    # Use fileinput's context-manager form so the underlying file is always
    # closed, including on early return or error (the original iterated the
    # FileInput object without ever closing it).
    with fileinput.input(filename) as lines:
        for line in lines:
            match = pattern.match(line.rstrip())
            if match:
                return match.groups()[1]
    raise Exception('Did not find regex "%s" for version in setup.py' % (regex))
72
get_prerelease_version.py
Python
scripts/get_prerelease_version.py
967fb9b856b94ee22f7d125139f8b8536cc75e99
streamlit
3
297,732
28
12
13
122
15
1
30
157
_data_to_save
Add aliases to area registry items (#84294) * Add aliases to area registry items * Update test * Fix WS API
https://github.com/home-assistant/core.git
def _data_to_save(self) -> dict[str, list[dict[str, Any]]]: data = {} data["areas"] = [ { "aliases": list(entry.aliases), "name": entry.name, "id": entry.id, "picture": entry.picture, } for entry in self.areas.values() ] return data @callback
@callback
73
area_registry.py
Python
homeassistant/helpers/area_registry.py
1a42bd5c4cb51ffbfcaf8d5389b80a228712ac81
core
2
266,515
37
11
18
212
20
0
63
221
_create_role_list
updated metadata dump to do full docs dump (#76170) * minor refactor in other options by pushing common code into functions * consolidate coll_filter * more normalizing loader * dont pass plugin_loader, its global import * Also dump roles and collections * adjusted tests to new err msg * disable namespace filter (unused)
https://github.com/ansible/ansible.git
def _create_role_list(self):
    """Build a mapping of fully-qualified role name -> role summary.

    Standalone roles (from the roles path) are only included when no
    collection filter is active; collection roles are always scanned,
    restricted by the filter when one is given.
    """
    roles_path = self._get_roles_path()
    collection_filter = self._get_collection_filter()

    standalone = [] if collection_filter else self._find_all_normal_roles(roles_path)
    in_collections = self._find_all_collection_roles(collection_filter=collection_filter)

    summaries = {}
    for name, path in standalone:
        spec = self._load_argspec(name, role_path=path)
        fqcn, summary = self._build_summary(name, '', spec)
        summaries[fqcn] = summary
    for name, coll, coll_path in in_collections:
        spec = self._load_argspec(name, collection_path=coll_path)
        fqcn, summary = self._build_summary(name, coll, spec)
        summaries[fqcn] = summary
    return summaries
134
doc.py
Python
lib/ansible/cli/doc.py
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
ansible
4
100,464
29
14
16
283
16
0
58
194
decoder
Update all Keras Imports to be conditional (#1214) * Remove custom keras importer * first round keras imports fix * launcher.py: Remove KerasFinder references * 2nd round keras imports update (lib and extract) * 3rd round keras imports update (train) * remove KerasFinder from tests * 4th round keras imports update (tests)
https://github.com/deepfakes/faceswap.git
def decoder(self, side):
    """Build the decoder sub-model for one side.

    The face branch upscales an 8x8x512 latent through 256/128/64 filters
    to a 3-channel output; when ``self.learn_mask`` is set, a parallel mask
    branch with a 1-channel output is added.
    """
    input_ = Input(shape=(8, 8, 512))

    # Face branch: three upscale stages then the RGB output layer.
    face = input_
    for filters in (256, 128, 64):
        face = UpscaleBlock(filters, activation="leakyrelu")(face)
    face = Conv2DOutput(3, 5, name=f"face_out_{side}")(face)
    outputs = [face]

    if self.learn_mask:
        # Mask branch mirrors the face branch with a single output channel.
        mask = input_
        for filters in (256, 128, 64):
            mask = UpscaleBlock(filters, activation="leakyrelu")(mask)
        mask = Conv2DOutput(1, 5, name=f"mask_out_{side}")(mask)
        outputs.append(mask)

    return KerasModel(input_, outputs=outputs, name=f"decoder_{side}")
168
original.py
Python
plugins/train/model/original.py
aa39234538a8f83e6aa2b60b8275a570e8876ac2
faceswap
2
37,492
7
10
2
37
5
0
7
13
require_tokenizers
Update all require decorators to use skipUnless when possible (#16999)
https://github.com/huggingface/transformers.git
def require_tokenizers(test_case):
    """Decorator skipping ``test_case`` unless the tokenizers library is available."""
    return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
20
testing_utils.py
Python
src/transformers/testing_utils.py
57e6464ac9a31156f1c93e59107323e6ec01309e
transformers
1
47,528
31
11
19
222
33
0
43
180
test_execute_task_instances_backfill_tasks_wont_execute
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
https://github.com/apache/airflow.git
def test_execute_task_instances_backfill_tasks_wont_execute(self, dag_maker):
    """The scheduler's critical section must skip backfill-run task instances.

    A SCHEDULED task instance belonging to a BACKFILL_JOB dag run is set up,
    the critical section is executed, and the instance is asserted to still
    be SCHEDULED (i.e. it was not queued).
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
    task_id_1 = 'dummy_task'

    with dag_maker(dag_id=dag_id):
        task1 = EmptyOperator(task_id=task_id_1)

    self.scheduler_job = SchedulerJob(subdir=os.devnull)
    session = settings.Session()

    # Create a backfill dag run with one SCHEDULED task instance.
    dr1 = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB)
    ti1 = TaskInstance(task1, run_id=dr1.run_id)
    ti1.refresh_from_db()
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.flush()
    assert dr1.is_backfill

    self.scheduler_job._critical_section_execute_task_instances(session)
    session.flush()
    # The state must be unchanged: backfill TIs are not picked up.
    ti1.refresh_from_db()
    assert State.SCHEDULED == ti1.state
    session.rollback()
131
test_scheduler_job.py
Python
tests/jobs/test_scheduler_job.py
49e336ae0302b386a2f47269a6d13988382d975f
airflow
1
275,776
17
10
7
82
11
1
18
74
to_json
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def to_json(self, **kwargs):
    """Return a JSON string describing this generator.

    The payload contains the concrete class name and the ``get_config()``
    dict; any keyword arguments are forwarded to ``json.dumps``.
    """
    payload = {
        "class_name": self.__class__.__name__,
        "config": self.get_config(),
    }
    return json.dumps(payload, **kwargs)
@keras_export("keras.preprocessing.sequence.make_sampling_table")
42
sequence.py
Python
keras/preprocessing/sequence.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
176,193
63
13
15
203
26
1
79
146
laplacian_matrix
Use scipy.sparse array datastructure (#5139) * Step 1: use sparse arrays in nx.to_scipy_sparse_matrix. Seems like a reasonable place to start. nx.to_scipy_sparse_matrix is one of the primary interfaces to scipy.sparse from within NetworkX. * 1: Use np.outer instead of mult col/row vectors Fix two instances in modularitymatrix where a new 2D array was being created via an outer product of two \"vectors\". In the matrix case, this was a row vector \* a column vector. In the array case this can be disambiguated by being explicit with np.outer. * Update _transition_matrix in laplacianmatrix module - A few instances of matrix multiplication operator - Add np.newaxis + transpose to get shape right for broadcasting - Explicitly convert e.g. sp.sparse.spdiags to a csr_array. * Update directed_combinitorial_laplacian w/ sparse array. - Wrap spdiags in csr_array and update matmul operators. * Rm matrix-specific code from lgc and hmn modules - Replace .A call with appropriate array semantics - wrap sparse.diags in csr_array. * Change hits to use sparse array semantics. - Replace * with @ - Remove superfluous calls to flatten. * Update sparse matrix usage in layout module. - Simplify lil.getrowview call - Wrap spdiags in csr_array. * lil_matrix -> lil_array in graphmatrix.py. * WIP: Start working on algebraic connectivity module. * Incorporate auth mat varname feedback. * Revert 1D slice and comment for 1D sparse future. * Add TODOs: rm csr_array wrapper around spdiags etc. * WIP: cleanup algebraicconn: tracemin_fiedler. * Typo. * Finish reviewing algebraicconnectivity. * Convert bethe_hessian matrix to use sparse arrays. * WIP: update laplacian. Update undirected laplacian functions. * WIP: laplacian - add comment about _transition_matrix return types. * Finish laplacianmatrix review. * Update attrmatrix. * Switch to official laplacian function. * Update pagerank to use sparse array. * Switch bipartite matrix to sparse arrays. 
* Check from_scipy_sparse_matrix works with arrays. Modifies test suite. * Apply changes from review. * Fix failing docstring tests. * Fix missing axis for in-place multiplication. * Use scipy==1.8rc2 * Use matrix multiplication * Fix PyPy CI * [MRG] Create plot_subgraphs.py example (#5165) * Create plot_subgraphs.py https://github.com/networkx/networkx/issues/4220 * Update plot_subgraphs.py black * Update plot_subgraphs.py lint plus font_size * Update plot_subgraphs.py added more plots * Update plot_subgraphs.py removed plots from the unit test and added comments * Update plot_subgraphs.py lint * Update plot_subgraphs.py typos fixed * Update plot_subgraphs.py added nodes to the plot of the edges removed that was commented out for whatever reason * Update plot_subgraphs.py revert the latest commit - the line was commented out for a reason - it's broken * Update plot_subgraphs.py fixed node color issue * Update plot_subgraphs.py format fix * Update plot_subgraphs.py forgot to draw the nodes... now fixed * Fix sphinx warnings about heading length. * Update examples/algorithms/plot_subgraphs.py * Update examples/algorithms/plot_subgraphs.py Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> * Add traveling salesman problem to example gallery (#4874) Adds an example of the using Christofides to solve the TSP problem to the example galery. Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037) * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() * Resolved Requested Changes * Revert changes to degree docstrings. * Update comments in example. * Apply wording to edges method in all graph classes. Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> * Compatibility updates from testing with numpy/scipy/pytest rc's (#5226) * Rm deprecated scipy subpkg access. * Use recwarn fixture in place of deprecated pytest pattern. 
* Rm unnecessary try/except from tests. * Replace internal `close` fn with `math.isclose`. (#5224) * Replace internal close fn with math.isclose. * Fix lines in docstring examples. * Fix Python 3.10 deprecation warning w/ int div. (#5231) * Touchups and suggestions for subgraph gallery example (#5225) * Simplify construction of G with edges rm'd * Rm unused graph attribute. * Shorten categorization by node type. * Simplify node coloring. * Simplify isomorphism check. * Rm unit test. * Rm redundant plotting of each subgraph. * Use new package name (#5234) * Allowing None edges in weight function of bidirectional Dijkstra (#5232) * added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None. * changed syntax for better readability and code duplicate avoidance Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de> * Add an FAQ about assigning issues. (#5182) * Add FAQ about assigning issues. * Add note about linking issues from new PRs. * Update dev deps (#5243) * Update minor doc issues with tex notation (#5244) * Add FutureWarnings to fns that return sparse matrices - biadjacency_matrix. - bethe_hessian_matrix. - incidence_matrix. - laplacian functions. - modularity_matrix functions. - adjacency_matrix. * Add to_scipy_sparse_array and use it everywhere. Add a new conversion function to preserve array semantics internally while not altering behavior for users. Also adds FutureWarning to to_scipy_sparse_matrix. * Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix. * Handle deprecations in separate PR. * Fix docstring examples. 
Co-authored-by: Mridul Seth <mail@mriduls.com> Co-authored-by: Jarrod Millman <jarrod.millman@gmail.com> Co-authored-by: Andrew Knyazev <andrew.knyazev@ucdenver.edu> Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: eskountis <56514439+eskountis@users.noreply.github.com> Co-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com> Co-authored-by: NikHoh <nikhoh@web.de> Co-authored-by: Hohmann, Nikolas <nikolas.hohmann@tu-darmstadt.de> Co-authored-by: Sultan Orazbayev <contact@econpoint.com> Co-authored-by: Mridul Seth <mail@mriduls.com>
https://github.com/networkx/networkx.git
def laplacian_matrix(G, nodelist=None, weight="weight"):
    """Return the graph Laplacian L = D - A as a SciPy sparse matrix.

    D is the diagonal degree matrix built from the row sums of the weighted
    adjacency matrix A. Emits a FutureWarning about the upcoming switch to
    sparse arrays in NetworkX 3.0.
    """
    import warnings

    import scipy as sp
    import scipy.sparse  # call as sp.sparse

    if nodelist is None:
        nodelist = list(G)
    adjacency = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
    n_rows, n_cols = adjacency.shape
    # TODO: rm csr_array wrapper when spdiags can produce arrays
    degrees = sp.sparse.csr_array(
        sp.sparse.spdiags(adjacency.sum(axis=1), 0, n_cols, n_rows, format="csr")
    )
    warnings.warn(
        "laplacian_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.",
        FutureWarning,
        stacklevel=2,
    )
    # TODO: rm sp.sparse.csr_matrix in version 3.0
    return sp.sparse.csr_matrix(degrees - adjacency)
@not_implemented_for("directed")
120
laplacianmatrix.py
Python
networkx/linalg/laplacianmatrix.py
5dfd57af2a141a013ae3753e160180b82bec9469
networkx
2
38,418
40
17
11
205
17
0
55
98
get_all_tests
Update self-push workflow (#17177) * update push ci * install git-python * update comment * update deepspeed jobs * fix report * skip 2 more tests that require fairscale * Fix changes in test_fetcher.py (to deal with `setup.py` is changed) * set RUN_PT_TF_CROSS_TESTS=1 and final clean-up * remove SIGOPT_API_TOKEN * remove echo "$matrix_folders" Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
https://github.com/huggingface/transformers.git
def get_all_tests():
    """Collect test targets: expanded model test folders followed by the
    remaining top-level entries under ``tests``.

    NOTE(review): ``os.path.isdir`` is called on relative ``tests/...``
    paths, so this assumes the CWD is the repository root — TODO confirm.
    """
    test_root_dir = os.path.join(PATH_TO_TRANFORMERS, "tests")

    # Top-level entries under `tests`: keep directories and test_* files.
    candidates = [f"tests/{entry}" for entry in os.listdir(test_root_dir)]
    tests = sorted(
        c for c in candidates if os.path.isdir(c) or c.startswith("tests/test_")
    )

    # Model-specific test folders under `tests/models`.
    model_entries = [
        f"tests/models/{entry}"
        for entry in os.listdir(os.path.join(test_root_dir, "models"))
    ]
    model_test_folders = sorted(m for m in model_entries if os.path.isdir(m))

    # Replace the bare `tests/models` entry with its expanded contents, up front.
    tests.remove("tests/models")
    return model_test_folders + tests
118
tests_fetcher.py
Python
utils/tests_fetcher.py
38043d8453b82a9c712f8d5c98323150fbee7503
transformers
4
10,922
75
10
29
171
18
0
110
276
mixin_base_deployment_parser
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <dev-bot@jina.ai>
https://github.com/jina-ai/jina.git
def mixin_base_deployment_parser(parser):
    """Add the Deployment-level CLI arguments to ``parser``.

    Registers ``--uses-before``, ``--uses-after``, ``--external`` and the
    internal ``--deployment-role`` flag inside a "Deployment" argument group.
    """
    gp = add_arg_group(parser, title='Deployment')
    gp.add_argument(
        '--uses-before',
        type=str,
        help='The executor attached after the Pods described by --uses, typically before sending to all '
        'shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--uses-after',
        type=str,
        help='The executor attached after the Pods described by --uses, typically used for receiving from '
        'all shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--external',
        action='store_true',
        default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow.'
        'This Deployment will not be context managed by the Flow.',
    )
    # hidden CLI used for internal only
    gp.add_argument(
        '--deployment-role',
        type=DeploymentRoleType.from_string,
        choices=list(DeploymentRoleType),
        # Help text is hidden unless the "show all args" debug switch is on.
        help='The role of this deployment in the flow'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
98
deployment.py
Python
jina/parsers/orchestrate/deployment.py
13edc16d806fb5d77a6849551178ccc75937f25f
jina
2
300,754
14
10
5
54
8
0
14
49
load_ignored_devices
Streamline setup of deCONZ binary sensor platform (#71820)
https://github.com/home-assistant/core.git
def load_ignored_devices(self) -> None:
    """Replay the stored (add_entities, device_id) pairs, then empty the store."""
    pending = self.ignored_devices
    for add_entities_fn, device_id in pending:
        add_entities_fn(EventType.ADDED, device_id)
    pending.clear()
32
gateway.py
Python
homeassistant/components/deconz/gateway.py
007c6d22366b39c166bc4c511a145382d8fd7551
core
2
203,224
16
9
18
63
8
0
17
66
get_primary_key_column
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
https://github.com/django/django.git
def get_primary_key_column(self, cursor, table_name):
    """Return the primary-key column name for ``table_name``, or None.

    NOTE(review): the SQL string literal that belongs as the first argument
    of ``cursor.execute`` was stripped from this record, leaving the call
    below syntactically incomplete — restore it from the original source.
    """
    cursor.execute(
        ,
        [table_name],
    )
    row = cursor.fetchone()
    # Pass the raw column name through the backend's identifier converter.
    return self.identifier_converter(row[0]) if row else None
41
introspection.py
Python
django/db/backends/oracle/introspection.py
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
2
294,912
68
13
28
213
20
0
100
361
fill_out_missing_chromecast_info
Get cast type and manufacturer via http protocol (#68863)
https://github.com/home-assistant/core.git
def fill_out_missing_chromecast_info(self) -> ChromecastInfo:
    """Return a ChromecastInfo with missing fields filled in over HTTP.

    Cast type and manufacturer are not present in mDNS data and are fetched
    via the HTTP API when absent. For audio groups whose dynamic-group
    status is unknown, the multizone HTTP API is additionally queried.
    """
    cast_info = self.cast_info
    if cast_info.cast_type is None or cast_info.manufacturer is None:
        # Manufacturer and cast type are not available in mDNS data; get them over HTTP.
        cast_info = dial.get_cast_type(
            cast_info,
            zconf=ChromeCastZeroconf.get_zeroconf(),
        )

    if not self.is_audio_group or self.is_dynamic_group is not None:
        # We have all information, no need to check the HTTP API.
        return ChromecastInfo(cast_info=cast_info)

    # Fill out missing group information via the HTTP API.
    # (Fix: removed the dead `http_group_status = None` store that was
    # immediately overwritten by the call below.)
    is_dynamic_group = False
    http_group_status = dial.get_multizone_status(
        None,
        services=self.cast_info.services,
        zconf=ChromeCastZeroconf.get_zeroconf(),
    )
    if http_group_status is not None:
        is_dynamic_group = any(
            g.uuid == self.cast_info.uuid for g in http_group_status.dynamic_groups
        )
    return ChromecastInfo(
        cast_info=cast_info,
        is_dynamic_group=is_dynamic_group,
    )
136
helpers.py
Python
homeassistant/components/cast/helpers.py
cf39a61aa125c77a5a05ac48292467fcdccb2f12
core
7
60,252
27
11
6
81
9
0
29
95
set_transpose
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def set_transpose(self, in_, order):
    """Record the dimension-transpose ``order`` for input blob ``in_``.

    Raises:
        Exception: if ``order`` does not have one dimension fewer than the
            registered input shape (presumably excluding the batch axis —
            TODO confirm).
    """
    self.__check_input(in_)
    expected_ndim = len(self.inputs[in_]) - 1
    if len(order) != expected_ndim:
        raise Exception(
            'Transpose order needs to have the same number of '
            'dimensions as the input.'
        )
    self.transpose[in_] = order
48
io.py
Python
code/deep/BJMMD/caffe/python/caffe/io.py
cc4d0564756ca067516f71718a3d135996525909
transferlearning
2
186,687
37
13
8
93
13
0
46
140
update_includes
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <ferrand.ad@gmail.com>
https://github.com/certbot/certbot.git
def update_includes(self) -> None:
    """Parse any Include'd files that are not yet in the current parser tree."""
    # find_dir iterates over configuration for Include and IncludeOptional
    # directives to make sure we see the full include tree present in the
    # configuration files; the return value itself is not needed.
    _ = self.find_dir("Include")

    for include_path in apache_util.parse_includes(self.configurator.options.ctl) or []:
        if not self.parsed_in_current(include_path):
            self.parse_file(include_path)
53
parser.py
Python
certbot-apache/certbot_apache/_internal/parser.py
7d9e9a49005de7961e84d2a7c608db57dbab3046
certbot
4
106,057
4
6
2
19
3
0
4
18
info
Clean up Dataset and DatasetDict (#5344) * clean up docstrings * make style * apply review Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
https://github.com/huggingface/datasets.git
def info(self):
    """Return the info object cached on ``self._info``."""
    return self._info
10
arrow_dataset.py
Python
src/datasets/arrow_dataset.py
cd3169f3f35afcf73a36a8276113e1881d92e5e0
datasets
1
264,370
75
15
23
282
24
0
132
312
get_selected_values
Fixes #8498: Fix display of selected content type filters in object list views
https://github.com/netbox-community/netbox.git
def get_selected_values(form, field_name):
    """Return the values/labels currently selected for ``field_name`` on ``form``.

    Handles plain (non-choice) fields, model-choice fields, static grouped
    choice fields, and a field-level ``null_option``.
    """
    # Ensure cleaned_data is populated before reading it.
    if not hasattr(form, 'cleaned_data'):
        form.is_valid()
    filter_data = form.cleaned_data.get(field_name)
    field = form.fields[field_name]

    # Non-selection field
    if not hasattr(field, 'choices'):
        return [str(filter_data)]

    # Model choice field
    if type(field.choices) is forms.models.ModelChoiceIterator:
        # If this is a single-choice field, wrap its value in a list
        if not hasattr(filter_data, '__iter__'):
            values = [filter_data]
        else:
            values = filter_data
    else:
        # Static selection field
        choices = unpack_grouped_choices(field.choices)
        if type(filter_data) not in (list, tuple):
            filter_data = [filter_data]  # Ensure filter data is iterable
        values = [
            label for value, label in choices
            if str(value) in filter_data or None in filter_data
        ]

    if hasattr(field, 'null_option'):
        # If the field has a `null_option` attribute set and it is selected,
        # add it to the field's grouped choices.
        if field.null_option is not None and None in filter_data:
            values.append(field.null_option)

    return values
169
utils.py
Python
netbox/utilities/forms/utils.py
e20ac803f306871d1397a40ed8d54b03b6748045
netbox
12
102,162
10
8
7
47
6
0
11
32
test_missing_cpp_namespace
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
https://github.com/pytorch/pytorch.git
def test_missing_cpp_namespace(self) -> None:
    """Codegen should report an error when the backend YAML omits cpp_namespace.

    NOTE(review): the triple-quoted string literals (the YAML input assigned
    to ``yaml_str`` and the expected-error text passed to
    ``assertExpectedInline``) were stripped from this record, leaving the
    statements below syntactically incomplete — restore them from the
    original test file.
    """
    yaml_str = 
    output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
    self.assertExpectedInline(output_error, )
26
test_gen_backend_stubs.py
Python
tools/test/test_gen_backend_stubs.py
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
pytorch
1
78,145
17
13
6
77
10
0
17
67
get_indented_choices
Use a CollectionQuerySet method to simplify displaying indented collection lists
https://github.com/wagtail/wagtail.git
def get_indented_choices(self):
    """Return (pk, indented name) choice pairs for the collections in this queryset."""
    # Shallowest depth present in the queryset; fall back to 2 when the
    # aggregate yields a falsy value (e.g. an empty queryset).
    min_depth = self.aggregate(models.Min("depth"))["depth__min"] or 2
    choices = []
    for collection in self:
        choices.append(
            (collection.pk, collection.get_indented_name(min_depth, html=True))
        )
    return choices
47
collections.py
Python
wagtail/models/collections.py
7fd45a669317df14b40b9d661834d482caf911c1
wagtail
3
212,859
32
10
11
121
12
0
43
142
update
Fixed bug in Tree and Table when visible changed in the layout. Added Canvas.update method
https://github.com/PySimpleGUI/PySimpleGUI.git
def update(self, background_color=None, visible=None):
    """Change the canvas background colour and/or visibility.

    Silently does nothing if the underlying widget has not been created yet.
    """
    if not self._widget_was_created():
        # Widget doesn't exist yet; changes can't be applied.
        return

    if background_color not in (None, COLOR_SYSTEM_DEFAULT):
        self._TKCanvas.configure(background=background_color)

    if visible is True:
        self._pack_restore_settings()
    elif visible is False:
        self._pack_forget_save_settings()

    if visible is not None:
        self._visible = visible
74
PySimpleGUI.py
Python
PySimpleGUI.py
6c4dc01f4acc7396387008510f7574db9f1b6201
PySimpleGUI
6
320,781
22
10
6
84
12
0
24
82
set_pattern
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. 
- Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. 
==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. 
This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) ==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). 
- Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. 
==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. - Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. 
- Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
https://github.com/qutebrowser/qutebrowser.git
def set_pattern(self, pattern): log.completion.debug("Setting completion pattern '{}'".format(pattern)) self.layoutAboutToBeChanged.emit() for cat in self._categories: # FIXME:mypy define a Protocol for set_pattern? cat.set_pattern(pattern) # type: ignore[attr-defined] self.layoutChanged.emit()
48
completionmodel.py
Python
qutebrowser/completion/models/completionmodel.py
a20bb67a878b2e68abf8268c1b0a27f018d01352
qutebrowser
2
41,392
5
7
2
26
5
0
5
19
_forward
Transition mappings->properties, leaving a few loose ends
https://github.com/mwaskom/seaborn.git
def _forward(self, values): return np.square(values)
15
properties.py
Python
seaborn/_core/properties.py
a07ef69882ed76e09a0ed43d6f3ea33780c1b2be
seaborn
1
337,586
16
14
9
108
18
0
17
56
test_tracking
Refactor tests to use accelerate launch (#373) Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
https://github.com/huggingface/accelerate.git
def test_tracking(self): with tempfile.TemporaryDirectory() as tmpdir: testargs = f.split() _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE) self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
62
test_examples.py
Python
tests/test_examples.py
23c0341262bd396a3ba9265614b3818d6e08a6c1
accelerate
1
281,498
9
9
18
40
7
0
10
32
print_help
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: james 
<jmaslek11@gmail.com> Co-authored-by: jose-donato <zmcdonato@gmail.com>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def print_help(self): help_text = console.print(text=help_text, menu="Portfolio - Brokers - Degiro")
21
degiro_controller.py
Python
gamestonk_terminal/portfolio/brokers/degiro/degiro_controller.py
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
1
212,818
19
10
7
80
9
0
26
91
widget_to_element
Added key and widget Element properties, new focus methods Element.get_next_focus, Element.get_previous_focus. New Window method Window.widget_to_element
https://github.com/PySimpleGUI/PySimpleGUI.git
def widget_to_element(self, widget): if self.AllKeysDict is None or len(self.AllKeysDict) == 0: return None for key, element in self.AllKeysDict.items(): if element.Widget == widget: return element return None
50
PySimpleGUI.py
Python
PySimpleGUI.py
9b814f003b0685757d76ce56ee9c98eae114d346
PySimpleGUI
5
19,953
16
12
7
63
9
0
16
37
is_pinned
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def is_pinned(self) -> bool: specifiers = self.specifier return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
36
req_install.py
Python
pipenv/patched/notpip/_internal/req/req_install.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
2
337,071
48
15
33
221
22
0
70
144
upsample_2d
Clean up resnet.py file (#780) * clean up resnet.py * make style and quality * minor formatting
https://github.com/huggingface/diffusers.git
def upsample_2d(hidden_states, kernel=None, factor=2, gain=1): r assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * (factor**2)) pad_value = kernel.shape[0] - factor output = upfirdn2d_native( hidden_states, kernel.to(device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), ) return output
138
resnet.py
Python
src/diffusers/models/resnet.py
a73f8b725105b12a60a9b22918bda68f8b6d26c3
diffusers
4
181,839
43
13
19
139
16
0
46
239
export
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def export(self, output_file_name="", data_file_path=""): if self._optimized_pipeline is None: raise RuntimeError( "A pipeline has not yet been optimized. Please call fit() first." ) to_write = export_pipeline( self._optimized_pipeline, self.operators, self._pset, self._imputed, self._optimized_pipeline_score, self.random_state, data_file_path=data_file_path, ) if output_file_name != "": with open(output_file_name, "w") as output_file: output_file.write(to_write) else: return to_write
84
base.py
Python
tpot/base.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
3
313,273
16
11
6
67
9
1
16
38
should_retry
Improve stream robustness by always retrying worker (#66417) Improve stream robustness by always retrying in the worker on failure, rather than only when keepalive is enabled. This will make cloud cameras like nest more robust, since they have a tendency to be flaky. This is also needed to improve client side retry behavior because when the client attempts to retry, the stream token is already revoked because the worker stopped. The worker will still idle timeout if no streams are present, so it won't go on forever if no frontend is viewing the stream.
https://github.com/home-assistant/core.git
def should_retry() -> Generator[Mock, None, None]: with patch( "homeassistant.components.stream._should_retry", return_value=False ) as mock_should_retry: yield mock_should_retry @pytest.fixture(scope="package")
@pytest.fixture(scope="package")
28
conftest.py
Python
tests/components/stream/conftest.py
0a128d006f0c0de4bdfe2acda66beb8fa5731463
core
1
259,147
8
10
2
49
7
0
8
22
fit_transform
MNT Refactor KMeans and MiniBatchKMeans to inherit from a common base class (#22723) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Julien Jerphanion <git@jjerphan.xyz>
https://github.com/scikit-learn/scikit-learn.git
def fit_transform(self, X, y=None, sample_weight=None): return self.fit(X, sample_weight=sample_weight)._transform(X)
32
_kmeans.py
Python
sklearn/cluster/_kmeans.py
6ab950ec081044a1f32c2d082772635bb56144d8
scikit-learn
1
20,836
7
8
8
40
4
0
7
28
pop
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def pop(self) -> Style: self._stack.pop() return self._stack[-1]
23
style.py
Python
pipenv/patched/notpip/_vendor/rich/style.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
246,790
89
15
39
438
34
0
132
557
test_context_as_admin
Replace assertEquals and friends with non-deprecated versions. (#12092)
https://github.com/matrix-org/synapse.git
def test_context_as_admin(self) -> None: # Create a room. We're not part of it. user_id = self.register_user("test", "test") user_tok = self.login("test", "test") room_id = self.helper.create_room_as(user_id, tok=user_tok) # Populate the room with events. events = [] for i in range(30): events.append( self.helper.send_event( room_id, "com.example.test", content={"index": i}, tok=user_tok ) ) # Now let's fetch the context for this room. midway = (len(events) - 1) // 2 channel = self.make_request( "GET", "/_synapse/admin/v1/rooms/%s/context/%s" % (room_id, events[midway]["event_id"]), access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual( channel.json_body["event"]["event_id"], events[midway]["event_id"] ) for found_event in channel.json_body["events_before"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j < midway) break else: self.fail("Event %s from events_before not found" % j) for found_event in channel.json_body["events_after"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j > midway) break else: self.fail("Event %s from events_after not found" % j)
261
test_room.py
Python
tests/rest/admin/test_room.py
02d708568b476f2f7716000b35c0adfa4cbd31b3
synapse
8
212,752
16
15
5
76
8
0
17
60
mapping
More demo programs updates 🤦‍♂️ wow.....I thought for sure these were checked in....
https://github.com/PySimpleGUI/PySimpleGUI.git
def mapping(func, sequence, *argc): if isinstance(sequence, list): return list(map(lambda i: func(i, *argc), sequence)) else: return func(sequence, *argc)
48
Demo_Desktop_Widget_Drive_Usage_Gauges.py
Python
DemoPrograms/Demo_Desktop_Widget_Drive_Usage_Gauges.py
430d1bc77fcdc0969a66ff370ec5e7e590423c83
PySimpleGUI
2
156,670
33
15
13
142
17
0
36
130
_register_entry_point_plugins
Feat: read entrypoints in ``dask.sizeof`` (#7688) Entry-points are only used when there is no dispatch for a given type which eliminates the cost for applications which do not need entry-point based implementations.
https://github.com/dask/dask.git
def _register_entry_point_plugins(): if sys.version_info >= (3, 10): sizeof_entry_points = importlib.metadata.entry_points(group="dask.sizeof") else: sizeof_entry_points = importlib.metadata.entry_points().get("dask.sizeof", []) for entry_point in sizeof_entry_points: registrar = entry_point.load() try: registrar(sizeof) except Exception: logger.exception( f"Failed to register sizeof entry point {entry_point.name}" ) _register_entry_point_plugins()
75
sizeof.py
Python
dask/sizeof.py
0a7c41dd1ffd84ea56e387eea51b7df1dbb81140
dask
4
269,475
9
7
2
57
10
1
9
12
minimum
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def minimum(x, y): return tf.minimum(x, y) @keras_export("keras.backend.sin") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
@keras_export("keras.backend.sin") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
17
backend.py
Python
keras/backend.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
153,818
6
11
2
42
9
0
6
20
dt_strftime
REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514) Co-authored-by: Rehan Sohail Durrani <rdurrani@berkeley.edu> Signed-off-by: jeffreykennethli <jkli@ponder.io>
https://github.com/modin-project/modin.git
def dt_strftime(self, date_format): return DateTimeDefault.register(pandas.Series.dt.strftime)(self, date_format)
26
query_compiler.py
Python
modin/core/storage_formats/base/query_compiler.py
57e29bc5d82348006c5170ef9ac0a9eedcd9acf9
modin
1
148,053
5
9
3
31
4
0
5
30
compute_first_block
[Datasets] [Out-of-Band Serialization: 1/3] Refactor `LazyBlockList`. (#23821) This PR refactors `LazyBlockList` in service of out-of-band serialization (see [mono-PR](https://github.com/ray-project/ray/pull/22616)) and is a precursor to an execution plan refactor (PR #2) and adding the actual out-of-band serialization APIs (PR #3). The following is included in this refactor: 1. `ReadTask`s are now a first-class concept, replacing calls; 2. read stage progress tracking is consolidated into `LazyBlockList._get_blocks_with_metadta()` and more of the read task complexity, e.g. the read remote function, was pushed into `LazyBlockList` to make `ray.data.read_datasource()` simpler; 3. we are a bit smarter with how we progressively launch tasks and fetch and cache metadata, including fetching the metadata for read tasks in `.iter_blocks_with_metadata()` instead of relying on the pre-read task metadata (which will be less accurate), and we also fix some small bugs in the lazy ramp-up around progressive metadata fetching. (1) is the most important item for supporting out-of-band serialization and fundamentally changes the `LazyBlockList` data model. This is required since we need to be able to reference the underlying read tasks when rewriting read stages during optimization and when serializing the lineage of the Dataset. See the [mono-PR](https://github.com/ray-project/ray/pull/22616) for more context. Other changes: 1. Changed stats actor to a global named actor singleton in order to obviate the need for serializing the actor handle with the Dataset stats; without this, we were encountering serialization failures.
https://github.com/ray-project/ray.git
def compute_first_block(self): if self._tasks: self._get_or_compute(0)
17
lazy_block_list.py
Python
python/ray/data/impl/lazy_block_list.py
efc5ac5ddfb1a83db931e015ce4697280e73a16d
ray
2
260,894
14
13
5
82
16
0
16
31
test_gpr_predict_input_not_modified
FIX Ensure that GaussianProcessRegressor predict method does not modify input (#24405)
https://github.com/scikit-learn/scikit-learn.git
def test_gpr_predict_input_not_modified(): gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y) X2_copy = np.copy(X2) _, _ = gpr.predict(X2, return_std=True) assert_allclose(X2, X2_copy)
50
test_gpr.py
Python
sklearn/gaussian_process/tests/test_gpr.py
e04fb58e5d65cbb9a44840bf7c20f850d4081cb2
scikit-learn
1
76,543
7
9
4
35
3
0
8
40
classes
Move wagtail.admin.edit_handlers to wagtail.admin.panels
https://github.com/wagtail/wagtail.git
def classes(self): if self.classname: return [self.classname] return []
20
panels.py
Python
wagtail/admin/panels.py
b189ab8382cc2247129b221e80599df86acd5bb3
wagtail
2
68,939
45
14
22
183
14
0
53
37
validate_house_rent_dates
chore: keep back code to be a part of other apps / to be ported later
https://github.com/frappe/erpnext.git
def validate_house_rent_dates(doc): if not doc.rented_to_date or not doc.rented_from_date: frappe.throw(_("House rented dates required for exemption calculation")) if date_diff(doc.rented_to_date, doc.rented_from_date) < 14: frappe.throw(_("House rented dates should be atleast 15 days apart")) proofs = frappe.db.sql( , { "employee": doc.employee, "payroll_period": doc.payroll_period, "from_date": doc.rented_from_date, "to_date": doc.rented_to_date, }, ) if proofs: frappe.throw(_("House rent paid days overlapping with {0}").format(proofs[0][0]))
109
utils.py
Python
erpnext/regional/india/utils.py
12b7e14fded587abc0f7821e3c3dfbea64498a7d
erpnext
5
150,737
54
14
14
312
22
0
203
601
calculate_reward
make base 3ac and base 5ac environments. TDQN defaults to 3AC.
https://github.com/freqtrade/freqtrade.git
def calculate_reward(self, action): if self._last_trade_tick is None: return 0. # close long if (action == Actions.Short.value or action == Actions.Neutral.value) and self._position == Positions.Long: last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) return float(np.log(current_price) - np.log(last_trade_price)) # close short if (action == Actions.Long.value or action == Actions.Neutral.value) and self._position == Positions.Short: last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) return float(np.log(last_trade_price) - np.log(current_price)) return 0. # User can inherit and customize 5 action environment # class MyRLEnv(Base5ActionRLEnv): # # def calculate_reward(self, action): # if self._last_trade_tick is None: # return 0. # # close long # if action == Actions.Long_sell.value and self._position == Positions.Long: # last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) # current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) # return float(np.log(current_price) - np.log(last_trade_price)) # if action == Actions.Long_sell.value and self._position == Positions.Long: # if self.close_trade_profit[-1] > self.profit_aim * self.rr: # last_trade_price = self.add_buy_fee(self.prices.iloc[self._last_trade_tick].open) # current_price = self.add_sell_fee(self.prices.iloc[self._current_tick].open) # return float((np.log(current_price) - np.log(last_trade_price)) * 2) # # close short # if action == Actions.Short_buy.value and self._position == Positions.Short: # last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) # current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) # return float(np.log(last_trade_price) - np.log(current_price)) # if action == 
Actions.Short_buy.value and self._position == Positions.Short: # if self.close_trade_profit[-1] > self.profit_aim * self.rr: # last_trade_price = self.add_sell_fee(self.prices.iloc[self._last_trade_tick].open) # current_price = self.add_buy_fee(self.prices.iloc[self._current_tick].open) # return float((np.log(last_trade_price) - np.log(current_price)) * 2) # return 0.
184
ReinforcementLearningTDQN.py
Python
freqtrade/freqai/prediction_models/ReinforcementLearningTDQN.py
926023935f52a69d7e8830843729978121b68bc3
freqtrade
8
244,263
115
17
58
715
45
0
184
984
add_params
[Feature] Support ConvNeXt (#7281) * update * update * fix init_cfg * update * update * update * update * update1 * final * update * update * fix lint * fix backbone config * update cascade_mask_rcnn * update and fix lint * update * fix DefaultOptimizerConstructor error * update * update * update * fix year * update * fix lint
https://github.com/open-mmlab/mmdetection.git
def add_params(self, params, module, **kwargs): logger = get_root_logger() parameter_groups = {} logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') num_layers = self.paramwise_cfg.get('num_layers') + 2 decay_rate = self.paramwise_cfg.get('decay_rate') decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') logger.info('Build LearningRateDecayOptimizerConstructor ' f'{decay_type} {decay_rate} - {num_layers}') weight_decay = self.base_wd for name, param in module.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith('.bias') or name in ( 'pos_embed', 'cls_token'): group_name = 'no_decay' this_weight_decay = 0. else: group_name = 'decay' this_weight_decay = weight_decay if 'layer_wise' in decay_type: if 'ConvNeXt' in module.backbone.__class__.__name__: layer_id = get_layer_id_for_convnext( name, self.paramwise_cfg.get('num_layers')) logger.info(f'set param {name} as id {layer_id}') else: raise NotImplementedError() elif decay_type == 'stage_wise': if 'ConvNeXt' in module.backbone.__class__.__name__: layer_id = get_stage_id_for_convnext(name, num_layers) logger.info(f'set param {name} as id {layer_id}') else: raise NotImplementedError() group_name = f'layer_{layer_id}_{group_name}' if group_name not in parameter_groups: scale = decay_rate**(num_layers - layer_id - 1) parameter_groups[group_name] = { 'weight_decay': this_weight_decay, 'params': [], 'param_names': [], 'lr_scale': scale, 'group_name': group_name, 'lr': scale * self.base_lr, } parameter_groups[group_name]['params'].append(param) parameter_groups[group_name]['param_names'].append(name) rank, _ = get_dist_info() if rank == 0: to_display = {} for key in parameter_groups: to_display[key] = { 'param_names': parameter_groups[key]['param_names'], 'lr_scale': parameter_groups[key]['lr_scale'], 'lr': parameter_groups[key]['lr'], 'weight_decay': parameter_groups[key]['weight_decay'], } logger.info(f'Param groups = {json.dumps(to_display, 
indent=2)}') params.extend(parameter_groups.values())
382
layer_decay_optimizer_constructor.py
Python
mmdet/core/optimizers/layer_decay_optimizer_constructor.py
1fd48f7318ac70bab6de371025c74a76c5219e1c
mmdetection
13
260,178
32
10
9
153
24
1
35
69
test_fastica_eigh_low_rank_warning
ENH Add `eigh` solver to `FastICA` (#22527) Co-authored-by: Pierre Ablin <pierreablin@gmail.com> Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com>
https://github.com/scikit-learn/scikit-learn.git
def test_fastica_eigh_low_rank_warning(global_random_seed): rng = np.random.RandomState(global_random_seed) X = make_low_rank_matrix( n_samples=10, n_features=10, random_state=rng, effective_rank=2 ) ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh") msg = "There are some small singular values" with pytest.warns(UserWarning, match=msg): ica.fit(X) @pytest.mark.parametrize("whiten_solver", ["this_should_fail", "test", 1, None])
@pytest.mark.parametrize("whiten_solver", ["this_should_fail", "test", 1, None])
73
test_fastica.py
Python
sklearn/decomposition/tests/test_fastica.py
54c150392fd3ccf28b8d843bbc41311f1da711df
scikit-learn
1
291,225
10
7
4
35
5
0
10
32
last_changed
Add type hints to template states (#82582) * Add type hints to template states * Undo rename * Remove invalid mypy issue link
https://github.com/home-assistant/core.git
def last_changed(self) -> datetime: # type: ignore[override] self._collect_state() return self._state.last_changed
19
template.py
Python
homeassistant/helpers/template.py
aa02a53ac667d08c66a536baf139993bcfe4d7d6
core
1
197,105
17
9
13
79
11
0
17
92
_collect_factor_and_dimension
Update the deprecation warnings in sympy.physics.units
https://github.com/sympy/sympy.git
def _collect_factor_and_dimension(expr, unit_system="SI"): sympy_deprecation_warning( , deprecated_since_version="1.5", active_deprecations_target="deprecated-quantity-methods", ) from sympy.physics.units import UnitSystem unit_system = UnitSystem.get_unit_system(unit_system) return unit_system._collect_factor_and_dimension(expr)
46
quantities.py
Python
sympy/physics/units/quantities.py
905eb426131ca9542a6b258462d9ae984e5b2563
sympy
1
267,051
53
12
6
126
10
0
69
140
set_attributes_from_cli
Deprecate PlayContext.verbosity (#77507) display.verbosity should be used instead
https://github.com/ansible/ansible.git
def set_attributes_from_cli(self): if context.CLIARGS.get('timeout', False): self.timeout = int(context.CLIARGS['timeout']) # From the command line. These should probably be used directly by plugins instead # For now, they are likely to be moved to FieldAttribute defaults self.private_key_file = context.CLIARGS.get('private_key_file') # Else default self._internal_verbosity = context.CLIARGS.get('verbosity') # Else default # Not every cli that uses PlayContext has these command line args so have a default self.start_at_task = context.CLIARGS.get('start_at_task', None) # Else default
69
play_context.py
Python
lib/ansible/playbook/play_context.py
494f9327734b3ef9d2b4e4f2a32d1d4c363cd400
ansible
2
46,261
8
11
4
52
7
0
9
41
full_path
Add cgroupspy to _vendor folder (#22206) This is just importing existing cgroupspy library without code modifications. In the next step we will modify the cgroupspy code to work from the new location, then we will fix it to implement Python 3.10 compatibility and finally we will change airflow to use the vendored package instead of the original package. This is part of the effort needed to implement Python 3.10 compatibility: #22050
https://github.com/apache/airflow.git
def full_path(self): if self.parent: return os.path.join(self.parent.full_path, self.name) return self.name
32
nodes.py
Python
airflow/_vendor/cgroupspy/nodes.py
68aa01936c37f9b03468cc83e3f32766ae0ba5cb
airflow
2
294,411
78
16
50
433
53
0
111
820
async_step_connect
Motion allow changing ip (#68589) Co-authored-by: J. Nick Koston <nick@koston.org>
https://github.com/home-assistant/core.git
async def async_step_connect(self, user_input=None): errors = {} if user_input is not None: key = user_input[CONF_API_KEY] multicast_interface = user_input[CONF_INTERFACE] # check socket interface if multicast_interface != DEFAULT_INTERFACE: motion_multicast = AsyncMotionMulticast(interface=multicast_interface) try: await motion_multicast.Start_listen() motion_multicast.Stop_listen() except gaierror: errors[CONF_INTERFACE] = "invalid_interface" return self.async_show_form( step_id="connect", data_schema=self._config_settings, errors=errors, ) connect_gateway_class = ConnectMotionGateway(self.hass, multicast=None) if not await connect_gateway_class.async_connect_gateway(self._host, key): return self.async_abort(reason="connection_error") motion_gateway = connect_gateway_class.gateway_device mac_address = motion_gateway.mac await self.async_set_unique_id(mac_address) self._abort_if_unique_id_configured( updates={ CONF_HOST: self._host, CONF_API_KEY: key, CONF_INTERFACE: multicast_interface, } ) return self.async_create_entry( title=DEFAULT_GATEWAY_NAME, data={ CONF_HOST: self._host, CONF_API_KEY: key, CONF_INTERFACE: multicast_interface, }, ) (interfaces, default_interface) = await self.async_get_interfaces() self._config_settings = vol.Schema( { vol.Required(CONF_API_KEY): vol.All(str, vol.Length(min=16, max=16)), vol.Optional(CONF_INTERFACE, default=default_interface): vol.In( interfaces ), } ) return self.async_show_form( step_id="connect", data_schema=self._config_settings, errors=errors )
278
config_flow.py
Python
homeassistant/components/motion_blinds/config_flow.py
faf1f229e1117090df69c0af526c1320f2e495c3
core
5