patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -36,15 +36,15 @@ import static org.junit.Assert.*; public class ComputerLauncherTest { - @Test public void jdk7() throws IOException { + @Test(expected=IOException.class) public void jdk7() throws IOException { assertChecked("java version \"1.7.0_05\"\nJava(TM) SE Runtime Environment (build 1.7.0_05-b05)\nJava HotSpot(TM) Server VM (build 23.1-b03, mixed mode)\n", "1.7.0"); } - @Test public void openJDK7() throws IOException { + @Test(expected=IOException.class) public void openJDK7() throws IOException { assertChecked("openjdk version \"1.7.0-internal\"\nOpenJDK Runtime Environment (build 1.7.0-internal-pkgsrc_2010_01_03_06_54-b00)\nOpenJDK 64-Bit Server VM (build 17.0-b04, mixed mode)\n", "1.7.0"); } - @Test public void jdk6() throws IOException { + @Test(expected=IOException.class) public void jdk6() throws IOException { assertChecked("java version \"1.6.0_33\"\nJava(TM) SE Runtime Environment (build 1.6.0_33-b03)\nJava HotSpot(TM) Server VM (build 20.8-b03, mixed mode)\n", "1.6.0"); }
[ComputerLauncherTest->[jdk7->[assertChecked],assertChecked->[assertTrue,ComputerLauncher_JavaVersionResult,checkJavaVersion,StringReader,ByteArrayOutputStream,BufferedReader,toString,contains,PrintStream],j2sdk4->[NullOutputStream,checkJavaVersion,StringReader,BufferedReader,PrintStream],openJDK7->[assertChecked],jdk6->[assertChecked],jdk5->[NullOutputStream,checkJavaVersion,StringReader,BufferedReader,PrintStream],jdk10->[assertChecked]]]
JDK 7 and 6 are supported.
Exception on a separate line would be preferable
@@ -84,13 +84,15 @@ public class HdfsConfigurationInitializer this.configurationInitializers = ImmutableSet.copyOf(requireNonNull(configurationInitializers, "configurationInitializers is null")); } - private static Configuration readConfiguration(List<String> resourcePaths) + private static Configuration readConfiguration(List<File> resourcePaths) { Configuration result = new Configuration(false); - for (String resourcePath : resourcePaths) { + for (File resourcePath : resourcePaths) { + checkArgument(resourcePath.exists(), "File does not exist: %s", resourcePath); + Configuration resourceProperties = new Configuration(false); - resourceProperties.addResource(new Path(resourcePath)); + resourceProperties.addResource(new Path(resourcePath.toString())); copy(resourceProperties, result); }
[HdfsConfigurationInitializer->[initializeConfiguration->[initializeConfiguration]]]
readConfiguration reads configuration from list of resource paths.
Make this `IllegalArgumentException`. We don't need `PrestoException` since this error isn't going to a user.
@@ -23,8 +23,8 @@ class Analytics } end - def ahoy - @ahoy ||= Rails.env.test? ? FakeAhoyTracker.new : Ahoy::Tracker.new(request: request) + def analytics + @analytics ||= Rails.env.test? ? FakeKeen : PublishAnalyticsJob end def uuid
[Analytics->[track_event->[info,merge!,track,key?],uuid->[uuid],ahoy->[test?,new],request_attributes->[user_agent,remote_ip],attr_reader,freeze]]
Returns an object with the n - th unique identifier for the request.
Rather than leak the `FakeKeen` into "production" code, what if we did a `stub_const("PublishAnalyticsJob", FakeKeen)` in tests?
@@ -94,8 +94,8 @@ import timber.log.Timber; import static com.ichi2.async.TaskManager.setLatestInstance; import static com.ichi2.libanki.Card.deepCopyCardArray; import static com.ichi2.libanki.Undoable.*; -import static com.ichi2.utils.BooleanGetter.False; -import static com.ichi2.utils.BooleanGetter.True; +import static com.ichi2.utils.BooleanGetter.FALSE; +import static com.ichi2.utils.BooleanGetter.TRUE; /** * Loading in the background, so that AnkiDroid does not look like frozen.
[CollectionTask->[getCol->[getCol],DeleteNote->[actualTask->[UndoDeleteNote]],DismissNotes->[task->[actualTask]],ExportApkg->[task->[getContext]],nonTaskUndo->[undo],ChangeDeckMulti->[actualTask->[UndoChangeDeckMulti]],PartialSearch->[doProgress->[doProgress,isCancelled,add],add->[add],isCancelled->[isCancelled]],actualDoInBackground->[task,getCol,doInBackground],DeleteNoteMulti->[actualTask->[UndoDeleteNoteMulti]],onPostExecute->[onPostExecute],DismissNote->[task->[actualTask]],MarkNoteMulti->[actualTask->[UndoMarkNoteMulti]],Undo->[task->[nonTaskUndo]],SuspendCardMulti->[actualTask->[UndoSuspendCardMulti]],RebuildCram->[task->[task]],SuspendCard->[actualTask->[UndoSuspendCard]],RescheduleRepositionReset->[actualTask->[UndoRepositionRescheduleResetCards]],SearchCards->[task->[add,isCancelled,PartialSearch]],EmptyCram->[task->[task]],onPreExecute->[onPreExecute],CheckCardSelection->[task->[isCancelled]],AnswerAndGetCard->[task->[task]],onCancelled->[onCancelled],CountModels->[task->[add,isCancelled]],onProgressUpdate->[onProgressUpdate],ConfSetSubdecks->[task->[task]],RenderBrowserQA->[task->[doProgress,add,isCancelled]],ImportReplace->[task->[doProgress,getContext,getCol]]]]
Package private for testing purposes. Cancels the task if it is finished.
That's really feel strange to see `FALSE` in all upper case. I know very well why it must be a constant and can't just be the Boolean or a boolean, but still it do not feel right to me to read it this way even if it's the right way. (And I just checked, `Boolean.FALSE` is the actual False constant)
@@ -104,6 +104,16 @@ namespace cryptonote ge_p1p1_to_p3(&A2, &tmp3); ge_p3_tobytes(&AB, &A2); } + + // a copy of rct::scalarmultKey, since we can't link to libringct to avoid circular dependencies + static void secret_key_mult_public_key(crypto::public_key & aP, const crypto::public_key &P, const crypto::secret_key &a) { + ge_p3 A; + ge_p2 R; + //CHECK_AND_ASSERT_THROW_MES_L1(ge_frombytes_vartime(&A, P.bytes) == 0, "ge_frombytes_vartime failed at "+boost::lexical_cast<std::string>(__LINE__)); + ge_frombytes_vartime(&A, (const unsigned char*)P.data); + ge_scalarmult(&R, (const unsigned char*)a.data, &A); + ge_tobytes((unsigned char*)aP.data, &R); + } } namespace cryptonote
[No CFG could be retrieved]
private static final int BLOCK_HASH_CALLED_COUNT = 0 ; - - - - - - - - - - - - - - - - - -.
That is already present. Merge error somewhere. I see there's a merge commit in your PR, it should be taken out (ie, git rebase master)
@@ -109,6 +109,11 @@ public final class PhoenixTableProperties TTL, "Number of seconds for cell TTL. HBase will automatically delete rows once the expiration time is reached.", null, + false), + stringProperty( + DATA_BLOCK_ENCODING, + "The block encoding algorithm to use for Cells in HBase blocks. Options are: NONE, PREFIX, DIFF, FAST_DIFF, ROW_INDEX_V1, and others.", + null, false)); }
[PhoenixTableProperties->[getSaltBuckets->[empty,get,requireNonNull,of],getTimeToLive->[empty,get,requireNonNull,of],getCompression->[empty,get,requireNonNull,of],getDefaultColumnFamily->[empty,get,requireNonNull,of],getMinVersions->[empty,get,requireNonNull,of],getBloomfilter->[empty,get,requireNonNull,of],getRowkeys->[of,get,requireNonNull,empty,collect,toImmutableList],getVersions->[empty,get,requireNonNull,of],getImmutableRows->[empty,get,requireNonNull,of],getDisableWal->[empty,get,requireNonNull,of],getSplitOn->[empty,get,requireNonNull,of],integerProperty,booleanProperty,stringProperty,of]]
getTableProperties - if tableProperties is null return all tableProperties.
Can we have a enum property for this like we define `STORAGE_FORMAT_PROPERTY` for HiveTableProperties
@@ -158,7 +158,7 @@ namespace Dnn.ExportImport.Components.Common #region Private Methods - private static bool AddFileToArchive(ZipArchive archive, string file, int folderOffset, string folder = null) + public static bool AddFileToArchive(ZipArchive archive, string file, int folderOffset, string folder = null) { var entryName = file.Substring(folderOffset); // Makes the name in zip based on the folder ZipArchiveEntry existingEntry;
[CompressionUtil->[AddFileToArchive->[AddFileToArchive]]]
Adds a file to the archive based on the folder and offset.
At least this could be been moved outsize the "Private Methods" region
@@ -311,6 +311,7 @@ public class DataflowElementExecutionTracker extends ElementExecutionTracker { executionCount.getKey(), (unused, total) -> { int numExecutions = executionCount.getValue(); + @SuppressWarnings("PreferJavaTimeOverload") Duration attributedSampleTime = duration.dividedBy(totalExecutionsFinal).multipliedBy(numExecutions); return total == null ? attributedSampleTime : total.plus(attributedSampleTime);
[DataflowElementExecutionTracker->[takeSample->[takeSample],ExecutionJournalWriter->[startProcessing->[ElementExecution]],ElementExecution->[toString->[toString],ElementExecution],Journal->[readUntil->[remove->[remove],next->[next],hasNext->[hasNext]],pruneUntil->[remove,next,hasNext],add->[add]],create->[DataflowElementExecutionTracker]]]
Attribute processing time.
Is this a false positive? I'm not sure what the check is expecting you to change here
@@ -22,12 +22,6 @@ module SamlIdpAuthConcern render nothing: true, status: :unauthorized end - # stores original SAMLRequest in session to continue SAML Authn flow - def store_saml_request_attributes_in_session - add_sp_metadata_to_session - session[:saml_request_url] = request.original_url - end - def add_sp_metadata_to_session session[:sp] = { loa3: loa3_requested?, logo: current_sp_metadata[:logo],
[requested_authn_context->[requested_authn_context],build_asserted_attributes->[attribute_asserter]]
Validates the nagios request.
note that this was removed *on purpose* - we are no longer keeping track of the `saml_request_url` session var and are instead using `user_return_to` to send users back to the service provider. but all session data is deleted during the `auth` request, which is what this spec is for (see `delete_branded_experience` method). That is *totally fine* because the `auth` method calls `render_template_for(saml_response, saml_request.response_url, 'SAMLResponse')` ... and `saml_request.response_url` is what sets the form action for the rendered form (which is submitted with javascript). In short: we don't care about this session var
@@ -1409,6 +1409,9 @@ func GetUserIssueStats(opts UserIssueStatsOptions) (*IssueStats, error) { if len(opts.RepoIDs) > 0 { cond = cond.And(builder.In("issue.repo_id", opts.RepoIDs)) } + if len(opts.IssueIDs) > 0 { + cond = cond.And(builder.In("issue.id", opts.IssueIDs)) + } switch opts.FilterMode { case FilterModeAll:
[DiffURL->[HTMLURL],ChangeStatus->[changeStatus,loadRepo,loadPoster],loadAttributes->[loadPullRequest,loadTotalTimes,loadRepo,loadLabels,loadAttributes,isTimetrackerEnabled,loadPoster,loadComments,loadReactions],ClearLabels->[loadPullRequest,loadRepo,clearLabels],ReplaceLabels->[removeLabel,addLabels,loadLabels],APIURL->[APIURL],isTimetrackerEnabled->[loadRepo,IsTimetrackerEnabled],clearLabels->[getLabels,removeLabel],BlockedByDependencies->[getBlockedByDependencies],changeStatus->[getLabels],PatchURL->[HTMLURL],BlockingDependencies->[getBlockingDependencies],ChangeTitle->[loadRepo],HasLabel->[hasLabel],LoadAttributes->[loadAttributes],LoadPullRequest->[loadPullRequest],apiFormat->[loadPullRequest,loadRepo,loadLabels,State,APIURL,APIFormat,loadPoster],ResolveMentionsByVisibility->[loadRepo],HTMLURL->[HTMLURL],addLabel,setupSession,LoadAttributes,loadAttributes,loadPoster]
GetIssueStats returns issue statistic information by given conditions. returns a session object that can be used to query issue stats for a user.
Be careful! There's a limit to the number of arguments an SQL statement accepts, and it depends on the engine.
@@ -47,9 +47,12 @@ var ( productVersion string vchConfig *config.VirtualContainerHostConfigSpec + + insecureRegistries []string + RegistryService *registry.Service ) -func Init(portLayerAddr, product string, config *config.VirtualContainerHostConfigSpec) error { +func Init(portLayerAddr, product string, config *config.VirtualContainerHostConfigSpec, insecureRegs []url.URL) error { _, _, err := net.SplitHostPort(portLayerAddr) if err != nil { return err
[CreateImageStore,Stop,Warn,Infof,NewTicker,SplitHostPort,WithBody,Ping,UUID,New,NewPingParamsWithContext,Errorf,ImageCache,Debugf,NewCreateImageStoreParamsWithContext,Info,TODO,Update]
Init - initializes a virtual container host object. portLayer returns an object that can be used to connect to the portlayer server.
These registries are used as insecure registries - call the arg that to make it explicit
@@ -898,7 +898,10 @@ daos_kill_server(test_arg_t *arg, const uuid_t pool_uuid, "disabled, svc->rl_nr %d)!\n", rank, arg->srv_ntgts, arg->srv_disabled_ntgts - 1, svc->rl_nr); - rc = daos_mgmt_svc_rip(grp, rank, true, NULL); + /* build and invoke dmg cmd to stop the server */ + snprintf(dmg_cmd, sizeof(dmg_cmd), + "dmg system stop -i --ranks=%d --force", rank); + rc = system(dmg_cmd); assert_int_equal(rc, 0); }
[test_setup_next_step->[test_setup_pool_create],test_get_leader->[test_pool_get_info],daos_exclude_server->[daos_exclude_target],daos_add_server->[daos_add_target],get_daos_prop_with_user_acl_perms->[get_daos_prop_with_owner_and_acl,get_daos_acl_with_owner_perms],test_teardown->[test_teardown_cont_hdl,pool_destroy_safe,test_teardown_cont],get_daos_prop_with_owner_acl_perms->[get_daos_acl_with_owner_perms],test_make_dirs->[test_make_dirs],test_rebuild_wait->[test_rebuild_query],bool->[test_pool_get_info],test_setup->[test_setup_next_step]]
Kill a server Get DAO ACL with owner permissions.
You need to check the return code. Alternatively, you could use dts_create_config here instead and make buffer use DTS_CFG_MAX as the size. This function checks the return code and ensures the string is null terminated.
@@ -279,7 +279,7 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error if !ok { file, err = createfile(f) if err != nil { - return err + return errors.Wrap(err, "unable to create file storing incoming TSDB block received from LEAVING ingester") } filesXfer++ files[f.Filename] = file
[TransferTSDB->[checkFromIngesterIsInLeavingState,transfer],transferOut->[TransferChunks],v2TransferOut->[TransferTSDB]]
TransferTSDB transfers a TSDB from one ingester to another This function creates a new file in the tmp dir and opens it if it doesn t already This function is called when a user receives a block of data from the server and attempts to This function is called when the from ingester is successfully transferred.
Isn't that a little wordy? Wouldn't "unable to create file to store incoming TSDB block" be enough? Also, it would be good to include filename in the error.
@@ -35,11 +35,10 @@ class S3Storage extends FlysystemStorage throw new \RuntimeException('This storage can only handle filesystems with "AwsS3Adapter".'); } - /** @var AwsS3Adapter $adapter */ - $adapter = $filesystem->getAdapter(); + $this->adapter = $filesystem->getAdapter(); - $this->endpoint = (string) $adapter->getClient()->getEndpoint(); - $this->bucketName = $adapter->getBucket(); + $this->endpoint = (string) $this->adapter->getClient()->getEndpoint(); + $this->bucketName = $this->adapter->getBucket(); } public function getPath(array $storageOptions): string
[S3Storage->[getPath->[getStorageOption],__construct->[getBucket,getEndpoint,getAdapter]]]
Constructs a new object with the given filesystem and number of segments.
Should we `ltrim` the `/` here or does this handle applyPathPrefix correctly?
@@ -2174,7 +2174,10 @@ class TypeChecker(NodeVisitor[None], CheckerPluginInterface): rvalue_type = get_proper_type(rvalue_type) if isinstance(rvalue_type, Instance): if rvalue_type.type == typ.type: - var.type = rvalue_type + if is_valid_inferred_type(rvalue_type): + var.type = rvalue_type + else: + var.type = self.inference_error_fallback_type(rvalue_type) del partial_types[var] elif isinstance(rvalue_type, AnyType): var.type = fill_typevars_with_any(typ.type)
[TypeChecker->[get_op_other_domain->[get_op_other_domain],analyze_async_iterable_item_type->[accept],visit_try_without_finally->[check_assignment,accept],visit_class_def->[accept],iterable_item_type->[lookup_typeinfo],visit_for_stmt->[accept_loop],visit_operator_assignment_stmt->[check_final,check_assignment,accept],check_return_stmt->[get_generator_return_type,accept,get_coroutine_return_type],visit_del_stmt->[accept],check_multi_assignment->[check_assignment,accept],check_async_with_item->[check_assignment,accept],check_multi_assignment_from_union->[check_multi_assignment],find_isinstance_check_helper->[conditional_callable_type_map,find_isinstance_check_helper],visit_decorator->[check_method_override,accept,check_func_item],_visit_overloaded_func_def->[accept],should_suppress_optional_error->[contains_none],visit_assert_stmt->[accept],accept->[accept],set_inference_error_fallback_type->[accept,set_inferred_type],partition_by_callable->[make_fake_callable,partition_by_callable],check_multi_assignment_from_tuple->[check_multi_assignment_from_union,check_assignment,accept,check_rvalue_count_in_assignment],check_override->[get_op_other_domain,erase_override,is_forward_op_method],visit_try_stmt->[accept],check_member_assignment->[check_simple_assignment],conditional_callable_type_map->[partition_by_callable],lookup_qualified->[lookup],infer_issubclass_maps->[named_type],check_except_handler_test->[accept],visit_with_stmt->[accept],try_infer_partial_type_from_indexed_assignment->[accept],check_for_missing_annotations->[get_coroutine_return_type,get_generator_return_type,is_unannotated_any],function_type->[named_type,function_type],visit_print_stmt->[accept],make_fake_callable->[intersect_instance_callable],visit_assignment_stmt->[accept],fail->[fail],note->[note],handle_partial_var_type->[handle_cannot_determine_type],check_init_subclass->[accept],check_with_item->[check_assignment,accept],visit_block->[accept],analyze_index_variables->[check_assignment],type_type->[na
med_type],handle_cannot_determine_type->[defer_node],refine_parent_types->[replay_lookup],check_compatibility->[determine_type_of_class_member,bind_and_map_method],check_func_def->[accept,get_generator_receive_type,get_coroutine_return_type,get_generator_return_type,get_generator_yield_type,is_async_generator_return_type,is_generator_return_type],try_infer_partial_generic_type_from_assignment->[accept],type_check_raise->[accept],contains_none->[contains_none],check_lvalue->[check_lvalue,accept],get_generator_receive_type->[is_generator_return_type,is_async_generator_return_type],visit_while_stmt->[accept_loop],analyze_iterable_item_type->[accept],check_method_override_for_base_with_name->[defer_node],is_raising_or_empty->[accept],accept_loop->[accept],check_compatibility_super->[accept],check_assignment_to_multiple_lvalues->[check_assignment],visit_expression_stmt->[accept],analyze_container_item_type->[analyze_container_item_type],check_multi_assignment_from_iterable->[type_is_iterable,check_assignment],get_generator_return_type->[is_generator_return_type],check_indexed_assignment->[accept],check_simple_assignment->[accept],get_generator_yield_type->[is_generator_return_type,is_async_generator_return_type],flatten_lvalues->[flatten_lvalues],visit_if_stmt->[accept],lvalue_type_for_inference->[append_types_for_inference],check_assignment->[accept,check_setattr_method,check_getattr_method],str_type->[named_type],get_types_from_except_handler->[get_types_from_except_handler]],flatten->[flatten],is_valid_inferred_type->[accept],get_isinstance_type->[flatten_types],SetNothingToAny->[visit_type_alias_type->[accept]],is_static->[type,is_static],CheckerScope->[active_self_type->[top_function,active_class,enclosing_class],enclosing_class->[top_function]],is_untyped_decorator->[is_untyped_decorator,is_typed_callable],flatten_types->[flatten_types],expand_func->[accept],detach_callable->[accept]]
Try to infer a precise type for partial generic type from assignment.
What if one just assigns the variable twice (without reading it), would it silence the error on first assignment? Not that it is too bad, but still may be surprising.
@@ -79,7 +79,7 @@ spack package at this time.''', # fix MPI_Barrier segmentation fault # see https://lists.mpich.org/pipermail/discuss/2016-May/004764.html # and https://lists.mpich.org/pipermail/discuss/2016-June/004768.html - patch('mpich32_clang.patch', when='@3.2%clang') + patch('mpich32_clang.patch', when='@3.2.0%clang') depends_on('libfabric', when='netmod=ofi')
[Mpich->[die_without_fortran->[InstallError],setup_dependent_environment->[join_path,set],setup_dependent_package->[join_path,format],autoreconf->[exists,bash,which],configure_args->[append,format],depends_on,conflicts,run_before,version,patch,provides,filter_compiler_wrappers,variant]]
Create a new device object. Setup the MPI wrappers.
There is no version 3.2.0. @tgamblin I think this is a bug in the concretizer. `@3.2` should only apply to version 3.2, not to every version 3.2.X. At the same time, I'm really confused, because I've always wanted to say `depends_on('python@2.7')`, but that's never worked because it would install exactly version 2.7. So now I'm really confused how the concretizer works with regards to MAJOR.MINOR version numbers.
@@ -84,7 +84,8 @@ export class Input { // mouse events. if (this.hasTouch_) { this.hasMouse_ = !this.hasTouch_; - listenOnce(win.document, 'mousemove', this.boundOnMouseMove_); + listenOnce(win.document, 'mousemove', + /** @const {!Function} */ (this.onMouseMove_.bind(this))); } }
[No CFG could be retrieved]
Registers an event handle in case of mouse input has been detected. Registers a function to be called when the mouse input has been detected.
why do we need this cast and the const declaration?
@@ -1162,6 +1162,11 @@ public class NexmarkLauncher<OptionT extends NexmarkOptions> { return null; } + if (configuration.query == PORTABILITY_BATCH && options.isStreaming()) { + NexmarkUtils.console("Query 13 does not support streaming mode"); + return null; + } + queryName = query.getName(); // Append queryName to temp location
[NexmarkLauncher->[shortTopic->[isStreaming],shortSubscription->[isStreaming],createSource->[invokeBuilderForPublishOnlyPipeline,sourceEventsFromAvro,sourceEventsFromSynthetic,sourceEventsFromPubsub,sourceEventsFromKafka,sinkEventsToKafka,setupPubSubResources,sinkEventsToFile,sinkEventsToPubsub],currentPerf->[captureSteadyState],textFilename->[isStreaming],sinkResultsToText->[textFilename],createZetaSqlQueries->[build],setupPubSubResources->[shortSubscription,shortTopic],monitor->[isStreaming,currentPerf],logsDir->[isStreaming],run->[monitor,modelResultRates,createSource,logsDir,run,sinkEventsToAvro,maxNumWorkers,sink],isStreaming->[isStreaming],createQueries->[isZetaSql,isSql],sourceEventsFromSynthetic->[isStreaming],sinkResultsToPubsub->[shortTopic],createJavaQueries->[build],createSqlQueries->[build],createJavaQueryModels->[build],captureSteadyState->[isStreaming],invokeBuilderForPublishOnlyPipeline->[build],createQueryModels->[isZetaSql,isSql],sink->[sinkResultsToBigQuery,sinkResultsToPubsub,sinkResultsToKafka,sinkResultsToText]]]
Runs the Nexmark query and returns the result. This method is called when the user wants to sink events to Avro format.
nit - maybe have "skipping" prefix here since this reads a bit right now like it should throw an error instead of just skipping if in streaming
@@ -184,11 +184,11 @@ static size_t dummy_dma_compute_avail_data(struct dma_chan_pdata *pdata) * @bytes bytes of data. Will copy exactly this much data if possible, however * it will stop short if you try to copy more data than available. */ -static size_t dummy_dma_do_copies(struct dma_chan_pdata *pdata, int bytes) +static ssize_t dummy_dma_do_copies(struct dma_chan_pdata *pdata, int bytes) { size_t avail = dummy_dma_compute_avail_data(pdata); - size_t copied = 0; - size_t crt_copied; + ssize_t copied = 0; + ssize_t crt_copied; if (!avail) return -ENODATA;
[size_t->[dcache_writeback_region,dummy_dma_compute_avail_data,MIN,memcpy_s,dummy_dma_comp_avail_data_noncyclic,dummy_dma_comp_avail_data_cyclic,dummy_dma_copy_crt_elem,dcache_invalidate_region,assert],dma_chan_data->[spin_lock_irq,spin_unlock_irq,atomic_add,trace_dummydma_error],void->[spin_unlock_irq,atomic_sub,notifier_unregister_all,dummy_dma_channel_put_unlocked,spin_lock_irq,dma_chan_get_data],int->[tracev_dummydma,spin_unlock_irq,dummy_dma_compute_avail_data,atomic_init,rzalloc,spin_lock_irq,timer_get,dma_chan_get_data,trace_dummydma_error,timer_get_system,dummy_dma_do_copies,notifier_event,rfree]]
This function does the actual copy of the data into the channel.
I think crt_copied won't actually ever go below 0, so it could remain unsigned. It will only be done += copied when copied is >= 0. Return type should indeed be signed though, thanks.
@@ -372,10 +372,11 @@ public class GcsUtil { }, IOException.class, sleeper); - return true; + + return bucket; } catch (GoogleJsonResponseException e) { if (errorExtractor.itemNotFound(e) || errorExtractor.accessDenied(e)) { - return false; + return null; } throw e; } catch (InterruptedException e) {
[GcsUtil->[copy->[executeBatches],expand->[isGcsPatternSupported],fileSize->[fileSize],bucketExists->[shouldRetry->[shouldRetry],bucketExists],remove->[executeBatches,makeRemoveBatches],GcsUtilFactory->[create->[GcsUtil]],enqueueCopy->[copy]]]
Checks if a bucket with the given path exists.
This seems like it should be wrapped in a more specific exception and rethrown, given how we're using it
@@ -60,3 +60,9 @@ class Instance: def fields(self): return self._fields + + def get_field(self, name: str): + """ + Returns the field with name ``name``. + """ + return self._fields[name]
[Instance->[pad->[pad],count_vocab_items->[count_vocab_items],get_padding_lengths->[get_padding_lengths]]]
Returns a list of fields in the sequence.
This unnecessarily clutters the API when we can use `self.fields()["name"]` instead. It also stops us accidentally using this in allennlp internals where we assume that an `Instance` has a particular key or something.
@@ -1218,6 +1218,16 @@ class NestTest(parameterized.TestCase, test.TestCase): expected, ) + def testInvalidCheckTypes(self): + with self.assertRaises(ValueError): + nest.assert_same_structure( + nest1=array_ops.zeros((1)), nest2=array_ops.ones((1, 1, 1)), + check_types=array_ops.ones((2))) + with self.assertRaises(ValueError): + nest.assert_same_structure( + nest1=array_ops.zeros((1)), nest2=array_ops.ones((1, 1, 1)), + expand_composites=array_ops.ones((2))) + class NestBenchmark(test.Benchmark):
[NestBenchmark->[benchmark_assert_structure->[run_and_report]],NestTest->[testAttrsFlattenAndPack->[BadAttr,SampleAttr],testFlattenCustomSequenceThatRaisesException->[_CustomSequenceThatRaisesException],testMapStructureUpTo->[_CustomMapping],testHeterogeneousComparison->[_CustomMapping],testFlattenAndPack_withDicts->[_CustomMapping],testAttrsMapStructure->[UnsortedSampleAttr],testAssertSameStructure->[SameNamedType1],testFlattenWithTuplePathsUpTo->[get_paths_and_values]]]
A benchmark that compares two sequence objects and reports the difference between them.
This got flagged by internal tests: in graph mode, attempting to `bool()` cast a Tensor raises a specific error instead (`OperatorNotAllowedInGraphError`, a subclass of `TypeError`). I think the easiest way is to test that either ValueError os TypeError is raised.
@@ -5388,8 +5388,8 @@ ROM_START( adillor ) ROM_LOAD( "am1ptru3.14f", 0x580000, 0x080000, CRC(5e4e6333) SHA1(5897251e4694c5c24f1810ff6f9177a0456baf2e) ) ROM_REGION( 0x1000000, "c352", 0 ) /* sound samples */ - ROM_LOAD( "am1wavea.2l", 0x000000, 0x400000, CRC(48f8c20c) SHA1(48b4fbcb7e9dbbb70a542ef7cb7ee0e46fad23fc) ) - ROM_LOAD( "am1waveb.1l", 0x800000, 0x400000, CRC(fd8e7384) SHA1(91e53ab0293f81f8357645fd319249abc128b78e) ) + ROM_LOAD( "am1wavea.2l", 0x000000, 0x400000, CRC(2dd64531) SHA1(255a44cb5cda2a76a8c557ef242295d7290712aa) ) + ROM_LOAD( "am1waveb.1l", 0x800000, 0x400000, CRC(eaa99997) SHA1(99eb51ed7a577815a699cb7f8387cbb994983b24) ) ROM_END
[No CFG could be retrieved]
region 0x600000 - > pointrom SHA1 - 1 hash of a sequence of 16 - bit words. - - - - - - - - - - - - - - - - - -.
Same here; use ROM_LOAD16_WORD_SWAP() instead of modifying the ROMs.
@@ -79,8 +79,9 @@ def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True, info = raw.info noise_cov = mne.read_cov(fname_cov) - noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, - eeg=0.1, proj=True) + with warnings.catch_warnings(record=True): # bad proj here + noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05, + eeg=0.1, proj=True) if data_cov: with warnings.catch_warnings(record=True): data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)
[test_lcmv_source_power->[_get_data],test_lcmv->[_get_data],_get_data->[read_forward_solution_meg],test_lcmv_raw->[_get_data]]
Read in data used in tests raw epochs evoked data_cov noise_cov label forward.
this one still needs to stay
@@ -302,3 +302,14 @@ sphinx_gallery_conf = { } numpydoc_class_members_toctree = False + +def touch_example_backreferences(app, what, name, obj, options, lines): + # generate empty examples files, so that we don't get + # inclusion errors if there are no examples for a class / module + examples_path = os.path.join(app.srcdir, "generated", "%s.examples" % name) + if not os.path.exists(examples_path): + # touch file + open(examples_path, 'w').close() + +def setup(app): + app.connect('autodoc-process-docstring', touch_example_backreferences)
[get_html_theme_path,bool,abspath,int,append,isoformat,dirname,isdir,get,join,mkdir,today]
This is a helper function to set the numpydoc_class_members_toctree.
Why did you need to do this? If you are running latest `sphinx-gallery` it should not be necessary as they have lines like this in their code.
@@ -146,6 +146,10 @@ class Version(object): def dashed(self): return '-'.join(str(x) for x in self.version) + @property + def joined(self): + return ''.join(str(x) for x in self.version) + def up_to(self, index): """Return a version string up to the specified component, exclusive. e.g., if this is 10.8.2, self.up_to(2) will return '10.8'.
[coerced->[coercing_method->[coerce_versions]],VersionList->[copy->[VersionList],from_dict->[VersionList],union->[copy,update],update->[add],intersection->[VersionList,intersection,add],intersect->[intersection],add->[overlaps,add,union]],_string_to_version->[VersionList,VersionRange,Version],ver->[VersionList,_string_to_version],Version->[wildcard->[a_or_n],__lt__->[_numeric_lt,isnumeric,isdevelop],is_successor->[is_predecessor],__init__->[int_if_int]],coerce_versions->[check_type],VersionRange->[__repr__->[__str__],union->[VersionRange,is_predecessor,overlaps],intersection->[VersionRange,overlaps],__init__->[Version],satisfies->[overlaps,satisfies]]]
Returns a string with the version of the component in the order of the version.
a unit test would be nice to have to make sure the behaviour won't change down the road
@@ -0,0 +1,17 @@ +using Content.Server.GameObjects.Components.Atmos; +using Robust.Shared.GameObjects.Systems; + +namespace Content.Server.GameObjects.EntitySystems +{ + public class PumpSystem : EntitySystem + { + public override void Update(float frameTime) + { + base.Update(frameTime); + foreach (var pump in ComponentManager.EntityQuery<BasePumpComponent>()) + { + pump.Update(frameTime); + } + } + } +}
[No CFG could be retrieved]
No Summary Found.
It'd probably be best if all of pipenet updates were on their own update stage in GridAtmosphereComponent, and have them update only the pipenets on the current grid.
@@ -656,9 +656,11 @@ string WalletImpl::keysFilename() const return m_wallet->get_keys_file(); } -bool WalletImpl::init(const std::string &daemon_address, uint64_t upper_transaction_size_limit) +bool WalletImpl::init(const std::string &daemon_address, uint64_t upper_transaction_size_limit, const std::string &daemon_username, const std::string &daemon_password) { clearStatus(); + if(daemon_username != "") + m_daemon_login.emplace(daemon_username, daemon_password); return doInit(daemon_address, upper_transaction_size_limit); }
[LOG_PRINT_L1->[LOG_PRINT_L1],daemonBlockChainTargetHeight->[daemonBlockChainHeight],isNewWallet->[watchOnly,blockChainHeight],balance->[balance],loadUnsignedTx->[,status,errorString],store->[store],recoverFromKeys->[LOG_PRINT_L1],doInit->[daemonBlockChainHeight,init,setTrustedDaemon,isNewWallet,daemonSynced],doRefresh->[daemonSynced,refresh,getListener],setListener->[setListener],rescanSpent->[trustedDaemon,clearStatus],createSweepUnmixableTransaction->[status],parse_uri->[parse_uri],createTransaction->[,status],daemonSynced->[daemonBlockChainTargetHeight,daemonBlockChainHeight],close->[LOG_PRINT_L1],path->[path],amountFromDouble->[amountFromString]]
private static method for WalletImpl.
I would take by value, then `std::move` each string. It reduces a copy if the caller provides a temporary or moves the string when calling `WalletImpl::init`.
@@ -2152,7 +2152,14 @@ define([ this._minimumPixelSize = this.minimumPixelSize; var scale = getScale(this, context, frameState); - Matrix4.multiplyByUniformScale(this.modelMatrix, scale, this._computedModelMatrix); + var computedModelMatrix = this._computedModelMatrix; + Matrix4.multiplyByUniformScale(this.modelMatrix, scale, computedModelMatrix); + + //Apply rotation from glTF (Y up) to Cesium (Z up). + var translation = Matrix4.getTranslation(computedModelMatrix, scratchPosition); + var rotation = Matrix4.getRotation(computedModelMatrix, matrix3Scratch); + Matrix3.multiply(rotation, yUpTozUp, rotation); + Matrix4.fromRotationTranslation(rotation, translation, computedModelMatrix); } // Update modelMatrix throughout the graph as needed
[No CFG could be retrieved]
Updates the model matrix and transforms of the model. Check if the is shown.
Can't we just use a 4x4 matrix with a zero translation column for the rotation? For some performance gain, we can use `Matrix4.multiplyTransformation`. For even more performance gain, we can add `Matrix4.multiplyByRotationScaleTransformation` so it only considers the top-left 3x3 of the roation matrix..
@@ -933,6 +933,7 @@ module.exports = class stex extends Exchange { 'remaining': remaining, 'status': status, 'trades': trades, + 'fee': undefined, }; const fees = this.safeValue (order, 'fees'); if (fees === undefined) {
[No CFG could be retrieved]
Get order details for a specific order type. Create a new order on a given market pair.
This line should not be here, if `fees` is present.
@@ -38,8 +38,11 @@ const quiet = process.env.SERVE_QUIET == 'true'; const sendCachingHeaders = process.env.SERVE_CACHING_HEADERS == 'true'; const noCachingExtensions = process.env.SERVE_EXTENSIONS_WITHOUT_CACHING == 'true'; +const lazyBuildExtensions = process.env.LAZY_BUILD_EXTENSIONS == 'true'; const header = require('connect-header'); +const extensions = {}; + // Exit if the port is in use. process.on('uncaughtException', function(err) { if (err.errno === 'EADDRINUSE') {
[No CFG could be retrieved]
Creates a new http server to handle static files and directories for use with a gulp live server Exit in the event of a crash in the parent gulp process.
Why are you using both an ENV var and a command line argument later?
@@ -46,13 +46,15 @@ function $CacheFactoryProvider() { refresh(lruEntry); - if (isUndefined(value)) return; + if (isUndefined(value)) return value; if (!(key in data)) size++; data[key] = value; if (size > capacity) { this.remove(staleEnd.key); } + + return value; },
[No CFG could be retrieved]
Creates a cache provider that caches key - value pairs. Replies the object containing the number of non - negative negative values for a given key.
this is useless because `return undefined` is the same thing as `return`.
@@ -52,12 +52,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func (c *Controller) shootAdd(obj interface{}) { +func (c *Controller) shootAdd(obj interface{}, resetRateLimiting bool) { key, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { logger.Logger.Errorf("Couldn't get key for object %+v: %v", obj, err) return } + + if resetRateLimiting { + c.getShootQueue(obj).Forget(key) + } c.getShootQueue(obj).Add(key) }
[checkSeedAndSyncClusterResource->[syncClusterResourceToSeed],finalizeShootDeletion->[deleteClusterResourceFromSeed],durationUntilNextShootSync->[respectSyncPeriodOverwrite,reconcileInMaintenanceOnly],deleteShoot->[checkSeedAndSyncClusterResource,syncClusterResourceToSeed,respectSyncPeriodOverwrite,initializeOperation],reconcileShoot->[checkSeedAndSyncClusterResource,reconcileInMaintenanceOnly,initializeOperation,syncClusterResourceToSeed,respectSyncPeriodOverwrite],shootUpdate->[shootAdd]]
shootAdd adds the object to the queue and updates the object if necessary.
Is resetting really necessary? IIUC, `.Add(key)` adds the element immediately to the queue and `.AddRateLimited(key)` with a delay.
@@ -31,7 +31,7 @@ type Backend struct { } type Server struct { - Url string + URL string `json:"Url"` Weight int }
[No CFG could be retrieved]
NewGlobalConfiguration creates a new global configuration object.
the `json:"Url"` is to keep the API the same :wink:.
@@ -5,8 +5,11 @@ * This source code is licensed in accordance with the terms specified in * the LICENSE file found in the root directory of this source tree. */ -#include <osquery/tables.h> +#include <Windows.h> +#include <set> + +#include <osquery/tables.h> #include "osquery/core/windows/wmi.h" namespace osquery {
[genLogicalDrives->[INTEGER,results,empty,GetString,push_back,wmiPartitionResults,r,wmiLogicalDiskToPartitionResults]]
Creates a new namespace. wmiLogicalDiskToPartitionReq - Request for Logical Disk To Partition.
If you don't need some specific stuff from windows api I'd suggest to include this header via `osquery/utils/system/system.h`. It is going to be much safer because of some predefined macro like `NOMINMAX`.
@@ -529,10 +529,11 @@ class User < ApplicationRecord end def temp_username - if twitter_username - twitter_username.downcase.gsub(/[^0-9a-z_]/i, "").delete(" ") - elsif github_username - github_username.downcase.gsub(/[^0-9a-z_]/i, "").delete(" ") + Authentication::Providers.username_fields.each do |username_field| + value = public_send(username_field) + next if value.blank? + + return value.downcase.gsub(/[^0-9a-z_]/i, "").delete(" ") end end
[User->[check_for_username_change->[path],send_welcome_notification->[send_welcome_notification],blocked_by?->[blocking?],auditable?->[any_admin?],blocking?->[blocking?],resave_articles->[path]]]
Returns an array of unique username values.
This is a very large PR, it might be nice to pull out a bunch of these refactors into their own PR so this is not as big and we can push them out and ensure everything works as refactored before trying to add an additional auth provider.
@@ -123,6 +123,15 @@ public final class Pipeline { return creationTimestamp; } + /** + * Return the suggested leaderId with high priority of pipeline. + * + * @return Suggested LeaderId + */ + public UUID getSuggestedLeaderId() { + return suggestedLeaderId; + } + /** * Set the creation timestamp. Only for protobuf now. *
[Pipeline->[sameDatanodes->[getNodeSet],Builder->[build->[setLeaderId,setCreationTimestamp,setNodesInOrder,Pipeline,isEmpty],getCreationTimestamp,getLeaderId],getNodesInOrder->[getNodes],getProtobufMessage->[isEmpty],getClosestNode->[getFirstNode],toString->[getCreationTimestamp,getId,getType,getFactor,toString,getPipelineState],isEmpty->[isEmpty]]]
get creation timestamp.
Return the suggested leaderId which has a high priority among DNs of the pipeline.
@@ -151,6 +151,16 @@ func (g *ASTTransformer) Transform(node *ast.Document, reader text.Reader, pc pa v.AppendChild(v, newChild) } } + case *ast.Text: + if v.SoftLineBreak() && !v.HardLineBreak() { + renderMetas := pc.Get(renderMetasKey).(map[string]string) + mode := renderMetas["mode"] + if mode != "document" { + v.SetHardLineBreak(setting.Markdown.EnableHardLineBreakInComments) + } else { + v.SetHardLineBreak(setting.Markdown.EnableHardLineBreakInDocuments) + } + } } return ast.WalkContinue, nil })
[RegisterFuncs->[Register],GenerateWithDefault->[HasPrefix,Sprintf,CleanValue,BytesToReadOnlyString],renderSummary->[WriteString],renderIcon->[MatchString,Sprintf,ToLower,WriteString,TrimSpace],Transform->[toMetaNode,Source,HasPrefix,Walk,RemoveChildren,InsertBefore,URLJoin,SetAttributeString,NewLink,ChildCount,Text,FirstChild,IsLink,ReplaceChild,Get,AppendChild,AttributeString,NextSibling,GetItems,BytesToReadOnlyString,Replace,ToRenderConfig,Parent,HasChildren],renderDocument->[WriteRune,Sprintf,AttributeString,WriteString],renderTaskCheckBoxListItem->[RenderAttributes,Attributes,FirstChild,WriteByte,WriteString],Generate->[GenerateWithDefault],renderDetails->[WriteString],Put->[BytesToReadOnlyString],MustCompile,NewConfig,SetHTMLOption]
Transform transforms an ast. Document into an AST. This function is used to create a link around an image if it is not already a link Walk through children and create TOC nodes.
Should this be set on each node?
@@ -1499,13 +1499,11 @@ static void dmic_irq_handler(void *data) if (val1 & OUTSTAT1_ROR_BIT) trace_dmic_error("dmic_irq_handler() error: " "full fifo B or PDM overrrun"); - - /* clear IRQ */ - platform_interrupt_clear(dmic_irq(dai), 1); } static int dmic_probe(struct dai *dai) { + int irq = dmic_irq(dai); struct dmic_pdata *dmic; int ret;
[int->[OUT_GAIN_LEFT_A_GAIN,find_min_int16,trace_dmic_error,MIC_CONTROL_CLK_EDGE,interrupt_disable,ipm_helper2,OUTCONTROL1_IPM,DC_OFFSET_LEFT_B_DC_OFFS,OUTCONTROL1_OF,FIR_COEF_B,MIC_CONTROL_PDM_EN_B,CIC_CONTROL_STEREO_MODE,pm_runtime_get_sync,OUTCONTROL1_IPM_SOURCE_1,memcpy_s,fir_coef_scale,OUTCONTROL1_FINIT,OUTCONTROL1_BFTH,match_modes,OUT_GAIN_LEFT_B_GAIN,FIR_CONTROL_A_MUTE,find_equal_int16,CIC_CONTROL_MIC_A_POLARITY,CIC_CONTROL_MIC_B_POLARITY,assert,OUTCONTROL0_IPM,MIC_CONTROL_PDM_CLKDIV,stereo_helper,dai_set_drvdata,dmic_start,OUTCONTROL0_OF,platform_interrupt_mask,db2lin_fixed,rfree,FIR_CONTROL_B_DCCOMP,OUTCONTROL0_FCI,interrupt_enable,FIR_CONFIG_A_FIR_SHIFT,OUTCONTROL1_IPM_SOURCE_2,interrupt_register,FIR_CONTROL_B_START,FIR_CONFIG_B_FIR_DECIMATION,OUTCONTROL0_IPM_SOURCE_2,dmic_stop,FIR_CONTROL_B_STEREO,OUTCONTROL0_SIP,OUTCONTROL1_FCI,get_fir,CIC_CONTROL_MIC_MUTE,FIR_CONFIG_B_FIR_LENGTH,OUTCONTROL0_TH,OUTCONTROL1_IPM_SOURCE_3,FIR_CONTROL_A_START,FIR_CONFIG_A_FIR_DECIMATION,FIR_CONTROL_B_ARRAY_START_EN,FIR_CONTROL_A_DCCOMP,OUTCONTROL0_BFTH,FIR_CONTROL_A_STEREO,OUTCONTROL1_SIP,MIC_CONTROL_PDM_SKEW,trace_dmic,DC_OFFSET_RIGHT_B_DC_OFFS,OUT_GAIN_RIGHT_B_GAIN,OUT_GAIN_RIGHT_A_GAIN,configure_registers,FIR_CONFIG_A_FIR_LENGTH,dmic_context_restore,dma_get_drvdata,DC_OFFSET_LEFT_A_DC_OFFS,CIC_CONTROL_CIC_START_B,FIR_CONTROL_B_MUTE,dmic_context_store,OUTCONTROL0_FINIT,OUTCONTROL1_TH,Q_MULTSR_32X32,schedule_task_init,CIC_CONTROL_CIC_START_A,CIC_CONFIG_CIC_SHIFT,OUTCONTROL0_IPM_SOURCE_1,OUTCONTROL1_IPM_SOURCE_4,OUTCONTROL0_TIE,MAX,OUTCONTROL0_IPM_SOURCE_3,OUTCONTROL0_IPM_SOURCE_4,interrupt_unregister,CIC_CONTROL_SOFT_RESET,FIR_CONFIG_B_FIR_SHIFT,dmic_irq,OUTCONTROL1_TIE,dmic_write,select_mode,dai_get_drvdata,CIC_CONFIG_COMB_COUNT,MIC_CONTROL_PDM_EN_A,FIR_COEF_A,FIR_CONTROL_A_ARRAY_START_EN,pm_runtime_put_sync,norm_int32,rzalloc,find_max_abs_int32,ipm_helper1,platform_interrupt_unmask,DC_OFFSET_RIGHT_A_DC_OFFS,find_modes],uint32_t->[io_reg_read,dai_base],pdm_decim->[trace_dm
ic,MIN],void->[dai_base,FIR_CONTROL_A_START,MAX,io_reg_update_bits,trace_dmic_error,ceil_divide,schedule_task,db2lin_fixed,platform_interrupt_clear,MIC_CONTROL_PDM_EN_B,Q_SHIFT_LEFT,trace_dmic,dmic_irq,spin_lock,spin_unlock,dai_get_drvdata,io_reg_write,dmic_update_bits,MIC_CONTROL_PDM_EN_A,CIC_CONTROL_CIC_START_B,FIR_CONTROL_B_START,dmic_read,CIC_CONTROL_CIC_START_A,find_equal_int16],uint64_t->[dai_get_drvdata,dmic_update_bits,OUT_GAIN_LEFT_A_GAIN,OUT_GAIN_LEFT_B_GAIN,tracev_dmic,spin_lock,dmic_write,spin_unlock,q_multsr_sat_32x32]]
This function is called when the DMI interrupt is received. IRQ_AUTO_UNMASK IRQ_SUPPLY_CALLBACK dmic_ir.
where is this cleared then ? I'd expect platform_clear() to call arch_clear().
@@ -47,12 +47,10 @@ func NewCmdCreateSecret(name, fullName string, f *clientcmd.Factory, out io.Writ options.Out = out cmd := &cobra.Command{ - Use: fmt.Sprintf("%s NAME SOURCE [SOURCE ...]", name), - Short: "Create a new secret based on a file or files within a directory", - Long: fmt.Sprintf(`Create a new secret based on a file or files within a directory. - - $ %s <secret-name> <source> [<source>...] - `, fullName), + Use: fmt.Sprintf("%s NAME [KEY=]SOURCE ...", name), + Short: "Create a new secret based on a key file or on files within a directory", + Long: newLong, + Example: fmt.Sprintf(newExamples, fullName), Run: func(c *cobra.Command, args []string) { cmdutil.CheckErr(options.Complete(args, f))
[CreateSecret->[Create,BundleSecret,Fprintf],Validate->[New,Errorf],BundleSecret->[Fprintf,Join,Stat,IsDir,Name,IsRegular,New,ReadDir,SecretType,Matches,Errorf,Mode],Complete->[DefaultNamespace,Secrets,Clients],CreateSecret,Flags,StringVar,BundleSecret,ReadFile,GetFlagString,Sprintf,Validate,AddPrinterFlags,IsSecretKey,VarP,Errorf,Base,CheckErr,BoolVarP,PrintObject,Complete]
NewCmdCreateSecret returns a command that creates a new secret based on a file or directory NewCreateSecretOptions returns a new instance of CreateSecretOptions.
Sourcename is optional?
@@ -242,8 +242,9 @@ def draw_segmentation_masks( masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool. alpha (float): Float number between 0 and 1 denoting the transparency of the masks. 0 means full transparency, 1 means no transparency. - colors (list or None): List containing the colors of the masks. The colors can - be represented as PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. + colors: Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]: List containing the colors + of the masks or single color for all masks. The colors can be represented as + PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. When ``masks`` has a single entry of shape (H, W), you can pass a single color instead of a list with one element. By default, random colors are generated for each mask.
[save_image->[make_grid],make_grid->[norm_range->[norm_ip],norm_range]]
Draws segmentation masks on given RGB image. Generate a from the image and the colors.
let's just remove this sentence below then, it doesn't add much to the description now > When ``masks`` has a single entry of shape (H, W), you can pass a single color instead of a list with one element
@@ -32,7 +32,7 @@ var _ = Suite(&testManagerSuite{}) type testManagerSuite struct{} func (s *testManagerSuite) TestManager(c *C) { - m := NewManager(core.NewStorage(kv.NewMemoryKV())) + m := NewManager(storage.NewMemoryStorage()) // register legal address c.Assert(m.Register("c1", "127.0.0.1:1"), IsNil) c.Assert(m.Register("c1", "127.0.0.1:2"), IsNil)
[TestManager->[Error,GetComponentAddrs,UnRegister,Contains,GetComponent,NewMemoryKV,GetAllComponentAddrs,Assert,Register,NewStorage]]
TestManager tests the manager Assert that c1 and c2 are registered.
`MemoryStorage`,`EtcdStorage` are different with `ConfigStorage`, `RuleStorage`.... That confuse me, we should distinguish them. Consider rename `MemoryStorage`, `EtcdStorage` to `MemoryStorageBackend`,`EtcdStorageBackend` etc.
@@ -24,7 +24,7 @@ public class JTextAreaOptionPane { private final JFrame windowFrame = new JFrame(); private final JButton okButton = new JButton(); private final boolean logToSystemOut; - private final WeakReference<Window> parentComponentReference; + private final Window parentComponentReference; private int counter; private final CountDownLatch countDownLatch;
[JTextAreaOptionPane->[appendNewLine->[append],dispose->[dispose],append->[append],countDown->[setWidgetActivation]]]
Creates a JTextAreaOptionPane that displays a text area that can be used to edit This method is called to set the layout of the window frame.
Might want to consider renaming to remove the `Reference` suffix in conjunction with the type change.
@@ -193,10 +193,10 @@ class SvnDriver extends VcsDriver if (null === $this->branches) { $this->branches = array(); - if (false === strpos($this->trunkPath, '/')) { + if(false === $this->trunkPath) { $trunkParent = $this->baseUrl . '/'; } else { - $trunkParent = $this->baseUrl . '/' . dirname($this->trunkPath) . '/'; + $trunkParent = $this->baseUrl . '/' . $this->trunkPath; } $output = $this->execute('svn ls --verbose', $trunkParent);
[SvnDriver->[execute->[getMessage,getErrorOutput,execute],getComposerInformation->[getMessage,write,read,format,splitLines,execute],getTags->[buildIdentifier,splitLines,execute],supports->[getErrorOutput,execute],getBranches->[buildIdentifier,splitLines,execute],initialize->[getBranches,get,getTags],normalizeUrl->[isAbsolutePath]]]
Get the list of branch identifiers in the repository.
I see a space missing here, I will add it.
@@ -1301,9 +1301,7 @@ public class TopNQueryRunnerTest ) ); - final ResponseContext responseContext = ResponseContext.createEmpty(); - responseContext.putAll(specialContext); - Sequence<Result<TopNResultValue>> results = runWithMerge(query, responseContext); + Sequence<Result<TopNResultValue>> results = runWithMerge(query); List<Result<BySegmentTopNResultValue>> resultList = results .map((Result<TopNResultValue> input) -> { // Stupid type erasure
[TopNQueryRunnerTest->[testTopNLexicographicDimExtractionWithPreviousStop->[assertExpectedResults],testFullOnTopNFloatColumn->[assertExpectedResults],testFullOnTopNDecorationOnNumeric->[assertExpectedResults],testTopNWithMultiValueDimFilter1->[assertExpectedResults],testTopNOverPartialNullDimension->[assertExpectedResults],testFullOnTopNWithAggsOnNumericDims->[assertExpectedResults],testTopNDimExtraction->[assertExpectedResults],testTopNWithFilter1->[assertExpectedResults],testTopNDimExtractionNoAggregators->[assertExpectedResults],testAlphaNumericTopNWithNullPreviousStop->[assertExpectedResults,withDuplicateResults],testTopNOverPartialNullDimensionWithFilterOnNOTNullValue->[assertExpectedResults],testFullOnTopN->[assertExpectedResults],testTopN->[assertExpectedResults],testNumericDimensionTopNWithNullPreviousStop->[assertExpectedResults,withDuplicateResults],testTopNWithEmptyStringProducingDimExtractionFn->[assertExpectedResults],testTopNInvertedLexicographicWithPreviousStop->[assertExpectedResults],testTopNWithFilter2OneDay->[assertExpectedResults],testTopNOnMissingColumn->[assertExpectedResults],testInvertedTopNLexicographicDimExtractionWithPreviousStop->[assertExpectedResults],testFullOnTopNLongColumn->[assertExpectedResults],testTopNWithOrFilter2->[assertExpectedResults],testTopNWithNonExistentFilterMultiDim->[assertExpectedResults],testEmptyTopN->[assertExpectedResults],testTopNWithExtractionFilterAndFilteredAggregatorCaseNoExistingValue->[assertExpectedResults],testTopNWithExtractionFilterNoExistingValue->[runWithPreMergeAndMerge,assertExpectedResults],testTopNDimExtractionFastTopNUnOptimalWithReplaceMissing->[assertExpectedResults],testTopNWithMultiValueDimFilter3->[assertExpectedResults],testSortOnTimeAsLong->[assertExpectedResults],runWithPreMergeAndMerge->[runWithMerge],testTopNCollapsingDimExtraction->[assertExpectedResults],testFullOnTopNNumericStringColumnAsLong->[assertExpectedResults],testFullOnTopNLongTimeColumn->[assertExpectedResults],testSortOnSt
ringAsDouble->[assertExpectedResults],testTopNInvertedLexicographicWithNonExistingPreviousStop->[assertExpectedResults],testTopNOnMissingColumnWithExtractionFn->[assertExpectedResults],runWithMerge->[runWithMerge],testTopNWithNullProducingDimExtractionFn->[assertExpectedResults],testInvertedTopNLexicographicDimExtraction2->[assertExpectedResults],testTopNWithMultiValueDimFilter4->[assertExpectedResults],testTopNLexicographicDimExtractionOptimalNamespaceWithRunner->[assertExpectedResults],testTopNOverFirstLastFloatAggregatorUsingDoubleColumn->[assertExpectedResults],testTopNDimExtractionFastTopNOptimal->[assertExpectedResults],testTopNWithNonExistentFilterInOr->[assertExpectedResults],testFullOnTopNStringOutputAsLong->[assertExpectedResults],testTopNOverFirstLastAggregator->[assertExpectedResults],testTopNLexicographicDimExtractionWithSortingPreservedAndPreviousStop->[assertExpectedResults],testTopNWithNonExistentFilter->[assertExpectedResults],testFullOnTopNOverUniques->[assertExpectedResults],testTopNWithExtractionFilter->[assertExpectedResults],testFullOnTopNLongColumnAsString->[assertExpectedResults],testTopNWithNonExistentDimensionAndActualFilter->[assertExpectedResults],testTopNLexicographicWithPreviousStop->[assertExpectedResults],testTopNWithNonExistentDimensionAndNonExistentFilter->[assertExpectedResults],testTopNByUniques->[assertExpectedResults],testSortOnDoubleAsLong->[assertExpectedResults],testTopNWithFilter2->[assertExpectedResults],testTopNDimExtractionFastTopNOptimalWithReplaceMissing->[assertExpectedResults],testTopNLexicographicDimExtractionOptimalNamespace->[assertExpectedResults],testTopNOverPartialNullDimensionWithFilterOnNullValue->[assertExpectedResults],testTopNWithMultiValueDimFilter2->[assertExpectedResults],testTopNOverNullDimension->[assertExpectedResults],testTopNOverMissingUniques->[assertExpectedResults],testTopNWithTimeColumn->[assertExpectedResults],testInvertedTopNLexicographicDimExtractionWithPreviousStop2->[assertExpectedResults],
testFullOnTopNFloatColumnAsString->[assertExpectedResults],testFullOnTopNLongVirtualColumn->[assertExpectedResults],testTopNTimeExtraction->[assertExpectedResults],testTopNOverHyperUniqueExpression->[assertExpectedResults],testTopNStringVirtualColumn->[assertExpectedResults],testTopNBySegmentResults->[runWithMerge],testTopNLexicographicNoAggregators->[assertExpectedResults],testTopNOverNullDimensionWithFilter->[assertExpectedResults],assertExpectedResults->[assertExpectedResults],testFullOnTopNNumericStringColumnWithDecoration->[assertExpectedResults],testTopNWithOrFilter1->[assertExpectedResults],testTopNWithMultiValueDimFilter5->[assertExpectedResults],testTopNWithNonExistentDimension->[assertExpectedResults],testInvertedTopNQuery->[assertExpectedResults],testSortOnDoubleAsDouble->[assertExpectedResults],testFullOnTopNLongTimeColumnWithExFn->[assertExpectedResults],testFullOnTopNNumericStringColumnAsFloat->[assertExpectedResults],testTopNQueryCardinalityAggregatorWithExtractionFn->[assertExpectedResults,withDuplicateResults],testFullOnTopNLongColumnWithExFn->[assertExpectedResults],testFullOnTopNOverPostAggsOnDimension->[assertExpectedResults],testTopNLexicographic->[assertExpectedResults],testTopNLexicographicWithNonExistingPreviousStop->[assertExpectedResults],testTopNDimExtractionFastTopNUnOptimal->[assertExpectedResults],testTopNOverFirstLastAggregatorChunkPeriod->[assertExpectedResults],testTopNDimExtractionTimeToOneLong->[assertExpectedResults],testTopNOverHyperUniqueFinalizingPostAggregator->[assertExpectedResults],testFullOnTopNOverPostAggs->[assertExpectedResults],testTopNOverFirstLastFloatAggregatorUsingFloatColumn->[assertExpectedResults],testTopNOverHyperUniqueExpressionRounded->[assertExpectedResults],testTopNLexicographicDimExtractionUnOptimalNamespace->[assertExpectedResults],testFullOnTopNBoundFilterAndLongSumMetric->[assertExpectedResults],testTopNBySegment->[runWithMerge,assertExpectedResults],testTopNLexicographicDimExtraction->[assertExpectedRe
sults],testFullOnTopNFloatColumnWithExFn->[assertExpectedResults],testFullOnTopNDimExtractionAllNulls->[assertExpectedResults],testTopNDimExtractionToOne->[assertExpectedResults],testTopNDependentPostAgg->[assertExpectedResults],testTopNQueryByComplexMetric->[assertExpectedResults,withDuplicateResults]]]
This method performs a top - n test on the specified segments. Tests if the result list contains the top - n result value.
Is `specialContext` not required now to be a part of response context.
@@ -267,8 +267,14 @@ public class KsqlConfig extends AbstractConfig { public static final String KSQL_QUERY_PULL_TABLE_SCAN_ENABLED = "ksql.query.pull.table.scan.enabled"; public static final String KSQL_QUERY_PULL_TABLE_SCAN_ENABLED_DOC = - "Config to enable full table scans for pull queries"; - public static final boolean KSQL_QUERY_PULL_TABLE_SCAN_ENABLED_DEFAULT = false; + "Config to enable pull queries that scan over the data"; + public static final boolean KSQL_QUERY_PULL_TABLE_SCAN_ENABLED_DEFAULT = true; + + public static final String KSQL_QUERY_STREAM_PULL_QUERY_ENABLED + = "ksql.query.pull.stream.enabled"; + public static final String KSQL_QUERY_STREAM_PULL_QUERY_ENABLED_DOC = + "Config to enable pull queries on streams"; + public static final boolean KSQL_QUERY_STREAM_PULL_QUERY_ENABLED_DEFAULT = true; public static final String KSQL_QUERY_PULL_INTERPRETER_ENABLED = "ksql.query.pull.interpreter.enabled";
[KsqlConfig->[ConfigValue->[isResolved->[isResolved]],buildConfigDef->[defineLegacy,defineCurrent],getKsqlStreamConfigProps->[getKsqlStreamConfigProps],buildStreamingConfig->[applyStreamsConfig],streamTopicConfigNames->[getName],getAllConfigPropsWithSecretsObfuscated->[getKsqlConfigPropsWithSecretsObfuscated],overrideBreakingConfigsWithOriginalValues->[KsqlConfig],empty->[KsqlConfig],cloneWithPropertyOverwrite->[KsqlConfig,getKsqlStreamConfigProps,buildStreamingConfig],getKsqlStreamConfigPropsWithSecretsObfuscated->[convertToObfuscatedString],resolveStreamsConfig->[ConfigValue,empty],CompatibilityBreakingConfigDef->[defineCurrent->[define],defineLegacy->[define],define->[define]],configDef,buildStreamingConfig]]
region Public API. Enables whether scalable push queries are enabled. Scalable push queries require no window but.
Slight rewording, since this check is done in query parsing before we know whether it's a TPQ or SPQ.
@@ -650,7 +650,12 @@ duns_create_path(daos_handle_t poh, const char *path, struct duns_attr_t *attrp) #endif /** create a new directory if POSIX/MPI-IO container */ - rc = mkdir(path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + if (backend_dfuse) + rc = mknod(path, + S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH | S_IFIFO, + 0); + else + rc = mkdir(path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); if (rc == -1) { int err = errno;
[No CFG could be retrieved]
create a new container if necessary create a container with specified uuid or a generated random uuid.
(style) line over 80 characters
@@ -755,7 +755,7 @@ cont_open(struct rdb_tx *tx, struct ds_pool_hdl *pool_hdl, struct cont *cont, * ds_cont_op_handler. */ rc = cont_prop_read(tx, cont, in->coi_prop_bits, &prop); - out->cqo_prop = prop; + out->coo_prop = prop; out: D_DEBUG(DF_DSMS, DF_CONT": replying rpc %p: %d\n",
[No CFG could be retrieved]
finds the requested property in the list of properties and puts it in output. END of DSS.
Are these changes related to this patch?
@@ -518,6 +518,10 @@ public class ChannelPublishingJmsMessageListener return super.buildErrorMessage(null, throwable); } + MessagingTemplate getMessagingTemplate() { + return this.messagingTemplate; + } + @Override public String getComponentType() { if (ChannelPublishingJmsMessageListener.this.expectReply) {
[ChannelPublishingJmsMessageListener->[afterPropertiesSet->[afterPropertiesSet,setBeanFactory],setReplyChannel->[setReplyChannel],setRequestChannel->[setRequestChannel],setReplyTimeout->[setReplyTimeout],setErrorChannel->[setErrorChannel],setRequestTimeout->[setRequestTimeout],GatewayDelegate->[buildErrorMessage->[buildErrorMessage],send->[send],getErrorChannel->[getErrorChannel],sendAndReceiveMessage->[sendAndReceiveMessage]],getComponentName->[getComponentName],getComponentType->[getComponentType],setShouldTrack->[setShouldTrack],stop->[stop],start->[start],setErrorChannelName->[setErrorChannelName],setReplyChannelName->[setReplyChannelName],setComponentName->[setComponentName],setRequestChannelName->[setRequestChannelName]]]
This method is overridden to add the component name to the ErrorMessage.
Just curious: what made you to add this method? And if it is here already any way how about to make it `protected` ?
@@ -12,6 +12,12 @@ RSpec.describe RssReader, type: :service, vcr: vcr_option do let(:nonpermanent_link) { "https://medium.com/feed/@macsiri/" } let(:rss_data) { RSS::Parser.parse(HTTParty.get(link).body, false) } + # Override the default Rails logger as these tests require the Timber logger. + before do + timber_logger = Timber::Logger.new(nil) + Rails.logger = ActiveSupport::TaggedLogging.new(timber_logger) + end + describe "#get_all_articles" do before do [link, nonmedium_link, nonpermanent_link].each do |feed_url|
[create,new,let,get_all_articles,be,describe,current,ago,first,it,travel,freeze,to,verify,and_raise,body,before,require,count,change,from_now,parse,feed_fetched_at,fetch_user,times,body_markdown,receive,each,context,have_received,not_to,eq,and_call_original,find_by,at_least,update_column]
requires rails_helper logs an article creation error.
don't forget the `after` to reset the logger, otherwise this will stay attached to Timber in all the other spec files, as `Rails.logger` is a singleton
@@ -100,6 +100,7 @@ public class UserPrefs extends UserPrefsComputed public void writeUserPrefs(CommandWithArg<Boolean> onCompleted) { + UpdatePrefs(session_.getSessionInfo().getPrefs()); server_.setUserPrefs( session_.getSessionInfo().getUserPrefs(), new ServerRequestCallback<Void>()
[UserPrefs->[syncToggleTabKeyMovesFocusState->[syncToggleTabKeyMovesFocusState],writeUserPrefs->[writeUserPrefs],loadScreenReaderEnabledSetting->[setScreenReaderMenuState],onToggleTabKeyMovesFocus->[syncToggleTabKeyMovesFocusState,writeUserPrefs],onToggleScreenReaderSupport->[getScreenReaderEnabled,setScreenReaderMenuState,setScreenReaderEnabled,writeUserPrefs],onClearUserPrefs->[onError->[onError]]]]
Write user prefs.
nit: method name should be camelCase?
@@ -168,6 +168,10 @@ def _get_artemis123_info(fname): 'logno': i + 1, 'scanno': i + 1, 'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE, 'coord_frame': FIFF.FIFFV_COORD_DEVICE} + # REF_018 has a zero cal which can cause problems. Let's set it to + # a value of another ref channel to make writers/readers happy. + if t['cal'] == 0: + t['cal'] = 4.716e-10 t['loc'] = loc_dict.get(chan['name'], np.zeros(12)) if (chan['name'].startswith('MEG')):
[RawArtemis123->[__init__->[_get_artemis123_info]]]
Function for extracting info from artemis123 header files. Build a dict of data structures for a . type - a function to build the structure of that is represented by a dict Figure out the type of the missing coil and add it to the info.
cc @bloyl okay with you? Without this change we get `invalid division` warnings when writing because we divide out by `cals`, which was causing division by zero.
@@ -83,8 +83,10 @@ def deprecated(reason, replacement, gone_in, issue=None): if issue is not None: url = "https://github.com/pypa/pip/issues/" + str(issue) message += " You can find discussion regarding this at {}.".format(url) + if gone_in is not None: + message += " pip {} will remove this functionality.".format(gone_in) + # Raise as an error if it has to be removed. + if parse(current_version) >= parse(gone_in): + raise PipDeprecationWarning(message) - # Raise as an error if it has to be removed. - if gone_in is not None and parse(current_version) >= parse(gone_in): - raise PipDeprecationWarning(message) warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
[_showwarning->[warning,getLogger,issubclass,_original_showwarning],install_warning_logger->[simplefilter],deprecated->[parse,str,format,warn,PipDeprecationWarning]]
Deprecate existing functionality.
I think the most important information should be listed first. So that would be (1) the version in which it was / will be removed, (2) then the replacement, and (3) ending with where to go for more info.
@@ -487,7 +487,7 @@ class ModuleTestHelper foreach ($private_oid as $oid) { if (isset($data[$oid])) { $parts = explode('|', $data[$oid], 3); - $parts[2] = '<private>'; + $parts[2] = $parts[1] === '4' ? '<private>' : '3C707269766174653E'; $data[$oid] = implode('|', $parts); } }
[ModuleTestHelper->[getModules->[getSupportedModules],saveSnmprec->[qPrint],generateTestData->[parseArgs,qPrint]]]
Cleans snmprec data for missing private OID.
I would change this to `$parts[1] === '4x' ? '3C707269766174653E' : '<private>'` but I'm not sure it makes a difference.
@@ -202,6 +202,12 @@ class ProductVariant(CountableDjangoObjectType): interfaces = [relay.Node] model = models.ProductVariant + @permission_required('product.manage_products') + def resolve_digital_content(self, info): + if hasattr(self, 'digital_content'): + return self.digital_content + return None + def resolve_stock_quantity(self, info): return self.quantity_available
[ProductVariant->[resolve_attributes->[resolve_attribute_list]],Category->[resolve_background_image->[resolve_background_image]],Product->[resolve_availability->[ProductAvailability],resolve_attributes->[resolve_attribute_list],resolve_thumbnail->[Image],resolve_margin->[Margin]],Collection->[resolve_background_image->[resolve_background_image]],AttributeValue->[resolve_type->[resolve_attribute_value_type]]]
Resolve stock quantity.
You would achieve the same result with: `return getattr(self, 'digital_content', None)`.
@@ -196,9 +196,16 @@ public class Partitioner { * Generate the partitions based on the lists specified by the user in job config */ private List<Partition> createUserSpecifiedPartitions() { + List<Partition> partitions = new ArrayList<>(); List<String> watermarkPoints = state.getPropAsList(USER_SPECIFIED_PARTITIONS); + boolean isEarlyStop = state.getPropAsBoolean(IS_EARLY_STOP); + + if (isEarlyStop && isEarlyStopEnabled() && isFullDump()) { + throw new UnsupportedOperationException("We found early stop is required for this source, but full dump doesn't support this mode."); + } + if (watermarkPoints == null || watermarkPoints.size() == 0 ) { LOG.info("There should be some partition points"); long defaultWatermark = ConfigurationKeys.DEFAULT_WATERMARK_VALUE;
[Partitioner->[compare->[compare],getPartitionList->[getPartitions],adjustWatermark->[adjustWatermark],getCurrentTime->[getCurrentTime],getPartitions->[getPartitions]]]
Creates a list of partitions based on user-specified partitions. This method returns partitions that are not yet in the partition list.
Always raise the configuration error and do it earlier.
@@ -459,6 +459,9 @@ public class ConfigurationKeys { "source.querybased.promoteUnsignedIntToBigInt"; public static final boolean DEFAULT_SOURCE_QUERYBASED_PROMOTE_UNSIGNED_INT_TO_BIGINT = false; + public static final String ENABLE_DELIMITED_IDENTIFIER = "enable.delimited.identifier"; + public static final boolean DEFAULT_DELIMITED_IDENTIFIER = false; + /** * Configuration properties used by the FileBasedExtractor */
[ConfigurationKeys->[name,toString,toMillis]]
This method is intended to provide a list of configuration properties that can be used by the Configuration properties for the KafkaSource class.
Convention is DEFAULT_<key>, so this should be DEFAULT_ENABLE_DELIMITED_IDENTIFIER.
@@ -107,6 +107,6 @@ class TestCryptoExamples(AsyncKeyVaultTestCase): from azure.keyvault.keys.crypto import SignatureAlgorithm verified = await client.verify(SignatureAlgorithm.rs256, digest, signature) - assert verified.result is True + assert verified.is_valid is True # [END verify]
[TestCryptoExamples->[test_wrap_unwrap_async->[get_resource_name,print,create_rsa_key,wrap_key,unwrap_key,get_cryptography_client],test_sign_verify_async->[get_resource_name,verify,print,create_rsa_key,sha256,sign,get_cryptography_client],test_encrypt_decrypt_async->[get_resource_name,print,decrypt,create_rsa_key,encrypt,get_cryptography_client],md5,AsyncVaultClientPreparer,ResourceGroupPreparer]]
Test signature and verify async.
My bad, style point, should be `assert verified.is_valid` because this evaluates to e.g. `assert True is True`
@@ -109,7 +109,10 @@ export class AmpStoryTooltip { * Clicked element producing the tooltip. Used to avoid building the same * element twice. * @private {?Element} */ - this.clickedEl_ = null; + this.previousClickedEl_ = null; + + /** @private */ + this.expandComponentHandler_ = this.onExpandComponent_.bind(this); this.storeService_.subscribe(StateProperty.TOOLTIP_ELEMENT, el => { this.onTooltipStateUpdate_(el);
[No CFG could be retrieved]
Popup element triggered by clickable elements in the amp-story-grid-layer. Hides the tooltip layer when the user clicks on the UI state.
nit: I think this is roughly equivalent to what other places call a "target" element; I think it would make sense to use that language here too (for `this.previousClickedEl_` as well as the local vars `clickedEl` below).
@@ -241,12 +241,17 @@ class RestApiClient(object): @handle_return_deserializer() def authenticate(self, user, password): - '''Sends user + password to get a token''' + """Sends user + password to get a token""" auth = HTTPBasicAuth(user, password) url = "%s/users/authenticate" % self._remote_api_url t1 = time.time() ret = self.requester.get(url, auth=auth, headers=self.custom_headers, verify=self.verify_ssl) + if ret.status_code == 401: + raise AuthenticationException("Wrong user or password") + # Cannot check content-type=text/html, conan server is doing it wrong + if not ret.ok or "html>" in ret.content: + raise ConanException("Invalid server response, check remote URL and try again") duration = time.time() - t1 log_client_rest_api_call(url, "GET", duration, self.custom_headers) return ret
[RestApiClient->[auth->[JWTAuth],_get_json->[get_exception_from_error],_remove_conanfile_files->[check_credentials],upload_files->[_file_server_capabilities],_remove_package_files->[check_credentials],remove_packages->[check_credentials],remove_conanfile->[check_credentials],download_files_to_folder->[_file_server_capabilities],get_path->[_file_server_capabilities,_get_json,is_dir],download_files->[_file_server_capabilities],handle_return_deserializer]]
Sends user + password to get a token.
Not display HTML? Maybe it is useful (as if it is a proxy). If not, maybe say something in the error message about proxies too. Why "html>" and not "<html>"?
@@ -1,9 +1,11 @@ -import Marketplace from '@origin/contracts/build/contracts/V00_Marketplace' +import MarketplaceV0 from '@origin/contracts/build/contracts/V00_Marketplace' +import MarketplaceV1 from '@origin/contracts/build/contracts/V01_Marketplace' import txHelper, { checkMetaMask } from '../_txHelper' import contracts, { setMarketplace } from '../../contracts' -const data = Marketplace.bytecode -async function deployMarketplace(_, { token, from, autoWhitelist }) { +async function deployMarketplace(_, { token, from, autoWhitelist, version }) { + const Marketplace = version === '001' ? MarketplaceV1 : MarketplaceV0 + const data = Marketplace.bytecode const web3 = contracts.web3Exec await checkMetaMask(from) const Contract = new web3.eth.Contract(Marketplace.abi)
[No CFG could be retrieved]
Deploy a marketplace to a contract.
nit: maybe throw an exception if version is unknown so that in future we don't deploy MarketplaceV0 again by mistake :)
@@ -35,7 +35,9 @@ type labelScheduler struct { // Now only used for reject leader schedule, that will move the leader out of // the store with the specific label. func newLabelScheduler(opController *schedule.OperatorController) schedule.Scheduler { - filters := []schedule.Filter{schedule.StoreStateFilter{TransferLeader: true}} + filters := []schedule.Filter{ + schedule.StoreStateFilter{TransferLeader: true}, + } return &labelScheduler{ baseScheduler: newBaseScheduler(opController), selector: schedule.NewBalanceSelector(core.LeaderKind, filters),
[IsScheduleAllowed->[OperatorCount,GetLeaderScheduleLimit],Schedule->[SelectTarget,GetStoreId,NewOperator,Inc,GetPeer,GetID,GetPendingPeers,GetLabels,GetName,CheckLabelProperty,GetStores,Reflect,WithLabelValues,GetDownPeers,Debug,NewExcludedFilter,Uint64,RandLeaderRegion,GetFollowerStores,GetRegionEpoch],RegisterScheduler,NewBalanceSelector]
returns a scheduler that will run on the given cluster. CheckLabelProperty - check if the label property is a reject leader.
No need to consider overload?
@@ -328,7 +328,7 @@ public class MavenArtifactResolver { final ArtifactDescriptorResult descr = resolveDescriptor(artifact); Collection<String> excluded; if(excludedScopes.length == 0) { - excluded = Arrays.asList(new String[] {"test", "provided"}); + excluded = Collections.emptyList(); } else if (excludedScopes.length == 1) { excluded = Collections.singleton(excludedScopes[0]); } else {
[MavenArtifactResolver->[Builder->[build->[MavenArtifactResolver]],collectManagedDependencies->[collectDependencies,collectManagedDependencies],newCollectRequest->[aggregateRepositories],install->[install],resolveDependencies->[resolveDependencies,resolveDescriptor,getRepositories],collectDependencies->[collectDependencies],builder->[Builder],resolveManagedDependencies->[resolveDependencies,resolveManagedDependencies],resolveVersionRange->[resolveVersionRange],newCollectManagedRequest->[resolveDescriptor,getRepositories],aggregateRepositories->[aggregateRepositories],newResolutionRepositories->[newResolutionRepositories],setOffline]]
Creates a new CollectRequest object with the dependencies and versions merged.
I agree this was an ugly choice to design it this way. But by default "test" and "provided" should be excluded. That is, if the caller doesn't provide anything.
@@ -99,9 +99,13 @@ func DownloadAsset(ctx context.Context, log logger.Logger, params chat1.S3Params // decrypt body dec := NewSignDecrypter() - decBody := dec.Decrypt(tee, asset.Key, asset.VerifyKey) - if err != nil { - return err + var decBody io.Reader + if asset.Nonce != nil { + var nonce [signencrypt.NonceSize]byte + copy(nonce[:], asset.Nonce) + decBody = dec.DecryptWithNonce(tee, &nonce, asset.Key, asset.VerifyKey) + } else { + decBody = dec.Decrypt(tee, asset.Key, asset.VerifyKey) } n, err := io.Copy(w, decBody)
[Write->[progress],Seek,GetReader,InitMulti,Duration,Close,PutReader,Done,Copy,ReadFull,Sum,Decrypt,New,Go,Errorf,After,Wait,Debug,Bucket,Equal,WithContext,Err,ACL,TeeReader,NewReader,PutPart,Complete]
putSingle uploads attachment data from r to S3 with the Put API. putSingle - put single attachment in the queue.
What's the circumstance where `asset.Nonce == nil` here?
@@ -62,10 +62,7 @@ def call_signing(file_obj, endpoint): temp_filename = temp_file.name # Extract jar signature. - jar = JarExtractor(path=storage.open(file_obj.file_path), - outpath=temp_filename, - omit_signature_sections=True, - extra_newlines=True) + jar = JarExtractor(path=storage.open(file_obj.file_path)) log.debug(u'File signature contents: {0}'.format(jar.signatures))
[sign_file->[get_endpoint,call_signing,supports_firefox],call_signing->[SigningError,get_id]]
Get the jar signature and send it to the signing server.
Can you move that below, closer to where it's needed ?
@@ -10,9 +10,9 @@ namespace NServiceBus.Extensibility /// <summary> /// Initialized a new instance of <see cref="ContextBag" />. /// </summary> - public ContextBag(ContextBag parentBag = null) + public ContextBag(ContextBag parentContext = null) { - this.parentBag = parentBag; + this.parentContext = parentContext; } /// <summary>
[ContextBag->[TryGet->[TryGet],Set->[Set],Remove->[Remove],T->[TryGet]]]
Constructor for a context bag. Gets the requested extension from the context or creates a new one if needed.
If we're changing the name of the parameter/field, perhaps `parent` would be a better name, since Bag/Context is a bit redundant?
@@ -951,7 +951,12 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReac limits := &validation.Limits{} flagext.DefaultValues(limits) - ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit)) + ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit, 0)) + t.Cleanup(func() { + // Reset the limiter for future tests. + ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0)) + }) + // Prepare distributors. ds, _, r, _ := prepare(t, prepConfig{ numIngesters: 3,
[series->[Lock,Unlock],MetricsForLabelMatchers->[trackCall,FromMetricsForLabelMatchersRequest,Lock,Unlock],QueryStream->[Time,Unlock,trackCall,Marshal,New,Lock,SampleValue,Encoding,Bytes,Sleep,FromQueryRequest,Add],MetricsMetadata->[trackCall,Lock,Unlock],Check->[trackCall,Lock,Unlock],countCalls->[Lock,Unlock],Push->[TenantID,trackCall,Lock,Unlock],Query->[Unlock,trackCall,Lock,Sleep,FromQueryRequest],SliceIsSorted,Message,initConfig,CAS,GetCodec,Inc,Itoa,StopAsync,New,NewMatcher,Len,HTTPResponseFromError,WithLabelValues,Time,GenerateTokens,FromStrings,cleanupInactiveUser,NoError,Fatalf,FromLabelAdaptersToLabels,NewQueryLimiter,FromLabelsToLabelAdapters,MetricsMetadata,Push,ResetTimer,StopAndAwaitTerminated,Now,checkReplica,countCalls,Add,QueryStream,ToWriteRequest,GatherAndCompare,HealthyInstancesCount,InstancesCount,TenantID,Validate,AddIngester,NotEqual,UnixNano,Cleanup,MetricsForLabelMatchers,ReportAllocs,NewBuilder,Sort,Repeat,SampleValue,InjectOrgID,NewReader,FromError,Fail,StartAndAwaitRunning,series,Duration,initLimits,Min,Poll,prepareSeries,Nil,Labels,NewInMemoryClient,LabelValue,Equal,AllocsPerRun,Query,Sprintf,String,DefaultValues,ElementsMatch,Run,True,NewPedanticRegistry,NewNopLogger,Set,LabelsToMetric,Error,Compare,NewOverrides,Tick,Errorf,PrefixClient,SeriesChunksToMatrix,Unix,Contains,Matches,MustNewMatcher,AddQueryLimiterToContext,Background,prepareConfig]
TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReached assert. Equal t.
You just made me realise that `ctx` is global, but shouldn't. Instead of using `t.Cleanup()` please remove the `ctx` global variable and define it in each test that needs it.
@@ -70,6 +70,15 @@ async function createListing(_, input) { let tx const deposit = contracts.web3.utils.toWei(String(input.deposit), 'ether') + let version = input.version + if (!version) { + version = Object.keys(contracts.marketplaces).sort()[0] + } + if (!contracts.marketplaces[version]) { + throw new Error(`No marketplace with version ${version}`) + } + const marketplace = contracts.marketplaces[version].contractExec + if (autoApprove && input.deposit > 0) { const fnSig = contracts.web3.eth.abi.encodeFunctionSignature( 'createListingWithSender(address,bytes32,uint256,address)'
[No CFG could be retrieved]
Create a listing of the given units. create a block of blocks for a single block of blocks.
Sorting is by ascending order so this will use the contract with the oldest version. e.g. '000' Shouldn't we default to using the most recent contract when creating new listings?
@@ -53,7 +53,7 @@ func (r *ClusterRoleReaper) Stop(namespace, name string, timeout time.Duration, } } - if err := r.roleClient.ClusterRoles().Delete(name, &metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) { + if err := r.roleClient.ClusterRoles().Delete(name, &metav1.DeleteOptions{}); err != nil { return err }
[Stop->[ClusterRoleBindings,Infof,ClusterRoles,RoleBindings,List,Delete,IsNotFound]]
Stop stops the roleReaper.
`&& !kerrors.IsNotFound(err)` looks to have been an intentional choice. Also, we risk breaking existing scripts by changing behavior for this case. Tagging @simo5 and @deads2k to confirm it'd be fine to remove that check.
@@ -67,6 +67,9 @@ const SidebarEvents = { CLOSE: 'sidebarClose', }; +/** @private @const {Object<string, Object<string, Array<string>>>} */ +const EXCLUDE_FROM_SWIPE_CLOSE = {'input': {'type': ['range']}}; + /** * @extends {AMP.BaseElement} */
[No CFG could be retrieved]
Provides a basic AMP sidebar with a single sidebar. Returns the element that is specified by the given index in the sequence of elements.
Can we simplify this? This looks like a YAGNI smell - we're making it more complex than it needs to be. IMHO, in `excludeFromSwipeClose` you can just check for input type="range" and return.
@@ -147,7 +147,7 @@ class Web3Provider extends Component { curr = curr && curr.toLowerCase() if (curr !== next) { - curr && Store.dispatch(showAlert('MetaMask account has changed.')) + curr && Store.dispatch(showAlert('{this.state.currentProvider} account has changed.')) this.setState({ accountsError: null,
[No CFG could be retrieved]
ComponentDidMount - Component that is called when the component is mounted. Check if network is connected and if not set it will delay and condition the use of the.
Looks like this is supposed to be a template literal.
@@ -130,6 +130,9 @@ import java.util.PriorityQueue; * */ final class PoolChunk<T> implements PoolChunkMetric { + @SuppressWarnings("unchecked") + private static final AtomicIntegerFieldUpdater<PoolChunk<?>> PINNED_UPDATER = (AtomicIntegerFieldUpdater<PoolChunk<?>>) + (AtomicIntegerFieldUpdater<?>) newUpdater(PoolChunk.class, "pinnedBytes"); private static final int SIZE_BIT_LENGTH = 15; private static final int INUSED_BIT_LENGTH = 1; private static final int SUBPAGE_BIT_LENGTH = 1;
[PoolChunk->[removeAvailRun->[removeAvailRun],allocateRun->[removeAvailRun],collapsePast->[getAvailRunByOffset,removeAvailRun],splitLargeRun->[insertAvailRun],free->[insertAvailRun,free],collapseNext->[getAvailRunByOffset,removeAvailRun],allocateSubpage->[allocateRun,allocate,calculateRunSize],toString->[toString],usage->[usage]]]
A pool chunk metric. private static final int MAX_SIZE ; private static int MAX_SIZE_FOR_P.
nit: Not strong about this but we could also just use `AtomicInteger` directly here as I wouldn't expect a massive amount of these instances. That said I am not strong on it at all.
@@ -144,6 +144,7 @@ process_credential_response(Drpc__Response *response, if (response->status != DRPC__STATUS__SUCCESS) { /* Recipient could not parse our message */ + D_ERROR("Agent credential drpc request failed: %d\n", response->status); return -DER_MISC; }
[char->[getenv],dc_sec_request_creds->[process_credential_response,drpc__response__free_unpacked,request_credentials_via_drpc],int->[security_credential__unpack,D_ALLOC,daos_iov_set,memcpy,drpc_close,drpc__call__free_unpacked,new_credential_request,drpc_call,get_agent_socket_path,drpc_connect,sanity_check_credential_response,security_credential__free_unpacked,send_drpc_message],Drpc__Call->[drpc__call__init,D_ALLOC_PTR]]
This function processes a credential response.
(style) line over 80 characters
@@ -1055,12 +1055,10 @@ class Toolbox extends Component<Props, State> { key = 'record' showLabel = { true } />, this._shouldShowButton('sharedvideo') - && <OverflowMenuItem + && <SharedVideoButton accessibilityLabel = { t('toolbar.accessibilityLabel.sharedvideo') } - icon = { IconShareVideo } key = 'sharedvideo' - onClick = { this._onToolbarToggleSharedVideo } - text = { _sharingVideo ? t('toolbar.stopSharedVideo') : t('toolbar.sharedvideo') } />, + showLabel = { true } />, this._shouldShowButton('etherpad') && <SharedDocumentButton key = 'etherpad'
[No CFG could be retrieved]
Returns an array of items that can be shown in the toolbar. A toolbar with menu items for stats.
I think the label is set in the component.
@@ -271,9 +271,9 @@ class LocalPantsRunner(object): except GracefulTerminationException as e: logger.debug('Encountered graceful termination exception {}; exiting'.format(e)) return e.exit_code - except Exception as e: - logger.warn('Encountered unhandled exception {} during rule execution!' - .format(e)) + except Exception: + logger.warning('Encountered unhandled exception during rule execution!') + logger.warning(traceback.format_exc()) return PANTS_FAILED_EXIT_CODE else: return PANTS_SUCCEEDED_EXIT_CODE
[LocalExiter->[exit->[exit]],LocalPantsRunner->[_maybe_run_v1->[create],_run->[_maybe_run_v1,exit,_compute_final_exit_code,_maybe_run_v2],create->[_maybe_init_graph_session,parse_options,_maybe_init_target_roots],set_start_time->[LocalExiter]]]
Runs the v2 rule if necessary.
*not really relevant note* Was this changed to `logger.warning()` instead of `.warn()` because of a deprecation warning when using `logger.warn()`? Because I thought I saw that happen elsewhere, too, and it seems like the fact that `logger.warn{,ing}()` are used in `except` blocks means those might not always show up when running pants. I might do a `s/logger\.warn/logger.warning/g` on the repo in another PR.
@@ -207,7 +207,7 @@ namespace auto subBasisOutputValues = getOutputView<OutputScalar>(fs, op, subBasis->getCardinality(), numPoints, spaceDim); subBasis->getValues(subBasisOutputValues, inputPoints, op); - + Kokkos::fence(); bool vectorValued = (outputValues.rank() == 3); // F,P,D -- if scalar-valued, F,P for (int pointOrdinal=0; pointOrdinal<numPoints; pointOrdinal++)
[CellTopology->[testSubBasis,runSubBasisTests],runSubBasisTests->[testSubBasis]]
Test if a sub - basis is a superset of the other. finds the best test case for the given node getHierarchicalBasis - > getHierarchicalBasis - > getHierarchicalBas OrderFieldOrdinals - > subBasis output values for the missing values in the basis.
Why `Kokkos::fence()` instead of `DeviceSpaceType().fence()`?
@@ -143,7 +143,8 @@ public class DistributedExecutionCompletionService<V> implements CompletionServi */ public Future<V> submit(Runnable task, V result) { if (task == null) throw new NullPointerException(); - Future<V> f = executor.submit(task, result); + NotifyingFuture<V> f = (NotifyingFuture<V>) executor.submit(task, result); + f.attachListener(listener); return f; }
[DistributedExecutionCompletionService->[take->[take],poll->[poll],submit->[submit],QueueingListener]]
Submits a task to the pool.
IMHO listener should be attached inside of executor.submit() between newTaskFor() and executeFuture(); It can be done if we use subclassing and delegation for executor; but I think it's quite common technique here (invoke done on late attach here). does it make sense?
@@ -12,6 +12,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + "gorm.io/gorm" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/solidity_vrf_coordinator_interface" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/eth"
[ServicesForSpec->[NewVRFORM,Eth,With,Pipeline,ToInt,New,HeadBroadcaster,Errorf,MustGetABI,Address,LogBroadcaster,Named,NewVRFCoordinatorV2,NewVRFCoordinator,Get,Config,NewHighCapacityMailbox,TxManager,Client,VRF],Raw,Errorw,Replace,SetBytes,String,DecodeString,Scan]
Package vrf import imports a single object that implements the Sealed interface for the given ServicesForSpec returns a new instance of the delegate that can be used to retrieve the V.
What is this being used for? gorm should not be anywhere in the code now
@@ -44,6 +44,7 @@ module Search "image_url" => source["main_image"], "title" => source["title"] } + source["_score"] = hit["_score"] source.merge(timestamps_hash(hit)) end
[FeedContent->[timestamps_hash->[parse,to_i,dig],prepare_doc->[to_i,dig,merge,timestamps_hash,map],search_documents->[search,prepare_doc,paginate_hits,as_hash,set_query_size,map],index_settings->[production?],freeze,dig,pluralize,define_method,each]]
prepare_doc prepares a document for the next request.
Added this return value mainly for debugging purposes and additional information that is nice to have. It is not actually going to be used by anything in the code.
@@ -2539,7 +2539,7 @@ describe RequestController do context 'when there is a last response' do - let(:info_request){ FactoryBot.create(:info_request_with_incoming) } + let(:info_request) { FactoryBot.create(:info_request_with_incoming) } it 'should redirect to the "response url"' do session[:user_id] = info_request.user_id
[expect_redirect->[post_status],get_attachment,expect_hidden,expect_redirect,post_status]
This function checks that the user's response is redirected to the following url. Expects the message to be rejected.
Line is too long. [81/80]
@@ -63,3 +63,18 @@ def mkdir_p(path): return path else: raise + + +def mkdir_p_sudo_safe(path): + if isdir(path): + return + base_dir = dirname(path) + if not isdir(base_dir): + mkdir_p_sudo_safe(base_dir) + log.trace('making directory %s', path) + mkdir(path) + if not on_win and os.environ.get('SUDO_UID') is not None: + uid = int(os.environ['SUDO_UID']) + gid = int(os.environ.get('SUDO_GID', -1)) + log.trace("chowning %s:%s %s", uid, gid, path) + os.chown(path, uid, gid)
[exp_backoff_fn->[fn,basename,sleep,pop,trace,random,_getframe,repr,warn,range],mkdir_p->[makedirs,isdir,trace],getLogger]
Create a directory and check if it exists.
What if `mkdir()` throws an exception? Partition ran out of inodes or something ...
@@ -35,7 +35,7 @@ namespace Microsoft.Xna.Framework.Graphics public bool SupportsBlitFramebuffer { get; private set; } #if IOS - internal const string OpenGLLibrary = MonoTouch.Constants.OpenGLESLibrary; + internal const string OpenGLLibrary = "/System/Library/Frameworks/OpenGLES.framework/OpenGLES"; #elif ANDROID internal const string OpenGLLibrary = "libGLESv2.dll"; #endif
[GraphicsDevice->[FramebufferHelper->[GLBlitFramebufferApple->[GLResolveMultisampleFramebufferApple],BindRenderbuffer->[BindRenderbuffer],BindFramebuffer->[BindFramebuffer],BindReadFramebuffer->[BindFramebuffer],FramebufferRenderbuffer->[FramebufferRenderbuffer],GenerateMipmap->[GenerateMipmap,GLGenerateMipmapExt],CheckFramebufferStatus->[CheckFramebufferStatus],InvalidateDrawFramebuffer->[GLDiscardFramebufferExt],BlitFramebuffer->[GLBlitFramebufferExt,BlitFramebuffer],RenderbufferStorageMultisample->[GLRenderbufferStorageMultisampleExt,RenderbufferStorageMultisample],InvalidateReadFramebuffer->[GLDiscardFramebufferExt],FramebufferTexture2D->[FramebufferTexture2D]],FramebufferHelperEXT->[BindFramebuffer->[BindFramebuffer],BindRenderbuffer->[BindRenderbuffer],BindReadFramebuffer->[BindFramebuffer],FramebufferRenderbuffer->[FramebufferRenderbuffer],GenerateMipmap->[GenerateMipmap],FramebufferTexture2D->[FramebufferTexture2D],CheckFramebufferStatus->[CheckFramebufferStatus],BlitFramebuffer->[BlitFramebuffer],RenderbufferStorageMultisample->[RenderbufferStorageMultisample]]]]
region GraphicsDevice Class for all framebuffer objects that can be discarded by GL. Call this to blit the framebuffer with the specified samples.
@dellis1972 is there a constant in xamarin.ios that matches the OpenGLESLibrary constant in MonoTouch? It would be smarter to use that here.
@@ -119,7 +119,6 @@ public class LazyMuleArtifactContext extends MuleArtifactContext muleContext.getCustomizationService().registerCustomServiceClass(NON_LAZY_VALUE_PROVIDER_SERVICE, MuleValueProviderService.class); - muleContext.getCustomizationService().overrideDefaultServiceImpl(LAZY_COMPONENT_INITIALIZER_SERVICE_KEY, this); }
[LazyMuleArtifactContext->[initializeComponents->[applyLifecycle,initializeComponents],applyLifecycle->[applyLifecycle],prepareBeanFactory->[prepareBeanFactory],initializeComponent->[applyLifecycle,initializeComponent],getParentComponentModelInitializerAdapter->[initializeComponents],createComponents->[initializeComponents]]]
Extend artifact properties.
Just remove this change as it is not modifying the code
@@ -58,3 +58,7 @@ func GetDuplicateResourceAliasError(urn resource.URN) *Diag { "Duplicate resource alias '%v' applied to resource with URN '%v' conflicting with resource with URN '%v'", ) } + +func GetResourceToRefreshCouldNotBeFoundError() *Diag { + return newError("", 2010, "Resource to refresh (%v) could not be found in the stack.") +} \ No newline at end of file
[No CFG could be retrieved]
Duplicate resource alias applied to a resource with URN conflicting with a resource with URN.
@lukehoban for wordsmithing ideas. We could consider looking to see if there's no `$` in the urn and giving an explicit message about that.
@@ -478,8 +478,14 @@ public class KsqlEngine implements Closeable { return true; } - public Map<QueryId, PersistentQueryMetadata> getPersistentQueries() { - return new HashMap<>(persistentQueries); + public PersistentQueryMetadata getPersistentQuery(QueryId queryId) { + return persistentQueries.get(queryId); + } + + public Collection<PersistentQueryMetadata> getPersistentQueries() { + return Collections.unmodifiableList( + new ArrayList<>( + persistentQueries.values())); } public static List<String> getImmutableProperties() {
[KsqlEngine->[close->[close],createQueries->[parseQueries,planQueries]]]
This method is called when a query is terminated.
Any reason not to return `List<PQM>` so users can benefit from richer API?
@@ -214,6 +214,10 @@ func (ctx *APIContext) RequireCSRF() { // CheckForOTP validates OTP func (ctx *APIContext) CheckForOTP() { + if skip, ok := ctx.Data["SkipLocalTwoFA"]; ok && skip.(bool) { + return // Skip 2FA + } + otpHeader := ctx.Req.Header.Get("X-Gitea-OTP") twofa, err := models.GetTwoFactorByUID(ctx.Context.User.ID) if err != nil {
[InternalServerError->[Error],NotFound->[Error],RequireCSRF->[Error],CheckForOTP->[Error],Error->[Error],InternalServerError,NotFound,Error]
CheckForOTP checks if the user has a valid 2FA OTP.
Why do we need this for the API?!
@@ -76,7 +76,10 @@ namespace System.Net.Http private byte[]? _http2AltSvcOriginUri; internal readonly byte[]? _http2EncodedAuthorityHostHeader; - private readonly bool _http3Enabled; + [SupportedOSPlatformGuard("linux")] + [SupportedOSPlatformGuard("macOS")] + [SupportedOSPlatformGuard("Windows")] + private readonly bool _http3Enabled = (OperatingSystem.IsLinux() && !OperatingSystem.IsAndroid()) || OperatingSystem.IsWindows() || OperatingSystem.IsMacOS(); private Http3Connection? _http3Connection; private SemaphoreSlim? _http3ConnectionCreateLock; internal readonly byte[]? _http3EncodedAuthorityHostHeader;
[No CFG could be retrieved]
Private variables for the daemon-level configuration.
I still think the condition logic should be in IsHttp3Supported method so it does not need to be duplicated in the file
@@ -306,6 +306,11 @@ filter_length : str | int * **int**: Specified length in samples. For fir_design="firwin", this should not be used. """ +docdict['filter_length_notch'] = docdict['filter_length'] + """ + When ``method=='spectrum_fit'``, this sets the effective window duration + over which fits are computed. The default in 0.21 is None, but this will + change to 10. in 0.22. +""" docdict['l_trans_bandwidth'] = """ l_trans_bandwidth : float | str Width of the transition band at the low cut-off frequency in Hz
[copy_doc->[wrapper->[ValueError,len]],copy_function_doc_to_method_doc->[wrapper->[split,lstrip,len,strip,ValueError,join,enumerate]],linkcode_resolve->[relpath,split,hasattr,getattr,dirname,getsourcefile,get,len,join,normpath,getsourcelines],deprecated->[__call__->[_decorate_class,isinstance,_decorate_fun],_decorate_class->[deprecation_wrapped->[init,warn],_update_doc],_update_doc->[split,lstrip,len,strip,enumerate],_decorate_fun->[deprecation_wrapped->[warn,fun],_update_doc]],copy_base_doc_to_subclass_doc->[dir,callable,mro,getattr],open_docs->[open_new_tab,keys,dict,sorted,_check_option,get_config],deprecated_alias->[split,int,deepcopy,currentframe,deprecated,str,join],dict,filterwarnings,unindent_dict,filldoc,docdict]
Documentation-dictionary entry for the notch-filter length; only applies to FIR and linear-phase FIR filters.
Say it's in seconds
@@ -27,6 +27,7 @@ class Gpdb(AutotoolsPackage): version('5.23.0', sha256='b06a797eb941362d5473b84d5def349b5ce12ce87ab116bea7c74ad193738ae9') depends_on('zstd') + depends_on('py-setuptools@:44.99.99') depends_on('apr') depends_on('libevent') depends_on('curl')
[Gpdb->[depends_on,version]]
Create a list of arguments for the command line interface.
I don't really like this, because it should be up to the concretizer to know that this is necessary. But since the concretizer is currently broken, I'll allow it for now.
@@ -4882,6 +4882,17 @@ p { ), 'timeout' => $timeout, ); + + list( $activation_source_name, $activation_source_keyword ) = get_option( 'jetpack_activation_source', false ); + + if ( $activation_source_name ) { + $args['_as'] = urlencode( $activation_source_name ); + } + + if ( $activation_source_keyword ) { + $args['_ak'] = urlencode( $activation_source_keyword ); + } + $response = Jetpack_Client::_wp_remote_request( Jetpack::fix_url_for_bad_hosts( Jetpack::api_url( 'register' ) ), $args, true ); // Make sure the response is valid and does not contain any Jetpack errors
[Jetpack->[verify_json_api_authorization_request->[add_nonce],get_locale->[guess_locale_from_lang],admin_notices->[opt_in_jetpack_manage_notice,can_display_jetpack_manage_notice],authenticate_jetpack->[verify_xml_rpc_signature],admin_page_load->[disconnect,unlink_user,can_display_jetpack_manage_notice],wp_rest_authenticate->[verify_xml_rpc_signature],jumpstart_has_updated_module_option->[do_stats,stat],jetpack_getOptions->[get_connected_user_data],build_connect_url->[build_connect_url],opt_in_jetpack_manage_notice->[opt_in_jetpack_manage_url],display_activate_module_link->[opt_in_jetpack_manage_url],register->[do_stats,stat,get_remote_query_timeout_limit,validate_remote_register_response]]]
Registers a Jetpack site with WordPress.com and fires the registration hook when the site is registered.
These three lines (ok, 7, if we count the wraps :p) could probably be extracted into a function since they're repeated 3 times :p
@@ -168,14 +168,14 @@ func resourceAwsRoute53ResolverEndpointRead(d *schema.ResourceData, meta interfa return fmt.Errorf("error getting Route53 Resolver endpoint (%s) IP addresses: %s", d.Id(), err) } - ipAddresses = append(ipAddresses, flattenRoute53ResolverIpAddresses(resp.IpAddresses)...) + ipAddresses = append(ipAddresses, flattenRoute53ResolverEndpointIpAddresses(resp.IpAddresses)...) if resp.NextToken == nil { break } req.NextToken = resp.NextToken } - if err := d.Set("ip_address", schema.NewSet(route53ResolverHashIPAddress, ipAddresses)); err != nil { + if err := d.Set("ip_address", schema.NewSet(route53ResolverEndpointHashIpAddress, ipAddresses)); err != nil { return err }
[Difference,GetChange,NewSet,SetPartial,StringInSlice,SingleIP,Partial,Set,AssociateResolverEndpointIpAddress,UpdateResolverEndpoint,GetOk,HasChange,Errorf,ListResolverEndpointIpAddresses,SetId,GetResolverEndpoint,Timeout,PrefixedUniqueId,Id,Get,DeleteResolverEndpoint,Printf,DefaultTimeout,StringValue,CreateResolverEndpoint,Sprintf,List,WaitForState,String,WriteString,DisassociateResolverEndpointIpAddress]
Resolves a Route53 Resolver endpoint by ID and updates the Route53 Resolver endpoint resource.
Renamed as the function applies only to `aws_route53_resolver_endpoint`.
@@ -112,13 +112,15 @@ function isSrcdocSupported() { * @param {!Element} container * @param {!FriendlyIframeSpec} spec * @param {function(!Window, ?./service/ampdoc-impl.AmpDoc=)=} opt_preinstallCallback + * @param {boolean=} isInUnsignedExp * @return {!Promise<!FriendlyIframeEmbed>} */ export function installFriendlyIframeEmbed( iframe, container, spec, - opt_preinstallCallback // TODO(#22733): remove "window" argument. + opt_preinstallCallback, // TODO(#22733): remove "window" argument. + isInUnsignedExp ) { /** @const {!Window} */ const win = getTopWindow(toWin(iframe.ownerDocument.defaultView));
[No CFG could be retrieved]
Creates a new Friendly Iframe embed. Merges the given extension spec and the HTML into a single extension.
how about control the exp outside this function by passing in a `spec` with `spec.html=null`
@@ -343,7 +343,10 @@ class PermissionGroupDelete(ModelDeleteMutation): @classmethod def clean_instance(cls, info, instance): - if not can_user_manage_group(info.context.user, instance): + user = info.context.user + if user.is_superuser: + return + if not can_user_manage_group(user, instance): error_msg = "You can't manage group with permissions out of your scope." code = PermissionGroupErrorCode.OUT_OF_SCOPE_PERMISSION.value raise ValidationError(error_msg, code)
[PermissionGroupCreate->[Arguments->[PermissionGroupCreateInput]],PermissionGroupUpdate->[check_if_removing_user_last_group->[update_errors],clean_input->[clean_permissions],Arguments->[PermissionGroupUpdateInput],clean_users->[check_if_users_are_staff,can_manage_users],check_for_duplicates->[update_errors]]]
Checks if the group can be removed.
If you're using `requestor` everywhere else, we should use this name here as well for consistency.
@@ -262,6 +262,8 @@ public class FnHarness { } }); + MetricsEnvironment.setProcessWideContainer(new MetricsContainerImpl(null)); + ProcessBundleHandler processBundleHandler = new ProcessBundleHandler( options,
[FnHarness->[main->[getApiServiceDescriptor,main]]]
Main entry point for the BeamFn client, invoked by the JVM; loads the process bundle descriptor and runs the SDK's main processing loop.
Given that we don't hold onto a reference for this, why don't we have MetricsEnvironment create (if possibly lazily) the process-wide container?
@@ -213,12 +213,12 @@ int main(int argc, char *argv[]) if(config->agent_specific.common.show_classes) { - ShowContextsFormatted(ctx); + ShowContextsFormatted(ctx, config->agent_specific.common.show_classes); } if(config->agent_specific.common.show_variables) { - ShowVariablesFormatted(ctx); + ShowVariablesFormatted(ctx, config->agent_specific.common.show_variables); } PolicyDestroy(policy);
[main->[GenericAgentConfigApply,EvalContextNew,GetInputDir,SetupSignalsForAgent,PolicyToString,LoadPolicy,PolicyDestroy,PolicyToJson,Cf3ParseFile,GenericAgentFinalize,GenericAgentTagReleaseDirectory,GenericAgentPostLoadInit,WriterClose,GenericAgentConfigSetInputFile,GenericAgentDiscoverContext,ShowContextsFormatted,ShowVariablesFormatted,Log,JsonWrite,CheckOpts,FileWriter,exit,JsonDestroy],CheckOpts->[LoggingEnableTimestamps,GetInputDir,getopt_long,RlistDestroy,FileWriterDetach,xstrdup,LogSetGlobalLevel,GenericAgentWriteVersion,GenericAgentConfigSetInputFile,RlistFromSplitString,time,StringSetJoin,putenv,SyntaxToJson,StringSetFromString,StringConcatenate,WriterWriteHelp,strcmp,GetTTYInteractive,Log,GenericAgentConfigParseArguments,LogEnableModulesFromString,ManPageWrite,GenericAgentConfigParseWarningOptions,JsonWrite,free,GenericAgentConfigSetBundleSequence,GenericAgentConfigParseColor,GenericAgentConfigNewDefault,FileWriter,exit,JsonDestroy],void->[SeqSort,EvalContextVariableTags,printf,ClassTableIteratorNext,RvalWrite,StringSetToBuffer,ClassTableIteratorDestroy,EvalContextVariableTableIteratorNew,WriterClose,StringWriter,JsonWriteCompact,StringIsPrintable,SeqDestroy,SeqAt,xasprintf,VariableTableIteratorNext,SeqAppend,VarRefToString,EvalContextClassTableIteratorNewGlobal,VariableTableIteratorDestroy,DataTypeToRvalType,SeqNew,EvalContextClassTags,RvalContainerValue,free,BufferData,SeqLength,BufferDestroy,StringWriterData,ClassRefToString]]
Entry point for the generic agent policy tool: it parses the input file, checks that the policy is in the correct format, and writes it to stdout.
This is no longer a bool, so an explicit comparison to `NULL` would be preferred.
@@ -1047,7 +1047,7 @@ def read_meas_info(fid, tree, clean_bads=False, verbose=None): meas : dict Node in tree that contains the info. """ - # Find the desired blocks + # Find the desired blocks meas = dir_tree_find(tree, FIFF.FIFFB_MEAS) if len(meas) == 0: raise ValueError('Could not find measurement data')
[write_info->[write_meas_info],write_meas_info->[_check_dates,_rename_comps,_check_consistency],create_info->[_update_redundant,_check_consistency],anonymize_info->[_add_timedelta_to_stamp,_check_dates,_check_consistency],_ensure_infos_match->[_check_consistency],_empty_info->[Info,_update_redundant,_check_consistency],Info->[__deepcopy__->[copy],__init__->[_format_trans],_repr_html_->[_get_chs_for_repr],_check_consistency->[_unique_channel_names,_check_ch_keys],pick_channels->[pick_channels]],_simplify_info->[Info,_update_redundant],read_meas_info->[_read_bad_channels,Info,_check_consistency,_update_redundant],_merge_info_values->[_check_isinstance,_flatten,_where_isinstance],_merge_info->[Info,_update_redundant,_merge_info_values,_check_consistency],_write_ch_infos->[copy],_make_ch_names_mapping->[copy,_unique_channel_names]]
Read the measurement info: reads successive tags from the file descriptor via read_tag and returns all measurement data as a dict.
This is another one where we can just do `info._unlocked = True` at the beginning and then `info._unlocked = False` at the end. Since the object is actually being created here (rather than modified), this pattern is still pretty understandable, and then you don't need a bunch of context managers in here
@@ -13,6 +13,7 @@ */ package io.prestosql.plugin.hive.metastore.cache; +import com.google.common.base.Supplier; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache;
[CachingHiveMetastore->[dropRole->[dropRole],dropColumn->[dropColumn],acquireSharedReadLock->[acquireSharedReadLock],listTablePrivileges->[get],grantTablePrivileges->[grantTablePrivileges],loadTableColumnStatistics->[getExistingTable,getTableStatistics],updatePartitionStatistics->[updatePartitionStatistics],getAllViews->[get],updateTableStatistics->[updateTableStatistics],loadRoles->[listRoles],listGrantedPrincipals->[get],dropDatabase->[dropDatabase],sendTransactionHeartbeat->[sendTransactionHeartbeat],WithIdentity->[toString->[toString],equals->[equals]],renameDatabase->[renameDatabase],getPartitionsByNames->[get,getAll],setDatabaseOwner->[setDatabaseOwner],loadAllDatabases->[getAllDatabases],commentColumn->[commentColumn],memoizeMetastore->[CachingHiveMetastore],getTablesWithParameter->[get],listRoles->[get],revokeRoles->[revokeRoles],isImpersonationEnabled->[isImpersonationEnabled],commentTable->[commentTable],renameTable->[renameTable],replaceTable->[replaceTable],alterPartition->[alterPartition],loadAllTables->[getAllTables],createDatabase->[createDatabase],loadPartitionNamesByFilter->[getPartitionNamesByFilter],loadAllViews->[getAllViews],getDatabase->[get],dropPartition->[dropPartition],loadTablesMatchingParameter->[getTablesWithParameter],loadPartitionByName->[getPartition],grantRoles->[grantRoles],loadTable->[getTable],loadPrincipals->[listGrantedPrincipals],dropTable->[dropTable],getValidWriteIds->[getValidWriteIds],commitTransaction->[commitTransaction],getAllDatabases->[get],createTable->[createTable],renameColumn->[renameColumn],revokeTablePrivileges->[revokeTablePrivileges],getPartition->[get],getTable->[get],createRole->[createRole],cachingHiveMetastore->[cachingHiveMetastore,CachingHiveMetastore],getAll->[getAll],loadTablePrivileges->[listTablePrivileges],loadPartitionColumnStatistics->[getExistingTable,getPartitionStatistics,get],loadRoleGrants->[listRoleGrants],getConfigValue->[get],addPartitions->[addPartitions],loadPartitionsByNames->[getTable,g
et,getPartitionsByNames],loadConfigValue->[getConfigValue],getSupportedColumnStatistics->[getSupportedColumnStatistics],updateIdentity->[isImpersonationEnabled],addColumn->[addColumn],getPartitionStatistics->[get,getAll],getAllTables->[get],loadDatabase->[getDatabase],openTransaction->[openTransaction],listRoleGrants->[get],getTableStatistics->[get],getPartitionNamesByFilter->[get]]]
Imports a single object. Imports all metastore objects.
WRT to commit message. It looks like the refactor is not the most important change here. The most important part is that you added missing invalidate for caches. And refactor is only about preventing this to happen in future again. Please consider writing better commit message.
@@ -148,7 +148,7 @@ def collection_listing(request, base=None): filter = get_filter(request, base) # Counts are hard to cache automatically, and accuracy for this # one is less important. Remember it for 5 minutes. - countkey = hashlib.md5(str(filter.qs.query) + '_count').hexdigest() + countkey = hashlib.sha256(str(filter.qs.query) + '_count').hexdigest() count = cache.get(countkey) if count is None: count = filter.qs.count()
[edit_contributors->[collection_message],delete_icon->[delete_icon],user_listing->[get_filter,get_votes,render_cat],ajax_collection_alter->[change_addon],edit->[initial_data_from_request,get_notes,collection_message,get_filter,render_cat],collection_detail_json->[get_collection],edit_addons->[collection_message],collection_detail->[get_collection,CollectionAddonFilter,render_cat],mine->[user_listing,collection_detail],collection_listing->[get_filter,render_cat],add->[initial_data_from_request,render_cat,get_filter,collection_message],watch->[get_collection,delete],following->[get_filter,get_votes,render_cat],delete->[delete,render_cat],owner_required->[decorator->[wrapper->[get_collection]],decorator],get_filter->[CollectionFilter],collection_vote->[get_collection],collection_alter->[get_collection],owner_required]
List all collections.
Don't change it but this feels pretty stupid since cache-machine already caches counts for 5 minutes...
@@ -175,7 +175,11 @@ def get_or_create_user_cart(user, cart_queryset=Cart.objects.all()): defaults = { 'shipping_address': user.default_shipping_address, 'billing_address': user.default_billing_address} - return cart_queryset.get_or_create(user=user, defaults=defaults)[0] + + cart = cart_queryset.filter(user=user).first() + if cart is None: + cart = Cart.objects.create(user=user, **defaults) + return cart def get_anonymous_cart_from_token(token, cart_queryset=Cart.objects.all()):
[change_cart_user->[find_open_cart_for_user],get_or_create_cart_from_request->[get_or_create_user_cart,get_or_create_anonymous_cart_from_token],add_variant_to_cart->[update_cart_quantity],check_product_availability_and_warn->[contains_unavailable_variants,remove_unavailable_variants],recalculate_cart_discount->[get_voucher_for_cart,get_voucher_discount_for_cart],get_prices_of_products_in_discounted_categories->[get_variant_prices_from_lines],update_billing_address_in_anonymous_cart->[get_anonymous_summary_without_shipping_forms],update_billing_address_in_cart->[get_summary_without_shipping_forms],get_voucher_discount_for_cart->[_get_shipping_voucher_discount_for_cart,_get_products_voucher_discount],get_prices_of_discounted_products->[get_variant_prices_from_lines],get_prices_of_products_in_discounted_collections->[get_variant_prices_from_lines],change_billing_address_in_cart->[_check_new_cart_address],_get_products_voucher_discount->[get_prices_of_products_in_discounted_categories,get_prices_of_discounted_products,get_prices_of_products_in_discounted_collections],get_or_create_db_cart->[get_cart->[func->[set_cart_cookie,get_or_create_cart_from_request]]],get_cart_from_request->[get_anonymous_cart_from_token,get_user_cart],_process_voucher_data_for_order->[get_voucher_for_cart],ready_to_place_order->[is_valid_shipping_method,is_fully_paid],update_shipping_address_in_cart->[get_shipping_address_forms],find_and_assign_anonymous_cart->[get_cart->[func->[token_is_valid]]],change_shipping_address_in_cart->[_check_new_cart_address],create_order->[_fill_order_with_cart_data,_process_voucher_data_for_order,_process_shipping_data_for_order,_process_user_data_for_order],update_billing_address_in_cart_with_shipping->[get_billing_forms_with_shipping],get_or_empty_db_cart->[get_cart->[func->[get_cart_from_request]]]]
Get or create a cart for a given user.
Wouldn't we achieve the same with passing `user` to `defaults` in the original implementation?
@@ -715,7 +715,16 @@ namespace Microsoft.Xna.Framework result.M43 = (nearPlaneDistance * farPlaneDistance) / (nearPlaneDistance - farPlaneDistance); } - + /// <summary> + /// Creates a new projection <see cref="Matrix"/> for customized perspective view. + /// </summary> + /// <param name="left">Left coordinate of the viewing field.</param> + /// <param name="right">Right coordinate of the viewing field.</param> + /// <param name="bottom">Bottom coordinate of the viewing field.</param> + /// <param name="top">Top coordinate of the viewing field.</param> + /// <param name="nearPlaneDistance">Near plane of the viewing field.</param> + /// <param name="farPlaneDistance">Far plane of the viewing field.</param> + /// <returns>The new <see cref="Matrix"/> for customized perspective view.</returns> public static Matrix CreatePerspectiveOffCenter(float left, float right, float bottom, float top, float nearPlaneDistance, float farPlaneDistance) { Matrix result;
[Matrix->[GetHashCode->[GetHashCode],Equals->[Equals],CreateFromYawPitchRoll->[CreateFromQuaternion,CreateFromYawPitchRoll],CreateScale->[CreateScale],Add]]
Creates a customized perspective projection matrix defined by left/right/bottom/top coordinates and near/far plane distances.
The `of the viewing field` repetition here bothers me.... it feels like filler and we need more useful information. @KonajuGames ?