patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -670,6 +670,10 @@ class KillPublishService(BasePublishService): super().__init__(datasource=datasource, backend=backend) def on_update(self, updates, original): + # check if we are trying to kill and item that is contained in normal non takes package + if is_item_in_package(original): + raise ValidationError(['This item is in a package' + + ' it needs to be removed before the item can be killed']) updates[ITEM_OPERATION] = ITEM_KILL super().on_update(updates, original) self.takes_package_service.process_killed_takes_package(original)
[ArchivePublishService->[get_subscribers->[_get_subscribers_for_previously_sent_items,filter_subscribers]],CorrectPublishService->[get_subscribers->[_get_subscribers_for_previously_sent_items,filter_subscribers]],KillPublishService->[get_subscribers->[_get_subscribers_for_previously_sent_items],_publish_kill_for_takes->[_update_archive,_set_version_last_modified_and_state,publish,update_published_collection]],BasePublishService->[_publish_package_items->[_publish_package_items],update->[update],queue_transmission->[set_state],_update_archive->[update],_publish_takes_package->[update],process_takes->[update],publish->[update]]]
When an item is killed kill the item.
Why this is missed out from being `SuperdeskApiError.badRequestError()`?
@@ -131,12 +131,17 @@ public class KeyAffinityServiceImpl<K> implements KeyAffinityService<K> { } finally { maxNumberInvariant.readLock().unlock(); } + try{ + Thread.currentThread().sleep(POOL_INTERVAL); + }catch(InterruptedException e){ + e.printStackTrace(); + } } existingKeyCount.decrementAndGet(); log.tracef("Returning key %s for address %s", result, address); return result; } finally { - if (queue.size() < bufferSize * THRESHOLD + 1) { + if (queue != null && queue.size() < bufferSize * THRESHOLD + 1) { keyProducerStartLatch.open(); } }
[KeyAffinityServiceImpl->[handleCacheStopped->[stop],stop->[stop],getDistributionManager->[getDistributionManager],isKeyGeneratorThreadActive->[isActive]]]
Returns the key associated with the given address.
I think we could poll the queue here instead of waiting for a fixed amount of time: `queue.poll(POOL_INTERVAL, TimeUnit.MILLISECONDS);`
@@ -0,0 +1,8 @@ +env = Figaro.env +ttl = env.service_provider_request_ttl_hours || DEFAULT_TTL_HOURS +REDIS_POOL = ConnectionPool.new(size: 10) do + Readthis::Cache.new( + expires_in: ttl.to_i.hours.to_i, + redis: { url: env.redis_throttle_url, driver: :hiredis }, + ) +end
[No CFG could be retrieved]
No Summary Found.
This always uses the secondary redis for these requests...is that intentional?
@@ -132,8 +132,7 @@ function getListenForEvents(parentWin, sentinel, origin, triggerWin) { if (!contentWindow) { setTimeout(dropListenSentinel, 0, listenSentinel); } else if (sentinel === 'amp') { - // A non-3P code path, origin must match. - if (we.origin === origin && contentWindow == triggerWin) { + if (contentWindow == triggerWin) { windowEvents = we; break; }
[No CFG could be retrieved]
Returns a mapping of event names to listenFor listeners. Checks if a window is a descendant of another window.
Thanks @samjacoby! @dvoytenko @lannka: When a message comes from `srcdoc`, the `origin` is `null` by design. Do you remember why we check origin of message in non-3P case in addition to contentWindow? The 3P case never checks origin. I am hoping this is safe to remove. Thoughts? (Maybe a safer solution is checking whether origin is `null OR equal`)
@@ -46,6 +46,9 @@ def is_parameter(var): def is_persistable(var): + if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ + var.desc.type() == core.VarDesc.VarType.FETCH_LIST: + return False return var.persistable
[get_parameter_value_by_name->[get_parameter_value],save_persistables->[save_vars],load_persistables_if_exist->[_is_presistable_and_exist_->[is_persistable],load_vars],load_vars->[_clone_var_in_block_,load_vars],load_inference_model->[get_fetch_targets_names,load_persistables_if_exist,get_feed_targets_names],load_persistables->[load_vars],save_vars->[save_vars,_clone_var_in_block_],save_params->[save_vars],save_inference_model->[append_fetch_ops,prepend_feed_ops,save_params],load_params->[load_vars],get_parameter_value->[_clone_var_in_block_,is_parameter]]
Returns True if the variable is persistable.
@Xreki I have changed the code and go with option 3 using your suggestion. For option 2, there is problem. Because in the python side of the code, the operator `op` field of var will only be associated with the operator that have this variable as its output. So for feed variable, since it is not the output of any operator. Its `op` data member will be `None`.
@@ -272,9 +272,11 @@ public abstract class AbstractCorrelatingMessageHandler extends AbstractMessageH } public void setDiscardChannelName(String discardChannelName) { + Assert.hasText(discardChannelName, "'discardChannelName' must not be empty"); this.discardChannelName = discardChannelName; } + @Override public void setSendTimeout(long sendTimeout) { this.messagingTemplate.setSendTimeout(sendTimeout); }
[AbstractCorrelatingMessageHandler->[completeGroup->[completeGroup],onInit->[onInit],setTaskScheduler->[setTaskScheduler],forceComplete->[afterRelease],setSendTimeout->[setSendTimeout]]]
Set the name of the channel to discard.
Redundand method: super class does the same. So, just remove it from here
@@ -155,14 +155,14 @@ describe GradeEntryFormsController do if grade_entry_form.show_total csv_array = [ - ['', grade_entry_form_with_data.grade_entry_items[0].name,I18n.t('grade_entry_forms.grades.total')], - [I18n.t('grade_entry_forms.column_out_of'), String(grade_entry_form_with_data.grade_entry_items[0].out_of)], + ['', grade_entry_form_with_data.grade_entry_items[0].name, GradeEntryForm.human_attribute_name(:total)], + [GradeEntryItem.human_attribute_name(:out_of), String(grade_entry_form_with_data.grade_entry_items[0].out_of)], [@user.user_name, '', ''], ] else csv_array = [ ['', grade_entry_form_with_data.grade_entry_items[0].name], - [I18n.t('grade_entry_forms.column_out_of'), String(grade_entry_form_with_data.grade_entry_items[0].out_of)], + [GradeEntryItem.human_attribute_name(:out_of), String(grade_entry_form_with_data.grade_entry_items[0].out_of)], [@user.user_name, ''], ] end
[create,let,to_not,describe,fixture_file_upload,first,it,name,to,before,post,xit,second,with,t,require,out_of,generate,show_total,puts,redirect_to,short_identifier,context,user_name,build,grades_grade_entry_form_path,get,eq,head,and_return]
tests that action csv_downloads returns OK parse header object to check for the right disposition returns attachment as attachment.
Metrics/LineLength: Line is too long. [121/120]
@@ -121,6 +121,7 @@ public class V20161125142400_EmailAlarmCallbackMigration extends Migration { final AlarmCallbackConfiguration alarmCallbackConfiguration = alarmCallbackService.create(stream.getId(), CreateAlarmCallbackRequest.create( EmailAlarmCallback.class.getCanonicalName(), + "Email Alert Callback", defaultConfig ), localAdminUser.getId()
[V20161125142400_EmailAlarmCallbackMigration->[migrateStream->[getCanonicalName,create,getDefaultEmailAlarmCallbackConfig,debug,of,error,empty,getId,save],getDefaultEmailAlarmCallbackConfig->[getRequestedConfiguration,toMap,getDefaultValue,collect],MigrationCompleted->[create->[AutoValue_V20161125142400_EmailAlarmCallbackMigration_MigrationCompleted]],hasAlertReceivers->[isEmpty,get,getAlertReceivers],upgrade->[create,toMap,write,size,error,get,collect,info,allMatch,count],createdAt->[parse],getLogger,getAdminUser]]
Create a new email alarm callback for the given stream.
Shouldn't this be "Email Alert Notification"?
@@ -2339,6 +2339,7 @@ namespace DotNetNuke.Entities.Urls private static bool IgnoreRequestForWebServer(string requestedPath) { if (requestedPath.ToLowerInvariant().IndexOf("synchronizecache.aspx", StringComparison.Ordinal) > 1 + || requestedPath.ToLowerInvariant().IndexOf("synchronizeoutputcache.aspx", StringComparison.Ordinal) > 1 || requestedPath.EndsWith("keepalive.aspx", true, CultureInfo.InvariantCulture)) { return true;
[AdvancedUrlRewriter->[IgnoreRequest->[IgnoreRequestForWebServer,IgnoreRequestForInstall],RewriteAsChildAliasRoot->[RewriteUrl],IdentifyPortalAlias->[RedirectPortalAlias,ConfigurePortalAliasRedirect,IsPortalAliasIncorrect],Handle404OrException->[ShowDebugData],ConfigurePortalAliasRedirect->[CheckIfAliasIsCustomTabAlias,ConfigurePortalAliasRedirect],MakeUrlWithAlias->[MakeUrlWithAlias],IgnoreRequestForInstall->[IgnoreRequestForInstall],ProcessRequest->[RewriteUrl]]]
Returns true if the request should be ignored for a web server and false if it should be.
Let's put the `synchronizeoutputcache.aspx` as a static readonly field of `MemoryCacheSynchonizationHandler`
@@ -111,8 +111,9 @@ class ExceptionSink: # to properly setup global state. _log_dir = None # We need an exiter in order to know what to do after we log a fatal exception or handle a - # catchable signal. - _exiter: Optional[Exiter] = None + # catchable signal. We default to `sys.exit` to configure the behavior for import time, but expect + # this to changed for later in the Pants bootstrap process. + _exiter: Exiter = Exiter(exiter=sys.exit) # Where to log stacktraces to in a SIGUSR2 handler. _interactive_output_stream = None # Whether to print a stacktrace in any fatal error message printed to the terminal.
[ExceptionSink->[ignoring_sigint->[_ignoring_sigint],toggle_ignoring_sigint_v2_engine->[_toggle_ignoring_sigint_v2_engine],_log_unhandled_exception_and_exit->[log_exception,_exit_with_failure,_format_unhandled_exception_log],_handle_signal_gracefully->[log_exception,_format_traceback,_exit_with_failure],trapped_signals->[reset_signal_handler],_format_exception_message->[_iso_timestamp_for_now],_exit_with_failure->[_iso_timestamp_for_now],_check_or_create_new_destination->[ExceptionSinkError],_recapture_fatal_error_log_streams->[ExceptionSinkError,exceptions_log_path],_format_unhandled_exception_log->[_format_traceback]],SignalHandler->[handle_sigterm->[SignalHandledNonLocalExit],_ignoring_sigint->[_check_sigint_gate_is_correct],handle_sigquit->[SignalHandledNonLocalExit],_handle_sigint_if_enabled->[_check_sigint_gate_is_correct]],SignalHandler,_reset_exiter,reset_should_print_backtrace_to_terminal,reset_log_location,reset_interactive_output_stream,reset_signal_handler]
The signal handler to return. Set whether a backtrace gets printed to the terminal error stream on a fatal error.
@cosmicexplorer can you please sanity check that it is safe to move the default from line 546 to this line? It greatly simplifies the type hints for all this `ExceptionSink/Exiter` related code because `ExceptionSink._exiter` no longer needs to be `Optional`.
@@ -141,13 +141,14 @@ public abstract class BaseFlinkCommitActionExecutor<T extends HoodieRecordPayloa result.setWriteStats(writeStats); // Finalize write finalizeWrite(instantTime, writeStats, result); - syncTableMetadata(); try { LOG.info("Committing " + instantTime + ", action Type " + getCommitActionType()); HoodieActiveTimeline activeTimeline = table.getActiveTimeline(); HoodieCommitMetadata metadata = CommitUtils.buildMetadata(writeStats, result.getPartitionToReplaceFileIds(), extraMetadata, operationType, getSchemaToStoreInCommit(), getCommitActionType()); + syncTableMetadata(metadata); + activeTimeline.saveAsComplete(new HoodieInstant(true, getCommitActionType(), instantTime), Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8))); LOG.info("Committed " + instantTime);
[BaseFlinkCommitActionExecutor->[commit->[commit,getCommitActionType],getCommitActionType->[getCommitActionType]]]
Commit the current write.
here means we take syncing to metadata table into a commit. more reasonable than making sync table metadata in `postCommit`
@@ -369,7 +369,7 @@ class Sessions(WindowFn): to_merge = [] for w in sorted(merge_context.windows, key=lambda w: w.start): if to_merge: - if end > w.start: + if end > w.start: # pylint: disable=used-before-assignment to_merge.append(w) if w.end > end: end = w.end
[FixedWindows->[assign->[IntervalWindow]],WindowedValue->[with_value->[WindowedValue]],IntervalWindow->[union->[IntervalWindow]],SlidingWindows->[assign->[IntervalWindow]],GlobalWindows->[get_window_coder->[GlobalWindow],windowed_value->[WindowedValue,GlobalWindow],assign->[GlobalWindow]],Sessions->[merge->[merge,IntervalWindow],assign->[IntervalWindow]]]
Merge this window with another window.
Here, explicitly assigning `end` to some idempotent element would improve readability and remove the need for the pylint suppression. Am I missing something special?
@@ -156,8 +156,6 @@ class AzfsResourceId implements ResourceId { @Override public ResourceId resolve(String other, ResolveOptions resolveOptions) { checkState(isDirectory(), "Expected this resource to be a directory, but was [%s]", toString()); - // TODO: check if resolve options are an illegal name in any way, see: - // https://docs.microsoft.com/en-us/rest/api/storageservices/Naming-and-Referencing-Containers--Blobs--and-Metadata if (resolveOptions == ResolveOptions.StandardResolveOptions.RESOLVE_DIRECTORY) { if ("..".equals(other)) {
[AzfsResourceId->[getCurrentDirectory->[fromComponents,isDirectory],equals->[equals],getFilename->[isDirectory],fromUri->[fromComponents],resolve->[equals,fromUri,isDirectory,fromComponents,toString],fromComponents->[AzfsResourceId]]]
Resolves this resource id to a new resource id based on the given other resource id.
Is this resolved now?
@@ -158,10 +158,10 @@ public class CoordinatorServerView implements InventoryView synchronized (lock) { log.debug("Removing segment[%s] from server[%s].", segmentId, server); - final SegmentLoadInfo segmentLoadInfo = segmentLoadInfos.get(segmentId); + SegmentLoadInfo segmentLoadInfo = segmentLoadInfos.get(segmentId); if (segmentLoadInfo == null) { log.warn("Told to remove non-existant segment[%s]", segmentId); - return; + segmentLoadInfo = new SegmentLoadInfo(segment); } segmentLoadInfo.removeServer(server); if (segmentLoadInfo.isEmpty()) {
[CoordinatorServerView->[getInventoryValue->[getInventoryValue],getInventory->[getInventory],serverRemovedSegment->[removeServer],clear->[clear]]]
Remove a segment from the server.
Maybe should rephrase the logging statement now since and change level to "info"?
@@ -173,3 +173,14 @@ func (w *getTarConditionalWriter) Write(data []byte) (int, error) { w.bytesWritten += int64(len(data)) return len(data), nil } + +func (a *apiServer) ListFileNS(req *pfs.ListFileRequest, server pfs.API_ListFileNSServer) error { + pachClient := a.env.GetPachClient(server.Context()) + err := a.driver.listFileNS(pachClient, req.File, req.Full, req.History, func(finfo *pfs.FileInfoNewStorage) error { + return server.Send(finfo) + }) + if err != nil { + return err + } + return nil +}
[Write->[Write,Send],GetTarConditional->[Context,NewWriterSize,Flush,getFilesConditional,ReportRequestWithThroughput,Now,Log,Errorf,Send,Get,Info,Recv,Since],PutTar->[Context,SendAndClose,NewReader,ReportRequestWithThroughput,Now,withFileSet,Log,Errorf,Recv,Since,Put],Read->[Read,Recv,NewReader,Len],GetTar->[Context,NewStreamingBytesWriter,getFilesNewStorageLayer,ReportRequestWithThroughput,Now,Log,Errorf,Since]]
Write - writes data to the archive.
Similar to above, you can just return the result of the function call.
@@ -48,6 +48,11 @@ def primary_email_change_event(profile, changed_date, email): @user_profile_from_uid def delete_user_event(user, deleted_date): """Process the delete user event.""" - user.delete(addon_msg='Deleted via FxA account deletion') - log.info( - 'Account pk [%s] deleted from FxA on %s' % (user.id, deleted_date)) + if switch_is_active('fxa-account-delete'): + user.delete(addon_msg='Deleted via FxA account deletion') + log.info( + 'Account pk [%s] deleted from FxA on %s' % (user.id, deleted_date)) + else: + log.info( + f'Skipping deletion from FxA for account [{user.id}] because ' + 'waffle inactive')
[user_profile_from_uid->[wrapper->[f,info,get,fromtimestamp,warning],wraps],primary_email_change_event->[info,warning,update],delete_user_event->[info,delete],getLogger]
Process the delete user event.
It matters less for things that don't go in sentry, so that doesn't block this PR, but we should try to always let `logging` to the formatting to allow for grouping - So we should do logging formatting with `log.info('foo %s', bar)` and not `log.info('foo %s' % bar)`, `log.info('foo {bar}')` or `log.info('foo {}'.format(bar))` where possible.
@@ -740,12 +740,15 @@ int EVP_PKEY_assign(EVP_PKEY *pkey, int type, void *key) } # endif -const void *EVP_PKEY_get0(const EVP_PKEY *pkey) +void *EVP_PKEY_get0(EVP_PKEY *pkey) { if (pkey == NULL) return NULL; - return evp_pkey_get_legacy((EVP_PKEY *)pkey); + if (!evp_pkey_is_provided(pkey)) + return pkey->pkey.ptr; + + return NULL; } const unsigned char *EVP_PKEY_get0_hmac(const EVP_PKEY *pkey, size_t *len)
[No CFG could be retrieved]
private private methods ASN. 1_OCTET_STRING - > bytes.
Please do not remove the const on the parameter. That is actually an API break as we have const there in 1.1.1 already.
@@ -255,8 +255,13 @@ def set_config(key, value, home_dir=None, set_env=True): -------- get_config """ + if key is None: + warn('set_config(key=None, value=None) to get a list of valid keys ' + 'has been deprecated and will be removed in version 0.19. Use ' + 'get_config(key='') instead.', DeprecationWarning) return known_config_types + _validate_type(key, 'str', "key") # While JSON allow non-string types, we allow users to override config # settings using env, which are strings, so we enforce that here
[_get_stim_channel->[get_config],get_subjects_dir->[get_config],get_config->[get_config_path,_load_config],set_config->[get_config_path,_load_config]]
Set a MNE - Python preference key in the config file and environment. Write the missing node ID in the config file.
Travis was not happy with this insertion
@@ -1465,9 +1465,11 @@ def repro_dir(tmp_dir, dvc, run_copy): stages["dir_file_copy"] = stage last_stage = tmp_dir / "dir" / DVC_FILE + deps = [os.fspath(origin_copy_2), os.fspath(dir_file_copy)] stage = dvc.run( + cmd="echo {}".format(" ".join(deps)), fname=os.fspath(last_stage), - deps=[os.fspath(origin_copy_2), os.fspath(dir_file_copy)], + deps=deps, single_stage=True, ) assert stage is not None
[TestReproChangedDir->[test->[_run,_get_stage_target]],TestReproAllPipelines->[test->[_run]],TestReproCyclicGraph->[test->[_run]],TestReproChangedCode->[test->[_get_stage_target]],TestRepro->[setUp->[_run]],TestNonExistingOutput->[test->[_get_stage_target]],TestReproForce->[test->[_get_stage_target]],TestReproNoCommit->[test->[_get_stage_target]],TestReproExternalLOCAL->[write->[write]],TestReproPipelines->[setUp->[_run]],TestReproExternalSSH->[write->[write]],TestReproFail->[test->[_get_stage_target]],TestReproExternalHDFS->[write->[write]],TestReproAlreadyCached->[test->[_run,_get_stage_target]],TestReproLocked->[test->[_get_stage_target,_run,swap_foo_with_bar]],TestReproDry->[test->[_get_stage_target,swap_foo_with_bar]],TestReproUpToDate->[test->[_get_stage_target]],TestReproDepUnderDir->[test->[_run,_get_stage_target]],TestReproLockedUnchanged->[test->[_get_stage_target]],TestReproPipeline->[test_cli->[_get_stage_target],test->[_get_stage_target]],TestCmdRepro->[test->[_get_stage_target,swap_foo_with_bar]],test_recursive_repro_default->[_read_out,_rewrite_file],TestReproChangedData->[test->[_get_stage_target]],TestReproDataSource->[test->[swap_foo_with_bar]],TestReproChangedDirData->[test->[_run,_get_stage_target]],TestReproChangedDeepData->[test->[_get_stage_target,swap_foo_with_bar],setUp->[_run]],TestReproDepDirWithOutputsUnderIt->[test->[_run,_get_stage_target]],TestReproExternalHTTP->[test->[get_remote,_run,write]],TestReproPhony->[test->[_get_stage_target,_run,swap_foo_with_bar]],TestReproNoDeps->[test->[_run,_get_stage_target]],TestReproLockedCallback->[test->[_run,_get_stage_target]],TestReproExternalBase->[test->[_run,check_already_cached,should_test]],TestReproExternalGS->[write->[bucket]],test_recursive_repro_single->[_read_out,_rewrite_file]]
Reproutes the base directory of a node. Context manager for DVC_FILECOFFS.
For these kind of stage that does not generate any outs, I have used `echo` or `ls` or `cat` as a `cmd`.
@@ -47,14 +47,7 @@ export type OverridesT = { }; // Basically React.Node minus React.Portal and Iterable -export type ChildT = - | void - | null - | boolean - | number - | string - // eslint-disable-next-line flowtype/no-weak-types - | React.Element<any>; +export type ChildT = void | null | boolean | number | string | React.Element<*>; export type ChildrenT = React.ChildrenArray<ChildT>;
[No CFG could be retrieved]
Imports a React object containing all of the properties of the top - level popover. Props for a stateful popover.
wonder if this can be replaced with `React.Node` type
@@ -200,14 +200,14 @@ func (bc *BasicCluster) PutRegion(region *core.RegionInfo) error { return nil } -// UpdateWriteStatus update the write status -func (bc *BasicCluster) UpdateWriteStatus(region *core.RegionInfo) { +// IsUpdateWriteStatus update the write status +func (bc *BasicCluster) IsUpdateWriteStatus(region *core.RegionInfo) (bool, uint64, *core.RegionStat) { var WrittenBytesPerSec uint64 v, isExist := bc.WriteStatistics.Peek(region.GetId()) if isExist && !Simulating { interval := time.Since(v.(*core.RegionStat).LastUpdateTime).Seconds() if interval < minHotRegionReportInterval { - return + return false, region.GetId(), nil } WrittenBytesPerSec = uint64(float64(region.WrittenBytes) / interval) } else {
[GetLeaderStore->[GetStore],GetStore->[GetStore],RandLeaderRegion->[RandLeaderRegion],GetRegionStores->[GetStore],UnblockStore->[UnblockStore],RandFollowerRegion->[RandFollowerRegion],GetRegion->[GetRegion],GetStores->[GetStores],BlockStore->[BlockStore],GetFollowerStores->[GetStore]]
PutRegion - Put a region in the cluster.
I suppose the second return value is always the region's id, we can load it from region info, so don't need to return it.
@@ -1,8 +1,11 @@ # pylint: disable=invalid-name,line-too-long + +import pytest + from allennlp.data.dataset_readers.semantic_parsing.grammar_based_text2sql import GrammarBasedText2SqlDatasetReader from allennlp.common.testing import AllenNlpTestCase - +@pytest.mark.skip(reason="Mark will fix in a nearby PR.") class TestGrammarBasdText2SqlDatasetReader(AllenNlpTestCase): def setUp(self): super().setUp()
[TestGrammarBasdText2SqlDatasetReader->[test_reader_can_read_data_with_entity_pre_linking->[print,list,read,len],setUp->[str,GrammarBasedText2SqlDatasetReader,super]]]
Set up the properties of the object.
Minor typo: `GrammarBasd`
@@ -163,10 +163,4 @@ public class HiveUtils { return conf; } - /** - * @return true if {@link Table} is partitioned. - */ - public static boolean isPartitioned(Table table) { - return table.isPartitioned(); - } }
[HiveUtils->[getInputFormat->[getInputFormat],isPartitioned->[isPartitioned],getPartitions->[getPartitions]]]
Gets the hadoop configuration.
Please leave the method and mark as deprecated. It is possible other products are relying on this method as a library.
@@ -93,11 +93,11 @@ public class HoodieLogFormatWriter implements HoodieLogFormat.Writer { } if (!isAppendSupported) { this.logFile = logFile.rollOver(fs, rolloverLogWriteToken); - LOG.info("Append not supported.. Rolling over to " + logFile); + LOG.info("Append not supported.. Rolling over to {}", logFile); createNewFile(); } } else { - LOG.info(logFile + " does not exist. Create a new file"); + LOG.info("{} does not exist. Create a new file", logFile.getPath()); // Block size does not matter as we will always manually autoflush createNewFile(); }
[HoodieLogFormatWriter->[handleAppendExceptionOrRecoverLease->[createNewFile],close->[close],flush->[flush],rolloverIfNeeded->[HoodieLogFormatWriter]]]
Create a writer that writes a block of data to a log file. Reads the header of the HoodieLogBlock and writes it to the output.
Shall we use `logFile.getPath()` here?
@@ -68,7 +68,7 @@ class ConfigCache * * @return null|mixed Returns the value of the Config entry or null if not set */ - public function get($cat, $key = null) + public function get(string $cat, $key = null) { if (isset($this->config[$cat][$key])) { return $this->config[$cat][$key];
[ConfigCache->[setDefault->[set],load->[setDefault,set],__construct->[load]]]
Get a configuration value.
Why did you remove the `string` type-hint fromthe `$key` parameter in this specific method while the type-hint is present in the adapters?
@@ -198,7 +198,7 @@ class DefineWakeProcess2D(KratosMultiphysics.Process): elem.SetValue(CPFApp.KUTTA, True) @staticmethod - def CheckIfElemIsCutByWake(elem): + def __CheckIfElemIsCutByWake(elem): nneg=0 distances = elem.GetValue(KratosMultiphysics.ELEMENTAL_DISTANCES) for nodal_distance in distances:
[DefineWakeProcess2D->[MarkWakeTEElement->[CheckIfElemIsCutByWake],__init__->[__init__]]]
Checks if an element is cut by wake.
Note that in here you are retrieving the discontinuous elemental distances, which might not coincide with the nodal ones if you call this method after using the modify distance process.
@@ -999,7 +999,9 @@ public abstract class SeekableStreamSupervisor<PartitionIdType, SequenceOffsetTy throws ExecutionException, InterruptedException, TimeoutException, JsonProcessingException { possiblyRegisterListener(); + stateManager.setStateIfNoSuccessfulRunYet(SeekableStreamSupervisorStateManager.State.CONNECTING_TO_STREAM); updatePartitionDataFromStream(); + stateManager.setStateIfNoSuccessfulRunYet(SeekableStreamSupervisorStateManager.State.DISCOVERING_INITIAL_TASKS); discoverTasks(); updateTaskStatus(); checkTaskDuration();
[SeekableStreamSupervisor->[possiblyRegisterListener->[statusChanged->[RunNotice]],stopTasksInGroup->[stopTask,killTask],buildRunTask->[RunNotice],tryInit->[toString,handle],generateSequenceName->[toString],getTaskLocation->[getTaskId],addTaskGroupToActivelyReadingTaskGroup->[TaskData,TaskGroup],makeSequenceNumber->[makeSequenceNumber],checkCurrentTaskState->[stopTasksInGroup,isTaskCurrent,stopTask,taskIds],addDiscoveredTaskToPendingCompletionTaskGroups->[TaskData,TaskGroup],killTasksInGroup->[killTask],reset->[ResetNotice],stopTask->[apply->[killTask]],GracefulShutdownNotice->[handle->[handle]],killTaskGroupForPartitions->[killTasksInGroup],checkPendingCompletionTasks->[stopTasksInGroup,killTasksInGroup,taskIds],checkpoint->[addNotice,CheckpointNotice],stop->[GracefulShutdownNotice,ShutdownNotice],runInternal->[toString],checkSequenceAvailability->[makeSequenceNumber,getOffsetFromStreamForPartition],checkTaskDuration->[taskIds,killTask],checkpointTaskGroup->[apply->[taskIds,killTask],taskIds,killTask],addTaskGroupToPendingCompletionTaskGroup->[TaskData,TaskGroup],discoverTasks->[apply->[TaskData,TaskGroup],isTaskInPendingCompletionGroups,killTask],verifyAndMergeCheckpoints->[verifyAndMergeCheckpoints,taskIds,killTask],getCurrentTotalStats->[getTaskId,taskIds,getGroupId,StatsFromTaskResult,getStats],updateTaskStatus->[apply->[buildRunTask],killTask],getOffsetFromStorageForPartition->[resetInternal],createNewTasks->[buildRunTask,verifyAndMergeCheckpoints,TaskGroup],CheckpointNotice->[handle->[addNewCheckpoint]]]]
This method is called by the TaskManager when it is ready to run.
Hm, I feel like it would be cleaner if the state manager handled the decision of whether to transition to a particular state based on whether a successful run has occurred or not (I don't think the caller should have to know that it needs to call either `setStateIfNoSuccessfulRunYet` or `setState` depending on the state)
@@ -270,10 +270,9 @@ public class DescribableList<T extends Describable<T>, D extends Descriptor<T>> } public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext context) { - CopyOnWriteList core = copyOnWriteListConverter.unmarshal(reader, context); - try { - DescribableList r = (DescribableList)context.getRequiredType().newInstance(); + DescribableList r = (DescribableList) context.getRequiredType().asSubclass(DescribableList.class).newInstance(); + CopyOnWriteList core = copyOnWriteListConverter.unmarshal(reader, context); r.data.replaceBy(core); return r; } catch (InstantiationException e) {
[DescribableList->[contains->[get],toArray->[toArray],buildDependencyGraph->[buildDependencyGraph],toMap->[toMap],rebuild->[get,rebuild],remove->[remove],get->[get],ConverterImpl->[unmarshal->[unmarshal],ConverterImpl]]]
Unmarshall a HierarchicalStreamReader into a List of objects.
`asSubclass` ensures that even if the problem occurs, we will get a `ClassCastException`, which is easier to diagnose than a `NoSuchMethodException`.
@@ -26,7 +26,8 @@ for attribute in dir(std_os): if not hasattr(ourselves, attribute): setattr(ourselves, attribute, getattr(std_os, attribute)) -# Similar to os.path, allow certbot.compat.os.path to behave as a module +# Import our path module, then allow certbot.compat.os.path to behave as a module (like os.path) +from certbot.compat import path # type: ignore # pylint: disable=wrong-import-position std_sys.modules[__name__ + '.path'] = path # Clean all remaining importables that are not from the core os module.
[chmod->[RuntimeError],replace->[RuntimeError],rename->[RuntimeError],mkdir->[RuntimeError],open->[RuntimeError],makedirs->[RuntimeError],chown->[RuntimeError],dir,getattr,hasattr,setattr]
This is the first round of wrapping. It will import all the attributes from the core os it states that the appropriate permissions will be set for the owner.
If we add this `type: ignore`, is all type checking disabled on all calls to all functions in `certbot.compat.os.path`? I suspect this is the case which doesn't seem great. Is there an easy way to avoid this `type: ignore`?
@@ -15,7 +15,12 @@ def test_estimate_gas_fail(deploy_client): assert len(deploy_client.web3.eth.getCode(to_checksum_address(address))) > 0 check_block = deploy_client.get_checking_block() - assert not contract_proxy.estimate_gas(check_block, "fail") + + msg = "Estimate gas should return None if the transaction hit an assert" + assert contract_proxy.estimate_gas(check_block, "fail_assert") is None, msg + + msg = "Estimate gas should return None if the transaction hit a revert." + assert contract_proxy.estimate_gas(check_block, "fail_require") is None, msg def test_estimate_gas_fails_if_startgas_is_higher_than_blockgaslimit(
[test_estimate_gas_defaults_to_pending->[poll,estimate_gas,get_block,deploy_rpc_test_contract,transact,get_transaction_receipt],test_estimate_gas_fails_if_startgas_is_higher_than_blockgaslimit->[deploy_rpc_test_contract,blockhash_from_blocknumber,estimate_gas,get_block],test_estimate_gas_fail->[getCode,estimate_gas,len,get_checking_block,deploy_rpc_test_contract,to_checksum_address],xfail]
Test estimate gas fail if a block_identifier is not available in the contract.
should be "require" instead of "revert"
@@ -508,9 +508,16 @@ public class RServerEvaluator { // direct graphical output String tryCode; connection.eval("do.call(svg,c(list('" + file + "'), beaker::saved_svg_options))"); - tryCode = "beaker_eval_=withVisible(try({ " + j.codeToBeExecuted + "\n},silent=TRUE))\n"+ - "list(beaker_eval_, beaker:::convertToJSON(beaker_eval_$value, beaker:::collapse_unit_vectors))"; + tryCode = "warn <- {};\n" + + "err <- {};\n" + + " beaker_eval_=withVisible(tryCatch({ " + j.codeToBeExecuted + "\n}" + + ", warning = function(w){warn <<- w }" + + ", error = function(e){err <<- e } ))\n"+ + "list(beaker_eval_, beaker:::convertToJSON(beaker_eval_$value, beaker:::collapse_unit_vectors))"; + REXP result = connection.eval(tryCode); + REXP e = connection.eval("err"); + REXP w = connection.eval("warn"); if (result!= null) { logger.trace("RESULT: {}", result);
[RServerEvaluator->[cancelExecution->[cancelExecution],exit->[cancelExecution],autocomplete->[autocomplete],writeRserveScript->[openTemp,makeTemp],getSvgResults->[getImage,MyTranscoder,fixSvgResults],evaluate->[jobDescriptor],workerThread->[run->[makeTemp,isVisible,startRserve,isError,getSvgResults],startRserve->[writeRserveScript,getPortFromCore]]]]
Runs the RPlot. This method is called when a beaker object is being processed.
instead of using variables named "warn" and "err", use "beaker_warn_" and "beaker_err_" to avoid any conflicts with user code.
@@ -476,6 +476,18 @@ PYTHON_DEMOS = [ }) )), + PythonDemo(name='face_detection_mtcnn_demo', device_keys=['-d'], test_cases=combine_cases( + TestCase(options={'--no_show': None, + '-i': image_net_arg('00000002')}), + [ + TestCase(options={ + '-m_p': ModelArg('mtcnn-p'), + '-m_r': ModelArg('mtcnn-r'), + '-m_o': ModelArg('mtcnn-o') + }), + ] + )), + PythonDemo(name='gesture_recognition_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'--no_show': None, '-i': TestDataArg('msasl/global_crops/_nz_sivss20/clip_0017/img_%05d.jpg'),
[combine_cases->[join_cases],PythonDemo,single_option_cases,combine_cases,CppDemo]
Example of how to use the CVX - C2 model. Missing test cases.
This can be merged with previous `TestCase` instance.
@@ -54,7 +54,9 @@ public abstract class AbstractExpressionEvaluator implements BeanFactoryAware, I private volatile BeanFactory beanFactory; - private volatile MessageBuilderFactory messageBuilderFactory; + private volatile MessageBuilderFactory messageBuilderFactory = new DefaultMessageBuilderFactory(); + + private volatile boolean messageBuilderFactorySet; /** * Specify a BeanFactory in order to enable resolution via <code>@beanName</code> in the expression.
[AbstractExpressionEvaluator->[setConversionService->[setConversionService],afterPropertiesSet->[getMessageBuilderFactory],setBeanFactory->[setBeanFactory],evaluateExpression->[evaluateExpression,getEvaluationContext],getEvaluationContext->[getEvaluationContext]]]
Set the bean factory.
This class has a bug in the `afterPropertiesSet()` will be fixed tomorrow
@@ -337,6 +337,17 @@ export class AmpAdNetworkDoubleclickImpl extends AmpA4A { /** @private {?./safeframe-host.SafeframeHostApi} */ this.safeframeApi_ = null; + + /** @private {boolean} whether safeframe forced via tag */ + this.forceSafeframe_ = false; + if ('forceSafeframe' in this.element.dataset) { + if (!/^(1|(true))$/i.test(this.element.dataset['forceSafeframe'])) { + user().warn(TAG, 'Ignoring invalid data-force-safeframe attribute: ' + + this.element.dataset['forceSafeframe']); + } else { + this.forceSafeframe_ = true; + } + } } /** @override */
[AmpAdNetworkDoubleclickImpl->[postTroubleshootMessage->[dev,dict,stringify,now,userAgent],extractSize->[height,extractAmpAnalyticsConfig,get,setGoogleLifecycleVarsFromHeaders,width],getBlockParameters_->[serializeTargeting_,dev,user,isInManualExperiment,Number,assign,join,googleBlockParameters,getMultiSizeDimensions,map],constructor->[resolver,experimentFeatureEnabled,extensionsFor,rejector,getMode,promise,SRA],tearDownSlot->[promise,rejector,removeElement,resolver],shouldPreferentialRenderWithoutCrypto->[dev,experimentFeatureEnabled,CANONICAL_HLDBK_EXP,isCdnProxy],initLifecycleReporter->[googleLifecycleReporterFactory],rewriteRtcKeys_->[keys],onCreativeRender->[customElementExtensions,height,dev,user,addCsiSignalsToAmpAnalyticsConfig,getRefreshManager,installAnchorClickInterceptor,insertAnalyticsElement,getEnclosingContainerTypes,isReportingEnabled,setStyles,width],getCustomRealTimeConfigMacros_->[parseInt,getOrCreateAdCid,documentInfoForDoc,dev,stringify,tryParseJson,toLowerCase],generateAdKey_->[getAttribute,domFingerprintPlain,stringHash32],buildCallback->[getExperimentBranch,getIdentityToken,dev,getVisibilityState,PAUSED,addExperimentIdToElement,onVisibilityChanged,viewerForDoc,EXPERIMENT,randomlySelectUnsetExperiments],populateAdUrlState->[tryParseJson,Number],getSlotSize->[Number],getLocationQueryParameterValue->[parseQueryString],fireDelayedImpressions->[split,dev,dict,isSecureUrl,createElementWithAttributes],getA4aAnalyticsConfig->[getCsiAmpAnalyticsConfig],renderNonAmpCreative->[waitFor3pThrottle,is3pThrottled,incrementLoadingAds],isLayoutSupported->[FLUID,isLayoutSizeDefined],getAdUrl->[resolve,timerFor,all,googleAdUrl,dev,now,assign],groupSlotsForSra->[groupAmpAdsByType],onNetworkFailure->[dev,maybeAppendErrorParameter],idleRenderOutsideViewport->[isNaN,parseInt],getPreconnectUrls->[push],delayAdRequestEnabled->[experimentFeatureEnabled,DELAYED_REQUEST],getA4aAnalyticsVars->[getCsiAmpAnalyticsVariables],unlayoutCallback->[dev],mergeRtcResponses_->[error
,keys,callout,deepMerge,response,join,forEach,rtcTime,push],getAdditionalContextMetadata->[dev],initiateSraRequests->[all,dev,shift,lineDelimitedStreamer,attemptCollapse,SAFEFRAME,map,forEach,metaJsonCreativeGrouper,hasAdPromise,resetAdUrl,element,length,isCancellation,sraResponseRejector,resolve,keys,xhrFor,checkStillCurrent,assignAdUrlToError,sraResponseResolver,constructSRARequest_,toLowerCase,utf8Encode],getNonAmpCreativeRenderingMethod->[SAFEFRAME]],dev,isInManualExperiment,join,encodeURIComponent,map,isArray,googlePageParameters,registerElement,initialSize_,getAttribute,extension,truncAndTimeUrl,adKey_,buildIdentityParams_,constructSRABlockParameters,now,assign,element,length,serializeItem_,push,split,serializeTargeting_,keys,getFirstInstanceValue_,jsonTargeting_,extractFn,forEach,combiner]
Initializes a missing node in the DOM. Private methods for handling any unknown node type. if vpRange = false return vpRange ;.
Why do we need to check for 1/true/True, and not just 1?
@@ -438,9 +438,7 @@ def test_pex_execution(rule_runner: RuleRunner) -> None: assert result.stdout == b"from main\n" -# TODO(John Sirois): Add VenvPex to the pex_type parameter list once Pants is upgraded to Pex with -# a fix for: https://github.com/pantsbuild/pex/issues/1239 -@pytest.mark.parametrize("pex_type", [Pex]) +@pytest.mark.parametrize("pex_type", [Pex, VenvPex]) def test_pex_environment(rule_runner: RuleRunner, pex_type: type[Pex | VenvPex]) -> None: sources = rule_runner.request( Digest,
[test_group_field_sets_by_constraints_with_unsorted_inputs->[create_for_test],test_entry_point->[create_pex_and_get_pex_info],test_pex_execution->[create_pex_and_get_all_data],parse_requirements->[parse],create_pex_and_get_pex_info->[create_pex_and_get_all_data],test_platforms->[create_pex_and_get_all_data],ExactRequirement->[parse->[parse]],test_merge_interpreter_constraints->[assert_merged],test_interpreter_constraints->[create_pex_and_get_pex_info],test_pex_environment->[create_pex_and_get_all_data],test_requirement_constraints->[assert_direct_requirements->[parse],assert_direct_requirements,create_pex_and_get_pex_info],test_additional_inputs->[create_pex_and_get_all_data],test_group_field_sets_by_constraints->[create_for_test],test_additional_args->[create_pex_and_get_pex_info],test_resolves_dependencies->[parse_requirements,create_pex_and_get_pex_info]]
Test if a pex or venv pex is available in the environment.
The VenvPex parametrization fails trying to extract PEX-INFO with PEX_TOOLS before the script fix.
@@ -797,7 +797,7 @@ if ($resql) // Label if (! empty($arrayfields['p.label']['checked'])) { - print '<td class="tdoverflowmax200">'.dol_trunc($obj->label, 40).'</td>'; + print '<td class="tdoverflowmax220">'.dol_trunc($obj->label, 70).'</td>'; if (! $i) $totalarray['nbfield']++; }
[selectMassAction,fetch_object,order,selectyesno,getNomUrl,print_all_ways,selectarray,display_price_product_fournisseur,initHooks,find_min_price_product_fournisseur,load,plimit,executeHooks,loadLangs,select_categories,LibStatut,escape,getDefaultLang,showCheckAddButtons,close,textwithpicto,load_stock,free,query,fetch_name_optionals_label,getCanvas,list_product_fournisseur_price,trans,num_rows,getOptionalsFromPost,multiSelectArrayWithCheckbox,showFilterButtons]
Get product static data from object Print barcodes for calendar component.
The css style tdoverflowmax220 does not exists.
@@ -237,7 +237,7 @@ class Contact extends ApiWrapper /** * Sets current position. * - * @param $position + * @param string $position */ public function setCurrentPosition($position) {
[Contact->[setMainUrl->[setMainUrl],getCreated->[getCreated],addCategory->[addCategory],addPhone->[addPhone],getTitle->[getId,getTitle],getChanged->[getChanged],getPosition->[getPosition,getId],getFormOfAddress->[getFormOfAddress],addFax->[addFax],setLastName->[setLastName],removeEmail->[removeEmail],setMiddleName->[setMiddleName],setBirthday->[setBirthday],getPhones->[getPhones],getLocales->[getLocales],getFullName->[getFullName],removeTag->[removeTag],setNewsletter->[setNewsletter],setGender->[setGender],getId->[getId],removeNote->[removeNote],setChanger->[setChanger],getUrls->[getUrls],getMainPhone->[getMainPhone],getMainFax->[getMainFax],setNote->[setNote],addMedia->[addMedia],getCategories->[getId],getBirthday->[getBirthday],removeUrl->[removeUrl],removeCategory->[removeCategory],getNote->[getNote],setFormOfAddress->[setFormOfAddress],addLocale->[addLocale],setMainPhone->[setMainPhone],getTitleName->[getTitle],setCurrentPosition->[setPosition],getNewsletter->[getNewsletter],removeSocialMediaProfile->[removeSocialMediaProfile],getMainUrl->[getMainUrl],getBankAccounts->[getBankAccounts],addEmail->[addEmail],addSocialMediaProfile->[addSocialMediaProfile],addNote->[addNote],removePhone->[removePhone],getSocialMediaProfiles->[getSocialMediaProfiles],getMedias->[getId,getMedias],setPosition->[setPosition],removeFax->[removeFax],addUrl->[addUrl],setFirstName->[setFirstName],getGender->[getGender],getFaxes->[getFaxes],getMainEmail->[getMainEmail],getFirstName->[getFirstName],getLastName->[getLastName],setTitle->[setTitle],getNotes->[getNotes],getAvatar->[getId],addTag->[addTag],removeLocale->[removeLocale],setMainFax->[setMainFax],getSalutation->[getSalutation],getMiddleName->[getMiddleName],setCreator->[setCreator],removeMedia->[removeMedia],getEmails->[getEmails],getPositionName->[getPosition],setMainEmail->[setMainEmail],setSalutation->[setSalutation],toArray->[getCreated,getFirstName,getLastName,getMiddleName,getTitle,getBirthday,getChanged,getPosition]]]
Set current position.
this is not a string but a `Position` (`Sulu\Bundle\ContactBundle\Entity\Position`) entity
@@ -313,7 +313,7 @@ class AnnotationsControllerTest < AuthenticatedControllerTest end should 'recognize action to destroy' do - assert_recognizes( {:action => 'destroy', :controller => 'annotations'}, - {:path => 'annotations', :method => 'delete'} ) + assert_recognizes({ action: 'destroy', controller: 'annotations', id: '1' }, + { path: 'annotations/1', method: 'delete' }) end end
[AnnotationsControllerTest->[create,post_as,should,assert,post,assigns,assert_not_nil,delete_as,content,put_as,make,id,delete,context,assert_response,get,render_template,setup,assert_recognizes],join,require,expand_path,dirname]
- > find all annotations with action to destroy - > find all annotations with method delete.
Align the parameters of a method call if they span more than one line.
@@ -122,6 +122,9 @@ namespace System.Net.Sockets.Tests [InlineData(false, 2)] public void CtorAndAccept_SocketNotKeptAliveViaInheritance(bool validateClientOuter, int acceptApiOuter) { + // 300 ms should be long enough to connect if the socket is actually present & listening. + const int ConnectionTimeoutMs = 300; + // Run the test in another process so as to not have trouble with other tests // launching child processes that might impact inheritance. RemoteExecutor.Invoke((validateClientString, acceptApiString) =>
[CreateSocket->[Ctor_Netcoreapp_Throws->[Raw,Linux,ControllerAreaNetwork,Assert,Packet],DualMode_Failure->[Assert,nameof],Ctor_Raw_NotSupported_ExpectedError->[AnyUnix,Icmp,Raw,SocketErrorCode,ProtocolNotSupported,nameof,InterNetworkV6,Contains,IcmpV6,Udp,AccessDenied,Tcp,Assert,InterNetwork],Ctor_Raw_Supported_Success->[AnyUnix,Icmp,Raw,nameof,InterNetworkV6,IcmpV6,Udp,Tcp,InterNetwork],DualMode_Success->[nameof],Ctor_Failure->[Assert,nameof],Ctor_Success->[nameof],CtorAndAccept_SocketNotKeptAliveViaInheritance->[Dispose],Ctor_Netcoreapp_Success->[SocketErrorCode,Raw,ProtocolNotSupported,Linux,ControllerAreaNetwork,AccessDenied,Close,AddressFamilyNotSupported,Packet],Stream,Seqpacket,InterNetworkV6,Unknown,Udp,Dgram,Rdm,Tcp,InterNetwork,IsProcessElevated]]
Creates a child process that is not alive via inheritance. This method checks that the child process is alive and that it is not able to accept new.
I'm wondering if this could go to SocketTestHelper.cs just in case we need to adjust it for ARM runs.
@@ -637,6 +637,8 @@ func (ss *StateSync) getMaxPeerHeight() uint64 { wg.Add(1) go func() { defer wg.Done() + //debug + //utils.GetLogInstance().Warn("[Sync] getMaxPeerHeight", "IP", peerConfig.ip, "Port", peerConfig.port) response := peerConfig.client.GetBlockChainHeight() ss.syncMux.Lock() if response != nil && maxHeight < response.BlockHeight {
[SyncLoop->[purgeAllBlocksFromCache,getMaxPeerHeight,purgeOldBlocksFromCache,RegisterNodeInfo,ProcessStateSync],GetBlockHashesConsensusAndCleanUp->[getHowManyMaxConsensus,cleanUpPeers],GetBlocks->[GetBlocks],purgeAllBlocksFromCache->[ForEachPeer],getMaxConsensusBlockFromParentHash->[ForEachPeer],getMaxPeerHeight->[ForEachPeer],generateStateSyncTaskQueue->[ForEachPeer],downloadBlocks->[ForEachPeer,GetBlocks],IsOutOfSync->[getMaxPeerHeight],GetConsensusHashes->[GetBlockHashesConsensusAndCleanUp,ForEachPeer],AddNewBlock->[FindPeerByHash],purgeOldBlocksFromCache->[ForEachPeer],IsSameBlockchainHeight->[getMaxPeerHeight],CreateSyncConfig->[AddPeer],RegisterNodeInfo->[ForEachPeer,registerToBroadcast],ProcessStateSync->[generateStateSyncTaskQueue,downloadBlocks,generateNewState,GetConsensusHashes],generateNewState->[updateBlockAndStatus,getBlockFromLastMileBlocksByParentHash,getMaxConsensusBlockFromParentHash,ForEachPeer,getBlockFromOldBlocksByParentHash]]
getMaxPeerHeight returns the maximum height of all peers in the system.
please remove the debug code or use Debug()
@@ -820,7 +820,12 @@ class TorchAgent(Agent): """ self._set_text_vec(obs, truncate, split_lines) self._set_label_vec(obs, add_start, add_end, truncate) - self._set_label_cands_vec(obs, add_start, add_end, truncate) + # vectorize label candidates if and only if we are using inline + # candidates + cands_key = ('candidates' if 'labels' in obs else + 'eval_candidates' if 'eval_labels' in obs else None) + if cands_key is not None and self.opt[cands_key] == 'inline': + self._set_label_cands_vec(obs, add_start, add_end, truncate) return obs def batchify(self, obs_batch, sort=False,
[TorchAgent->[add_cmdline_args->[optim_opts,dictionary_class],get_dialog_history->[_add_person_tokens],vectorize->[_set_text_vec,_set_label_cands_vec,_set_label_vec],receive_metrics->[_is_lr_warming_up],load->[load],init_optim->[optim_opts],observe->[get_dialog_history,last_reply,vectorize],__init__->[dictionary_class],_set_label_cands_vec->[_vectorize_text,_check_truncate],save->[save],update_params->[_is_lr_warming_up],_set_label_vec->[_vectorize_text,_check_truncate],_set_text_vec->[_vectorize_text,_check_truncate],batch_act->[batchify,match_batch,_save_history],zero_grad->[zero_grad],_copy_embeddings->[_get_embtype,_project_vec]]]
This function is used to vectorize the text and label_vec fields of an observation. This function is a base class method that can be used to create a sequence of missing missing Batch implementation of .
can we move this logic instead into `_set_label_cand_vec`? Then we can say in its docstring that it becomes a no-op in `-cands batch`?
@@ -103,7 +103,7 @@ public class HudsonPrivateSecurityRealm extends AbstractPasswordBasedSecurityRea * in Java {@code \w} is equivalent to {@code [A-Za-z0-9_]} (take care of "_") */ private static final String DEFAULT_ID_REGEX = "^[\\w-]+$"; - + /** * If true, sign up is not allowed. * <p>
[HudsonPrivateSecurityRealm->[hash->[encodePassword],doCreateFirstAccount->[loginAndTakeBack,hasSomeUser],doFilter->[doFilter],getACL->[getACL],loginAndTakeBack->[authenticate],isPasswordValid->[isPasswordValid,encodePassword],encodePassword->[encodePassword],getAllowsSignup->[allowsSignup],checkPermission->[checkPermission],commenceSignup->[generateResponse->[generateResponse]],hasPermission->[hasPermission],Details->[fromHashedPassword->[Details],fromPlainPassword->[Details],getProtectedPassword->[getPassword],DescriptorImpl->[newInstance->[fromHashedPassword,fromPlainPassword]],isPasswordCorrect->[getPassword]],createAccount->[createAccount,getErrorMessages],needsToCreateFirstUser->[hasSomeUser],_doCreateAccount->[allowsSignup,hasSomeUser],authenticate->[loadUserByUsername],getUser->[hasPermission]]]
Creates a private security realm that uses the given user ID check. Adds a filter that asks the user to create a new user account.
nit: remove the whitespace change there
@@ -16,7 +16,7 @@ struct timer_data { void *arg2; }; -static struct timer_data xtimer[3] = {}; +static struct timer_data xtimer[ARCH_TIMER_COUNT] = {}; void timer_64_handler(void *arg) {
[timer_64_handler->[handler2,xthal_get_ccompare,xthal_set_ccompare,arch_timer_clear],arch_timer_get_system->[arch_interrupt_global_disable,xthal_get_ccount,arch_interrupt_global_enable,xthal_get_ccompare,arch_interrupt_get_status],arch_timer_set->[arch_interrupt_global_enable,xthal_set_ccompare,arch_interrupt_global_disable]]
timer_64_handler - timer_64_handler cant be a cortex.
@tlauda please make this change also for imx: src/platform/imx8/include/platform/drivers/timer.h Working on multiple platforms comes with the responsability of keeping the code consistent and a change should cover all existing platforms.
@@ -360,6 +360,7 @@ module.exports = class extends BaseGenerator { saveConfig() { this.config.set('jhipsterVersion', packagejs.version); + this.config.set('applicationType', this.applicationType); this.config.set('baseName', this.baseName); this.config.set('packageName', this.packageName); this.config.set('packageFolder', this.packageFolder);
[No CFG could be retrieved]
Configure the global configuration. Missing properties - config options.
if this is added here then you need to do it for client sub gen as well
@@ -263,7 +263,7 @@ export default { } }} /> - <Checkbox onChange={onChange} inputRef={inputRef} autoFocus={isFocused}> + <Checkbox onChange={onChange} inputRef={inputRef}> Focused checkbox </Checkbox> </div>
[No CFG could be retrieved]
onChange - Focus checkbox if checkbox is not active.
so after removing `autoFocus` it doesnt' work as expected in this example
@@ -104,6 +104,17 @@ class FundCommand extends BaseCommand $io->write(""); $io->write("Please consider following these links and sponsoring the work of package authors!"); $io->write("Thank you!"); + } elseif ($fundings && $format === 'json') { + $fundingJson = array(); + foreach ($fundings as $vendor => $links) { + $fundingJson[$vendor] = array(); + foreach ($links as $url => $packages) { + $fundingJson[$vendor]['packages'] = implode(', ', $packages); + $fundingJson[$vendor]['url'] = $url; + } + } + + $io->write(JsonFile::encode($fundingJson)); } else { $io->write("No funding links were found in your package dependencies. This doesn't mean they don't need your support!"); }
[FundCommand->[insertFundingData->[getFunding,getPrettyName],execute->[isDefaultBranch,write,getName,getRepositories,getFunding,loadPackages,getComposer,getPackages,getLocalRepository,getIO,insertFundingData],configure->[setDescription]]]
Executes the command. Writes a line to the console.
this looks wrong to me. If there is no funding links found, the output should still be in JSON rather than plaintext when requesting json.
@@ -0,0 +1,18 @@ +# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other +# Spack Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +from spack import * + + +class PyDeprecation(PythonPackage): + """The deprecation library provides a deprecated decorator and a + fail_if_not_removed decorator for your tests. """ + + homepage = "http://deprecation.readthedocs.io/" + url = "https://pypi.io/packages/source/d/deprecation/deprecation-2.0.7.tar.gz" + + version('2.0.7', sha256='c0392f676a6146f0238db5744d73e786a43510d54033f80994ef2f4c9df192ed') + + depends_on('py-setuptools', type='build')
[No CFG could be retrieved]
No Summary Found.
Missing dependency on `py-packaging`.
@@ -51,7 +51,7 @@ module ActiveRecord ::File.join(fixtures_directory, fs_name)) end - all_loaded_fixtures.update(fixtures_map) + update_all_loaded_fixtures fixtures_map connection.transaction(:requires_new => true) do # Patch - replace this...
[FixtureSet->[create_fixtures->[disable_referential_integrity]]]
Creates all fixtures in the given directory. finds missing record in database.
Link to commit sha / line or something that shows where this comes from would be good
@@ -13,7 +13,7 @@ * @author LibreNMS Contributors */ -$alert_severities = [ +$alert_severities = array( // alert_rules.status is enum('ok','warning','critical') 'ok' => 1, 'warning' => 2,
[toArray,hasGlobalRead]
This method is used to find all alert rules that have a specific number of devices. count of alert log entries that have been logged.
please change "array()" back to "[.,..]" "array()" is old style
@@ -419,6 +419,8 @@ public class Geonetwork implements ApplicationHandler { Log.error(Geonet.DB, "Error occurred while trying to execute SQL", t); throw new RuntimeException(t); } + + context.getBean(IsoLanguagesMapper.class).init(); } }
[Geonetwork->[fillCaches->[run->[ServletWebRequest,MockFilterChain,toString,info,MockHttpServletRequest,loginAs,setAttribute,of,doFilter,getBean,set,setUsername,getAttribute,findAll,getServletContext,setAsThreadLocal,setSession,getNumberOfElements,MockHttpServletResponse,getUserSession,exec,getId,MockHttpSession],Thread,start,Runnable,setDaemon,getBean,setName,setPriority],start->[getValue,ServiceConfig,equalsIgnoreCase,getMessage,normalize,setSrvctx,equals,getLanguage,parseInt,getURL,Source,toString,getMandatoryValue,info,getVersion,configure,getSubVersion,isReadOnly,init,ServletPathFinder,getBaseUrl,getAppPath,getStackTrace,OaiPmhDispatcher,getBeanFactory,ServerStartup,getInstance,getValueAsBool,publishEvent,set,getBean,findOneByUuid,getChildren,getSiteUrl,valueOf,setProxyInfo,createDBHeartBeat,getSchemaPluginsDir,getResourcesDir,setConfigFilename,error,setAsThreadLocal,schedule,GeonetContext,registerSingleton,setProps,substring,setSubversionPath,parseBoolean,getConnection,close,setContext,getApplicationContext,importDatabaseData,ClassPathXmlApplicationContext,setSystemRunning,resolve,getSiteId,fillCaches,isNotEmpty,getLogger,createSiteLogo,updateURL,getServlet,save,getSiteName,refreshLogConfiguration],stop->[info,setSystemRunning,shutdownNow,shutdown],createSiteLogo->[resolve,locateLogosDir,error,getMessage,getBean,exists,copyLogo],importDatabaseData->[RuntimeException,getServletContext,error,DbLib,getAppPath,setSiteUuid,insertData,getBean,toString,two,warning,toPath,one,count],setProps->[getValue,registerXmlCatalogFiles,resolve,getProperty,equals,setProperty,toString,info,toPath],createDBHeartBeat->[checkDBWrite->[getMessage,flush,setValue,getBean,info,save,delete],run->[getMessage,debug,isReadOnly,error,getStackTrace,getBean,checkDBWrite,info,warning,setReadOnly],info,scheduleWithFixedDelay,newScheduledThreadPool,Runnable]]]
import database data if it has not been imported.
maybe is renit better, as init would append entries at the end of existing ones, they are maybe figures where entries are already there whene this init call is done.
@@ -153,13 +153,6 @@ namespace System 1e80 }; - // Used to fill uninitialized stack variables with non-zero pattern in debug builds - [Conditional("DEBUG")] - private static unsafe void DebugPoison<T>(ref T s) where T : unmanaged - { - MemoryMarshal.AsBytes(MemoryMarshal.CreateSpan(ref s, 1)).Fill(0xCD); - } - #region Decimal Math Helpers private static unsafe uint GetExponent(float f)
[Decimal->[DecDivMod1E9->[DecDivMod1E9,AsMutable],DecCalc->[VarDecModFull->[Div128By96,Div96By64,UInt32x32To64,DebugPoison],VarDecCmpSub->[UInt32x32To64],SearchScale->[Decimal],ScaleResult->[Decimal],Div128By96->[UInt32x32To64],VarDecDiv->[Add32To96,DebugPoison,IncreaseScale,Div96By64,Decimal,Div128By96,SearchScale,OverflowUnscale,UInt32x32To64,IncreaseScale64,Div96By32,Unscale],UInt64x64To128->[Decimal,UInt32x32To64],VarCyFromDec->[UInt32x32To64],Unscale->[Div96ByConst],VarDecMod->[UInt32x32To64,VarDecCmpSub,DebugPoison,SearchScale],IncreaseScale->[UInt32x32To64],Div96By64->[UInt32x32To64],DecAddSub->[ScaleResult,UInt32x32To64,DebugPoison,Decimal],OverflowUnscale->[Decimal],VarDecFromR8->[UInt64x64To128,Decimal,GetExponent,UInt32x32To64],IncreaseScale64->[UInt32x32To64],VarDecFromR4->[UInt64x64To128,Decimal,GetExponent,UInt32x32To64],GetHashCode->[Unscale],VarDecMul->[ScaleResult,UInt32x32To64,DebugPoison]]]]
Debug poison method for testing if a single object is missing a non - zero value.
great that you can delete it now, maybe keep their pattern "0xCD" in the general case?
@@ -103,11 +103,13 @@ type beatConfig struct { // elastic stack 'setup' configurations Dashboards *common.Config `config:"setup.dashboards"` - Template *common.Config `config:"setup.template"` Kibana *common.Config `config:"setup.kibana"` - // ILM Config options - ILM *common.Config `config:"output.elasticsearch.ilm"` + //new config opt, holding information about index, ilm and template + Indices *common.Config `config:"indices"` + + //deprecated + Template *common.Config `config:"setup.template"` } var (
[launch->[InitWithSettings,createBeater],TestConfig->[createBeater,Init],Setup->[createBeater,Init],createBeater->[BeatConfig],configure->[BeatConfig],Init->[InitWithSettings]]
Beat is a type that can be used to create a Beat instance. Initialize and run a Beater implementation.
why not `[]index.Config`? We seem to unpack the Indices config in a few places.
@@ -13,5 +13,14 @@ class AlaveteliPro::DashboardController < AlaveteliPro::BaseController @page = 1 if @page < 1 @per_page = 10 @activity_list = AlaveteliPro::ActivityList::List.new(@user, @page, @per_page) + @phase_counts = @user.request_summaries. + joins(:request_summary_categories). + references(:request_summary_categories). + group("request_summary_categories.slug"). + count("request_summary_categories.id") + @phase_counts = @phase_counts.with_indifferent_access + @phase_counts[:total] = @phase_counts.values.reduce(0, :+) + @phase_counts[:not_drafts] = + @phase_counts[:total] - @phase_counts[:draft].to_i end end
[index->[to_i,new]]
This method returns the next n - item in the index.
This and the identical code in `info_requests_controller.rb` should be refactored to a single method. I'm thinking probably the `User` model would be a good place for it. That method should have some specs and should also handle the conversion of the counts to integers currently being done in templates.
@@ -0,0 +1,15 @@ +module Middlewares + # Since we must explicitly set the cookie domain in session_store before SiteConfig is available, + # this ensures we properly set the cookie to SiteConfig.app_domain at runtime. + class SetCookieDomain + def initialize(app) + @app = app + end + + def call(env) + env["rack.session.options"][:domain] = ".#{SiteConfig.app_domain}" + + @app.call(env) + end + end +end
[No CFG could be retrieved]
No Summary Found.
added a `Middlewares` module for grouping
@@ -1,4 +1,3 @@ -require "pusher" require "pusher/push_notifications" Pusher.app_id = ApplicationConfig["PUSHER_APP_ID"]
[secret_key,cluster,instance_id,secret,key,logger,encrypted,require,app_id,configure]
Initialize PushNotifications with the application configuration.
pusher is automatically loaded
@@ -241,7 +241,7 @@ describe AdminCensorRuleController do context 'request_id param' do before(:each) do - @censor_rule_params = FactoryGirl.build(:info_request_censor_rule).serializable_hash + @censor_rule_params = FactoryGirl.attributes_for(:info_request_censor_rule) # last_edit_editor gets set in the controller @censor_rule_params.delete(:last_edit_editor) @info_request = FactoryGirl.create(:info_request)
[create_censor_rule->[post,id],create,admin_request_censor_rules_path,describe,match_array,it,put,serializable_hash,map,to,before,post,admin_user_path,public_body,require,user,dirname,receive,id,delete,redirect_to,context,admin_body_censor_rules_path,build,admin_body_path,get,admin_user_censor_rules_path,basic_auth_login,info_requests,eq,info_request,render_template,reload,admin_request_path,and_return,expand_path]
adds some checks to the censor rules administration page successfully saving the censor rule persists the censor rule and checks the flash message.
Line is too long. [83/80]
@@ -49,11 +49,11 @@ type MetricsStore struct { /* * Certificate metrics */ - // CertXdsIssuedCount is the metric counter for the number of xds certificates issued - CertXdsIssuedCount prometheus.Counter + // CertIssuedCount is the metric counter for the number of certificates issued + CertIssuedCount prometheus.Counter - // CertXdsIssuedCounter the histogram to track the time to issue xds certificates - CertXdsIssuedTime *prometheus.HistogramVec + // CertXdsIssuedCounter the histogram to track the time to issue a certificates + CertIssuedTime *prometheus.HistogramVec /* * MetricsStore internals should be defined below --------------
[Start->[MustRegister],Handler->[InstrumentMetricHandler,HandlerFor],Stop->[Unregister],NewCounterVec,NewGauge,NewHistogramVec,NewRegistry,NewCounter]
Metrics for the number of non - terminal metrics represents the number of namespaces monitored by OSM controller.
These metrics only pertain to XDS certs, why are they being removed from the variable names?
@@ -25,4 +25,17 @@ export function doGetJSON(url) { return Promise.reject(error); }); + + if (retry) { + return timeoutPromise(fetchPromise, RETRY_TIMEOUT) + .catch(response => { + if (response.status >= 400 && response.status < 500) { + return Promise.reject(response); + } + + return timeoutPromise(fetchPromise, RETRY_TIMEOUT); + }); + } + + return fetchPromise; }
[No CFG could be retrieved]
reject if error.
We have this in jitsi-meet-spot as well. You want to shove something into js-utils so both repos can share it?
@@ -1000,12 +1000,12 @@ class Flow: ) @cache - def build_environment(self) -> bytes: + def build_environment(self) -> dict: """ Build the flow's environment. Returns: - - bytes of a key that can be used to access the environment. + - dict: a key that can be used to recreate the environment. Raises: - ValueError: if no environment is specified in this flow
[Flow->[copy->[copy],upstream_tasks->[edges_to],update->[add_edge,add_task],reference_tasks->[terminal_tasks],chain->[add_edge],edges_to->[all_upstream_edges],set_dependencies->[add_edge,add_task],run->[parameters,run],edges_from->[all_downstream_edges],generate_local_task_ids->[all_upstream_edges,sorted_tasks,copy,serialize,all_downstream_edges],validate->[reference_tasks],serialize->[validate],_sorted_tasks->[copy,upstream_tasks,downstream_tasks,update],downstream_tasks->[edges_from],add_edge->[copy,add_task],visualize->[get_color,edges_to,id]]]
Build the flow s environment.
this wording is confusing to me; strictly speaking in the python world, dicts can be keys because they're mutable, but I'm guessing you're using the word "key" slightly differently. Could you clarify?
@@ -199,7 +199,6 @@ class TestCalibrationForResnet50(unittest.TestCase): def run_program(self, model_path, generate_int8=False, algo='direct'): image_shape = [3, 224, 224] - os.environ['FLAGS_use_mkldnn'] = 'True' fluid.memory_optimize(fluid.default_main_program())
[process_image->[resize_short,crop_image],val->[_reader_creator],TestCalibrationForResnet50->[download_data->[cache_unzipping],download_model->[download_data],run_program->[val],test_calibration->[download_model,run_program]],TestCalibrationForMobilenetv1->[download_model->[download_data]]]
Run the n - tuple model on the given model. time - related functions.
Since there is `os.environ['FLAGS_use_mkldnn'] = True`, do you need `FLAGS_use_mkldnn=true python test_calibration.py` again?
@@ -52,12 +52,8 @@ static int test_standard_exts() tmp = standard_exts; TEST_error("Extensions out of order!"); for (i = 0; i < STANDARD_EXTENSION_COUNT; i++, tmp++) - fprintf(stderr, "%d : %s\n", (*tmp)->ext_nid, - OBJ_nid2sn((*tmp)->ext_nid)); - } else { - fprintf(stderr, "Order OK\n"); + TEST_note("%d : %s", (*tmp)->ext_nid, OBJ_nid2sn((*tmp)->ext_nid)); } - return good; }
[register_tests->[ADD_TEST],int->[OBJ_nid2sn,TEST_error,OSSL_NELEM,fprintf]]
test_standard_exts - Test if standard extensions have a specific extension.
Same here - changing output to not have "Order OK" ... is that intentional?
@@ -305,6 +305,8 @@ function list_devices(Illuminate\Http\Request $request) if ($type == 'all' || empty($type)) { $sql = '1'; + } elseif ($type == 'device_id') { + $sql = "`d`.`device_id` = $query"; } elseif ($type == 'active') { $sql = "`d`.`ignore`='0' AND `d`.`disabled`='0'"; } elseif ($type == 'location') {
[get_graphs->[route],search_ports->[route,get,where,isEmpty,select,orWhere],edit_location->[route,getContent],update_device->[route,getContent],get_port_stack->[route,get],get_vlans->[route],get_vrf->[route],get_inventory_for_device->[route],get_device->[route],get_bill_history_graph->[route,get],get_devices_by_group->[toArray,route,get,isEmpty,first],list_vrf->[get,hasGlobalRead],get_fdb->[route],list_fdb->[route,get,isEmpty],get_link->[route],search_by_mac->[messages,fails,route,get,has,first,count],api_error->[json],maintenance_device->[displayName,route,json,format,save],add_device_group->[messages,getContent,fails,toSql,save,sync],get_inventory->[route,get],del_service_from_host->[route],remove_port_group->[detach,route,getContent],add_components->[route,createComponent],edit_components->[route,getContent,setComponentPrefs],get_components->[all,route,get,has,getComponents],list_vlans->[get,hasGlobalRead],unmute_alert->[route,getContent],api_success->[json],get_oxidized_config->[route],add_location->[getContent],validate_column_list->[getColumns],get_all_ports->[get,hasGlobalRead],get_device_ip_addresses->[route],get_device_groups->[toArray,route,get,groups,isEmpty,count],list_alert_rules->[route],get_ports_by_group->[toArray,route,get,isEmpty,first],add_edit_rule->[toSql,getContent],add_parents_to_host->[route,getContent,sync],add_service_template_for_device_group->[messages,getContent,fails,toSql,save],delete_rule->[route],show_endpoints->[getName,getPrefix,json,url,getRoutes,uri],get_bill_history_graphdata->[route,get],device_outages->[orderBy,route,get],get_service_templates->[toArray,cannot,get,isEmpty,count],list_available_health_graphs->[route,count],list_links->[route,hasGlobalRead],get_graph_generic_by_hostname->[has,route,get],get_bgp->[route],get_network_ip_addresses->[route],create_edit_bill->[getContent],get_graph_by_port_hostname->[has,route,get],add_port_group->[save,messages,getContent,fails],get_graph_by_portgroup->[has,route,get],get_bill_histo
ry->[route],get_bill_graph->[route,get],delete_bill->[route],list_bills->[route,get,hasGlobalRead],get_port_groups->[toArray,get,isEmpty,count],del_location->[route],trigger_device_discovery->[route],list_cbgp->[get,hasGlobalRead],ack_alert->[route,getContent],get_port_ip_addresses->[route],edit_service_for_host->[route,getContent],list_bgp->[uncompressed,get],list_sensors->[get,count],get_port_info->[route],add_service_for_host->[route,getContent],list_alerts->[has,route,get],assign_port_group->[attach,route,getContent],list_devices->[get,hasGlobalRead],list_ospf_ports->[get,isEmpty,count],list_logs->[route,get,getName],edit_bgp_descr->[save,route,json],list_ospf->[get],get_port_stats_by_port_hostname->[path,has,route,get],list_ipsec->[route],list_available_wireless_graphs->[route],search_oxidized->[route],list_oxidized->[get,json],rename_device->[route],list_arp->[getNetworkAddress,getNetmask,route,get],get_port_graphs->[route,get],add_device->[getMessage,getContent],delete_components->[deleteComponent,route],device_availability->[orderBy,route,get],del_device->[route],get_bill_graphdata->[route,get],list_services->[has,route,get],del_parents_from_host->[detach,route,getContent]]
List all devices in the system. Queries for all nodes that are not associated with any host. Get a list of all devices that have a node id.
This introduces a sql vulnerability
@@ -251,9 +251,17 @@ module Engine private def init_bank + return Bank.new(self.class::BANK_CASH[players.size], log: @log) if self.class::BANK_CASH.is_a?(Hash) + Bank.new(self.class::BANK_CASH, log: @log) end + def init_cert_limit + return self.class::CERT_LIMIT[players.size] if self.class::CERT_LIMIT.is_a?(Hash) + + self.class::CERT_LIMIT + end + def init_phase Phase.new(self.class::PHASES, self) end
[Base->[current_entity->[current_entity],trains->[trains],process_action->[process_action],end_game->[format_currency],inspect->[title],rollback->[clone]]]
Initializes the object variables related to a sequence of Banks Phases and Auctions.
it might just be cleaner to do cash = self.class::BANK_CASH cash = cash[players.size] if cash.is_a?(Hash)
@@ -327,7 +327,8 @@ public class ServerGroupManager { validateAccessCredentials(loggedInUser, sg, sg.getName()); validateAdminCredentials(loggedInUser); - SystemManager.addServersToServerGroup(servers, sg); + SystemManager systemManager = new SystemManager(ServerFactory.SINGLETON, ServerGroupFactory.SINGLETON, saltApi); + systemManager.addServersToServerGroup(servers, sg); updatePillarAfterGroupUpdateForServers(servers); }
[ServerGroupManager->[dissociateAdmins->[validateAdminCredentials,remove,validateAccessCredentials],associateAdmins->[validateAdminCredentials,validateAccessCredentials],lookupEntitled->[lookupEntitled],listAdministrators->[listAdministrators,validateAdminCredentials,validateAccessCredentials],create->[validateAdminCredentials,create],listNoAdminGroups->[listNoAdminGroups],addServers->[validateAdminCredentials,validateAccessCredentials],removeServers->[updatePillarAfterGroupUpdateForServers,validateAdminCredentials,removeServers,validateAccessCredentials],remove->[validateAdminCredentials,remove,validateAccessCredentials],processAdminList->[remove],listServers->[listServers],associateOrDissociateAdminsByLoginName->[validateAdminCredentials,validateAccessCredentials],validateAccessCredentials->[canAccess]]]
Adds a collection of servers to a server group.
this system manager should not be passed as a class field instead of being created on each method invocation?
@@ -1133,7 +1133,6 @@ class FreqtradeBot(LoggingMixin): Buy cancel - cancel order :return: True if order was fully cancelled """ - # TODO-lev: Pay back borrowed/interest and transfer back on leveraged trades was_trade_fully_canceled = False # Cancelled orders may have the status of 'canceled' or 'closed'
[FreqtradeBot->[cleanup->[cleanup],fee_detection_from_trades->[apply_fee_conditional],execute_trade_exit->[handle_insufficient_funds,_safe_exit_amount],execute_entry->[leverage_prep,update],_safe_exit_amount->[update],handle_cancel_enter->[_notify_enter_cancel,update],handle_cancel_exit->[update],get_real_amount->[apply_fee_conditional],exit_positions->[update],apply_fee_conditional->[update],handle_protections->[update],check_handle_timedout->[_check_timed_out],__init__->[update->[update],__init__],handle_stoploss_on_exchange->[create_stoploss_order],create_trade->[get_free_open_trades],_notify_exit_cancel->[update],handle_trailing_stoploss_on_exchange->[create_stoploss_order],update_trade_state->[_notify_exit,_notify_enter,update],_notify_exit->[update],create_stoploss_order->[handle_insufficient_funds]]]
Handle cancel enter. This function is called when the order is fully cancelled.
Created a trello task with the margin label
@@ -310,9 +310,17 @@ module.exports = class bitcoincoid extends Exchange { } async cancelOrder (id, symbol = undefined, params = {}) { + if (!symbol) + throw new ExchangeError (this.id + ' cancelOrder requires a symbol argument'); + if ('side' in params) + if (!params['side']) + throw new ExchangeError (this.id + ' cancelOrder requires a side as params argument'); await this.loadMarkets (); + let market = this.market (symbol); return await this.privatePostCancelOrder (this.extend ({ 'order_id': id, + 'pair': market['id'], + 'type': params['side'] }, params)); }
[No CFG could be retrieved]
Create a new order on a given symbol Get a single from the API.
One single comma missing at the end of this line to satisfy the syntax checkers. UPD: Ah, nvm, it's ok ))
@@ -1264,7 +1264,7 @@ public class MovePanel extends AbstractMovePanel { // is the only one for // which the route may actually change much) if (unitsThatCanMoveOnRoute.size() < selectedUnits.size() && (unitsThatCanMoveOnRoute.size() == 0 - || Match.allMatchNotEmpty(unitsThatCanMoveOnRoute, Matches.UnitIsAir))) { + || !unitsThatCanMoveOnRoute.isEmpty() && Match.allMatch(unitsThatCanMoveOnRoute, Matches.UnitIsAir))) { final Collection<Unit> airUnits = Match.getMatches(selectedUnits, Matches.UnitIsAir); if (airUnits.size() > 0) { route = getRoute(getFirstSelectedTerritory(), territory, airUnits);
[MovePanel->[updateUnitsThatCanMoveOnRoute->[sortUnitsToMove],cancelMoveAction->[updateRouteAndMouseShadowUnits,setFirstSelectedTerritory,setSelectedEndpointTerritory],getRouteForced->[getRoute],getUnitsToUnload->[sortTransportsToUnload,sortUnitsToMove],getRouteNonForced->[getUnitOwner],deselectUnits->[updateRouteAndMouseShadowUnits,getRoute,updateUnitsThatCanMoveOnRoute],getMovableMatch->[getUnitOwner],mouseMoved->[updateRouteAndMouseShadowUnits,getRoute,updateUnitsThatCanMoveOnRoute],getTransportsToLoad->[sortTransportsToLoad,getUnitOwner],selectUnitsToMove->[updateRouteAndMouseShadowUnits,updateUnitsThatCanMoveOnRoute,getMovableMatch,getRoute,getUnitOwner],mouseEnter->[getUnitOwner],sortUnitsToMove->[getUnitOwner],selectEndPoint->[updateRouteAndMouseShadowUnits,getUnloadableMatch,getTransportsToLoad,getRoute,getUnitsToUnload],display->[display],setUpSpecific->[setFirstSelectedTerritory],sortTransportsToLoad->[getUnitOwner],allowSpecificUnitSelection->[getMovableMatch,getTransportsToLoad,sortUnitsToMove],sortTransportsToUnload->[getUnitOwner],cleanUpSpecific->[updateRouteAndMouseShadowUnits],selectWayPoint->[updateRouteAndMouseShadowUnits,getRoute]]]
Called when the user mouses the mouse.
Parentheses are not required here, but it may make it easier to read due to the `||`.
@@ -64,7 +64,7 @@ func (l *zapLogger) Named(name string) Logger { func (l *zapLogger) NewRootLogger(lvl zapcore.Level) (Logger, error) { newLogger := *l - newLogger.config.Level = zap.NewAtomicLevelAt(lvl) + newLogger.config.LogLevel = lvl zl, err := newLogger.config.Build() if err != nil { return nil, err
[Sync->[Sync],NewRootLogger->[With,Named],Named->[Named],ErrorIfClosing->[Helper],With->[With],ErrorIf->[Helper]]
NewRootLogger creates a new Logger that will log to the root logger.
It looks like `l` and `newLogger` point to the same config here now?
@@ -268,14 +268,15 @@ def _resnet(arch, Block, depth, pretrained, **kwargs): def resnet18(pretrained=False, **kwargs): - """ResNet 18-layer model - + """ResNet 18-layer model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + Args: pretrained (bool): If True, returns a model pre-trained on ImageNet Examples: .. code-block:: python - + import paddle from paddle.vision.models import resnet18 # build model
[resnet50->[_resnet],resnet152->[_resnet],resnet101->[_resnet],resnet34->[_resnet],resnet18->[_resnet],_resnet->[ResNet]]
ResNet 18 - layer model with pre - trained weight on ImageNet .
.. code-block import bug ~
@@ -148,6 +148,8 @@ public class DubboBootstrap { private final AtomicBoolean awaited = new AtomicBoolean(false); + private volatile BootstrapTakeoverState bootstrapTakeoverState = BootstrapTakeoverState.AUTO; + private final Lock lock = new ReentrantLock(); private final Condition condition = lock.newCondition();
[DubboBootstrap->[await->[await],clearConfigs->[destroy],destroy->[unexportServices,unreferServices,unexportMetadataService,unregisterServiceInstance],onStop->[onStop],service->[service],shutdown->[shutdown,isShutdown],reset->[reset],getMetadataType->[getMetadataType],protocol->[protocol],start->[start,initialize,isOnlyRegisterProvider],unreferServices->[getCache],registry->[registry],getInstance->[DubboBootstrap],application->[application],startMetadataCenter->[getMetadataType],shouldAddDefaultConfig->[isRequired],provider->[provider],getCache->[getCache],onStart->[onStart],destroyProtocols->[destroy],reference->[reference],referServices->[getCache],consumer->[consumer],destroyMetadataReports->[destroy],createServiceInstance->[getMetadataType]]]
This class is designed to be used to create a singleton Dubbo object.
Use a short name? `private volatile BootstrapTakeoverState takeoverState = BootstrapTakeoverState.AUTO;`
@@ -1649,9 +1649,9 @@ static int dmic_remove(struct dai *dai) interrupt_unregister(dmic->irq, dai); /* The next end tasks must be passed if another DAI FIFO still runs */ - dai_info(dai, "dmic_remove(), dmic_active_fifos_mask = 0x%x", - uncached_dmic_active_fifos_mask); - if (uncached_dmic_active_fifos_mask) + dai_info(dai, "dmic_remove(), dmic_active_fifos_mask = 0x%x, dmic_pause_mask = 0x%x", + uncached_dmic_active_fifos_mask, uncached_dmic_pause_mask); + if (uncached_dmic_active_fifos_mask || uncached_dmic_pause_mask) return 0; pm_runtime_put_sync(DMIC_CLK, dai->index);
[No CFG could be retrieved]
Initialize DMIC start sequence handler Get the offset of the DMIC FIFO in the SOF DAO.
This is really problematic. There's no actual reference counting in cavs_pm_runtime_dis_dmic_clk_gating() and the "dai->index" passed to pm_runtime_put_sync() is just printed out. The problem is that if we switch to pm_runtime_put_sync() that actual does refcounting, this code will break. FYI @lgirdwood @lyakh @marc-hb Oh well, this goes beyond this PR's scope.
@@ -160,9 +160,8 @@ public final class ReferenceMatcher { Set<HelperReferenceWrapper.Method> plainMethods = new HashSet<>(); collectMethodsFromTypeHierarchy(helperWrapper, abstractMethods, plainMethods); - Set<HelperReferenceWrapper.Method> unimplementedMethods = - Sets.difference(abstractMethods, plainMethods); - for (HelperReferenceWrapper.Method unimplementedMethod : unimplementedMethods) { + abstractMethods.removeAll(plainMethods); + for (HelperReferenceWrapper.Method unimplementedMethod : abstractMethods) { mismatches = lazyAdd( mismatches,
[ReferenceMatcher->[findMethod->[findMethod],checkThirdPartyTypeMatch->[matches],findField->[findField,matchesPrimitive],collectMethodsFromTypeHierarchy->[collectMethodsFromTypeHierarchy]]]
Checks if a helper class matches a given type pool.
I think there is a change of behaviour here: in the new version you mutate `abstractMethods`, the old version did not that, did it?
@@ -911,7 +911,7 @@ class Assignment < ApplicationRecord end def create_peer_review_assignment_if_not_exist - if has_peer_review and Assignment.where(parent_assignment_id: id).empty? + if assignment_properties.has_peer_review && Assignment.where(parent_assignment_id: id).empty? peerreview_assignment = Assignment.new peerreview_assignment.parent_assignment = self peerreview_assignment.submission_rule = NoLateSubmissionRule.new
[Assignment->[summary_json->[group_by],get_marks_list->[max_mark],group_by->[group_assignment?],current_submission_data->[group_by],average_annotations->[get_num_marked,get_num_annotations],percentage_grades_array->[calculate_total_percent],get_num_marked->[is_criteria_mark?,get_num_assigned],summary_csv->[max_mark],past_all_collection_dates?->[past_collection_date?],reset_collection_time->[reset_collection_time],grouping_past_due_date?->[past_all_due_dates?]]]
create a new peerreview assignment if the record doesn t already exist.
Style/GuardClause: Use a guard clause instead of wrapping the code inside a conditional expression.
@@ -18,10 +18,10 @@ mock_client: (DocAuthRouter.doc_auth_vendor == 'mock').presence, document_capture_session_uuid: flow_session[:document_capture_session_uuid], endpoint: FeatureManagement.document_capture_async_uploads_enabled? ? - idv_doc_auth_step_path(step: :verify_document) : + send(@step_url, step: :verify_document) : api_verify_images_url, status_endpoint: FeatureManagement.document_capture_async_uploads_enabled? ? - idv_doc_auth_step_path(step: :verify_document_status) : + send(@step_url, step: :verify_document_status) : nil, status_poll_interval_ms: AppConfig.env.poll_rate_for_verify_in_seconds.to_i * 1000, sp_name: sp_name,
[No CFG could be retrieved]
Renders a list of tags that can be used to verify a document. Renders the object.
reason this is needed: In the non-hybrid flow, we link to `/idv/doc_auth` etc endpoints. However, in the hybrid flow, the mobile path hits `/idv/capture_doc/` versions of the corresponding endpoints, so this needed to be updated to be dynamic based on the flow
@@ -14,6 +14,7 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; import static org.mule.extension.validation.internal.ImmutableValidationResult.error; import static org.mule.extension.validation.internal.ValidationExtension.DEFAULT_LOCALE; + import org.mule.api.MuleEvent; import org.mule.extension.validation.api.MultipleValidationException; import org.mule.extension.validation.api.MultipleValidationResult;
[BasicValidationTestCase->[email->[assertValid,assertInvalidEmail,getTestEvent],getTimeEvent->[getTestEvent,setFlowVariable],isFalse->[failedBooleanValidation,assertValid,assertInvalid,getTestEvent],size->[assertSize,asList,put],TestCustomValidator->[validate->[error]],assertCustomValidator->[getMessage,assertThat,getRootCause,getTestEvent,is,runFlow,fail,setFlowVariable],assertSize->[greaterThanMaxSize,assertValid,lowerThanMinSize,getSizeValidationEvent,assertInvalid],customValidationByClass->[assertCustomValidator],getAllEvent->[getTestEvent,setFlowVariable],successfulAll->[getAllEvent,assertThat,getPayload,is,runFlow,sameInstance],notEmpty->[collectionIsEmpty,valueIsBlankLiteral,of,assertValid,arrayIsEmpty,mapIsEmpty,valueIsNull,clear,getTestEvent,stringIsBlank,asList,assertInvalid,put],empty->[Object,of,collectionIsNotEmpty,arrayIsNotEmpty,mapIsNotEmpty,assertValid,stringIsNotBlank,getTestEvent,asList,assertInvalid,put],assertInvalidEmail->[invalidEmail,assertInvalid,getTestEvent],time->[getTimeEvent,assertValid,invalidTime,toString,assertInvalid],ip->[assertValid,assertInvalid,invalidIp,getTestEvent],customValidationByRef->[assertCustomValidator],oneFailInAll->[getFailedValidationResults,getMessage,getAllEvent,isError,getRootCause,getMultipleValidationResult,hasSize,is,runFlow,instanceOf,assertThat,fail],url->[invalidUrl,assertValid,assertInvalid,getTestEvent],matchesRegex->[assertValid,getTestEvent,assertInvalid,setPayload,regexDoesNotMatch,setFlowVariable],getSizeValidationEvent->[getTestEvent,setFlowVariable],usesValidatorAsRouter->[assertThat,is,getPayloadAsString],customValidatorWithCustomMessage->[assertCustomValidator],isTrue->[failedBooleanValidation,assertValid,assertInvalid,getTestEvent],twoFailuresInAllWithoutException->[getFailedValidationResults,getMessage,getAllEvent,isError,invalidUrl,getRootCause,getMultipleValidationResult,hasSize,is,runFlow,join,instanceOf,invalidEmail,assertThat,fail]]]
Creates a new validation test case using the CPAL specification. Test if the ip is in the list of valid IPs.
remove empty line
@@ -131,6 +131,10 @@ T_RequestMonitoring = TypeVar( bound='raiden.messages.RequestMonitoring', ) +T_UpdatePFS = TypeVar( + 'T_UpdatePFS', + bound='raiden.messages.UpdatePFS', +) T_TransactionHash = bytes TransactionHash = NewType('TransactionHash', T_TransactionHash)
[NewType,TypeVar]
Create a type for the next block of signature.
There's no need to introduce a type for every class we introduce.
@@ -48,14 +48,6 @@ module ApplicationHelper (sp_session[:issuer].blank? || sp_session[:ial2_strict]) end - def sign_up_or_idv_no_js_link - if user_signing_up? - destroy_user_path - elsif user_verifying_identity? - idv_doc_auth_url - end - end - def cancel_link_text if user_signing_up? t('links.cancel_account_creation')
[cancel_link_text->[user_signing_up?],sign_up_or_idv_no_js_link->[user_verifying_identity?,user_signing_up?],liveness_checking_enabled?->[liveness_checking_enabled?]]
Check if there is a nexus node in the system that can be used to perform l.
With this removal, `user_verifying_identity?` may now be unused as well?
@@ -1149,9 +1149,11 @@ class TestMasksToBoxes: class TestStochasticDepth: + @pytest.mark.parametrize("seed", range(10)) @pytest.mark.parametrize("p", [0.2, 0.5, 0.8]) @pytest.mark.parametrize("mode", ["batch", "row"]) - def test_stochastic_depth(self, mode, p): + def test_stochastic_depth_random(self, seed, mode, p): + torch.manual_seed(seed) stats = pytest.importorskip("scipy.stats") batch_size = 5 x = torch.ones(size=(batch_size, 3, 4, 4))
[TestPSRoIPool->[expected_fn->[get_slice],test_boxes_shape->[_helper_boxes_shape]],TestBoxIou->[test_iou->[iou_check]],TestMasksToBoxes->[test_masks_box->[masks_box_check,_create_masks,_get_image]],TestBoxArea->[test_box_area->[area_check]],TestDeformConv->[test_forward->[get_fn_args,expected_fn],test_backward->[script_func_no_mask,get_fn_args,script_func],test_wrong_sizes->[get_fn_args],test_autocast->[test_forward],expected_fn->[bilinear_interpolate]],TestRoIAlign->[test_qroi_align_multiple_images->[_make_rois],test_qroialign->[_make_rois],test_autocast->[test_forward],expected_fn->[bilinear_interpolate],test_boxes_shape->[_helper_boxes_shape]],RoIOpTester->[_helper_boxes_shape->[func],test_autocast->[test_forward]],TestGenBoxIou->[test_gen_iou->[gen_iou_check]],TestNMS->[test_nms_cuda->[_create_tensors_with_iou],test_qnms->[_create_tensors_with_iou],test_autocast->[test_nms_cuda],test_nms_ref->[_reference_nms,_create_tensors_with_iou]],TestPSRoIAlign->[expected_fn->[bilinear_interpolate],test_boxes_shape->[_helper_boxes_shape]],TestBoxConversion->[_get_box_sequences],TestRoiPool->[expected_fn->[get_slice],test_boxes_shape->[_helper_boxes_shape]]]
Test the stochastic depth.
We maintain the original test because it allows us to check that the different `mode` values operate as expected. Using p values in the interval (0, 1) is critical because for p=0 and p=1 the two modes behave the same. The mitigation for the flakiness here is to set the seed.
@@ -171,8 +171,11 @@ export class AmpConsent extends AMP.BaseElement { () => this.handleAction_(ACTION_TYPE.DISMISS)); this.registerAction('prompt', invocation => { const args = invocation.args; - const consentId = args && args['consent']; - this.handlePostPrompt_(consentId); + let consentId = args && args['consent']; + if (!this.isMultiSupported_) { + consentId = Object.keys(this.consentConfig_)[0]; + } + this.handlePostPrompt_(consentId || ''); }); }
[No CFG could be retrieved]
Registers user action functions that can be invoked by the consent policy manager. If consent UI is not available register it.
This is just getting a random key, rather than the first. Is that ok?
@@ -33,7 +33,7 @@ namespace System.Text.Json.Serialization Type type = typeof(T); Debug.Assert(!type.IsAbstract); - Debug.Assert(type.GetConstructors(BindingFlags.Public | BindingFlags.Instance).Contains(constructor)); + Debug.Assert(Array.IndexOf(type.GetConstructors(BindingFlags.Public | BindingFlags.Instance), constructor) >= 0); int parameterCount = constructor.GetParameters().Length;
[ReflectionMemberAccessor->[CreateAddMethodDelegate->[Invoke,ObjectType,GetMethod],CreateImmutableEnumerableCreateRangeDelegate->[GetImmutableEnumerableCreateRangeMethod,CreateDelegate],CreateImmutableDictionaryCreateRangeDelegate->[CreateDelegate,GetImmutableDictionaryCreateRangeMethod],CreatePropertyGetter->[Invoke,GetMethod],CreateConstructor->[Public,GetConstructor,IsAbstract,EmptyTypes,CreateInstance,Assert,IsValueType,Instance],CreateParameterizedConstructor->[Invoke,InnerException,Contains,IsAbstract,UnboxedParameterCountThreshold,Assert,Fail,MaxParameterCount,Length],CreateFieldGetter->[GetValue],CreatePropertySetter->[SetMethod,Invoke],CreateFieldSetter->[SetValue]]]
Creates a parameterized constructor that can be called with the given arguments.
@stephentoub why don't we have an `Array.Contains` helper for this scenario? Could `Array.Exists` be used here with some predicate, or is that not worth it.
@@ -305,11 +305,14 @@ public class BytecodeVisitor extends ClassVisitor { if (bytecodeNames == null) { return ImmutableList.of(); } - ImmutableList.Builder<JavaType> types = ImmutableList.builder(); + List<JavaType> types = new ArrayList<>(); for (String bytecodeName : bytecodeNames) { - types.add(getClassSymbol(bytecodeName).type); + JavaSymbol.TypeJavaSymbol completedClassSymbol = getClassSymbol(bytecodeName); + if(!completedClassSymbol.isUnknown()) { + types.add(completedClassSymbol.type); + } } - return types.build(); + return types; } private class ReadGenericSignature extends SignatureVisitor {
[BytecodeVisitor->[defineOuterClass->[getClassSymbol],getCompletedClassSymbolsType->[getClassSymbol],convertAsmType->[getClassSymbol,convertAsmType],ReadGenericSignature->[boundVisitor->[visitEnd->[visitEnd]],visitClassType->[getClassSymbol],visitSuperclass->[visitEnd->[visitEnd]],visitInterface->[visitEnd->[visitEnd]]],ReadType->[visitBaseType->[visitEnd],visitArrayType->[visitEnd->[visitEnd],ReadType],visitTypeVariable->[visitEnd],visitTypeArgument->[visitEnd->[visitEnd],ReadType],visitClassType->[getClassSymbol]],getClassSymbol->[getClassSymbol],defineInnerClass->[getClassSymbol],visit->[getClassSymbol],ReadMethodSignature->[visitParameterType->[visitEnd->[visitEnd]],visitExceptionType->[visitEnd->[visitEnd]],visitReturnType->[visitEnd->[visitEnd]],visitClassBound->[visitEnd->[visitEnd]],visitInterfaceBound->[visitEnd->[visitEnd]]]]]
Returns the complete list of class symbols type.
We should not remove unknown exception types, or we will fail on replacing thrown types later with `IndexOutOfBoundExceptions`.
@@ -71,6 +71,17 @@ public class DefaultHostListener implements HypervisorHostListener { assert (answer instanceof ModifyStoragePoolAnswer) : "Well, now why won't you actually return the ModifyStoragePoolAnswer when it's ModifyStoragePoolCommand? Pool=" + pool.getId() + "Host=" + hostId; ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer)answer; + if (mspAnswer.getLocalDatastoreName() != null && pool.isShared()) { + String datastoreName = mspAnswer.getLocalDatastoreName(); + List<StoragePoolVO> localStoragePools = this.primaryStoreDao.listLocalStoragePoolByPath(pool.getDataCenterId(), datastoreName); + for (StoragePoolVO localStoragePool : localStoragePools) { + if (datastoreName.equals(localStoragePool.getPath())) { + s_logger.warn("Storage pool: " + pool.getId() + " has already been added as local storage: " + localStoragePool.getName()); + throw new StorageConflictException("Cannot add shared storage pool: " + pool.getId() + " because it has already been added as local storage:" + + localStoragePool.getName()); + } + } + } StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(pool.getId(), hostId); if (poolHost == null) {
[DefaultHostListener->[hostConnect->[getCapacityBytes,getResult,setUsedBytes,info,setLocalPath,getDataCenterId,replaceAll,getDataStore,getDetails,CloudRuntimeException,StoragePoolHostVO,setCapacityBytes,findByPoolHost,getAvailableBytes,persist,update,sendAlert,getPodId,easySend,ModifyStoragePoolCommand,findById,getId],getLogger]]
Connect to a host return true if host is not found.
Might want to put a space after the colon for readability.
@@ -226,9 +226,11 @@ module.exports = { sameWalletAsInvoiceError: 'Você não pode pagar uma fatura com a mesma carteira que a criou.', }, pleasebackup: { - title: 'Your wallet is created...', + title: 'Sua carteira foi criada...', text: - "Please take a moment to write down this mnemonic phrase on a piece of paper. It's your backup you can use to restore the wallet on other device. You can use Electrum wallet on desktop (https://electrum.org/) to restore the same wallet.", - ok: 'OK, I wrote this down!', + "Por favor anote num pedaço de papel essa seqüencia de palavras. Isso será o seu backup e você pode usá-lo para" + + " recuperar a sua carteira em outros dispositivos. Por exemplo, você pode usar o programa Electrum para desktops" + + " (https://electrum.org/) para acessar esta mesma carteira.", + ok: 'Certo, já anotei!', }, };
[No CFG could be retrieved]
Você não pagar uma fatura com a mes.
there's an error in "sequencia", no?
@@ -359,7 +359,7 @@ func (d *Dispatcher) configIso(conf *config.VirtualContainerHostConfigSpec, vm * log.Errorf("Failed to create Cdrom device for appliance: %s", err) return nil, err } - cdrom = devices.InsertIso(cdrom, fmt.Sprintf("[%s] %s/appliance.iso", conf.ImageStores[0].Host, d.vmPathName)) + cdrom = devices.InsertIso(cdrom, fmt.Sprintf("[%s] %s/%s", conf.ImageStores[0].Host, d.vmPathName, settings.ApplianceISO)) devices = append(devices, cdrom) return devices, nil }
[createAppliance->[createApplianceSpec,GenerateExtensionName],createApplianceSpec->[addIDEController,addNetworkDevices,addParaVirtualSCSIController],makeSureApplianceRuns->[waitForKey,applianceConfiguration],reconfigureApplianceSpec->[configIso],checkExistence->[isVCH]]
configIso creates a virtual machine with the specified configuration. create a new virtual machine in the system settings. Extension = types. Extension This function is called by the init function of the virtual machine.
Should we be using the actual path of the appliance VM rather than composing in this fashion. It'll currently work because we use ImageStores[0] as the applianceVM datastore, but if that ever changes then this will break.
@@ -11,6 +11,7 @@ $yoast_seo_block_title = sprintf( __( '%1$s Recipe', 'wordpress-seo' ), 'Yoast' $yoast_seo_block_template = [ [ 'yoast/recipe-name' ], [ 'core/image' ], + [ 'yoast/recipe-description' ], [ 'yoast/ingredients' ], [ 'yoast/steps' ], ];
[No CFG could be retrieved]
Yoast Recipe block template. Yoast Seo Block.
The two functions in line 9 should also be preceded by `\`.
@@ -1373,13 +1373,13 @@ namespace ProtoCore protected void Backpatch(int bp, int pc) { if (ProtoCore.DSASM.OpCode.JMP == codeBlock.instrStream.instrList[bp].opCode - && ProtoCore.DSASM.AddressType.LabelIndex == codeBlock.instrStream.instrList[bp].op1.optype) + && codeBlock.instrStream.instrList[bp].op1.IsLabelIndex()) { Validity.Assert(ProtoCore.DSASM.Constants.kInvalidIndex == codeBlock.instrStream.instrList[bp].op1.opdata); codeBlock.instrStream.instrList[bp].op1.opdata = pc; } else if (ProtoCore.DSASM.OpCode.CJMP == codeBlock.instrStream.instrList[bp].opCode - && ProtoCore.DSASM.AddressType.LabelIndex == codeBlock.instrStream.instrList[bp].op3.optype) + && codeBlock.instrStream.instrList[bp].op3.IsLabelIndex()) { Validity.Assert(ProtoCore.DSASM.Constants.kInvalidIndex == codeBlock.instrStream.instrList[bp].op3.opdata); codeBlock.instrStream.instrList[bp].op3.opdata = pc;
[CodeGen->[EmitJgz->[GetDebugObject],DFSGetSymbolList->[DFSGetSymbolList],EmitThrow->[AppendInstruction,SetEntry],EmitBounceIntrinsic->[SetEntry],EmitAlloc->[SetEntry],EmitPopString->[SetEntry],EmitPushReplicationGuide->[SetEntry],DFSGetSymbolList_Simple->[DFSGetSymbolList_Simple],EmitJlz->[GetDebugObject],EmitPushArrayIndex->[SetEntry],EmitPushType->[SetEntry],EmitPushForSymbol->[AppendInstruction,GetDebugObject,SetEntry],EmitCallBaseCtor->[SetEntry],EmitPushForSymbolW->[AppendInstruction,SetEntry],EmitThisPointerNode->[EmitPush,EmitInstrConsole],EmitPopGuide->[SetEntry],EmitPushNull->[EmitPush,EmitInstrConsole],EmitCall->[GetDebugObject,SetEntry],EmitPopList->[GetDebugObject],EmitMov->[AppendInstruction,SetEntry],EmitDynamicNode->[EmitPush,EmitInstrConsole],BuildSSADependency->[DFSGetSymbolList],EmitBinary->[AppendInstruction,GetDebugObject,SetEntry],EmitCharNode->[EmitPush,EmitInstrConsole],EmitJz->[GetDebugObject],EmitCJmp->[GetDebugObject,EmitInstrConsole],EmitPopArray->[SetEntry],EmitPushForSymbolGuide->[AppendInstruction,SetEntry],EmitPushm->[AppendInstruction,GetDebugObject,SetEntry],EmitUnary->[AppendInstruction,GetDebugObject,SetEntry],EmitNullNode->[EmitPush,EmitInstrConsole,EmitPushG],EmitJleq->[GetDebugObject],IsConstantExpression->[IsConstantExpression],EmitPush->[SetEntry],EmitIdentifierListNode->[BuildSSADependency,EmitPushList,EmitInstrConsole,DfsEmitIdentList,EmitGetterSetterForIdentList,BuildRealDependencyForIdentList],EmitPushBlockID->[EmitInstrConsole],EmitExprListNode->[EmitReplicationGuides,EmitPopArray,EmitInstrConsole,EmitPushReplicationGuide,EmitPushArrayIndex,DfsEmitArrayIndexHeap],EmitFunctionPointer->[EmitPush,EmitInstrConsole,EmitPushVarData],EmitPop->[GetDebugObject],EmitJmp->[GetDebugObject,EmitInstrConsole],EmitPushG->[SetEntry],EmitReplicationGuides->[EmitPush,EmitInstrConsole],DfsEmitIdentList->[DfsEmitIdentList,EmitInstrConsole],EmitPopForSymbol->[GetDebugObject],EmitBinaryOperation->[EmitPop,EmitPush,EmitInstrConsole,EmitBinary],
EmitIntNode->[EmitPush,EmitInstrConsole,EmitPushG],EmitPopm->[AppendInstruction,GetDebugObject,SetEntry],BuildRealDependencyForIdentList->[DFSGetSymbolList_Simple,BuildIdentifierList],EmitDynamicCall->[GetDebugObject,SetEntry],EmitDefaultArgNode->[EmitPush,EmitInstrConsole],EmitCallSetter->[GetDebugObject,SetEntry],EmitPopBlockID->[EmitInstrConsole],EmitPushVarData->[EmitPush,EmitPushType,EmitInstrConsole],Backpatch->[Backpatch],EmitStringNode->[EmitPush,EmitInstrConsole,EmitPopString],EmitDepX->[AppendInstruction,EmitInstrConsole],EmitDoubleNode->[EmitPush,EmitInstrConsole,EmitPushG],EmitJle->[GetDebugObject],EmitReturnStatement->[EmitReturnToRegister],EmitBooleanNode->[EmitPush,EmitInstrConsole,EmitPushG],EmitReturnToRegister->[EmitPop,EmitReturn,EmitInstrConsole,EmitRetb],EmitJgeq->[GetDebugObject]]]
Backpatch method.
Can you remind me why we decided to use methods here, not properties? I seem to remember discussing this, but I don't remember the outcome
@@ -63,6 +63,9 @@ public abstract class MessageProducerSupport extends AbstractEndpoint implements @Override protected void onInit() { Assert.notNull(this.outputChannel, "outputChannel is required"); + if (this.getBeanFactory() != null) { + this.messagingTemplate.setBeanFactory(this.getBeanFactory()); + } } /**
[MessageProducerSupport->[setSendTimeout->[setSendTimeout]]]
On init, asserts that the output channel is set and propagates the bean factory to the messaging template.
We don't really need it here because `MPS` only send()s messages. But I guess it doesn't hurt.
@@ -190,6 +190,8 @@ OSSL_PROVIDER *ossl_provider_new(OPENSSL_CTX *libctx, const char *name, if (prov == NULL) CRYPTOerr(CRYPTO_F_OSSL_PROVIDER_NEW, ERR_R_MALLOC_FAILURE); + else + prov->store = store; /* * At this point, the provider is only partially "loaded". To be
[No CFG could be retrieved]
Returns a provider object that can be used to provide the given name. Frees the given OSSL_PROVIDER object.
Shouldn't this happen inside the lock since prov is already pushed into the store?
@@ -49,9 +49,9 @@ namespace System.Data.OleDb public override object? ExecuteScalar() { throw null; } public override void Prepare() { } public void ResetCommandTimeout() { } - System.Data.IDataReader System.Data.IDbCommand.ExecuteReader() { throw null; } - System.Data.IDataReader System.Data.IDbCommand.ExecuteReader(System.Data.CommandBehavior behavior) { throw null; } - object System.ICloneable.Clone() { throw null; } + System.Data.IDataReader? System.Data.IDbCommand.ExecuteReader() { throw null; } + System.Data.IDataReader? System.Data.IDbCommand.ExecuteReader(System.Data.CommandBehavior behavior) { throw null; } + object? System.ICloneable.Clone() { throw null; } } public sealed partial class OleDbCommandBuilder : System.Data.Common.DbCommandBuilder {
[OleDbParameter->[All],OleDbConnection->[Hidden,All],OleDbException->[Content],OleDbParameterCollection->[Hidden],OleDbCommand->[All,Both,Content,Hidden,Text],OleDbConnectionStringBuilder->[All]]
Method to prepare the command.
Was this just a missed auto-gen the ref code from a previous commit?
@@ -623,12 +623,14 @@ def new_document(request): 'show_toc': True }) + allow_add_attachment = Attachment.objects.allow_add_attachment_by(request.user) return jingo.render(request, 'wiki/new_document.html', {'is_template': is_template, 'parent_slug': parent_slug, 'parent_id': initial_parent_id, 'document_form': doc_form, 'revision_form': rev_form, + 'allow_add_attachment': allow_add_attachment, 'attachment_form': AttachmentRevisionForm(), 'parent_path': parent_path})
[edit_document->[_join_slug,_split_slug,_format_attachment_obj],mindtouch_to_kuma_redirect->[mindtouch_namespace_redirect],new_attachment->[_format_attachment_obj],_version_groups->[split_slug],translate->[_join_slug,_split_slug],document->[set_common_headers,get_seo_description,_split_slug,_format_attachment_obj],_version_groups]
Create a new wiki document. Adds the attachment-permission flag and attachment form to the template context.
nit: our view methods seem to accumulate more ad-hoc context var's over time. is it possible to create a `@attachment_permission` decorator that will automatically add this?
@@ -627,6 +627,11 @@ class TypeChecker(NodeVisitor[None]): if has_any_from_unimported_type(arg_type): prefix = "Argument {} to \"{}\"".format(idx + 1, fdef.name()) self.msg.unimported_type_becomes_any(prefix, arg_type, fdef) + if ('explicit' in self.options.disallow_any and + not self.is_typeshed_stub and + fdef.type and + has_explicit_any(fdef.type)): + self.msg.explicit_any(fdef) if name in nodes.reverse_op_method_set: self.check_reverse_op_method(item, typ, name) elif name in ('__getattr__', '__getattribute__'):
[TypeChecker->[analyze_async_iterable_item_type->[accept],visit_try_without_finally->[check_assignment,accept],visit_class_def->[accept],iterable_item_type->[lookup_typeinfo],visit_for_stmt->[accept_loop],visit_operator_assignment_stmt->[check_assignment,accept],check_return_stmt->[get_generator_return_type,accept],visit_del_stmt->[accept],check_multi_assignment->[check_assignment,accept],check_async_with_item->[check_assignment,accept],visit_decorator->[accept],should_suppress_optional_error->[contains_none],visit_assert_stmt->[accept],accept->[accept],set_inference_error_fallback_type->[set_inferred_type],check_multi_assignment_from_tuple->[check_assignment,accept,check_rvalue_count_in_assignment],check_override->[erase_override],visit_try_stmt->[accept],check_member_assignment->[check_simple_assignment],warn->[warn],visit_overloaded_func_def->[accept],check_except_handler_test->[accept],visit_with_stmt->[accept],try_infer_partial_type_from_indexed_assignment->[accept],function_type->[named_type,function_type],visit_print_stmt->[accept],visit_assignment_stmt->[accept],fail->[fail],check_with_item->[check_assignment,accept],visit_block->[accept],analyze_index_variables->[check_assignment],type_type->[named_type],check_func_def->[accept,get_generator_receive_type,is_implicit_any,get_generator_return_type,get_generator_yield_type,is_async_generator_return_type,is_generator_return_type],type_check_raise->[accept],contains_none->[contains_none],check_overlapping_op_methods->[check_overlapping_op_methods],check_lvalue->[check_lvalue,accept],get_generator_receive_type->[is_generator_return_type,is_async_generator_return_type],visit_while_stmt->[accept_loop],analyze_iterable_item_type->[accept],check_compatibility_super->[accept],accept_loop->[accept],check_assignment_to_multiple_lvalues->[check_assignment],visit_expression_stmt->[accept],lookup_qualified->[lookup],note->[note],check_multi_assignment_from_iterable->[type_is_iterable,check_assignment],get_generator_return_
type->[is_generator_return_type],check_indexed_assignment->[accept],check_simple_assignment->[accept],get_generator_yield_type->[is_generator_return_type,is_async_generator_return_type],visit_if_stmt->[accept],find_isinstance_check->[find_isinstance_check],lvalue_type_for_inference->[append_types_for_inference],check_assignment->[accept],str_type->[named_type],get_types_from_except_handler->[get_types_from_except_handler]],flatten->[flatten],partition_by_callable->[partition_by_callable],get_isinstance_type->[flatten_types],conditional_callable_type_map->[partition_by_callable],find_isinstance_check->[is_true_literal,remove_optional,or_conditional_maps,conditional_callable_type_map,find_isinstance_check,is_optional,conditional_type_map,is_false_literal,and_conditional_maps,is_literal_none],flatten_types->[flatten_types],expand_func->[accept],is_valid_inferred_type_component->[is_valid_inferred_type_component],Scope->[active_self_type->[active_class]]]
Type check a function definition. Checks that argument and return types do not silently become Any from unimported types, reports explicit Any annotations when disallowed, and validates reverse operator and special attribute-access methods.
This same piece of code gets repeated almost identically many times. Can you refactor this into a function or a few functions?
@@ -162,7 +162,7 @@ class DistributedAdam(DistributedOptimizerImplBase): opt_info["fleet_desc"] = ps_param opt_info["worker_skipped_ops"] = worker_skipped_ops opt_info["use_cvm"] = strategy.get("use_cvm", False) - + opt_info["dump_slot"] = True for loss in losses: loss.block.program._fleet_opt = opt_info
[DistributedAdam->[__init__->[super],_minimize->[add_data_norm_table,add_dense_table,DownpourServer,PSParameter,Merge,get_desc,extend,find_distributed_lookup_table_inputs,i,program_configs,range,find_distributed_lookup_table_outputs,append,add_sparse_table,DownpourWorker,len,sorted,read,open,id,find_distributed_lookup_table,append_backward,get,str,CopyFrom]]]
Minimizes the given losses using a distributed optimizer. Adds the necessary sparse, dense, and data-norm tables to the network and attaches the resulting fleet options to each loss's program.
why dump_slot is default true and user can not set it from strategy ? what if a user doesn't need dump_slot
@@ -301,7 +301,8 @@ func (s *APIServer) RegisterContainersHandlers(r *mux.Router) error { // - in: query // name: detachKeys // type: string - // description: needs description + // description: "Override the key sequence for detaching a container. Format is a single character [a-Z] or ctrl-<value> where <value> is one of: a-z, @, ^, [, , or _." + // default: ctrl-p,ctrl-q // produces: // - application/json // responses:
[RegisterContainersHandlers->[HandleFunc,Methods]]
RegisterContainersHandlers registers the HTTP handlers (with swagger specifications) for the containers API, including endpoints to create, start, stop (with a timeout before sending the kill signal), and list processes running inside containers.
The value of description should not be quoted.
@@ -91,8 +91,15 @@ dfuse_cb_create(fuse_req_t req, struct dfuse_inode_entry *parent, oh->doh_dfs = parent->ie_dfs->dfs_ns; oh->doh_ie = ie; - if (fi->direct_io) - fi_out.direct_io = 1; + if (!fs_handle->dpi_info->di_no_direct_io) { + if (parent->ie_dfs->dfs_attr_timeout == 0) { + fi_out.direct_io = 1; + } else { + if (fi->flags & O_DIRECT) + fi_out.direct_io = 1; + } + } + fi_out.fh = (uint64_t)oh; strncpy(ie->ie_name, name, NAME_MAX);
[dfuse_cb_create->[D_GOTO,dfs_ostat,LOG_FLAGS,dfs_dup,DFUSE_REPLY_ERR_RAW,DFUSE_TRA_UP,D_FREE,dfuse_reply_entry,DFUSE_TRA_INFO,LOG_MODES,DFUSE_TRA_DEBUG,dfs_release,dfs_open,fuse_req_userdata,strncpy,atomic_fetch_add,D_ALLOC_PTR]]
Private function handling file creation: opens a DFS file handle for the new file, replies with the new inode data, and keeps a reference on the parent.
Just a minor nit. This would read nicer if it were positive rather than a double negative e.g. if (fs_handle->dpi_info->di_direct_io) so it would require changing the option parsing above
@@ -42,11 +42,15 @@ class Jetpack_Autoupdate { add_action( 'shutdown', array( $this, 'log_results' ) ); } - // Anytime WordPress saves update data, we'll want to update our Jetpack option as well. if ( is_main_site() ) { + // Anytime WordPress saves update data, we'll want to update our Jetpack option as well. add_action( 'set_site_transient_update_plugins', array( $this, 'save_update_data' ) ); add_action( 'set_site_transient_update_themes', array( $this, 'save_update_data' ) ); add_action( 'set_site_transient_update_core', array( $this, 'save_update_data' ) ); + // Anytime a connection to jetpack is made, sync the update data + add_action( 'update_option_jetpack_blog_token', array( $this, 'save_update_data' ) ); + // Anytime the Jetpack Version changes, sync the the update data + add_action( 'updating_jetpack_version', array( $this, 'save_update_data' ) ); } }
[Jetpack_Autoupdate->[autoupdate_theme->[expect],log_results->[log,do_stats,log_items],autoupdate_plugin->[expect],log_items->[get_error_message,get_successful_updates,query,stat]]]
This method is called by the Jetpack plugin when a user is opted in.
Good idea :) but this won't work as `blog_token` is stored inside the option `jetpack_private_options`. I would add a new hook `do_action( 'jetpack_site_registered' )` to the `register` method inside `class.jetpack.php`. It could be useful in the future!
@@ -68,6 +68,8 @@ public class StickyTest { invokers.add(invoker2); clusterinvoker = new StickyClusterInvoker<StickyTest>(dic); + + ExtensionLoader.resetExtensionLoader(LoadBalance.class); } @Test
[StickyTest->[testMethodStickyNoCheck->[testSticky,assertTrue,println],StickyClusterInvoker->[doInvoke->[select]],testStickyForceCheck->[testSticky,assertTrue],testSticky->[getSelectedInvoker,setPort,willReturn,invoke,setMethodName,addParameter,assertEquals,valueOf],testMethodStickyForceCheck->[testSticky,assertTrue],testMethodsSticky->[testSticky,assertTrue],setUp->[add,mock,RpcInvocation,willReturn],testStickyNoCheck->[testSticky,assertTrue,println],mock,RpcResult,valueOf]]
This method is called before any other test.
I am not convinced either. Why we need to reset extension loader here?
@@ -363,7 +363,11 @@ public class ReferenceCountedOpenSslEngine extends SSLEngine implements Referenc // setMode may impact the overhead. calculateMaxWrapOverhead(); } catch (Throwable cause) { - SSL.freeSSL(ssl); + // Call shutdown so we are sure we correctly release all native memory and also guard against the + // case when shutdown() will be called by the finalizer again. If we would call SSL.free(...) directly + // the finalizer may end up calling it again as we would miss to update the DESTROYED_UPDATER. + shutdown(); + PlatformDependent.throwException(cause); } }
[ReferenceCountedOpenSslEngine->[getSSLParameters->[getSSLParameters],needPendingStatus->[isOutboundDone,isInboundDone],retain->[retain],sslReadErrorResult->[shutdownWithError],getOcspResponse->[getOcspResponse],wrap->[release,writePlaintextData,isBytesAvailableEnoughForWrap,wrap,resetSingleSrcBuffer,singleSrcBuffer],closeInbound->[shutdown],newResult->[shutdown,newResult],writeEncryptedData->[release],beginHandshake->[calculateMaxWrapOverhead],checkSniHostnameMatch->[checkSniHostnameMatch],calculateMaxWrapOverhead->[maxEncryptedPacketLength0],rejectRemoteInitiatedRenegotiation->[shutdown],release->[release],setSSLParameters->[isEmpty,setVerify,setSSLParameters],newResultMayFinishHandshake->[newResult],setVerify->[setVerify],handshake->[shutdown,shutdownWithError,pendingStatus,checkEngineClosed],closeOutbound->[shutdown],unwrap->[readPlaintextData,release,sslPending0,newResultMayFinishHandshake,singleDstBuffer,newResult,unwrap,resetSingleDstBuffer,resetSingleSrcBuffer,writeEncryptedData,singleSrcBuffer],doSSLShutdown->[shutdown],getHandshakeStatus->[pendingStatus],readPlaintextData->[release],shutdownWithError->[shutdownWithError,shutdown],setOcspResponse->[setOcspResponse],toJavaCipherSuitePrefix->[isEmpty],isEndPointVerificationEnabled->[isEmpty],writePlaintextData->[release],refCnt->[refCnt],sslPending0->[sslPending],mayFinishHandshake->[handshake],touch->[touch],DefaultOpenSslSession->[handshakeFinished->[isDestroyed,calculateMaxWrapOverhead,toJavaCipherSuite],getPeerCertificateChain->[isEmpty],initPeerCerts->[isEmpty],getPeerPort->[getPeerPort],invalidate->[isDestroyed],getCreationTime->[isDestroyed],getProtocol->[isDestroyed],getPeerPrincipal->[getPeerCertificates],getLastAccessedTime->[getCreationTime],notifyUnbound->[newSSLSessionBindingEvent],selectApplicationProtocol->[selectApplicationProtocol],getPacketBufferSize->[maxEncryptedPacketLength],getValueNames->[isEmpty],putValue->[newSSLSessionBindingEvent],isValid->[isDestroyed],getPeerCertificates->[isEmpty
],getPeerHost->[getPeerHost]],setClientAuth->[setVerify]]]
Sets the OCSP response. Gets the OCSP response.
@normanmaurer I thought the reference counted openssl engine didn't use a finalizer?
@@ -433,7 +433,7 @@ class StorageTableClientTest(TableTestCase): tables = list(service.list_tables(raw_response_hook=callback)) self.assertIsInstance(tables, list) - # @pytest.mark.skip("pending") + @pytest.mark.skip("pending") @GlobalStorageAccountPreparer() def test_user_agent_custom(self, resource_group, location, storage_account, storage_account_key): custom_app = "TestApp/v1.0"
[StorageTableClientTest->[test_create_service_protocol->[validate_standard_account_endpoints],test_create_service_with_connection_string->[validate_standard_account_endpoints],test_create_service_with_socket_timeout->[validate_standard_account_endpoints],test_create_service_with_key->[validate_standard_account_endpoints],test_create_service_with_connection_string_key->[validate_standard_account_endpoints]]]
Test a custom user agent string for the storage account client.
Is there a reason this one is skipped?
@@ -231,11 +231,11 @@ module.exports = function (grunt) { ngtemplates: { dist: { cwd: 'src/main/webapp', - src: ['scripts/app/**/*.html', 'scripts/components/**/*.html',], + src: ['app/app/**/*.html', 'app/components/**/*.html',], dest: '.tmp/templates/templates.js', options: { module: '<%= angularAppName%>', - usemin: 'scripts/app.js', + usemin: 'app/app.js', htmlmin: '<%%= htmlmin.dist.options %>' } }
[No CFG could be retrieved]
Grunt configuration for the generated Angular app: the ngtemplates task compiles HTML templates under app/ into .tmp/templates/templates.js for usemin.
Shouldn't one app be removed from app/app
@@ -2153,6 +2153,7 @@ static void _manage_editor_save(dt_lib_module_t *self) // update the preset in the database dt_lib_presets_update(d->edit_preset, self->plugin_name, self->version(), newname, "", params, strlen(params)); g_free(params); + d->edit_preset = g_strdup(newname); // if name has changed, we need to reflect the change on the presets list too _manage_preset_update_list(self);
[No CFG could be retrieved]
This function is called when the user saves a preset from the editor; it updates the preset in the database and refreshes the presets list to reflect any name change.
don't we need to free d->edit_preset first ?