patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -213,9 +213,15 @@ static void build_SYS_str_reasons(void) str->error = ERR_PACK(ERR_LIB_SYS, 0, i); if (str->string == NULL) { - char (*dest)[LEN_SYS_STR_REASON] = &(strerror_tab[i - 1]); - if (openssl_strerror_r(i, *dest, sizeof(*dest))) - str->string = *dest; + if (openssl_strerror_r(i, cur, sizeof(strerror_pool) - cnt)) { + size_t l = strlen(cur) + 1; + + str->string = cur; + cnt += l; + if (cnt > sizeof(strerror_pool)) + cnt = sizeof(strerror_pool); + cur += l; + } } if (str->string == NULL) str->string = "unknown";
[No CFG could be retrieved]
This function initializes the array of strings that are returned by the system. region System. str_reasons.
Hmmm. Actually, on second thoughts, if strerror_r is well behaved, this should never happen, right?
@@ -35,12 +35,13 @@ public class DefaultDataSourceConfigTest { @Test public void testDefaultDataSourceInjection() throws SQLException { testDataSource(defaultDataSource, "username-default", 3, 13, 7, Duration.ofSeconds(53), Duration.ofSeconds(54), - Duration.ofSeconds(55), Duration.ofSeconds(56), Duration.ofSeconds(57)); + Duration.ofSeconds(55), Duration.ofSeconds(56), Duration.ofSeconds(57), + "create schema if not exists schema_default"); } private static void testDataSource(AgroalDataSource dataSource, String username, int minSize, int maxSize, int initialSize, Duration backgroundValidationInterval, Duration acquisitionTimeout, Duration leakDetectionInterval, - Duration idleRemovalInterval, Duration maxLifetime) throws SQLException { + Duration idleRemovalInterval, Duration maxLifetime, String initialSQL) throws SQLException { AgroalConnectionPoolConfiguration configuration = dataSource.getConfiguration().connectionPoolConfiguration(); AgroalConnectionFactoryConfiguration agroalConnectionFactoryConfiguration = configuration .connectionFactoryConfiguration();
[DefaultDataSourceConfigTest->[testDataSource->[validationTimeout,assertTrue,maxSize,getName,jdbcUrl,reapTimeout,connectionPoolConfiguration,initialSize,metricsEnabled,leakTimeout,maxLifetime,connectionFactoryConfiguration,minSize,jdbcTransactionIsolation,getConnection,acquisitionTimeout,assertEquals],testDefaultDataSourceInjection->[testDataSource,ofSeconds],addAsResource,setArchiveProducer]]
Test that the default data source injection is performed.
It's very minor but let's be consistent and call the variable `initialSql`.
@@ -216,6 +216,10 @@ class Builder(HasProps): is (Column, Ascending). """) + source = Instance(ColumnDataSource) + + hover = Either(List(Tuple(String, String)), List(String), Bool, default=None) + def __init__(self, *args, **kws): """Common arguments to be used by all the inherited classes.
[XYBuilder->[set_ranges->[get_dim_extents]],Builder->[create->[set_ranges,setup,process_data,yield_renderers]]]
Common arguments to be used by all the inherited classes. Add missing input to the chart.
I'm afraid calling it hover may create confusion with the `HoverTool` itself. What about calling it `tooltips` or `hover_tooltips` to not get confused?
@@ -1,9 +1,11 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System; using System.Collections; using System.Diagnostics; using System.IO; +using System.Linq; using System.Text; using Xunit;
[DescriptionNameTests->[VerifyOSDescription->[Same,OSDescription,NotNull],VerifyOSXName->[OrdinalIgnoreCase,OSX,OSDescription,Contains],VerifyNetBSDName->[OrdinalIgnoreCase,NetBSD,OSDescription,Contains],VerifyRuntimeNameOnNetCoreApp->[Netcoreapp,FrameworkDescription,StartsWith,Same,True],VerifyFreeBSDName->[OrdinalIgnoreCase,Contains,FreeBSD,OSDescription],DumpRuntimeInformationToConsole->[PeakVirtualMemorySize64,Value,NonpagedSystemMemorySize,PriorityClass,GetEnvironmentVariables,Message,TotalProcessorTime,UserProcessorTime,WorkingSet,PrivilegedProcessorTime,MainWindowHandle,PrivateMemorySize64,WaitForExit,WorkingSet64,Append,IsInAppContainer,RuntimeIdentifier,VirtualMemorySize,PagedSystemMemorySize64,MainModule,Responding,Start,Location,s_cgroupMemoryLimitPath,PeakWorkingSet,PrivateMemorySize,ProcessorAffinity,PeakWorkingSet64,StartTime,GetValue,Key,PeakPagedMemorySize,MinWorkingSet,DriveFormat,nameof,Browser,NonpagedSystemMemorySize64,VirtualMemorySize64,LibcVersion,Contains,GetTempPath,GetDistroVersionString,s_cgroupVersion,Id,GetCurrentProcess,ToString,MaxWorkingSet,PeakPagedMemorySize64,AppendLine,MachineName,PriorityBoostEnabled,SessionId,PagedMemorySize,LibcRelease,Version,PagedSystemMemorySize,WriteLine,PagedMemorySize64,PeakVirtualMemorySize,MainWindowTitle,BasePriority,CurrentDirectory,GetDirectoryName,HandleCount,ProcessName,Trim],VerifyLinuxName->[OrdinalIgnoreCase,Contains,Linux,OSDescription],VerifyWindowsDescriptionDoesNotContainTrailingWhitespace->[False,Windows,EndsWith],VerifyWindowsName->[OrdinalIgnoreCase,Windows,Contains,OSDescription]]]
A class that displays the name of the object and its runtime information. Shows information about the current Cgroups.
Nits: static readonly and with an s_ prefix
@@ -100,12 +100,13 @@ func (v *VolumeStore) VolumeCreate(op trace.Operation, ID string, store *url.URL // Get the path to the disk in datastore uri format volDiskDsURL := v.volDiskDsURL(ID) + config := disk.NewPersistentDisk(volDiskDsURL).WithCapacity(int64(capacityKB)) // Create the disk - vmdisk, err := v.dm.CreateAndAttach(op, volDiskDsURL, nil, int64(capacityKB), os.O_RDWR, disk.Ext4) + vmdisk, err := v.dm.CreateAndAttach(op, config) if err != nil { return nil, err } - defer v.dm.Detach(op, vmdisk) + defer v.dm.Detach(op, vmdisk.VirtualDiskConfig) vol, err := storage.NewVolume(store, ID, info, vmdisk, executor.CopyNew) if err != nil { return nil, err
[volMetadataDirPath->[volDirPath],VolumesList->[volDiskDsURL,volMetadataDirPath],volDiskDsURL->[volDirPath],VolumeCreate->[volDirPath,volDiskDsURL,volMetadataDirPath],VolumeDestroy->[volDirPath]]
VolumeCreate creates a new volume with the given ID.
expecting to see a `.WithCapacity()` call for scratch image creation as well.
@@ -114,8 +114,8 @@ public class GobblinClusterConfigurationKeys { public static final String STOP_TIMEOUT_SECONDS = GOBBLIN_CLUSTER_PREFIX + "stopTimeoutSeconds"; public static final long DEFAULT_STOP_TIMEOUT_SECONDS = 60; - public static final String HELIX_JOB_QUEUE_DELETE_TIMEOUT_SECONDS = GOBBLIN_CLUSTER_PREFIX + "jobQueueDeleteTimeoutSeconds"; - public static final long DEFAULT_HELIX_JOB_QUEUE_DELETE_TIMEOUT_SECONDS = 300; + public static final String HELIX_WORKFLOW_EXPIRY_TIME_SECONDS = GOBBLIN_CLUSTER_PREFIX + "job.expiry.seconds"; + public static final long DEFAULT_HELIX_WORKFLOW_EXPIRY_TIME_SECONDS = 6 * 60 * 60; public static final String TASK_RUNNER_SUITE_BUILDER = GOBBLIN_CLUSTER_PREFIX + "taskRunnerSuite.builder"; }
[No CFG could be retrieved]
This class defines the default values for the parameters that are used by the Gobblin.
Should this be `workflow.expirySeconds` to match the name?
@@ -61,7 +61,7 @@ class IdTokenBuilder def expires ttl = Pii::SessionStore.new(identity.rails_session_id).ttl - Time.zone.now.to_i + ttl + now.to_i + ttl end def hash_token(token)
[IdTokenBuilder->[jwt_payload->[merge],hash_token->[urlsafe_encode64,byteslice],expires->[to_i,ttl],timestamp_claims->[to_i],id_token->[encode],id_token_claims->[hash_token,access_token,urlsafe_base64,nonce,service_provider],acr->[raise,ial],attr_reader,freeze,include,url_helpers],require]
Returns a unique token for the current session.
Fun fact, this is my code from 2017....so I only have myself to blame
@@ -1268,6 +1268,7 @@ public class TestOzoneManagerHA { // Stop leader OM, and then validate list parts. stopLeaderOM(); + Thread.sleep(NODE_FAILURE_TIMEOUT * 2); validateListParts(ozoneBucket, keyName, uploadID, partsMap);
[TestOzoneManagerHA->[testRemovePrefixAcl->[setupBucket],testFileOperationsWithNonRecursive->[setupBucket],initiateMultipartUpload->[initiateMultipartUpload],testSetKeyAcl->[setupBucket],createKey->[createKey],testListParts->[setupBucket,initiateMultipartUpload],shutdown->[shutdown],testRemoveKeyAcl->[setupBucket],testOMRestart->[createKey],testAllBucketOperations->[createAndCheckVolume],testOMRetryProxy->[createVolumeTest],testSetAcl->[compareAcls],testAddKeyAcl->[setupBucket],testRemoveBucketAcl->[setupBucket],testSetBucketAcl->[setupBucket],testAddPrefixAcl->[setupBucket],testMultipartUploadWithOneOmNodeDown->[setupBucket],testOMProxyProviderFailoverToCurrentLeader->[createVolumeTest],testFileOperationsWithRecursive->[setupBucket],testOMProxyProviderFailoverOnConnectionFailure->[createVolumeTest],testAddBucketAcl->[setupBucket],testAddAcl->[containsAcl],testMultipartUpload->[setupBucket],testSetPrefixAcl->[setupBucket],testAllVolumeOperations->[createAndCheckVolume]]]
Test list parts.
What is this sleep for?
@@ -75,11 +75,6 @@ func addControllerRoleToSA(saNamespace, saName string, role rbac.ClusterRole) { } } - if role.Annotations == nil { - role.Annotations = map[string]string{} - } - role.Annotations[roleSystemOnly] = roleIsSystemOnly - controllerRoles = append(controllerRoles, role) controllerRoleBindings = append(controllerRoleBindings,
[NewClusterBinding,RuleOrDie,BindingOrDie,SAs,NewRule,Fatalf,Groups,HasPrefix,Resources]
addControllerRole adds a new controller role to the SA. Build rules for all resources that require access.
I moved all of this logic to a single place in `pkg/cmd/server/bootstrappolicy/policy.go` / `GetBootstrapClusterRoles`
@@ -106,6 +106,7 @@ func (app *ChainlinkApplication) AddJob(job models.JobSpec) error { } app.Scheduler.AddJob(job) + app.JobMetrics.Add(job) return app.JobSubscriber.AddJob(job, app.HeadTracker.LastRecord()) }
[Stop->[Stop],Start->[Start],RemoveAdapter->[GetStore],AddAdapter->[GetStore],AddJob->[AddJob]]
AddJob adds a job to the application.
Should we have error handling?
@@ -1782,6 +1782,11 @@ int X509_cmp_current_time(const ASN1_TIME *ctm) return X509_cmp_time(ctm, NULL); } +static int ascii_isdigit(char inchar) { + if (inchar > 0x2F && inchar < 0x3A) + return 1; + return 0; +} int X509_cmp_time(const ASN1_TIME *ctm, time_t *cmp_time) { static const size_t utctime_length = sizeof("YYMMDDHHMMSSZ") - 1;
[int->[x509_check_cert_time,X509_get_pubkey_parameters,X509_verify_cert,STACK_OF],X509_CRL_diff->[STACK_OF]]
Compares current time with given time_t. ASN. 1 - extended extended - time - comparison.
Since this is the second time you've defined this, I suggest moving it to ctype.c instead.
@@ -179,18 +179,6 @@ describe Idv::ConfirmationsController do ) end end - - context 'ial2 step indicator disabled' do - before do - allow(IdentityConfig.store).to receive(:ial2_step_indicator_enabled).and_return(false) - end - - it 'assigns empty step indicator steps' do - get :show - - expect(assigns(:step_indicator_steps)).to eq([]) - end - end end end
[stub_idv_session->[profile_id,to,new,and_return,stub_sign_in,pii_attributes,save_profile,personal_key,user_session,applicant,id,pii,resolution_successful],index->[render],create,phone,let,to_not,describe,personal_key,it,session,to,profile_id,create_profile_from_applicant_with_password,user_phone_confirmation,vendor_phone_confirmation,before,with,t,require,profile,include,have_actions,before_action,user_session,patch,controller,applicant,address_verification_mechanism,redirect_to,context,hash_including,complete_session,get,eq,and_return]
IDV events related to a user adds patches to update the user session state when no sp present.
Should we flatten (unwrap) the above context "ial2 step indicator enabled" ?
@@ -370,8 +370,14 @@ def provides(*specs, **kwargs): can use the providing package to satisfy the dependency. """ def _execute_provides(pkg): - spec_string = kwargs.get('when', pkg.name) - provider_spec = spack.spec.parse_anonymous_spec(spec_string, pkg.name) + when = kwargs.get('when') + when_spec = make_when_spec(when) + if not when_spec: + return + + # ``when`` specs for ``provides()`` need a name, as they are used + # to build the ProviderIndex. + when_spec.name = pkg.name for string in specs: for provided_spec in spack.spec.parse(string):
[extends->[_execute_extends->[_depends_on]],DirectiveMeta->[directive->[_decorator->[_wrapper->[remove_directives->[remove_directives],remove_directives]]]],depends_on->[_execute_depends_on->[_depends_on]],variant->[_raise_reserved_name->[format_error],_raise_argument_error->[format_error],_raise_default_not_set->[format_error]],directive]
Provides a virtual dependency to a package.
> when specs for provides() need a name... -> > "when" specs for provides() need a name Also, "buld" -> "build"
@@ -69,9 +69,17 @@ class ArchiveSpikeService(BaseService): def on_update(self, updates, original): updates[ITEM_OPERATION] = ITEM_SPIKE + self._validate_item(original) self._validate_take(original) self._update_rewrite(original) + def _validate_item(self, original): + # only allow an item to be spiked if it is not a member of a package + if original[ITEM_TYPE] != CONTENT_TYPE.COMPOSITE and original.get(LINKED_IN_PACKAGES, None) \ + and len([x for x in original.get(LINKED_IN_PACKAGES, []) if x.get(PACKAGE_TYPE, '') == '']): + raise ValidationError(['This item is in a package' + + ' it needs to be removed before the item can be spiked']) + def _validate_take(self, original): takes_service = TakesPackageService() if not takes_service.is_last_takes_package_item(original):
[ArchiveSpikeService->[update->[update]],ArchiveUnspikeService->[update->[get_unspike_updates,update]]]
Called when an item is updated.
To be consistent across its better to throw `SuperdeskApiError.badRequestError()` instead of `ValidationError`.
@@ -131,6 +131,10 @@ class Version(OnChangeMixin, ModelBase): git_hash = models.CharField(max_length=40, blank=True) source_git_hash = models.CharField(max_length=40, blank=True) + recommendation_status = models.PositiveSmallIntegerField( + choices=amo.RECOMMENDATION_VERSION_STATUS_CHOICES.items(), + default=amo.STATUS_NULL) + # The order of those managers is very important: please read the lengthy # comment above the Addon managers declaration/instantiation. unfiltered = VersionManager(include_deleted=True)
[update_status->[update_status],Version->[was_auto_approved->[is_public],is_public->[is_public],transformer->[rollup,_compat_map],inherit_nomination->[reset_nomination_time],from_upload->[VersionCreateError,from_upload],transformer_activity->[rollup],delete->[save],VersionManager],inherit_nomination->[inherit_nomination],License->[LicenseManager],VersionManager->[__init__->[__init__]],ApplicationsVersions->[__str__->[get_application_display,is_compatible_app]]]
Creates a new version object. Return a string representation of the version number.
Do we really need that extra field ? We're going to have to update `status` on the `DiscoveryItem` anyway when a version is approved, right ?
@@ -57,7 +57,7 @@ def setup_args(parser=None): train.add_argument('-sval', '--save-after-valid', type='bool', default=False, help='Saves the model to model_file.checkpoint after ' - 'every validation (default True).') + 'every validation (default False).') train.add_argument('-vme', '--validation-max-exs', type=int, default=-1, help='max examples to use during validation (default '
[TrainLoop->[validate->[run_eval,save_best_valid],train->[run_eval,validate,log]],TrainLoop]
Setup the training loop arguments. Parse command line options for training agent.
You can also write `%(default)s` instead and argparse will insert the default.
@@ -56,9 +56,12 @@ type appEnv struct { func main() { // Copy the contents of /pach-bin/certs into /etc/ssl/certs + copyErr := false if err := filepath.Walk("/pach-bin/certs", func(inPath string, info os.FileInfo, err error) error { if err != nil { - return err // Don't try and fix any errors encountered by Walk() itself + log.Warnf("skipping \"%s\", could not stat path: %v", inPath, err) + copyErr = true + return nil // Don't try and fix any errors encountered by Walk() itself } if info.IsDir() { return nil // We'll just copy the children of any directories when we traverse them
[Dir,NewAPIServer,KeepAlive,Grant,Warnf,Close,WithLease,PipelineRcName,SetAuthToken,Mode,Copy,Walk,Put,New,Go,NewFromAddress,Ctx,Errorf,GetPipelineInfo,Wait,Main,RegisterWorkerServer,Join,ListenAndServe,Get,Rel,MkdirAll,Serve,Println,IsDir,WithTimeout,Sprintf,Background,Unmarshal,EtcdDialOptions,OpenFile]
returns the unique identifier of a given object in etcd. extract relative path to the file and write it to outRelPath.
Why don't you just return the error from `Walk` and then log it below. I think that would condense this code a lot, you'd only need 1 line to handle each error rather than 3 and you wouldn't need `copyErr`.
@@ -117,6 +117,7 @@ module ProtocolImporters name: desc_component[:source][:name], mol_weight: desc_component[:source][:mol_weight], details: { + catalog_number: desc_component[:source][:sku], link: desc_component[:source][:vendor][:link], linear_formula: desc_component[:source][:linfor] }
[StepComponents->[description->[get_component],name->[get_component]]]
Build a single component from a hash. desc_component - desc_component Hash.
We should also move this to details I believe and check for `zero?` before displaying.
@@ -117,9 +117,10 @@ def main(): time_e = time.perf_counter() time_forward += (time_e - time_s) * 1000 - mel = (mel + 4) / 8 - np.clip(mel, 0, 1, out=mel) - mel = np.transpose(mel) + if is_wavernn: + mel = (mel + 4) / 8 + np.clip(mel, 0, 1, out=mel) + mel = np.transpose(mel) mel = np.expand_dims(mel, axis=0) time_s = time.perf_counter()
[save_wav->[,writeframes,tobytes,setnchannels,setframerate,open,setsampwidth],main->[forward,tqdm,transpose,save_wav,enumerate,expand_dims,min,perf_counter,extend,ForwardTacotronIE,build_argparser,rstrip,append,print,len,format,clip,open,IECore,WaveRNNIE,array],build_argparser->[add_argument_group,ArgumentParser,add_argument],exit,main]
Main function of the n - node algorithm. missing - check if there is a wavernn in the list of resampled images.
This also seems like something that should be moved into the `WaveRNNIE` class.
@@ -223,7 +223,9 @@ def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline, raise ValueError('exclude has to be a list of channel names or ' '"bads"') - picks = list(set(picks).difference(exclude)) + picks = [pick for pick in picks if pick not in exclude] + if len(picks) != len(set(picks)): + picks = set(picks) picks = np.array(picks) types = np.array([channel_type(info, idx) for idx in picks])
[plot_evoked_joint->[plot_evoked_joint,_plot_evoked],_combine_grad->[pair_and_combine],_setup_styles->[convert_colors,_aux_setup_styles],plot_evoked_image->[_plot_evoked],_handle_spatial_colors->[_plot_legend],_plot_lines->[_rgb],plot_compare_evokeds->[_format_evokeds_colors,_combine_grad,_setup_styles,_plot_legend,_check_loc_legal,plot_compare_evokeds,_truncate_yaxis],_plot_evoked_white->[whitened_gfp],plot_evoked->[_plot_evoked]]
Plot a single - channel . Compute the n - th unit critical number for a given set of channels. Plots a series of time series and a series of time series.
why not always do this? And it should maybe be `picks = sorted(set(picks))`
@@ -160,6 +160,11 @@ export class FixedLayer { * Must be always called after DOMReady. */ setup() { + const isEmbedded = !!this.ampdoc.getParent(); + if (!isEmbedded) { + return; + } + const root = this.ampdoc.getRootNode(); const stylesheets = root.styleSheets; if (!stylesheets) {
[No CFG could be retrieved]
Initializes a lightbox object. Check if a node has attributes and if so determine if it is hidden.
Are you looking for "isIframed" here? If so, I don't think `!!ampdoc.getParent()` would give us this, since we do not consider iframe to be a case of a chain of ampdocs where runtime might be reused. If so, the signal we need is in the `viewer.isEmbedded()`. Notice that that signal is needed for both: iframed and in-a-webivew cases since both can offset header. Another recommendation here: to also switch on this mode in `localDev` mode to make local development/testing easier. **Edit**: Pulled points apart for readability.
@@ -744,6 +744,8 @@ def create_parser(plugins, args): help="Obtain and install certs using Apache") helpful.add(None, "--nginx", action="store_true", help="Obtain and install certs using Nginx") + helpful.add(None, "--standalone", action="store_true", + help='Obtain certs using a "standalone" webserver on port 443.') # positional arg shadows --domains, instead of appending, and # --domains is useful, because it can be stored in config #for subparser in parser_run, parser_auth, parser_install:
[_plugins_parsing->[add,add_group,add_plugin_args],_auth_from_domains->[_report_new_cert,_treat_as_renewal],revoke->[revoke,_determine_account],auth->[_auth_from_domains,_find_domains,_report_new_cert,choose_configurator_plugins,_init_le_client],_create_subparsers->[add_subparser,add,add_group,flag_default],setup_logging->[setup_log_file_handler],_treat_as_renewal->[_find_duplicative_certs],SilentParser->[add_argument->[add_argument]],HelpfulArgumentParser->[add_plugin_args->[add_group],add->[add_argument],__init__->[SilentParser,flag_default]],install->[_init_le_client,_find_domains,choose_configurator_plugins],run->[_init_le_client,_auth_from_domains,_find_domains,choose_configurator_plugins],create_parser->[HelpfulArgumentParser,flag_default,add,config_help,add_group],main->[setup_logging,create_parser],choose_configurator_plugins->[set_configurator,diagnose_configurator_problem],_paths_parser->[config_help,add,add_group,flag_default],_init_le_client->[_determine_account],rollback->[rollback],main]
Create a parser for the given options. Add flags for the given sequence number. Display the parser and the args of the last unknown option.
Nit: Standalone now supports both SimpleHTTP and DVSNI. Perhaps just change this to `Obtain certs using a "standalone" webserver.`?
@@ -71,6 +71,12 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { return nil, err } + var cerr error + cerr = m.db.Close() + if cerr != nil { + return nil, errors.Wrap(cerr, "mysql-status close failed") + } + event := eventMapping(status) if m.Module().Config().Raw {
[Fetch->[HostData,loadStatus,Module,Wrap,Config,NewDB],loadStatus->[Scan,Close,Next,Query],DefaultMetricSet,MakeDebug,MustAddMetricSet,WithHostParser]
Fetch fetches the metrics from the mysql - status database.
Yes, I think I'd prefer to use `err` as usual in golang.
@@ -80,10 +80,12 @@ class Edge: if config.get('fee'): self.fee = config['fee'] else: - self.fee = self.exchange.get_fee(symbol=self.config['exchange']['pair_whitelist'][0]) + self.fee = self.exchange.get_fee(symbol=expand_pairlist( + self.config['exchange']['pair_whitelist'], list(self.exchange.markets))[0]) def calculate(self) -> bool: - pairs = self.config['exchange']['pair_whitelist'] + pairs = expand_pairlist(self.config['exchange']['pair_whitelist'], + list(self.exchange.markets)) heartbeat = self.edge_config.get('process_throttle_secs') if (self._last_updated > 0) and (
[Edge->[_find_trades_for_stoploss_range->[round,_detect_next_stop_or_sell_point],stake_amount->[round,min,info,stoploss,abs],_detect_next_stop_or_sell_point->[round,append,find_1st,len,float],_process_expectancy->[results,sort_values,get,sum,len,where,droplevel,PairInfo,groupby,itertuples,abs,count],calculate->[_find_trades_for_stoploss_range,refresh_data,load_data,ohlcvdata_to_dataframe,_process_expectancy,strftime,critical,_fill_calculable_fields,sort_values,get,len,advise_sell,DataFrame,items,info,utcnow,reset_index,get_timerange],adjust->[append,info,get,items,float],accepted_pairs->[float,get,items,append],stoploss->[warning],__init__->[arange,critical,parse_timerange,get,now,OperationalException,float,get_fee],_fill_calculable_fields->[int,result,total_seconds]],getLogger]
Initializes the object with the given configuration. Function to calculate a single object. Check if a record has a missing expectation.
Nice that you even thought about edge - i suspect i'd have forgotten this!
@@ -273,6 +273,7 @@ abstract class Module { * @return array|object|null */ public function get_next_chunk( $config, $status, $chunk_size ) { + // phpcs:disable WordPress.DB.PreparedSQL.InterpolatedNotPrepared global $wpdb; return $wpdb->get_col( <<<SQL
[Module->[get_next_chunk->[id_field,table_name],get_min_max_object_ids_for_batches->[id_field,table_name],total->[table_name],send_action->[send_action],send_full_sync_actions->[get_initial_last_sent,name,get_next_chunk],get_objects_by_id->[get_object_by_id]]]
Get next chunk of n - chunk from the database.
@mdbitz I'd love your thoughts on confirming this is acceptable.
@@ -377,7 +377,7 @@ require({ errorLines.push(line); } } - } + }/*eslint-enable new-cap*/ } function scheduleHint() {
[No CFG could be retrieved]
Adds error line and hint lines to the editor. Schedule hint no change.
We probably want to establish a style guide here, I propose all `eslint-` statements in the code be on their own line.
@@ -560,7 +560,7 @@ class UpdateChannelsMixin(object): elif isinstance(self, Evoked): self.data = self.data.take(idx, axis=0) - def add_channels(self, add_list, copy=None): + def add_channels(self, add_list, force_update_info=False, copy=None): """Append new channels to the instance. Parameters
[ContainsMixin->[__contains__->[_contains_ch_type]],_get_T1T2_mag_inds->[pick_types],UpdateChannelsMixin->[pick_types->[pick_types],_pick_drop_channels->[inst_has]],read_ch_connectivity->[_recursive_flatten],fix_mag_coil_types->[pick_types],SetChannelsMixin->[set_channel_types->[_check_set],rename_channels->[rename_channels],plot_sensors->[plot_sensors]]]
Drop channels from the object. Add objects to the base object and return a object.
should probably put after `copy` even though it's about to be removed (could in 0.12 break someone's code)
@@ -185,7 +185,9 @@ def destroy_network_rules_for_nic(vm_name, vm_ip, vm_mac, vif, sec_ips): logging.debug("Ignoring failure to delete ebtable rules for vm: " + vm_name) def get_bridge_physdev(brname): - physdev = execute("bridge -o link show | awk '/master %s / && !/^[0-9]+: vnet/ {print $2}' | head -1 | cut -d ':' -f1" % brname) + # eth1.50@eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 master breth1-50 state forwarding priority 32 cost 4 | + # eth1.50@eth1: | eth1.50@eth1 | eth1.50 + physdev = execute("bridge -o link show | awk '/master %s / && !/^[0-9]+: vnet/ {print $2}' | head -1 | cut -d ':' -f1 | cut -d '@' -f1" % brname) return physdev.strip()
[network_rules_for_rebooted_vm->[check_domid_changed,default_network_rules_systemvm,rewrite_rule_log_for_vm,execute,delete_rules_for_vm_in_bridge_firewall_chain],can_bridge_firewall->[execute],verify_iptables_rules_for_bridge->[execute,get_br_fw],get_vm_id->[get_libvirt_connection],get_vifs->[virshdumpxml],add_network_rules->[egress_chain_name,parse_network_rules,default_network_rules,check_rule_log_for_vm,write_rule_log_for_vm,execute,iptables_chain_name],post_default_network_rules->[execute],get_vifs_for_bridge->[virshdumpxml],network_rules_vmSecondaryIp->[split_ips_by_family,refactor_ebtable_rules,add_to_ipset],add_to_ipset->[execute],egress_chain_name->[iptables_chain_name],destroy_network_rules_for_nic->[execute],default_ebtables_rules->[destroy_ebtables_rules,execute],cleanup_rules->[execute,destroy_network_rules_for_vm,virshlist],get_rule_logs_for_vms->[get_rule_log_for_vm,virshlist],virshlist->[get_libvirt_connection],delete_rules_for_vm_in_bridge_firewall_chain->[execute],destroy_ebtables_rules->[execute],destroy_network_rules_for_vm->[execute],default_network_rules_systemvm->[execute],refactor_ebtable_rules->[destroy_ebtables_rules,default_ebtables_rules,execute],default_network_rules->[destroy_ebtables_rules,add_to_ipset,write_secip_log_for_vm,default_ebtables_rules,create_ipset_forvm,execute,split_ips_by_family,ebtables_rules_vmip,ipv6_link_local_addr],verify_ipset_for_vm->[execute,ipset_chain_name],get_bridges->[virshdumpxml],verify_ebtables_rules_for_vm->[execute],get_bridge_physdev->[execute],ebtables_rules_vmip->[execute],cleanup_bridge->[execute],check_default_network_rules->[default_ebtables_rules,ebtables_rules_vmip,execute],virshdumpxml->[get_libvirt_connection],create_ipset_forvm->[execute],get_br_fw->[execute],add_fw_framework->[get_bridge_physdev,execute,get_br_fw],verify_default_iptables_rules_for_vm->[execute,iptables_chain_name,egress_chain_name,get_br_fw],verify_network_rules->[execute],destroy_network_rules_for_vm,obtain_file_lock,check_d
efault_network_rules,destroy_network_rules_for_nic,default_network_rules_systemvm,default_network_rules,cleanup_rules,can_bridge_firewall,get_rule_logs_for_vms,add_network_rules,post_default_network_rules,network_rules_vmSecondaryIp,verify_network_rules]
Get the bridge s physdev. Delete any nexus in the vif.
just note - we need to regression test CentOS7, CentOS8 and Ubuntu with SG to check/test this (manual test should be okay as well)
@@ -19,8 +19,6 @@ import java.util.Map; @Deprecated public interface ExceptionPayload extends Serializable { - int getCode(); - String getMessage(); Map getInfo();
[No CFG could be retrieved]
Gets the code of the exception.
Check if META-INF/org/mule/runtime/core/config/mule-exception-codes.properties and META-INF/org/mule/runtime/core/config/mule-exception-config.properties can be deleted too
@@ -10,7 +10,14 @@ namespace Dynamo.Extensions /// </summary> public interface IExtensionLoader { + /// <summary> + /// Loads assembly by passed extension path and return it as IExtension + /// </summary> IExtension Load(string extensionPath); + + /// <summary> + /// Returns an enum of IExtension specified by passed extension path + /// </summary> IEnumerable<IExtension> LoadDirectory(string extensionsPath); } }
[No CFG could be retrieved]
Load extension from file.
Extension method for loading assembly from the path. Returns cref = IExtension.
@@ -203,14 +203,9 @@ def warn_no_ssl_cert_checking(): "your Python to enable certificate verification.") -def push_to_url(local_path, remote_path, **kwargs): +def push_to_url(local_file_path, remote_path, **kwargs): keep_original = kwargs.get('keep_original', True) - local_url = url_util.parse(local_path) - local_file_path = url_util.local_file_path(local_url) - if local_file_path is None: - raise ValueError('local path must be a file:// url') - remote_url = url_util.parse(remote_path) verify_ssl = spack.config.get('config:verify_ssl')
[list_url->[_iter_s3_prefix],url_exists->[read_from_url],_spider_wrapper->[_spider],read_from_url->[uses_ssl],LinkParser->[__init__->[__init__]],_spider->[read_from_url,LinkParser,NonDaemonPool],find_versions_of_archive->[spider],spider->[_spider],standardize_header_names->[standardize_header_names],push_to_url->[warn_no_ssl_cert_checking,uses_ssl],_iter_s3_prefix->[_list_s3_objects],NonDaemonPool->[__init__->[NonDaemonContext]]]
Push a file to a remote file. Get the SCHEME of the remote URL.
I think I understand how this change addresses #13404, but it seems to imply that there is something broken with how `file://` URLs are parsed. It looks like urllib is stripping out the part of the filename that looks like query arguments, and that will still be a problem if we ever have to work with similar file paths, either as part of something internal to Spack (as in this case), or if a user ever hands us such a file path. I'd like to take a closer look at this and possibly follow up with @scheibelp if I have any questions.
@@ -446,7 +446,12 @@ class MaxPooling2D(Pooling2D): @keras_export('keras.layers.AveragePooling2D', 'keras.layers.AvgPool2D') class AveragePooling2D(Pooling2D): """Average pooling operation for spatial data. - + + >>> import numpy as np + >>> x = tf.keras.Input(shape=(3, 3, 1), name='x') + >>> average_pooling_layer = tf.keras.layers.AveragePooling2D(pool_size = (2,2),strides = (1,1),padding = "same")(x) + <tf.Tensor 'average_pooling2d_2/Identity:0' shape=(None, 1, 1, 1) dtype=float32> + Arguments: pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal).
[MaxPooling3D->[__init__->[super]],GlobalPooling3D->[__init__->[super,normalize_data_format,InputSpec],compute_output_shape->[TensorShape],get_config->[dict,list,super,items]],GlobalMaxPooling2D->[call->[max]],GlobalAveragePooling2D->[call->[mean]],MaxPooling2D->[__init__->[super]],GlobalMaxPooling3D->[call->[max]],AveragePooling3D->[__init__->[super]],GlobalPooling1D->[__init__->[super,normalize_data_format,InputSpec],compute_output_shape->[TensorShape],get_config->[dict,list,super,items]],AveragePooling1D->[__init__->[partial,super]],GlobalPooling2D->[__init__->[super,normalize_data_format,InputSpec],compute_output_shape->[TensorShape],get_config->[dict,list,super,items]],Pooling2D->[call->[convert_data_format,pool_function,upper],__init__->[normalize_data_format,InputSpec,super,image_data_format,normalize_tuple,normalize_padding],compute_output_shape->[TensorShape,conv_output_length],get_config->[dict,list,super,items]],Pooling3D->[call->[pool_function,transpose,upper],__init__->[normalize_data_format,InputSpec,super,image_data_format,normalize_tuple,normalize_padding],compute_output_shape->[TensorShape,conv_output_length],get_config->[dict,list,super,items]],MaxPooling1D->[__init__->[partial,super]],Pooling1D->[call->[squeeze,expand_dims,pool_function],__init__->[normalize_data_format,InputSpec,super,image_data_format,normalize_tuple,normalize_padding],compute_output_shape->[TensorShape,conv_output_length],get_config->[dict,list,super,items]],GlobalMaxPooling1D->[call->[max]],AveragePooling2D->[__init__->[super]],GlobalAveragePooling3D->[call->[mean]],GlobalAveragePooling1D->[call->[reduce_sum,expand_dims,floatx,cast,mean,sum],__init__->[super]],keras_export]
MaxPooling2D constructor. Missing input and output dimensions of a Keras block.
No need to import numpy if you are not using it :)
@@ -733,10 +733,12 @@ class WheelBuilder(object): shutil.move( os.path.join(temp_dir.path, wheel_name), wheel_path ) + except BaseException as exc: + logger.critical('Failed to build: %s', exc) + logger.debug('Exception information:', exc_info=True) + else: logger.info('Stored in directory: %s', output_dir) return wheel_path - except: - pass # Ignore return, we can't do anything else useful. self._clean_one(req) return None
[WheelBuilder->[__build_one->[_base_setup_args],_build_one->[_find_build_reqs,_install_build_reqs,BuildEnvironment],_clean_one->[_base_setup_args],build->[_build_one]],move_wheel_files->[record_installed->[normpath],clobber->[record_installed],open_for_csv,rehash,root_is_purelib,message_about_scripts_not_on_PATH,clobber,get_entrypoints,normpath],get_entrypoints->[_split_ep]]
Build one wheel and return the path to it.
FWIW, this bit of the codebase is gonna change drastically over the coming weeks.
@@ -59,10 +59,10 @@ class HelpFormatter: lines.extend(self.format_option(ohi)) add_option(oshi.basic) - if self._show_recursive: - add_option(oshi.recursive, category="recursive") if self._show_advanced: add_option(oshi.advanced, category="advanced") + if self._show_deprecated: + add_option(oshi.deprecated, category="deprecated") return [*lines, "\n"] def format_option(self, ohi: OptionHelpInfo) -> List[str]:
[HelpFormatter->[format_option->[_maybe_red,_maybe_magenta,_maybe_cyan],format_options->[add_option->[_maybe_green],add_option]]]
Return a help message for the specified options. Print a warning if the OHI object is missing.
Good call to remove this. I agree that it's rare for us to have a recursive option and we don't want to emphasize it. Notably, we're removing the recursive options `--fmt-skip` and `--fmt-transitive`.
@@ -1,5 +1,6 @@ class RateLimitChecker - attr_accessor :user + attr_accessor :user, :situation + def initialize(user = nil) @user = user end
[RateLimitChecker->[limit_by_email_recipient_address->[size],track_image_uploads->[id,to_i,write,seconds],ping_admins_without_delay->[id,perform_now],ping_admins->[id,perform_later],limit_by_situation->[to_i,size],attr_accessor]]
Initialize a new RateLimitChecker with an optional user.
Btw, attr_writers are not used, so we can replace it with `attr_reader`. Or you can choose to use a setter like `self.situation = situation`
@@ -4,10 +4,14 @@ import hashlib import json import logging +import typing from collections import OrderedDict from collections.abc import Iterable, Mapping, Set +from pathlib import Path +from typing import Any, Optional, Type, Union from twitter.common.collections import OrderedSet +from typing_extensions import Protocol from pants.util.objects import DatatypeMixin from pants.util.strutil import ensure_binary
[CoercingEncoder->[default->[_is_natively_encodable,_maybe_encode_dict_key,default],encode->[default]],Sharder->[is_in_shard->[compute_shard],compute_shard->[hash_all],__init__->[ensure_int->[InvalidShardSpec],InvalidShardSpec,ensure_int]],json_hash->[hash_all],stable_json_sha1->[json_hash]]
Creates a new object with the given hexadecimal representation of the given object. Decode bytes and convert to str.
The preferred style is `from typing import X`. Think Typeshed (official type hints repo) established this in their style guide.
@@ -318,7 +318,7 @@ export class VideoManager { * with it or playing through autoplay * * @param {!../video-interface.VideoInterface} video - * @return {!../video-interface.VideoInterface} PlayingStates + * @return {string} */ getPlayingState(video) { return this.getEntryForVideo_(video).getPlayingState();
[No CFG could be retrieved]
Registers common video actions such as play, pause, etc. on the video element. Register an action that can be performed on a user's video.
`PlayingStates` sounds far more descriptive than `string`, @alanorozco does this look correct?
@@ -35,7 +35,7 @@ def send_alert(): @app.route('/en/latest/<path:filename>') def send_docs(filename): return flask.send_from_directory( - os.path.join(_basedir, "sphinx/build/html/"), filename) + os.path.join(_basedir, "sphinx/_build/html/"), filename) def open_browser(): # Child process
[send_versions->[send_from_directory,join],ui->[input,sleep],shutdown_server->[print,add_callback],serve_http->[listen,IOLoop,current],send_alert->[get],send_docs->[send_from_directory,join],open_browser->[sleep,open],WSGIContainer,HTTPServer,print,Thread,dirname,route,start,Flask,ui,join,shutdown_server,exit]
Send the docs to the user.
@nimishbongale why this change? The subdir is named `build` (not `_build`) on my local machine.
@@ -23,10 +23,10 @@ namespace System.Linq.Expressions.Interpreter public override string InstructionName => "DefaultValue"; [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2077:UnrecognizedReflectionPattern", - Justification = "_type is a ValueType. You can always create an instance of a ValueType.")] + Justification = "_type is a ValueType. You can always get an uninitialized ValueType.")] public override int Run(InterpretedFrame frame) { - frame.Push(Activator.CreateInstance(_type)); + frame.Push(RuntimeHelpers.GetUninitializedObject(_type)); return 1; }
[DefaultValueInstruction->[Run->[CreateInstance,Push],IsNullableType,Assert,IsValueType]]
If _type is a ValueType create an instance of it.
Do we still get a warning after this change if the attribute is removed?
@@ -8,9 +8,13 @@ ***************************************************************************/ +#include <codecvt> +#include <locale> + #include "unicode.h" #ifdef _WIN32 +#include "strconv.h" #define UTF8PROC_DLLEXPORT #endif
[utf16f_from_uchar->[utf16_from_uchar],utf8_is_valid_string->[uchar_from_utf8,uchar_isvalid],utf8_from_uchar->[utf8_from_uchar,uchar_isvalid],uchar_from_utf16f->[uchar_from_utf16],utf16_from_uchar->[uchar_isvalid]]
Checks whether a given Unicode character is a legitimate Unicode character. Returns the number of bytes needed to read a single Unicode character from the UTF-8 string.
Please `#include` standard library headers after project headers, to protect against inadvertently getting into a situation where a header doesn't work if you don't `#include` something else first.
@@ -11,12 +11,17 @@ KRATOS_CREATE_VARIABLE(double, ADJOINT_VELOCITY_POTENTIAL) KRATOS_CREATE_VARIABLE(double, ADJOINT_AUXILIARY_VELOCITY_POTENTIAL) // Flow field magnitudes -KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS(VELOCITY_INFINITY) KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS(VELOCITY_LOWER) KRATOS_CREATE_VARIABLE(double, PRESSURE_LOWER) KRATOS_CREATE_VARIABLE(double, POTENTIAL_JUMP) KRATOS_CREATE_VARIABLE(double, ENERGY_NORM_REFERENCE) KRATOS_CREATE_VARIABLE(double, POTENTIAL_ENERGY_REFERENCE) +KRATOS_CREATE_VARIABLE(double, HEAT_CAPACITY_RATIO) + +// Free stream magnitudes +KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS(VELOCITY_INFINITY) +KRATOS_CREATE_VARIABLE(double, DENSITY_INFINITY) +KRATOS_CREATE_VARIABLE(double, MACH_INFINITY) // Markers KRATOS_CREATE_VARIABLE(int, WAKE)
[No CFG could be retrieved]
Create a list of variables for the given application. Missing KratosSeoVariable - Creates all Trailing Edge and ZeroVELOCITY Condition.
I propose you call them `FAR_FIELD_*` instead of `*_INFINITY`.
@@ -407,6 +407,7 @@ class ResultsControllerTest < AuthenticatedControllerTest :retrieve_file).once.raises(Exception.new(SAMPLE_ERR_MSG)) get_as @student, :codeviewer, + format: :js, :assignment_id => @assignment.id, :submission_id => 1, :submission_file_id => @submission_file.id,
[ResultsControllerTest->[marking_scheme_type,find_by_submission_id,all,new,find_entry,group_name,unit,update_attributes,post_as,refresh_grade_distribution,join,first,raises,released_to_students,should,assert_select,map,update_total_mark,assert,assert_equal,body,save!,respond_with,returns,add,repo_name,total_mark,result,revision_number,assigns,overall_comment,t,assert_not_nil,header,remark_result_id,assert_nil,commit,count,empty?,to_i,get_latest_result,user,save,access_repo,assert_not_equal,times,submission,read,get_transaction,get_as,each,repository_folder,make,id,marking_state,open,extra_mark,annotation_text,short_identifier,context,get_latest_revision,assert_response,has_submission?,grade_distribution_percentage,get,generate_new_submission,retrieve_file,assert_match,render_template,setup,reload,assert_recognizes],join,require,expand_path,dirname]
Checks that the student has no access to the file specified by the user. This action checks that the response body contains a valid object.
Align the parameters of a method call if they span more than one line.
@@ -34,9 +34,16 @@ #include <unistd.h> #include <libzfs.h> #include <libshare.h> +#include <sys/list.h> +#include <sys/zfs_debug.h> +#include <stddef.h> #include "libshare_impl.h" -static boolean_t nfs_available(void); +static boolean_t +nfs_available(void); + +static int +nfs_enable_share_one(const char *, const char *, char *); static sa_fstype_t *nfs_fstype;
[int->[fork,strdup,unlink,libzfs_run_process,nfs_enable_share,fprintf,nfs_available,strcat,strlen,malloc,strchr,nfs_disable_share,fcntl,mkstemp,foreach_nfs_shareopt,sprintf,foreach_nfs_host,waitpid,callback,strcmp,FSINFO,close,WEXITSTATUS,free,realloc,get_linux_shareopts,get_linux_hostspec,add_linux_shareopt,execlp,WIFEXITED,dup2,nfs_is_share_active,assert,exit],boolean_t->[nfs_available,fseek,nfs_check_exportfs,fgets,strlen,fclose,strcmp,strchr,fdopen,dup],libshare_nfs_init->[register_fstype],void->[free,FSINFO]]
Common Development and Distribution License. Iterates over the specified string of NFS share options and invokes the specified callback function for each.
Function prototypes can be one line.
@@ -60,11 +60,15 @@ class KeyPairsController < ApplicationController respond_to do |format| if @key_pair.update_attributes(key_pair_params) - format.html { redirect_to @key_pair, notice: 'Key pair was successfully updated.' } + format.html do redirect_to @key_pair, + notice: 'Key pair was successfully updated.' + end format.json { head :no_content } else format.html { render action: "edit" } - format.json { render json: @key_pair.errors, status: :unprocessable_entity } + format.json do render json: @key_pair.errors, + status: :unprocessable_entity + end end end end
[KeyPairsController->[destroy->[destroy],new->[new],create->[new]]]
Update the key pair with the given parameters.
Block body expression is on the same line as the block start.
@@ -387,6 +387,9 @@ export class VisibilityModel { isVisibilityMatch_(visibility) { dev().assert(visibility >= 0 && visibility <= 1, 'invalid visibility value: %s', visibility); + if (this.spec_.visiblePercentageMin == 1) { + return visibility == 1; + } return visibility > this.spec_.visiblePercentageMin && visibility <= this.spec_.visiblePercentageMax; }
[No CFG could be retrieved]
Private functions - Report when the report-ready promise resolves. Returns whether the visibility matches.
Without this special case, if they set min=max=100 then we would never fire. This opens up the discussion of should we just change ">" on line 374 to ">=" but in that case we would need to handle the special case of min=0 since visibility will always be >=0. Spec is (minPercentage, maxPercentage] so I think the special case on 100 is more appropriate than special case on 0.
@@ -286,7 +286,7 @@ def get_metrics( the `"loss"` metric is "average loss per batch". Returns the `"batch_loss"` separately. """ - metrics = model.get_metrics(reset=reset) + metrics = model.get_metrics(reset=reset, world_size=world_size) if batch_loss is not None: metrics["batch_loss"] = batch_loss metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
[make_vocab_from_params->[datasets_from_params],evaluate->[get_metrics],get_batch_size->[get_batch_size],get_metrics->[get_metrics]]
Get the metrics of a model.
Needs another input - cuda_device.
@@ -108,6 +108,9 @@ bio_spdk_env_init(void) opts.env_context = (char *)dpdk_cli_override_opts; + bio_get_hotplug_busid_range(nvme_glb.bd_nvme_conf); + spdk_nvme_pcie_set_hotplug_filter(hotplug_filter_fn); + rc = spdk_env_init(&opts); if (rc != 0) { rc = -DER_INVAL; /* spdk_env_init() returns -1 */
[No CFG could be retrieved]
Initializes the environment variables and the DAOS environment. Initialize the environment variables and the DAOS thread library.
Minor: All these hotplug filter related code could be moved into bio_config.c, and export only single function like 'bio_set_hotplug_filter(nvme_conf)' in bio_internal.h.
@@ -75,6 +75,8 @@ class TorchAgent(Agent): self.history_tokens = opt['history_tokens'] self.history_dialog = opt['history_dialog'] self.history_replies = opt['history_replies'] + self.model = None + self.optimizer = None def share(self): shared = super().share()
[TorchAgent->[map_valid->[any,Batch,choice,get,len,sorted,zip,enumerate,range,cuda,is_valid,LongTensor],unmap_valid->[zip],vectorize->[obs,append,LongTensor,cuda,txt2vec],maintain_dialog_history->[parse->[split,txt2vec],parse,get,len,history,deque],share->[super],add_cmdline_args->[add_argument_group,add_argument],__init__->[device,print,is_available,DictionaryAgent,super]],ModuleNotFoundError,namedtuple]
Initialize the agent with the given options and shared state.
I don't think we should set these to None
@@ -525,13 +525,13 @@ def run(ctx: Context, **kwargs: Any) -> None: # pylint: disable=too-many-locals,too-many-branches,too-many-statements flamegraph = kwargs.pop("flamegraph", None) + switch_tracing = kwargs.pop("switch_tracing", None) profiler = None + switch_monitor = None if flamegraph: os.makedirs(flamegraph, exist_ok=True) - from raiden.utils.profiling.sampler import TraceSampler, FlameGraphCollector - now = datetime.datetime.now().isoformat() address = to_checksum_address(kwargs["address"]) stack_path = os.path.join(flamegraph, f"{address}_{now}_stack.data")
[smoketest->[print_step,append_report],run->[run]]
Display a single critical block. This function is called when a node is opened and released. Check if a node is a reserved nonce and if so stop it.
When switch_tracing can't be none, you can omit the `is True`. I would also slightly prefer to merge this with the initialization at L533 (e.g. `switch_monitor = SwitchMonitoring() if switch_tracing else None`).
@@ -221,7 +221,8 @@ def test_VolumePairList_whitelist_gen(mocker, whitelist_conf, shitcoinmarkets, t assert log_has_re(r'^Removed .* from whitelist, because stop price .* ' r'would be <= stop limit.*', caplog) if pairlist['method'] == 'PriceFilter': - assert log_has_re(r'^Removed .* from whitelist, because 1 unit is .*%$', caplog) + assert (log_has_re(r'^Removed .* from whitelist, because 1 unit is .*%$', caplog) or + log_has_re(r"^Removed .* from whitelist, because 'last' is empty.*", caplog)) def test_gen_pair_whitelist_not_supported(mocker, default_conf, tickers) -> None:
[test_volumepairlist_invalid_sortvalue->[whitelist_conf]]
Test if a pairlist is not supported in the whitelist.
"...because ticker,last is empty" ? "...because ticker['last'] is empty" ? "...because 'last' in ticker is empty" ?
@@ -11,8 +11,12 @@ VIEWPORT = { 'mobile': {'width': 481, 'height': 1024}, 'small': {'width': 320, 'height': 480}} +_KUMA_STATUS = None +_DYNAMIC_FIXTURES = None + def pytest_addoption(parser): + """Add command-line options for Kuma tests.""" parser.addoption( "--maintenance-mode", action="store_true",
[kuma_status->[urlunsplit,get,json,urlsplit],sensitive_url->[startswith,any],selenium->[set_window_size,get],pytest_addoption->[addoption],fixture]
Add options related to pytest.
Wow, this is some nice pytest magic. I'm going to tuck this away for future reference!
@@ -272,6 +272,8 @@ def run_yara_query_rule_on_versions_chunk(version_pks, query_rule_pk): 'Running Yara Query Rule %s on versions %s-%s.', query_rule_pk, version_pks[0], version_pks[-1]) rule = ScannerQueryRule.objects.get(pk=query_rule_pk) + if rule.state == ABORTING: + return for version_pk in version_pks: try: version = Version.unfiltered.all().no_transforms().get(
[run_customs->[run_scanner],_run_yara_query_rule_on_version->[_run_yara_for_path],run_wat->[run_scanner]]
Run a specific ScannerQueryRule on a list of versions.
I'll change this to `!= RUNNING`
@@ -92,7 +92,13 @@ class AnchorGenerator(nn.Module): shifts_y = torch.arange( 0, grid_height, dtype=torch.float32, device=device ) * stride_height - shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + # TODO: remove tracing pass when exporting torch.meshgrid() + # is suported in ONNX + if torchvision._is_tracing(): + shift_y = shifts_y.view(-1, 1).expand(grid_height, grid_width) + shift_x = shifts_x.view(1, -1).expand(grid_height, grid_width) + else: + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
[concat_box_prediction_layers->[permute_and_flatten],AnchorGenerator->[forward->[set_cell_anchors,cached_grid_anchors],set_cell_anchors->[generate_anchors],cached_grid_anchors->[grid_anchors]],RegionProposalNetwork->[forward->[concat_box_prediction_layers,assign_targets_to_anchors,filter_proposals,compute_loss],filter_proposals->[_get_top_n_idx]]]
Generate a list of grid anchors.
This has already been fixed on the ONNX side, and can be removed by just doing the meshgrid as before
@@ -6,7 +6,6 @@ import textwrap from typing import Iterable, Type import pytest - from pants.backend.python.python_artifact import PythonArtifact from pants.backend.python.rules.run_setup_py import ( AmbiguousOwnerError,
[TestGetRequirements->[test_get_requirements->[assert_requirements],assert_requirements->[tgt]],TestGetSources->[assert_sources->[tgt],test_get_sources->[assert_sources]],TestGetOwnedDependencies->[test_owned_dependencies->[assert_owned],assert_owned->[tgt]],TestGetAncestorInitPy->[test_get_ancestor_init_py->[assert_ancestor_init_py],assert_ancestor_init_py->[tgt]],TestGenerateChroot->[test_generate_chroot->[assert_chroot],assert_error->[tgt],assert_chroot->[tgt],test_invalid_binary->[assert_error]],TestGetExportingOwner->[test_get_owner_not_an_ancestor->[assert_is_owner,assert_no_owner],test_get_owner_simple->[assert_is_owner,assert_ambiguous_owner,assert_no_owner],assert_error->[tgt],assert_ambiguous_owner->[assert_error],test_get_owner_multiple_ancestor_generations->[assert_is_owner],test_get_owner_siblings->[assert_is_owner],assert_no_owner->[assert_error]]]
Creates a new object from a single object. A base class for building a build file with the given name.
Ditto on bad import order.
@@ -335,7 +335,16 @@ public class SemiTransactionalHiveMetastore firstPresent(newBasicStatistics.getRowCount(), oldBasicStatistics.getRowCount()), firstPresent(newBasicStatistics.getInMemoryDataSizeInBytes(), oldBasicStatistics.getInMemoryDataSizeInBytes()), firstPresent(newBasicStatistics.getOnDiskDataSizeInBytes(), oldBasicStatistics.getOnDiskDataSizeInBytes())); - return new PartitionStatistics(updatedBasicStatistics, newPartitionStats.getColumnStatistics()); + Map<String, HiveColumnStatistics> updatedColumnStatistics = + updateColumnStatistics(oldPartitionStats.getColumnStatistics(), newPartitionStats.getColumnStatistics()); + return new PartitionStatistics(updatedBasicStatistics, updatedColumnStatistics); + } + + private Map<String, HiveColumnStatistics> updateColumnStatistics(Map<String, HiveColumnStatistics> oldColumnStats, Map<String, HiveColumnStatistics> newColumnStats) + { + Map<String, HiveColumnStatistics> result = new HashMap<>(oldColumnStats); + result.putAll(newColumnStats); + return ImmutableMap.copyOf(result); } private static OptionalLong firstPresent(OptionalLong first, OptionalLong second)
[SemiTransactionalHiveMetastore->[dropRole->[dropRole],dropColumn->[dropColumn],generatePageSinkMetadata->[getTable],listTablePrivileges->[listTablePrivileges],grantTablePrivileges->[getTableOwner,grantTablePrivileges],getAllViews->[getAllViews],rollbackShared->[getTable],deleteRecursivelyIfExists->[deleteIfExists],dropDatabase->[dropDatabase],DeclaredIntentionToWrite->[toString->[toString]],PartitionAndMore->[toString->[toString],getAugmentedPartitionForInTransactionRead->[toString]],renameDatabase->[renameDatabase],getPartitionsByNames->[getTableSource,getPartitionsByNames],UpdateStatisticsOperation->[undo->[updatePartitionStatistics],run->[updatePartitionStatistics]],listRoles->[listRoles],revokeRoles->[revokeRoles],doRecursiveDeleteFiles->[doRecursiveDeleteFiles],getTableStatistics->[getTableStatistics],DirectoryDeletionTask->[toString->[toString]],DirectoryRenameTask->[toString->[toString]],commentTable->[commentTable],IrreversibleMetastoreOperation->[run->[run]],renameTable->[renameTable],CreateTableOperation->[hasTheSameSchema->[getType],undo->[dropTable],run->[getTable,createTable,getPrestoQueryId]],replaceTable->[replaceTable],createDatabase->[createDatabase],getDatabase->[getDatabase],recursiveDeleteFilesAndLog->[logCleanupFailure],grantRoles->[grantRoles],Action->[toString->[toString]],getPartitionName->[getPartitionName],Committer->[prepareDropTable->[dropTable],prepareAlterTable->[getTable],prepareAddPartition->[addPartition,getPartitionName],prepareInsertExistingPartition->[getPartitionName],prepareInsertExistingTable->[getTable],undoAddPartitionOperations->[rollback],prepareAlterPartition->[getPartitionName],prepareDropPartition->[dropPartition],prepareAddTable->[getTable]],getAllDatabases->[getAllDatabases],renameColumn->[renameColumn],truncateUnpartitionedTable->[getTable],revokeTablePrivileges->[getTableOwner,revokeTablePrivileges],getTable->[getTable],createRole->[createRole],TableAndMore->[toString->[toString]],DirectoryCleanUpTask->[toString->[t
oString]],doGetPartitionNames->[getTable,getTableSource,getPartitionNames,getPartitionNamesByParts],AlterTableOperation->[undo->[replaceTable],run->[replaceTable]],PartitionAdder->[execute->[getPrestoQueryId,getPartition],rollback->[dropPartition]],getSupportedColumnStatistics->[getSupportedColumnStatistics],addColumn->[addColumn],getPartitionStatistics->[getTable,getPartitionStatistics],getAllTables->[getAllTables],setShared->[checkReadable],listRoleGrants->[listRoleGrants],finishInsertIntoExistingTable->[getTableStatistics]]]
Update partition statistics.
put all args on the previous line, it won't be too long
@@ -183,9 +183,9 @@ func (client *Client) Publish(batch publisher.Batch) error { // PublishEvents sends all events to elasticsearch. On error a slice with all // events not published or confirmed to be processed by elasticsearch will be // returned. The input slice backing memory will be reused by return the value. -func (client *Client) publishEvents( - data []publisher.Event, -) ([]publisher.Event, error) { +func (client *Client) publishEvents(ctx context.Context, data []publisher.Event) ([]publisher.Event, error) { + span, ctx := apm.StartSpan(ctx, "publishEvents", "output") + defer span.End() begin := time.Now() st := client.observer
[publishEvents->[NewBatch,Failed,Bulk,Now,Duplicate,GetVersion,Acked,Errorf,Sub,ErrTooMany,Dropped,Debugf],Test->[Test],Connect->[Connect],Close->[Close],Publish->[Events,RetryEvents,ACK,publishEvents],Unlock,IsEmpty,NewLogger,Error,New,Warnf,NewConnection,Select,Lock,Errorf,EncodeToString,Debugf]
publishEvents publishes the given events to Elasticsearch and returns the list of events that have not been successfully published. This function is called when a bulk command fails.
I think it would be better if we didn't create this span, and just relied on the "publish" transaction, but this is probably necessary for now if we really want the labels. Ideally, the code for recording the labels (`events_original`, etc.) should be done in a single location, rather than in specific output implementations. This would at least give high-level information for all outputs. Specific outputs could optionally extend those spans with output-specific information, or add sub-spans (e.g. the apmelasticsearch span). I think to do that we'd need to change the Batch and/or Observer interfaces (to report batch-specific stats) and/or the Publish signature (to return the stats).
@@ -154,7 +154,7 @@ public class ChainedExecutionQueryRunnerTest EasyMock.verify(watcher); } - @Test + @Test(timeout = 60000) public void testQueryTimeout() throws Exception { ExecutorService exec = PrioritizedExecutorService.create(
[ChainedExecutionQueryRunnerTest->[testQueryTimeout->[run]]]
Tests that a query can be cancelled. This method can be used to test that a query has not yet been registered. Method that creates a new instance of the ChainedExecutionQueryRunner which runs a sequence of objects. This method asserts that the runner has not yet completed.
we can probably specify a shorter timeout
@@ -60,6 +60,17 @@ type PlanSummary interface { Sames() map[resource.URN]bool } +// PlanPendingOperationsError is an error returned from `NewPlan` if there exist pending operations in the +// snapshot that we are preparing to operate upon. The engine does not allow any operations to be pending +// when operating on a snapshot. +type PlanPendingOperationsError struct { + Operations []resource.Operation +} + +func (p PlanPendingOperationsError) Error() string { + return "one or more operations are currently pending" +} + // Plan is the output of analyzing resource graphs and contains the steps necessary to perform an infrastructure // deployment. A plan can be generated out of whole cloth from a resource graph -- in the case of new deployments -- // however, it can alternatively be generated by diffing two resource graphs -- in the case of updates to existing
[GetProvider->[GetProvider],IsRefresh->[IsRefresh],generateURN->[Target],SignalCancellation->[SignalCancellation]]
An interface that can be used to hook interesting engine and planning events. A simple helper to create a new instance of the class.
Nit: can we standardize on `InFlight` or `Pending`? My preference is for the latter, but all I really care about is that we pick one term in strings that might end up being displayed to a user.
@@ -7,10 +7,12 @@ import { type Dispatch } from 'redux'; import { openDialog } from '../../../../base/dialog'; import { translate } from '../../../../base/i18n'; -import { JitsiModal, setActiveModalId } from '../../../../base/modal'; +import { setActiveModalId } from '../../../../base/modal'; +import JitsiScreen from '../../../../base/modal/components/JitsiScreen'; import { LoadingIndicator } from '../../../../base/react'; import { connect } from '../../../../base/redux'; -import { DIAL_IN_SUMMARY_VIEW_ID } from '../../../constants'; +import { screen } from '../../../../conference/components/native/routes'; +import { renderArrowBackButton } from '../../../../welcome/functions.native'; import { getDialInfoPageURLForURIString } from '../../../functions'; import DialInSummaryErrorDialog from './DialInSummaryErrorDialog';
[No CFG could be retrieved]
Component that displays a dial in summary dialog. Displays the JitsiModal with a link to the summary page.
Please remove setActiveModalId altogether.
@@ -101,8 +101,8 @@ public class TestDynamicFilter INNER, ImmutableList.of(equiJoinClause("ORDERS_OK", "LINEITEM_OK")), ImmutableMap.of("ORDERS_OK", "LINEITEM_OK"), - Optional.empty(), - tableScan("orders", ImmutableMap.of("ORDERS_OK", "orderkey")), + anyTree( + tableScan("orders", ImmutableMap.of("ORDERS_OK", "orderkey"))), exchange( project( tableScan("lineitem", ImmutableMap.of("LINEITEM_OK", "orderkey")))))));
[TestDynamicFilter->[testSubTreeJoinDFOnBuildSide->[exchange,equiJoinClause,of,project,assertPlan,anyTree,join,with,tableScan,numberOfDynamicFilters],testUncorrelatedSubqueries->[equiJoinClause,node,of,project,assertPlan,empty,anyTree,join,tableScan],testNestedDynamicFiltersRemoval->[exchange,equiJoinClause,of,project,assertPlan,empty,anyTree,join,tableScan],testSemiJoin->[of,project,assertPlan,filter,tableScan,anyTree,semiJoin],testNonInnerJoin->[exchange,equiJoinClause,of,project,assertPlan,anyTree,join,tableScan],testInnerInequalityJoinWithEquiJoinConjuncts->[anyNot,equiJoinClause,of,assertPlan,anyTree,join,tableScan],testJoinWithOrderBySameKey->[exchange,equiJoinClause,of,project,assertPlan,empty,anyTree,join,tableScan],testSubTreeJoinDFOnProbeSide->[exchange,equiJoinClause,of,project,assertPlan,empty,anyTree,join,tableScan],testNonPushedDownJoinFilterRemoval->[exchange,equiJoinClause,of,project,assertPlan,expression,anyTree,join,with,tableScan,numberOfDynamicFilters],testEmptyJoinCriteria->[exchange,of,assertPlan,anyTree,join,tableScan],testNonFilteringSemiJoin->[of,project,assertPlan,filter,tableScan,anyTree,semiJoin],testJoinMultipleEquiJoinClauses->[exchange,equiJoinClause,of,project,assertPlan,empty,anyTree,join,tableScan],testMultiSemiJoin->[of,project,assertPlan,filter,tableScan,anyTree,semiJoin],testSemiJoinUnsupportedDynamicFilterRemoval->[of,project,assertPlan,expression,filter,tableScan,anyTree,semiJoin],testJoin->[exchange,equiJoinClause,of,project,assertPlan,empty,anyTree,join,tableScan],testSemiJoinWithStaticFiltering->[of,project,assertPlan,filter,tableScan,anyTree,semiJoin],numberOfDynamicFilters->[detailMatches->[MatchResult,size],Matcher],testJoinOnCast->[node,of,project,assertPlan,anyTree,tableScan],createTestMetadataManager,name,of]]
Join the orders table with the orderkey and the lineitem table with the orderkey.
nit: we could use `DynamicFilters#createDynamicFilterExpression` as in `TestRemoveUnsupportedDynamicFilters#testDynamicFilterConsumedOnBuildSide` to be little more explicit At least please validate that there is `filter` node above table scan
@@ -290,7 +290,7 @@ class Site_Command extends \WP_CLI\CommandWithDBObject { * ## EXAMPLES * * $ wp site create --slug=example - * Success: Site 3 created: www.example.com/example/ + * Success: Site 3 created: http://www.example.com/example/ */ public function create( $_, $assoc_args ) { if ( !is_multisite() ) {
[Site_Command->[_empty->[_insert_default_terms,_empty_posts,_empty_taxonomies,_empty_comments]]]
Create a new blog. This function is a more efficient way to do this. It is a more efficient way to This function is called when a new site is created.
Trailing slash could be removed from this example also.
@@ -283,11 +283,12 @@ class BuildFileAddress(Address): return Address(spec_path=self.spec_path, target_name=self.target_name) @property + @deprecated('1.5.0.dev0', + hint_message='Use `BuildFileAddress.rel_path` to access the relative path to the ' + 'BUILD file for a target.') def build_file(self): """The build file that contains the object this address points to. - :API: public - :rtype: :class:`pants.base.build_file.BuildFile` """ return self._build_file
[BuildFileAddress->[to_address->[Address]],Address->[sanitize_path->[InvalidSpecPath],check_target_name->[InvalidTargetName],parse->[parse_spec],__init__->[sanitize_path,check_target_name]],parse_spec->[prefix_subproject,normalize_absolute_refs]]
The build file that contains the object this address points to.
should this stay marked `:API: public` until it's removal? technically, it's still public - just deprecated - until it's removed entirely.
@@ -4124,7 +4124,7 @@ void Game::updateFrame(ProfilerGraph *graph, RunStats *stats, f32 dtime, .getInterpolated(video::SColor(255, 0, 0, 0), 0.9); sky->overrideColors(clouds_dark, clouds->getColor()); sky->setBodiesVisible(false); - runData.fog_range = 20.0f * BS; + runData.fog_range = MYMIN(runData.fog_range * 0.5f, 32.0f * BS); // do not draw clouds after all clouds->setVisible(false); }
[No CFG could be retrieved]
Updates the time of day and the clouds information. function to update the clouds and the fog of the given object.
please std::min instead of mymin
@@ -2809,7 +2809,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S int accountDirSizeInGB = getSizeInGB(accountTemplateDirSize + accountSnapshotDirSize + accountVolumeDirSize); int defaultMaxAccountSecondaryStorageInGB = Integer.parseInt(cmd.getDefaultMaxAccountSecondaryStorage()); - if ((accountDirSizeInGB + contentLengthInGB) > defaultMaxAccountSecondaryStorageInGB) { + if (defaultMaxAccountSecondaryStorageInGB != Resource.RESOURCE_UNLIMITED && (accountDirSizeInGB + contentLengthInGB) > defaultMaxAccountSecondaryStorageInGB) { s_logger.error("accountDirSizeInGb: " + accountDirSizeInGB + " defaultMaxAccountSecondaryStorageInGB: " + defaultMaxAccountSecondaryStorageInGB + " contentLengthInGB:" + contentLengthInGB); String errorMessage = "Maximum number of resources of type secondary_storage for account has exceeded";
[NfsSecondaryStorageResource->[swiftDelete->[execute],findFile->[getFile],getTemplateOrVolumePostUploadCmd->[getPostUploadPSK],copySnapshotToTemplateFromNfsToNfs->[copySnapshotToTemplateFromNfsToNfsXenserver],configure->[retrieveNfsVersionFromParams,configure],execute->[swiftDelete,copyFromS3ToNfs,copyFromNfsToImage,getVirtualSize,configCerts,s3ListVolume,swiftListTemplate,createTemplateFromSnapshot,deleteSnapshot,s3ListTemplate,registerTemplateOnSwift,copyFromSwiftToNfs],checkSecondaryStorageResourceLimit->[getRootDir],createTemplateFromSnapshot->[copySnapshotToTemplateFromNfsToNfs],mountExists->[execute],s3ListTemplate->[determineS3TemplateNameFromKey],registerTemplateOnSwift->[downloadFromUrlToNfs],addRouteToInternalIpOrCidr->[execute],copyFromS3ToNfs->[postProcessing],configureIpFirewall->[execute],getVirtualSize->[getVirtualSize],deleteTemplate->[swiftDelete],copyFromNfsToSwift->[getVirtualSize,swiftUploadMetadataFile,getFile,getTemplateFormat],createUploadEntity->[getRootDir,getType],configureAuth->[execute],isOneTimePostUrlUsed->[getRootDir],configureStorageLayerClass->[configure],deleteLocalFile->[execute],swiftUploadMetadataFile->[getVirtualSize,swiftWriteMetadataFile,getTemplateFormat],initialize->[execute],swiftDownload->[execute],umount->[execute],copyFromNfsToS3->[getTemplateFormat,findFile,determineStorageTemplatePath,getVirtualSize],allowOutgoingOnPrivate->[execute],s3ListVolume->[determineS3VolumeIdFromKey],swiftUpload->[execute],swiftList->[execute],configureSSL->[execute],downloadFromUrlToNfs->[execute],validatePostUploadRequest->[getPostUploadPSK,updateStateMapWithError],swiftDownloadContainer->[execute],startAdditionalServices->[execute],deleteVolume->[swiftDelete],postUpload->[getSizeInGB,execute,getScriptLocation],fillNetworkInformation->[fillNetworkInformation,setName],attemptMount->[execute],copyFromSwiftToNfs->[postProcessing]]]
check if the maximum number of resources of type secondary_storage is exceeded.
@niteshsarda thanks for the fix. To improve this PR to an even higher level, how do you feel about extracting the condition of the `if` to a method? Then, this method can receive test cases and Java doc explaining when it returns true or false. This can facilitate code maintenance in the future and also to reduce the bar for newcomers.
@@ -613,6 +613,11 @@ class Jetpack { // A filter to control all just in time messages add_filter( 'jetpack_just_in_time_msgs', '__return_true', 9 ); + // If enabled, point edit post and page links to Calypso instead of WP-Admin. + if ( get_option( 'jetpack_edit_links_calypso_redirect' ) ) { + add_filter( 'get_edit_post_link', array( $this, 'point_edit_links_to_calypso' ), 1, 2 ); + } + // Update the Jetpack plan from API on heartbeats add_action( 'jetpack_heartbeat', array( $this, 'refresh_active_plan_from_wpcom' ) );
[Jetpack->[verify_json_api_authorization_request->[add_nonce],get_locale->[guess_locale_from_lang],admin_notices->[opt_in_jetpack_manage_notice,can_display_jetpack_manage_notice],authenticate_jetpack->[verify_xml_rpc_signature],admin_page_load->[disconnect,unlink_user,can_display_jetpack_manage_notice],wp_rest_authenticate->[verify_xml_rpc_signature],jumpstart_has_updated_module_option->[do_stats,stat],jetpack_getOptions->[get_connected_user_data],build_connect_url->[build_connect_url],opt_in_jetpack_manage_notice->[opt_in_jetpack_manage_url],display_activate_module_link->[opt_in_jetpack_manage_url],register->[do_stats,stat,get_remote_query_timeout_limit,validate_remote_register_response]]]
This method is called by the constructor of the class. This method is called by the server when the user is redirected to the Jetpack login This method is called by Jetpack when it is activated. Universal ajax callback for all Jetpack tracking events triggered via js_ajax_track This method is used to filter all the modules and actions that are not part of the J.
@rralian instead of separate option here, would it maybe be more suitable to add a custom filter that you can enable from `wpcomsh`? I imagine that would be easier for you than setting this option for all AT sites (or making sure that it's set during transfer).
@@ -77,6 +77,15 @@ abstract class CommandWithMeta extends \WP_CLI_Command { /** * Get meta field value. * + * <id> + * : The ID of the object. + * + * <key> + * : The name of the meta field to get. + * + * [--format=<format>] + * : Accepted values: table, json. Default: table + * * @synopsis <id> <key> [--format=<format>] */ public function get( $args, $assoc_args ) {
[CommandWithMeta->[list_->[display_items,check_object_id,get_fields],get->[check_object_id],update->[check_object_id],add->[check_object_id],delete->[check_object_id]]]
Gets metadata from a specific object.
Can you remove `@synopsis` ? Now that we have expanded docs here, it's no longer necessary.
@@ -53,4 +53,18 @@ abstract class BaseEntity extends BaseDataTransferObject return $this->$name; } + + /** + * @param $name + * @return bool + * @throws HTTPException\InternalServerErrorException + */ + public function __isset($name) + { + if (!property_exists($this, $name)) { + throw new HTTPException\InternalServerErrorException('Unknown property ' . $name . ' in Entity ' . static::class); + } + + return !empty($this->$name); + } }
[No CFG could be retrieved]
Get a property of the Entity.
Why do you need this for? There aren't supposed to be dynamic properties in entities.
@@ -26,7 +26,9 @@ const { } = require('./helpers'); const {buildExtensions} = require('./extension-helpers'); const {compileCss} = require('./css'); +const {compileExprs} = require('./compile-expr'); const {createCtrlcHandler, exitCtrlcHandler} = require('../ctrlcHandler'); +const {isTravisBuild} = require('../travis'); const {maybeUpdatePackages} = require('./update-packages'); const {parseExtensionFlags} = require('./extension-helpers'); const {serve} = require('./serve');
[No CFG could be retrieved]
Creates a new build process and starts a new build process. Perform a single build of the nobuild.
Can we call this `compileJison` to be a bit more specific?
@@ -42,7 +42,7 @@ function checkNode(node, context) { node.properties.forEach(function (prop) { if (!prop.key.raw && !prop.computed) { context.report({ - node, + node: prop, message: 'Found: ' + prop.key.name +
[No CFG could be retrieved]
Checks if a node is a reserved key.
Does this belong in a separate pull request?
@@ -113,7 +113,7 @@ def test_completion_for_default_parameters(script): def test_completion_option_for_command(script): """ - Test getting completion for ``--`` in command (eg. pip search --) + Test getting completion for ``--`` in command (e.g. ``pip search --``) """ res, env = setup_completion(script, 'pip search --', '2')
[test_completion_for_un_snippet->[setup_completion],test_completion_option_for_command->[setup_completion],test_completion_for_default_parameters->[setup_completion]]
Test getting completion for missing option in command.
This change is good but unrelated? But I mean, it doesn't add much noise so it's fine. :P
@@ -281,9 +281,9 @@ def register_resource(res: 'Resource', ty: str, name: str, custom: bool, props: # Note: a resource urn will always get a value, and thus the output property # for it can always run .apply calls. log.debug(f"preparing resource for RPC") - urn_future = asyncio.Future() - urn_known = asyncio.Future() - urn_secret = asyncio.Future() + urn_future: asyncio.Future[Output[str]] = asyncio.Future() + urn_known: asyncio.Future[bool] = asyncio.Future() + urn_secret: asyncio.Future[bool] = asyncio.Future() urn_known.set_result(True) urn_secret.set_result(False) resolve_urn = urn_future.set_result
[register_resource->[do_register->[prepare_resource]],prepare_resource->[ResourceResolverOperations],read_resource->[do_read->[prepare_resource]]]
Registers a new resource object with the network. Register a with the server. Register a new node in the system. Register a resource in the cluster.
Future of Output seems very very odd.
@@ -21,6 +21,11 @@ class Jetpack_Media_Summary { $blog_id = get_current_blog_id(); } + $cache_key = md5( "{$blog_id}_{$post_id}_{$args['max_words']}_{$args['max_chars']}" ); + if ( isset( self::$cache[ $cache_key ] ) ){ + return self::$cache[ $cache_key ]; + } + if ( ! class_exists( 'Jetpack_Media_Meta_Extractor' ) ) { jetpack_require_lib( 'class.media-extractor' ); }
[No CFG could be retrieved]
Get a single post This function extracts the excerpt from a post and returns it as an array. This function get video information from vimeo and video poster This function count video. This function is used to extract the image from the post and make it the primary focus of This function prioritizes galleries first and then tries to choose an image post.
Nit-picking, but you miss a space after the closing brace here.
@@ -70,10 +70,11 @@ type UpdateInfo struct { Config map[string]ConfigValue `json:"config"` // Information obtained from an update completing. - Result UpdateResult `json:"result"` - EndTime int64 `json:"endTime"` - Deployment *DeploymentV1 `json:"deployment,omitempty"` - ResourceChanges map[OpType]int `json:"resourceChanges,omitempty"` + Result UpdateResult `json:"result"` + EndTime int64 `json:"endTime"` + Version int `json:"version"` + Deployment json.RawMessage `json:"deployment,omitempty"` + ResourceChanges map[OpType]int `json:"resourceChanges,omitempty"` } // GetHistoryResponse is the response from the Pulumi Service when requesting
[No CFG could be retrieved]
The UpdateKind object is returned when an update is complete.
Not sure if we want to use different names for the new `Version` fields given that they're in largish types that contain more than just a `Deployment`. Thoughts?
@@ -19,6 +19,8 @@ package io.druid.segment; +import javax.annotation.Nullable; + public interface ObjectColumnSelector<T> extends ColumnValueSelector { public Class<T> classOfObject();
[getDouble->[doubleValue],getFloat->[floatValue],getLong->[longValue]]
Returns the class of the object.
Could we make `ObjectColumnSelector<T extends Number>`?
@@ -34,6 +34,7 @@ import org.apache.beam.model.pipeline.v1.RunnerApi.ProcessPayload; import org.apache.beam.model.pipeline.v1.RunnerApi.ReadPayload; import org.apache.beam.model.pipeline.v1.RunnerApi.StandardEnvironments; import org.apache.beam.model.pipeline.v1.RunnerApi.WindowIntoPayload; +import org.apache.beam.sdk.metrics.BeamUrns; import org.apache.beam.sdk.util.common.ReflectHelpers; import org.apache.beam.vendor.grpc.v1p13p1.com.google.protobuf.ByteString; import org.apache.beam.vendor.grpc.v1p13p1.com.google.protobuf.InvalidProtocolBufferException;
[Environments->[windowExtractor->[getEnvironmentId],getEnvironment->[getEnvironment],readExtractor->[getEnvironmentId],parDoExtractor->[getEnvironmentId],createProcessEnvironment->[createProcessEnvironment],combineExtractor->[getEnvironmentId]]]
Package private for unit testing. Returns an immutable map of all possible values for the given key.
How come this file was fine before without importing this? : s
@@ -2145,6 +2145,11 @@ namespace System.Windows.Forms /// </summary> protected override void SetItemsCore(IList value) { + if (value == null) + { + throw new ArgumentNullException(nameof(value)); + } + BeginUpdate(); Items.ClearInternal(); Items.AddRangeInternal(value);
[ListBox->[FindString->[FindString],OnFontChanged->[OnFontChanged],WmReflectMeasureItem->[OnMeasureItem],Sort->[NativeClear,NativeAdd,NativeSetSelected,Sort,CheckNoDataSource,GetSelected],GetSelected->[CheckIndex],OnHandleDestroyed->[OnHandleDestroyed],ToString->[ToString],OnDisplayMemberChanged->[OnDisplayMemberChanged],OnHandleCreated->[NativeAdd,OnHandleCreated],SetBoundsCore->[SetBoundsCore],OnSelectedValueChanged->[OnSelectedValueChanged],WmReflectCommand->[OnSelectedIndexChanged],OnDataSourceChanged->[OnDataSourceChanged,BeginUpdate,EndUpdate],Refresh->[OnMeasureItem,Refresh],OnGotFocus->[OnGotFocus],IndexFromPoint->[IndexFromPoint],SetSelected->[OnSelectedIndexChanged,NativeSetSelected,SetSelected],ResetBackColor->[ResetBackColor],Rectangle->[CheckIndex],GetSelectedInternal->[GetSelected],WmReflectDrawItem->[OnDrawItem],RescaleConstantsForDpi->[RescaleConstantsForDpi],OnResize->[OnResize],OnSelectedIndexChanged->[GetSelected,OnSelectedIndexChanged,ItemsCountIsChanged,FocusedItemIsChanged],FindStringExact->[FindStringExact],OnParentChanged->[OnParentChanged],ScaleControl->[ScaleControl],SetItemsCore->[BeginUpdate,EndUpdate,OnSelectedValueChanged],Size->[Size],RefreshItems->[NativeClear],OnChangeUICues->[OnChangeUICues],ResetForeColor->[ResetForeColor],WndProc->[WmReflectCommand,WmReflectDrawItem,WmReflectMeasureItem,WmPrint,WndProc]]]
Set the items in the list.
note that this hoisting has the side effect that if you call `SetItemsCore(null)` you'd previously have thrown ANE but `Items` would be cleared before the exception was thrown. But I think its better because we've moved param validation to the front, and we shouldn't usually have side effects for invalid parameters
@@ -528,8 +528,8 @@ class Wheel(object): class WheelBuilder(object): """Build wheels from a RequirementSet.""" - def __init__(self, requirement_set, finder, wheel_dir, build_options=[], - global_options=[]): + def __init__(self, requirement_set, finder, wheel_dir, build_options=(), + global_options=()): self.requirement_set = requirement_set self.finder = finder self.wheel_dir = normalize_path(wheel_dir)
[WheelBuilder->[build->[_build_one]],move_wheel_files->[record_installed->[normpath],clobber->[record_installed],open_for_csv,rehash,root_is_purelib,clobber,get_entrypoints]]
Initialize a new object.
Hmm, I'd tend to either use `None` as the default and do an explicit `self.build_options = [] if build_options is None else build_options`, or if we _really_ want the attribute to be immutable, I'd do `self.build_options = tuple(build_options)`. Again, I'm sort of on the fence.
@@ -13,7 +13,7 @@ module GobiertoParticipation private def find_issue_activities - ActivityCollectionDecorator.new(Activity.in_site(current_site).no_admin.in_process(@issue.processes).sorted.includes(:subject, :author, :recipient).page(params[:page])) + ActivityCollectionDecorator.new(Activity.in_site(current_site).no_admin.in_process(@issue.processes.open_process).sorted.includes(:subject, :author, :recipient).page(params[:page])) end end end
[ActivitiesController->[find_issue_activities->[new,page],include]]
Find all activities in issue.
Line is too long. [189/180]
@@ -255,7 +255,8 @@ public class ProPurchaseOption { final double distance = Math.max(0, enemyDistance - 1.5); final int moveValue = isLandTransport ? (movement + 1) : movement; // 1, 2, 2.5, 2.75, etc - final double moveFactor = 1 + 2 * (Math.pow(2, moveValue - 1) - 1) / Math.pow(2, moveValue - 1); + final double moveFactor = + 1.0 + 2.0 * (Math.pow(2, moveValue - 1.0) - 1.0) / Math.pow(2, moveValue - 1.0); return Math.pow(moveFactor, distance / 5); }
[ProPurchaseOption->[calculateEfficiency->[calculateEfficiency]]]
Calculates the land distance factor. private double totalSupportFactor = 0 ;.
Haven't checked, but maybe when using `1d` and `2d` instead of `1.0` and `2.0` it might fit into a single line? But not really important, similar thing like the style preference earlier. It would be interesting to see if we consistently use one form over the other, or it's just a 50:50 chance each time
@@ -661,7 +661,12 @@ exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec')) except pkg_resources.DistributionNotFound: return False except pkg_resources.VersionConflict: - self.conflicts_with = pkg_resources.get_distribution(self.req.project_name) + existing_dist = pkg_resources.get_distribution(self.req.project_name) + if self.set.use_user_site: + if dist_in_usersite(existing_dist): + self.conflicts_with = existing_dist + else: + self.conflicts_with = existing_dist return True @property
[parse_requirements->[parse_requirements,from_line,from_editable],RequirementSet->[cleanup_files->[remove_temporary_source],install->[remove_temporary_source,rollback_uninstall,install,uninstall,values,commit_uninstall],uninstall->[values,commit_uninstall,uninstall],create_bundle->[_clean_zip_name,bundle_requirements],__init__->[Requirements],prepare_files->[requirements,update_editable,check_if_exists,build_location,run_egg_info,values,InstallRequirement,add_requirement,archive,has_requirement,move_bundle_files,assert_source_matches_version,bundle_requirements],has_requirements->[values],locate_files->[values,build_location,check_if_exists],has_editables->[values],bundle_requirements->[values],__str__->[values]],UninstallPthEntries->[remove->[remove],add->[add]],InstallRequirement->[from_path->[from_path],requirements->[egg_info_lines],egg_info_lines->[egg_info_data],dependency_links->[egg_info_lines],run_egg_info->[correct_build_location],installed_version->[pkg_info],pkg_info->[egg_info_data,egg_info_path],bundle_requirements->[InstallRequirement]],Requirements->[__repr__->[keys]],UninstallPathSet->[remove->[_can_uninstall,values,remove,compact,_stash],add_pth->[_permitted,add],add->[_permitted,add],compact->[add],rollback->[_stash,rollback]]]
Check if a node - package exists.
I would rather pass a `user_site` flag explicitly to each `InstallRequirement` rather than give `InstallRequirement` awareness of the `RequirementSet`.
@@ -199,5 +199,13 @@ public class SdkInternalContext implements EventInternalContext<SdkInternalConte public ExecutorCallback getCallback() { return callback; } + + public void setExecutionContextAdapter(ExecutionContextAdapter executionContextAdapter) { + this.executionContextAdapter = executionContextAdapter; + } + + public ExecutionContextAdapter getExecutionContextAdapter() { + return executionContextAdapter; + } } }
[SdkInternalContext->[setPolicyToApply->[setPolicyToApply],isNoPolicyOperation->[isNoPolicyOperation,getPolicyToApply],setOperationExecutionParams->[setOperationExecutionParams],setResolutionResult->[setResolutionResult],getPolicyToApply->[getPolicyToApply],setConfiguration->[setConfiguration],getConfiguration->[getConfiguration],getResolutionResult->[getResolutionResult],getOperationExecutionParams->[getOperationExecutionParams]]]
Returns the callback.
should this actually be mutable? @elrodro83 what say you?
@@ -430,16 +430,6 @@ public class NicProfile implements InternalIdentity, Serializable { @Override public String toString() { - return new StringBuilder("NicProfile[").append(id) - .append("-") - .append(vmId) - .append("-") - .append(reservationId) - .append("-") - .append(iPv4Address) - .append("-") - .append(broadcastUri) - .append("]") - .toString(); + return String.format("NicProfile {\"id\": %s, \"vmId\": %s, \"reservationId\": \"%s\", \"iPv4Address\": \"%s\", \"broadcastUri\": \"%s\"}", id, vmId, reservationId, iPv4Address, broadcastUri); } }
[NicProfile->[toString->[toString],getIPv6Gateway,getReservationStrategy,getIPv6Address,getTrafficType,getReservationId,getBroadcastDomainType,getIPv4Netmask,getInstanceId,getDeviceId,isDefaultNic,getMode,getMacAddress,getUuid,getIPv4Address,getId,getAddressFormat,getIPv6Cidr,getIPv4Gateway]]
Returns the String representation of the nanomaton.
@GutoVeronezi why are you converting them into hand-written json?
@@ -247,6 +247,7 @@ def generate_defualt_params(): if __name__ == '__main__': + """@nni.next_parameter()""" try: main(generate_defualt_params()) except Exception as exception:
[max_pool->[max_pool],avg_pool->[avg_pool],conv2d->[conv2d],main->[build_network,MnistNetwork],generate_defualt_params,main]
This function is the entry point for the main function. It is the entry point.
what is the difference between @nni.next_parameter() and @nni.get_next_parameter()?
@@ -432,12 +432,14 @@ handler = FetchHandlerExample(var_dict=var_dict) class Executor(object): """ An Executor in Python, supports single/multiple-GPU running, - and single/multiple-CPU running. When construction the Executor, - the device is required. + and single/multiple-CPU running. Args: - place(fluid.CPUPlace()|fluid.CUDAPlace(n)): This parameter represents - the executor run on which device. + place(fluid.CPUPlace()|fluid.CUDAPlace(n)|None): This parameter represents + which device the executor run on. When this parameter is None, PaddlePaddle + will set the default device according to its installation version. If Paddle + is CPU version, the default device would set to CPUPlace(). If Paddle is GPU + version, the default device would set to CUDAPlace(0). Default is None. Returns: Executor
[Executor->[_run_impl->[global_scope,run,_run_parallel],_run_program->[_add_feed_fetch_ops,_add_program_cache,_get_program_cache,as_numpy,run,_add_ctx_cache,_get_strong_program_cache_key,_add_scope_cache,_get_scope_cache,_get_ctx_cache,_feed_data],_add_feed_fetch_ops->[has_feed_operators,has_fetch_operators],_run_from_dataset->[_adjust_pipeline_resource,_dump_debug_info,_prepare_trainer],_prepare_trainer->[global_scope],_run_parallel->[check_feed_shape_type,as_numpy],close->[close],__init__->[Executor],train_from_dataset->[_run_from_dataset],_feed_data->[check_feed_shape_type,_as_lodtensor],_run_inference->[run],infer_from_dataset->[_run_from_dataset]],scope_guard->[_switch_scope],check_feed_shape_type->[dimension_is_compatible_with,dtype_is_compatible_with],_fetch_var->[global_scope,as_numpy],as_numpy->[as_numpy]]
Creates a class which can be used to handle the n - tuple of the n - tuple Run the main program once and only once.
device would set to -> device would be set to
@@ -254,7 +254,7 @@ class StubConv(StubWeightBiasLayer): keras_layer.set_weights((self.weights[0].T, self.weights[1])) def size(self): - return self.filters * self.kernel_size * self.kernel_size + self.filters + return (self.input_channel * self.kernel_size * self.kernel_size + 1) * self.filters @abstractmethod def to_real_layer(self):
[StubConv->[export_weights_keras->[set_weights],import_weights_keras->[get_weights,set_weights]],StubGlobalPooling3d->[to_real_layer->[GlobalAvgPool3d]],StubDense->[export_weights_keras->[set_weights],import_weights_keras->[get_weights,set_weights]],StubGlobalPooling1d->[to_real_layer->[GlobalAvgPool1d]],to_real_keras_layer->[keras_dropout],set_stub_weight_to_torch->[export_weights],StubWeightBiasLayer->[export_weights_keras->[set_weights],import_weights->[set_weights],import_weights_keras->[get_weights,set_weights]],set_stub_weight_to_keras->[export_weights_keras],layer_width->[is_layer],StubGlobalPooling2d->[to_real_layer->[GlobalAvgPool2d]],TorchFlatten->[forward->[size]],set_torch_weight_to_stub->[import_weights],set_keras_weight_to_stub->[import_weights_keras],StubBatchNormalization->[import_weights->[set_weights]]]
Returns the size of the kernel and filters.
why we change this?
@@ -54,7 +54,7 @@ examples = dict( ... training_frame=cars) >>> cars_gbm.auc() """, - keep_cross_validation_models=""" + keep_cross_validation_models=""" >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv") >>> cars["economy_20mpg"] = cars["economy_20mpg"].asfactor() >>> predictors = ["displacement","power","weight","acceleration","year"]
[update_param->[param],dict]
Train and train a H2O gradient boosting estimator. Train and predict a CARS with a cross - validation model.
This is now indented 3 spaces instead of 4. All others are indented 4 spaces.
@@ -44,6 +44,16 @@ var ( // KubeConfig is the key to the kubeconfig const KubeConfig = "kubeconfig" +func init() { + // enable protobuf for Gardener API for controller-runtime clients + protobufSchemeBuilder := runtime.NewSchemeBuilder( + gardenercorescheme.AddToScheme, + settingsscheme.AddToScheme, + ) + + utilruntime.Must(apiutil.AddToProtobufScheme(protobufSchemeBuilder.AddToScheme)) +} + // NewClientFromFile creates a new Client struct for a given kubeconfig. The kubeconfig will be // read from the filesystem at location <kubeconfigPath>. If given, <masterURL> overrides the // master URL in the kubeconfig.
[RESTClient,ClientConfig,RawConfig,DiscoverVersion,CompareVersions,New,NewForConfig,NewClientConfigFromBytes,Discovery,Errorf,NewNonInteractiveDeferredLoadingClientConfig,Get,InClusterConfig]
NewClientFromFile creates a new client object for the given master URL and kubeconfig. clientConfig returns a client config for the command.
What about the new seedmanagement scheme?
@@ -63,7 +63,10 @@ export default (state = initialState, action) => { // Actions export const handleRegister = (login, email, password, langKey = 'en') => ({ type: ACTION_TYPES.CREATE_ACCOUNT, - payload: axios.post('/api/register', { login, email, password, langKey }) + payload: axios.post('/api/register', { login, email, password, langKey }), + meta : { + successMessage: <% if (enableTranslation) { %>'register.messages.success'<% } else { %>translate('register.messages.success')<% } %> + } }); export const reset = () => ({
[No CFG could be retrieved]
Create or update a .
same here, having the key for translation and translate for noi18n looks weird
@@ -612,6 +612,7 @@ class Processor Logger::log('Updating profile for ' . $activity['object_id'], Logger::DEBUG); APContact::getByURL($activity['object_id'], true); +// Contact::updateFromProbe($activity['object_id'], $network = '', $force = false) } /**
[No CFG could be retrieved]
Updates the profile for the given activity.
Do you plan on removing this line at some point?
@@ -385,6 +385,16 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) { g.AddLinuxGIDMapping(uint32(0), uint32(0), uint32(1)) } } + + for _, i := range c.config.Spec.Linux.Namespaces { + if i.Type == spec.UTSNamespace { + hostname := c.Hostname() + g.SetHostname(hostname) + g.AddProcessEnv("HOSTNAME", hostname) + break + } + } + if c.config.UTSNsCtr != "" { if err := c.addNamespaceContainer(&g, UTSNS, c.config.UTSNsCtr, spec.UTSNamespace); err != nil { return nil, err
[generateSpec->[getUserOverrides],checkpoint->[exportCheckpoint,checkpointRestoreSupported,checkpointRestoreLabelLog],restore->[checkpointRestoreSupported,prepare,importCheckpoint,checkpointRestoreLabelLog]]
generateSpec generates a spec from the container s config and the container s state. Add mount options to container This function is called by the container init code to set the correct process environment variables and setup addNamespaceContainer creates a namespace container in the global namespace.
Shouldn't we check for path here? To see if a path to the UTS namespace was given?
@@ -211,6 +211,10 @@ module Engine @game.purchasable_companies(entity) end + def purchasable_unsold_companies + @game.companies.select { |c| c.owner.nil? } + end + def get_par_prices(entity, _corp) @game .stock_market
[BuySellParShares->[sell_shares->[can_sell?],can_ipo_any?->[can_buy?],can_buy_any_from_market?->[can_buy?],can_sell_any?->[can_sell?],can_buy_any_from_ipo?->[can_buy?],can_buy_any?->[can_buy_any_from_ipo?,can_buy_any_from_market?],pass!->[pass!],purchasable_companies->[purchasable_companies]]]
Returns an array of the companies that can be purchased from other players.
companies.reject{ |c| c.owner }
@@ -37,6 +37,14 @@ func getCleanInTlfPath(p *parsedPath) cleanInTlfPath { return cleanInTlfPath(path.Clean(p.rawInTlfPath)) } +func getParentPath(p cleanInTlfPath) (parent cleanInTlfPath, ok bool) { + lastSlashIndex := strings.LastIndex(string(p), "/") + if lastSlashIndex <= 0 { + return "", false + } + return p[:lastSlashIndex], true +} + type debouncedNotify struct { notify func() shutdown func()
[LocalChange->[nodeChangeLocked],SubscribePath->[subscribePath],subscribePath->[registerForChangesLocked,checkSubscriptionIDLocked],SubscribeNonPath->[subscribeNonPath],Unsubscribe->[unsubscribeNonPath,unsubscribePath],BatchChanges->[nodeChangeLocked],unsubscribePath->[unregisterForChangesLocked],subscribeNonPath->[checkSubscriptionIDLocked]]
type is a function that returns a message that can be sent to a channel when a debounce returns a notify channel that will be sent when a new is received.
Wouldn't `path.Split()` do this same thing, and you can just check for a `""` parent instead of using a bool?
@@ -116,13 +116,14 @@ public class MetricsPluginTest extends ActiveMQTestBase { assertThat(artemisMetrics, containsInAnyOrder( // artemis.(un)routed.message.count is present twice, because of activemq.notifications address - new Metric("artemis.address.memory.usage", "Memory used by all the addresses on broker for in-memory messages", 0.0), + new Metric("artemis.address.memory.usage", "Bytes used by all the addresses on broker for in-memory messages", 0.0), new Metric("artemis.connection.count", "Number of clients connected to this server", 1.0), new Metric("artemis.consumer.count", "number of consumers consuming messages from this queue", 0.0), new Metric("artemis.delivering.durable.message.count", "number of durable messages that this queue is currently delivering to its consumers", 0.0), new Metric("artemis.delivering.durable.persistent.size", "persistent size of durable messages that this queue is currently delivering to its consumers", 0.0), new Metric("artemis.delivering.message.count", "number of messages that this queue is currently delivering to its consumers", 0.0), new Metric("artemis.delivering.persistent_size", "persistent size of messages that this queue is currently delivering to its consumers", 0.0), + new Metric("artemis.disk.store.usage", "Memory used by the disk store", 0.0), new Metric("artemis.durable.message.count", "number of durable messages currently in this queue (includes scheduled, paged, and in-delivery messages)", 0.0), new Metric("artemis.durable.persistent.size", "persistent size of durable messages currently in this queue (includes scheduled, paged, and in-delivery messages)", 0.0), new Metric("artemis.message.count", "number of messages currently in this queue (includes scheduled, paged, and in-delivery messages)", 0.0),
[MetricsPluginTest->[testForBasicMetricsPresenceAndValue->[toString],testForArtemisMetricsPresence->[Metric->[equals->[equals]],Metric],setUp->[setUp]]]
Tests for the presence and values of the expected metrics in the Artemis metrics. Returns a metric with all the metrics that are currently registered. This metric is used to collect metrics from the queue.
I think this should be Bytes as well... can you change the description as well?
@@ -826,7 +826,8 @@ def _handle_exception(exc_type, exc_value, trace, args): """ logger.debug( - "Exiting abnormally:\n%s", + "Exiting abnormally:%s%s", + os.linesep, "".join(traceback.format_exception(exc_type, exc_value, trace))) if issubclass(exc_type, Exception) and (args is None or not args.debug):
[_plugins_parsing->[add,add_group,add_plugin_args],revoke->[revoke,_determine_account],auth->[_init_le_client,_find_domains],_create_subparsers->[add_subparser,add_argument,flag_default],HelpfulArgumentParser->[add_plugin_args->[add_group],add->[add_argument],__init__->[SilentParser,flag_default]],SilentParser->[add_argument->[add_argument]],install->[_init_le_client,_find_domains],run->[_find_duplicative_certs,_find_domains,_init_le_client],create_parser->[HelpfulArgumentParser,flag_default,add,config_help,add_group],main->[_setup_logging,create_parser],_paths_parser->[config_help,add,add_group,flag_default],_init_le_client->[_determine_account],rollback->[rollback],main]
Logs exceptions and reports them to the user.
I'm starting to feel that this (`os.linesep`) is becoming an antipattern
@@ -49,8 +49,9 @@ var ( }, } searchDescription = ` - Search registries for a given image. Can search all the default registries or a specific registry. - Can limit the number of results, and filter the output based on certain conditions.` + Search registries for a given image. Will search all the default registries in /etc/containers/registries.conf. + To search only a specific registry use the --registry flag. Can limit the number of results, and filter the output + based on certain conditions.` searchCommand = cli.Command{ Name: "search", Usage: "search registry for image",
[headerMap->[Field,ValueOf,ToUpper,NumField,Type,Indirect],StringSlice,BoolT,headerMap,Writer,IsSet,Atoi,Int,Args,GetSystemContext,SearchRegistry,GetRuntime,Errorf,Bool,Wrapf,GetInsecureRegistries,Join,Contains,GetRegistries,Shutdown,Split,Out,String,Replace,TODO]
"parse" - > parse the input of the searchCmd is a wrapper for the libcompose ethernet - find command.
There are no "default" registries in /etc/containers/registries.conf. Perhaps "Will search all the registries listed in the "registries.search" table in /etc/containers/registries.conf"
@@ -165,7 +165,13 @@ module ProtocolsIoHelper end def step_hash_null?(step_json) - step_json.dig(0, 'components', 0, 'component_type_id').nil? + return false unless step_json.dig( + 0, 'components', 0, 'component_type_id' + ).nil? + return false unless step_json.dig( + 0, 'components', '0', 'component_type_id' + ).nil? + true end # Images are allowed in:
[protocols_io_fill_desc->[prepare_for_view],pio_stp_17->[prepare_for_view],pio_stp_1->[prepare_for_view],pio_stp->[fill_attributes],prepare_for_view->[pio_eval_len,string_html_table_remove,not_null],fill_attributes->[pio_eval_title_len,prepare_for_view],pio_stp_6->[pio_eval_title_len],protocols_io_fill_step->[protocols_io_fill_desc,not_null,pio_stp_17,pio_stp_1,pio_stp,pio_stp_6,protocolsio_string_to_table_element,protocols_io_guid_reorder_step_json]]
Checks whether the step_json is missing its step hash.
You can make it shorter: `step_json.dig(0, 'components', 0, 'component_type_id').nil? ||` `step_json.dig(0, 'components', '0', 'component_type_id').nil?`
@@ -45,7 +45,11 @@ public class Connect extends AbstractCommand { try { String connectionString = commandLine.getArguments().get(0).getValue(); Connection connection = ConnectionFactory.getConnection(connectionString); - connection.connect(context); + String password = null; + if (connection.needsCredentials()) { + password = new String(context.getOutputAdapter().secureReadln("Password: ")); + } + connection.connect(context, password); context.setConnection(connection); } catch (Exception e) { context.error(e);
[Connect->[isAvailable->[isConnected],execute->[getValue,error,connect,disconnect,setConnection,getConnection,isConnected]]]
Execute the connection.
Is an explicit copy of the String required here?
@@ -14,7 +14,7 @@ if ($_SESSION['userlevel'] < 10 || $_SESSION['userlevel'] > 10) { $delete_username = dbFetchCell('SELECT username FROM users WHERE user_id = ?', array($vars['id'])); if ($vars['confirm'] == 'yes') { - if (deluser($delete_username)) { + if (deluser($vars['id']) >= 0) { print_message('<div class="infobox">User "'.$delete_username.'" deleted!'); } else { print_error('Error deleting user "'.$delete_username.'"!');
[No CFG could be retrieved]
Generate the HTML for a single user identifier. The authentication module shows a hidden field that shows the user ID.
Why not put the >= 0 check inside the function so it just returns a boolean?
@@ -256,7 +256,10 @@ public class GroupByMergingQueryRunnerV2 implements QueryRunner<Row> public AggregateResult call() { try ( + // These variables are used to close releasers automatically. + @SuppressWarnings("unused") Releaser bufferReleaser = mergeBufferHolder.increment(); + @SuppressWarnings("unused") Releaser grouperReleaser = grouperHolder.increment() ) { final AggregateResult retVal = input.run(queryPlusForRunners, responseContext)
[GroupByMergingQueryRunnerV2->[run->[cleanup->[close],make->[close->[close],close,get],run],waitForFutureCompletion->[get]]]
Run a group-by query with a specific context. Get a sequence of rows from the mergeBufferPool. The main method that is called when a segment is unmapped. Returns a new iterator that can be used to iterate over the resources.
IMO, `//noinspection unused` is easier on the eyes. But this is personal preference.
@@ -152,7 +152,8 @@ static int construct_from_text(OSSL_PARAM *to, const OSSL_PARAM *paramdef, } } - *to = *paramdef; + to->key = template->key; + to->data_type = template->data_type; to->data = buf; to->data_size = buf_n; to->return_size = 0;
[int->[strncpy,CRYPTOerr,OPENSSL_hexstr2buf_ex,memcpy,strncmp,BN_sub_word,BN_bn2nativepad,strlen,BN_hex2bn,BN_dec2bn,BN_is_negative,OSSL_PARAM_locate_const,BN_num_bytes],OSSL_PARAM_allocate_from_text->[OPENSSL_zalloc,CRYPTOerr,OPENSSL_free,BN_free,construct_from_text,prepare_from_text]]
This function is called from the BN ASN.1 constructor.
`unsigned int` for flags perhaps? We usually avoid signed flags.