patch stringlengths 18 160k | callgraph stringlengths 4 179k | summary stringlengths 4 947 | msg stringlengths 6 3.42k |
|---|---|---|---|
@@ -1586,6 +1586,14 @@ namespace ProtoScript.Runners
ResetForDeltaExecution();
runnerCore.Options.ApplyUpdate = true;
Execute(true);
+
+ // Execute() will push a stack frame in SetupAndBounceStackFrame().
+ // In normal execution, that stack frame will pop in RETB. But in
+ // ApplyUpdate(), there is no RETB instruciton, so need to manually
+ // cleanup stack frame.
+ StackValue restoreFramePointer = runtimeCore.RuntimeMemory.GetAtRelative(ProtoCore.DSASM.StackFrame.kFrameIndexFramePointer);
+ runtimeCore.RuntimeMemory.FramePointer = (int)restoreFramePointer.IntegerValue;
+ runtimeCore.RuntimeMemory.PopFrame(ProtoCore.DSASM.StackFrame.kStackFrameSize);
}
ForceGC();
}
| [LiveRunner->[CompileAndExecute->[Compile,Execute],ApplyUpdate->[Execute,ApplyUpdate,ForceGC],Execute->[ReInitializeLiveRunner,SetupRuntimeCoreForExecution],CompileAndExecuteForDeltaExecution->[ResetForDeltaExecution,CompileAndExecute,PostExecution],SynchronizeInternal->[CompileAndExecuteForDeltaExecution,UpdateCachedASTFromSubtrees,GetDeltaASTList,ResetForDeltaExecution,Apply],ResetVMAndResyncGraph->[ReInitializeLiveRunner,UpdateCmdLineInterpreter],ResetVMAndImportLibrary->[ReInitializeLiveRunner,UpdateCmdLineInterpreter],UpdateGraph->[UpdateGraph],PreviewInternal->[GetDeltaASTList,EstimateNodesAffectedByASTList],ReInitializeLiveRunner->[InitCore],ResetForDeltaExecution->[ResetForDeltaExecution],Dispose->[Dispose]],GraphSyncData->[ToString->[ToString]],Subtree->[ToString->[ToString]],ChangeSetComputer->[GetModifiedNodes->[HandleRedefinedLHS],EstimateNodesAffectedByASTList->[EstimateReachableGraphNodes],CompileToSSA->[CompileToSSA],GetDeltaASTList->[GetDeltaAstListDeleted,GetDeltaAstListModified,GetDeltaAstListAdded],SetNestedLanguageBlockASTGuids->[SetNestedLanguageBlockASTGuids],GetDeltaAstListModified->[SetNestedLanguageBlockASTGuids]]] | if any of the conditions are met then execute the update. | I hope the next exercise is to turn `kFrameIndexFramePointer` into `FrameIndexFramePointer` ;) |
@@ -105,6 +105,11 @@ public class SAMLConfiguration {
return Boolean.parseBoolean(Framework.getProperty(WANT_ASSERTIONS_SIGNED));
}
+ public static int getSkewTimeMillis() {
+ String skewTimeMs = Framework.getProperty(SKEW_TIME_MS);
+ return skewTimeMs != null ? Integer.parseInt(skewTimeMs) : DEFAULT_SKEW_TIME_MS;
+ }
+
/**
* Returns the {@link EntityDescriptor} for the Nuxeo Service Provider
*/
| [SAMLConfiguration->[getEntityDescriptor->[getEntityId],getSPSSODescriptor->[getAuthnRequestsSigned,getLoginBindings,getWantAssertionsSigned]]] | Get the entity descriptor. | At some point we must think about refactoring the SAML config to use a proper service + extension point instead of ad hoc system properties. Even using the configuration service would be much better. |
@@ -2,11 +2,11 @@
module PageData
class ForumPostsShowService
- def self.exec(page_params)
- new.exec(page_params)
+ def self.exec(current_user, page_params)
+ new.exec(current_user, page_params)
end
- def exec(page_params)
+ def exec(current_user, page_params)
{
post: ForumPost.find(page_params.dig("post", "id")),
comments: ForumComment.where(id: page_params["comments"].pluck("id"))
| [ForumPostsShowService->[exec->[find,dig,pluck,where,exec]]] | exec creates a new forum object and returns it. | Unused method argument - current_user. If it's necessary, use _ or _current_user as an argument name to indicate that it won't be used. |
@@ -25,10 +25,13 @@ import java.util.function.Supplier;
public class DefaultFlowProcessingStrategyFactory extends ProactorProcessingStrategyFactory {
@Override
- public ProcessingStrategy create(MuleContext muleContext) {
- return new DefaultFlowProcessingStrategy(() -> muleContext.getSchedulerService().cpuLightScheduler(),
- () -> muleContext.getSchedulerService().ioScheduler(),
- () -> muleContext.getSchedulerService().cpuIntensiveScheduler(),
+ public ProcessingStrategy create(MuleContext muleContext, String schedulersNamePrefix) {
+ return new DefaultFlowProcessingStrategy(() -> muleContext.getSchedulerService()
+ .cpuLightScheduler(config().withName(schedulersNamePrefix + ".cpuLite")),
+ () -> muleContext.getSchedulerService()
+ .ioScheduler(config().withName(schedulersNamePrefix + ".io")),
+ () -> muleContext.getSchedulerService().cpuIntensiveScheduler(config()
+ .withName(schedulersNamePrefix + ".cpuIntensive")),
scheduler -> scheduler.stop(muleContext.getConfiguration().getShutdownTimeout(),
MILLISECONDS),
muleContext);
| [DefaultFlowProcessingStrategyFactory->[create->[cpuIntensiveScheduler,cpuLightScheduler,stop,ioScheduler,getShutdownTimeout,DefaultFlowProcessingStrategy],DefaultFlowProcessingStrategy->[scheduleOverridePredicate->[isTransactionActive]]]] | Creates a new processing strategy that will stop the MuleContext if it is not already stopped. | Any constant that already exists that you can use for this? |
@@ -11,9 +11,8 @@ import (
// Event metadata constants. These keys are used within libbeat to identify
// metadata stored in an event.
const (
- EventMetadataKey = "_event_metadata"
- FieldsKey = "fields"
- TagsKey = "tags"
+ FieldsKey = "fields"
+ TagsKey = "tags"
)
var (
| [StringToPrint->[MarshalIndent,Sprintf],String->[Marshal,Sprintf],Clone->[Clone],Wrapf,Join,Sprintf,Do,New,DeepUpdate,Errorf,Split] | Common import imports and updates the EventMetadata fields and tags of a given object. Delete deletes the given key from the given map. | I assume this already has become obsolete in previous refactoring? |
@@ -371,13 +371,14 @@ func (m *podManager) teardown(req *cniserver.PodRequest) error {
}
}
+ errList := []error{}
if netnsValid {
hostVethName, _, podIP, err := getVethInfo(req.Netns, podInterfaceName)
if err != nil {
return err
}
- if err := m.ovs.TearDownPod(hostVethName, podIP); err != nil {
+ if err := m.ovs.TearDownPod(hostVethName, podIP, req.SandboxID); err != nil {
errList = append(errList, err)
}
}
| [ipamDel->[ExecPluginWithoutResult,Errorf],teardown->[ipamDel,IsNSorErr,TearDownPod,Infof,SyncHostports,NewAggregate,getRunningPods,V],ipamAdd->[ExecPluginWithResult,Errorf,GetResult],update->[UpdatePod,GetVNID],setup->[LinkByName,InterfaceByIndex,ConfigureIface,Warningf,OpenPodHostportsAndSync,WithNetNSPath,LinkSetUp,SetUpPod,NewResultFromResult,GetVNID,Errorf,SetupVeth,ConstructPodPortMapping,getRunningPods,Convert_api_Pod_To_v1_Pod,Get,ipamDel,EnsureVNIDRules,SyncHostports,ipamAdd,SetHWAddrByIP,Core,String,Pods],LinkByName,LinkByIndex,Value,NewTransaction,Fd,RouteList,GetNS,Close,SetPodBandwidth,LinkAdd,LinkSetUp,New,SetLink,Errorf,Attrs,Do,FormatUint,AddrList,EndTransaction,ExtractPodBandwidthResources,String,NsFd] | teardown is called when a pod is being destroyed. | req.containerId should already be the pod sandbox ID... I'm pretty sure we don't need to re-request it, since at Teardown time the runtime is calling us directly with the sandboxID. |
@@ -993,7 +993,11 @@ def call_subprocess(
def write_output(msg, *args):
# type: (str, str) -> None
- logger.info(msg, *args)
+ command_logger.info(msg, *args)
+ if args == ():
+ print(msg)
+ else:
+ print(msg % args)
def _make_build_dir(build_dir):
| [hide_value->[HiddenText],hide_url->[redact_auth_from_url,HiddenText],get_installed_distributions->[editables_only_test->[dist_is_editable],editable_test->[dist_is_editable],user_test,editables_only_test,local_test,editable_test],captured_stdout->[captured_output],unzip_file->[ensure_dir,has_leading_dir,split_leading_dir,current_umask],redact_netloc->[split_auth_from_netloc],unpack_file->[untar_file,unzip_file,file_contents,is_svn_page],split_auth_netloc_from_url->[_transform_url],captured_stderr->[captured_output],dist_location->[egg_link_path],ask_input->[_check_no_input],normalize_version_info->[cast],make_subprocess_output_error->[path_to_display,format_command_args],untar_file->[ensure_dir,has_leading_dir,split_leading_dir,current_umask],dist_in_site_packages->[normalize_path],call_subprocess->[reveal_command_args,make_subprocess_output_error,format_command_args],captured_output->[from_stream],_get_netloc->[split_auth_from_netloc],is_local->[normalize_path],dist_is_local->[is_local],rmtree->[rmtree],parse_netloc->[build_url_from_netloc],remove_auth_from_url->[_transform_url],redact_auth_from_url->[_transform_url],has_leading_dir->[split_leading_dir],ask->[_check_no_input],splitext->[splitext],dist_in_usersite->[normalize_path],_redact_netloc->[redact_netloc],ask_password->[_check_no_input]] | Write a message to the output file. | I would `print()` before logging since printing is more important. Also, you should do this in a way that lets you have one `print()` statement. Like, won't `print(msg % args)` work also if `args` is empty? |
@@ -31,7 +31,11 @@ class ChatChannelsController < ApplicationController
def update
ChatChannelUpdateService.new(@chat_channel, chat_channel_params).update
- flash[:settings_notice] = "Channel Settings Updated."
+ if @chat_channel.valid?
+ flash[:settings_notice] = "Channel Settings Updated."
+ else
+ flash[:settings_notice] = "#{@chat_channel.errors.full_messages}"
+ end
redirect_to "/chat_channel_memberships/#{@chat_channel.mod_memberships.where(user_id: current_user.id).first.id}/edit"
end
| [ChatChannelsController->[create->[create],open->[update],update->[update]]] | chat_channel_update_nack where the user has seen a node in the group. | I think it makes more sense to rely on the result of `update` call instead of `@chat_channel.valid?` . In some cases the channel could be valid but still not updated for other reasons. |
@@ -1459,7 +1459,7 @@ class ArchiveChecker:
cdata = repository.get(some_chunkid)
return key_factory(repository, cdata)
- def verify_data(self):
+ def do_verify_data(self):
logger.info('Starting cryptographic data integrity verification...')
chunks_count_index = len(self.chunks)
chunks_count_segments = 0
| [ChunksProcessor->[maybe_checkpoint->[write_part_file,info],write_part_file->[write_checkpoint,add_item,as_dict],process_file_chunks->[maybe_checkpoint,show_progress,chunk_processor]],ArchiveChecker->[finish->[info],rebuild_manifest->[valid_msgpacked_dict,valid_archive,info],verify_data->[delete,info],orphan_chunks_check->[delete,info],rebuild_refcounts->[mark_as_possibly_superseded->[add],verify_file_chunks->[mark_as_possibly_superseded,add_reference,replacement_chunk,info],robust_iterator->[valid_item->[list_keys_safe],report,valid_item,resync,RobustUnpacker,feed],mark_as_possibly_superseded,verify_file_chunks,info,robust_iterator,ChunkBuffer,add_reference,add,as_dict,flush],check->[load,info]],BackupIO->[__exit__->[BackupOSError]],DownloadPipeline->[unpack_many->[preload->[preload],preload]],FilesystemObjectProcessors->[process_file->[stat_simple_attrs,OsOpen,update,process_file_chunks,stat_ext_attrs,is_special,stat_update_check,backup_io_iter,create_helper],process_stdin->[add_item,backup_io_iter,process_file_chunks],process_fifo->[OsOpen,update,stat_attrs,stat_update_check,create_helper],process_dir->[stat_attrs,create_helper,update],__init__->[Statistics],process_symlink->[stat_attrs,create_helper,update],process_dev->[stat_attrs,stat_update_check,create_helper,update],create_helper->[add_item]],Statistics->[show_progress->[as_dict,update],__add__->[Statistics]],stat_update_check->[BackupError],RobustUnpacker->[__next__->[valid_msgpacked_dict,feed],feed->[feed]],ChunkBuffer->[add->[as_dict],flush->[write_chunk]],Archive->[iter_items->[item_filter,unpack_many],compare_archives_iter->[update_hardlink_masters->[is_hardlink_master],defer_if_necessary->[update_hardlink_masters,hardlink_master_seen],compare_items->[fetch_many,has_hardlink_master],iter_items,hardlink_master_seen,update_hardlink_masters,defer_if_necessary,compare_items],load->[_load_meta],info->[as_dict,update],add_item->[show_progress,add],extract_item->[make_parent,BackupError,fetch_many,Incompati
bleFilesystemEncodingError,extract_helper,flush],__init__->[DoesNotExist,CacheChunkBuffer,DownloadPipeline,Statistics,AlreadyExists],save->[AlreadyExists,update,as_dict,flush],calc_stats->[add->[add],Statistics,add],delete->[chunk_decref->[ChunksIndexError,fetch_async_response,chunk_decref],fetch_async_response,chunk_decref],set_meta->[_load_meta,as_dict],rename->[AlreadyExists,set_meta]],MetadataCollector->[stat_attrs->[stat_simple_attrs,stat_ext_attrs,update]],ArchiveRecreater->[iter_chunks->[fetch_many],matcher_add_tagged_dirs->[exclude,iter_items,add],create_target->[ChunksProcessor],chunk_processor->[add],create_target_archive->[Archive],open_archive->[Archive],recreate->[is_temporary_archive,save],process_items->[iter_items,item_is_hardlink_master,show_progress],save->[delete,save,rename,Statistics],process_item->[add_item],process_chunks->[process_file_chunks]],BackupIO] | Identify a key in the repository. Check if the chunks are missing and remove them from the repository. Get - The next defect chunk in defect_chunks. | why did you rename this? |
@@ -617,7 +617,7 @@ namespace Dynamo.Models
}
catch (Exception ex)
{
- Logger.Log(ex.Message);
+ Logger.Log(ex.Message);
}
ExtensionManager.Add(ext);
| [DynamoModel->[InitializeNodeLibrary->[InitializeIncludedNodes],ForceRun->[ResetEngine],Paste->[Paste,Copy],RemoveWorkspace->[Dispose],UngroupModel->[DeleteModelInternal],ResetEngine->[ResetEngine],ShutDown->[OnShutdownCompleted,OnShutdownStarted,ShutDown],DumpLibraryToXml->[DumpLibraryToXml],ResetEngineInternal->[RegisterCustomNodeDefinitionWithEngine,Dispose],AddWorkspace->[OnWorkspaceSaved],AddZeroTouchNodeToSearch->[AddZeroTouchNodeToSearch],OpenFileFromPath->[OnWorkspaceOpening,ResetEngine],DeleteModelInternal->[Dispose],AddHomeWorkspace->[RegisterHomeWorkspace],Dispose->[Dispose]]] | Creates a new library and loads all extensions. RemoveExtension - removes extension from the extension manager and removes it from the list of extensions. | seems there a lot of line endings changes that make tracking the real changes more difficult. Any idea whats going on there? |
@@ -32,7 +32,7 @@ func NewAnalyzer(host Host, ctx *Context, name tokens.QName) (Analyzer, error) {
_, path, err := workspace.GetPluginPath(
workspace.AnalyzerPlugin, strings.Replace(string(name), tokens.QNameDelimiter, "_", -1), nil)
if err != nil {
- return nil, err
+ return nil, rpcerrors.Convert(err)
} else if path == "" {
return nil, NewMissingError(workspace.PluginInfo{
Kind: workspace.AnalyzerPlugin,
| [Close->[Close],Analyze->[label,Analyze],GetPluginInfo->[label,GetPluginInfo]] | NewAnalyzer creates a new instance of an Analyzer. Name returns a base label for analyzing a single resource object. | It wasn't evident to me why we are converting errors here, since they aren't from RPCs, especially since there are in fact errors we don't convert (e.g., return from `semver.ParseTolerant` below). (Maybe will become more obvious as I read on...) |
@@ -68,7 +68,7 @@ public class RandomStatsDetails implements Serializable {
double sumOfSquaredMeanDeviations = 0;
// TODO: does this need to be updated to take data.getDiceSides() ?
for (int i = 1; i <= diceSides; i++) {
- sumOfSquaredMeanDeviations += (stats.getInt(i) - (localTotal / diceSides))
+ sumOfSquaredMeanDeviations += (double) (stats.getInt(i) - (localTotal / diceSides))
* (stats.getInt(i) - (localTotal / diceSides));
}
variance = sumOfSquaredMeanDeviations / (localTotal - 1);
| [RandomStatsDetails->[getAllStats->[getStatsDisplay],getAllStatsString->[getStatsString]]] | Get the DiceStatistic for the given statistics. | @ssoloff Yes and no. The code snippet you presented was one of them, but this is another occurence. |
@@ -209,6 +209,9 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
}
return &clientSwaggerSchema{client, api.Scheme}, nil
}
+ if filenameFlag := flags.Lookup("filename"); filenameFlag != nil && filenameFlag.Changed {
+ return validation.FilenameInputSchema{}, nil
+ }
return validation.NullSchema{}, nil
},
DefaultNamespace: func() (string, error) {
| [ValidateBytes->[ValidateBytes,Raw,NewSwaggerSchemaFromBytes,DataVersionAndKind,Do,AbsPath,Get],BindFlags->[AddPFlagSetToPFlagSet,Lookup,VisitAll,AddFlag,AddFlagSetToPFlagSet,Bool,BoolVar],ClientMapperForCommand->[RESTClient,ClientMapperFunc],PrintObject->[Object,PrintObj,RESTMapping,ObjectVersionAndKind,PrinterForMapping],PrinterForMapping->[ClientConfig,Printer,NewVersionedPrinter,Errorf],StringVar,NewAccessor,ClientConfigForVersion,ClientForVersion,SetNormalizeFunc,NewScalerClient,ScalerFor,Itoa,NewInteractiveDeferredLoadingClientConfig,BindOverrideFlags,NewHumanReadablePrinter,Errorf,Namespace,Labels,Lookup,RecommendedConfigOverrideFlags,NewFlagSet,DescriberFor,ClientConfig,MakeLabels,Kind,NewDefaultClientConfigLoadingRules,String,ReaperFor] | BindFlags is the default function that returns a new object that can be used to create a Hack for global access to validation flag. | This forces filename validation on all objects processed (users can pass filename and something else). Are there any scenarios in which this would break a command that uses resource builder multiple times in a row? |
@@ -56,7 +56,7 @@ class AutotestRunJob < ApplicationJob
retry
end
- def enqueue_test_run(test_run, host_with_port, test_group_ids, test_specs_name, hooks_script_name, ssh = nil)
+ def enqueue_test_run(test_run, host_with_port, test_categories, ssh = nil)
params_file = nil
grouping = test_run.grouping
assignment = grouping.assignment
| [AutotestRunJob->[enqueue_test_run->[export_group_repo],perform->[enqueue_test_run]]] | get_api_key returns the api key of the current user or create a new Creates a temp directory and runs the command if the server is running locally or remotely with find node by id. | Metrics/MethodLength: Method has too many lines. [55/20]<br>Metrics/PerceivedComplexity: Perceived complexity for enqueue_test_run is too high. [8/7] |
@@ -277,6 +277,10 @@ class SemanticAnalyzerPass2(NodeVisitor[None],
self.is_typeshed_stub_file = self.errors.is_typeshed_file(file_node.path)
self.globals = file_node.names
self.patches = patches
+ self.named_tuple_analyzer = NamedTupleAnalyzer(options, self)
+ self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
+ self.enum_call_analyzer = EnumCallAnalyzer(options, self)
+ self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
with experiments.strict_optional_set(options.strict_optional):
if 'builtins' in self.modules:
| [infer_condition_value->[infer_condition_value],make_any_non_explicit->[accept],SemanticAnalyzerPass2->[analyze_comp_for->[analyze_lvalue],build_newtype_typeinfo->[named_type],name_not_defined->[add_fixture_note,lookup_fully_qualified_or_none],check_classvar->[is_self_member_ref],visit_lambda_expr->[analyze_function],build_typeddict_typeinfo->[basic_new_typeinfo,str_type,object_type,named_type_or_none],visit_for_stmt->[fail_invalid_classvar,anal_type,is_classvar,store_declared_types,visit_block,visit_block_maybe,analyze_lvalue],add_symbol->[is_func_scope],visit_import_all->[dereference_module_cross_ref,add_submodules_to_parent_modules,process_import_over_existing_name,normalize_type_alias,correct_relative_import],basic_new_typeinfo->[object_type],visit_index_expr->[analyze_alias,add_type_alias_deps,alias_fallback,anal_type],accept->[accept],normalize_type_alias->[add_module_symbol],analyze_simple_literal_type->[named_type_or_none],anal_type->[type_analyzer],parse_typeddict_fields_with_types->[anal_type],is_class_scope->[is_func_scope],check_newtype_args->[anal_type],visit_with_stmt->[fail_invalid_classvar,anal_type,is_classvar,store_declared_types,visit_block,analyze_lvalue],analyze_types->[anal_type],process_typevar_parameters->[expr_to_analyzed_type,object_type],fail_blocker->[fail],visit_type_application->[anal_type],build_namedtuple_typeinfo->[named_type,add_field,add_method,object_type,make_init_arg,basic_new_typeinfo,str_type,named_type_or_none],visit_assignment_stmt->[anal_type],process_module_assignment->[process_module_assignment],analyze_try_stmt->[analyze_lvalue],alias_fallback->[object_type],build_enum_call_typeinfo->[basic_new_typeinfo,named_type_or_none],store_declared_types->[store_declared_types],parse_namedtuple_fields_with_types->[anal_type],visit_block_maybe->[visit_block],visit_member_expr->[dereference_module_cross_ref,normalize_type_alias],visit_while_stmt->[visit_block_maybe],apply_class_plugin_hooks->[get_fullname->[get_fullname],get_fullna
me],analyze_tuple_or_list_lvalue->[analyze_lvalue],correct_relative_import->[correct_relative_import],visit_cast_expr->[anal_type],lookup_qualified->[lookup,normalize_type_alias],check_and_set_up_type_alias->[analyze_alias,add_type_alias_deps,alias_fallback],is_valid_del_target->[is_valid_del_target],visit_if_stmt->[visit_block_maybe,visit_block],visit_import_from->[add_submodules_to_parent_modules],analyze_typeddict_classdef->[is_typeddict],visit__promote_expr->[anal_type],is_module_scope->[is_func_scope,is_class_scope],analyze_lvalue->[analyze_lvalue],check_classvar_in_signature->[check_classvar_in_signature]],calculate_class_mro->[fail],replace_implicit_first_type->[replace_implicit_first_type],mark_block_mypy_only->[accept],mark_block_unreachable->[accept]] | Run semantic analysis phase 2 over a file. Delete options patches and globals from node - cache. | There is a bit strange asymmetry between `NamedTupleAnanlyzer` and `TypedDictAnalyzer`, because only the latter needs `self.msg`. It is needed by `check_for_explicit_any`, and it is a bit surprising that it doesn't appear in `NamedTupleAnalyzer`. I think it would be good or explain this asymmetry. |
@@ -285,10 +285,8 @@ def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, preload):
with open(fname, 'rb') as fid:
assert(fid.tell() == 0)
- fid.seek(8)
+ fid.seek(168) # Seek 8 + 80 bytes for Subject id + 80 bytes for rec id
- fid.read(80).strip().decode() # subject id
- fid.read(80).strip().decode() # recording id
day, month, year = [int(x) for x in re.findall('(\d+)',
fid.read(8).decode())]
hour, minute, sec = [int(x) for x in re.findall('(\d+)',
| [read_raw_edf->[RawEDF],_read_ch->[reshape,fromfile],_read_annot->[findall,int,zeros,zip,open,float],_get_edf_info->[_update_redundant,all,any,max,warn,array,info,enumerate,tell,RuntimeError,n_samps,_empty_info,int,dict,min,timegm,zeros,range,list,append,datetime,logical_or,len,read,isinstance,float,open,zip,index,findall,splitext,utctimetuple,logical_and,seek,ones,ravel],_parse_tal_channel->[findall,int,append,extend,decode,bytearray,ev,float],RawEDF->[__init__->[bool,abspath,_check_update_montage,super,warn,info,_get_edf_info],_read_segment_file->[,any,hstack,where,linspace,enumerate,resample,int,dict,empty,set,zeros,range,interp1d,cumsum,append,data,len,sorted,atleast_2d,concatenate,_blk_read_lims,open,_read_ch,_read_annot,NotImplementedError,arange,seek,bitwise_and,_parse_tal_channel,array]]] | Extracts all the information from the EDF + and BDF file. Reads an EDF annotation file and returns the EDF info. Pick a channel from the list of ch_names. Add missing components to the list of missing components. | Shouldn't we actually use these to populate entries in `info`? Or do we already get them from somewhere else? |
@@ -394,8 +394,14 @@ func (a *Auditor) auditLocked(m libkb.MetaContext, id keybase1.TeamID, headMerkl
}
last := lastAudit(history)
- if last != nil && last.MaxChainSeqno == maxChainSeqno {
- m.CDebugf("Short-circuit audit, since there is no new data (@%v)", maxChainSeqno)
+
+ // It's possible that we're bouncing back and forth between the Fast and Slow
+ // loader. Therefore, it might have been that we previous audited up to chainlink
+ // 20, and now we're seeing an audit only for link 18 (if one of them was stale).
+ // That's fine, just make sure to short-circuit as long as we've audited past
+ // the given maxChainSeqno.
+ if last != nil && last.MaxChainSeqno >= maxChainSeqno {
+ m.CDebugf("Short-circuit audit, since there is no new data (@%v <= %v)", maxChainSeqno, last.MaxChainSeqno)
return nil
}
| [lookupProbes->[lookupProbe],auditLocked->[getLRU,checkRecent,putToCache,doPreProbes,doPostProbes,getFromCache],OnLogout->[newLRU],getFromCache->[getFromDisk,getFromLRU]] | auditLocked is called when the auditing thread is locked. putToCache puts a lease in the cache. | Do we need to and are we asserting that the link 18 in round B is the same link as 18 was in round A? That may fall under the purview of FTL not auditor |
@@ -96,7 +96,9 @@ public class FileSizeCountTask implements ReconOmTask {
return new ImmutablePair<>(getTaskName(), false);
}
// Truncate table before inserting new rows
- dslContext.truncate(FILE_COUNT_BY_SIZE);
+ int execute = dslContext.delete(FILE_COUNT_BY_SIZE).execute();
+ LOG.info("Deleted {} records from {}", execute, FILE_COUNT_BY_SIZE);
+
writeCountsToDB(true, fileSizeCountMap);
LOG.info("Completed a 'reprocess' run of FileSizeCountTask.");
| [FileSizeCountTask->[handleDeleteKeyEvent->[getFileSizeCountKey],process->[getTaskName],FileSizeCountKey->[hashCode->[hashCode],equals->[equals]],getFileSizeCountKey->[getFileSizeUpperBound],getFileSizeUpperBound->[nextClosestPowerIndexOfTwo],handlePutKeyEvent->[getFileSizeCountKey]]] | This method is called when the task is run in the middle of a ReconTask. | Why does truncate not work? |
@@ -374,9 +374,9 @@ func processResponse(ctx context.Context, resp ReadBatch, chunkSet *ByKey, match
log.Debug("Dropping chunk for non-matching metric ", chunk.Metric)
continue
}
- *chunkSet = append(*chunkSet, chunk)
+ chunkSet = append(chunkSet, chunk)
}
- return nil
+ return chunkSet, nil
}
func (c *Store) fetchChunkData(ctx context.Context, chunkSet []Chunk) ([]Chunk, error) {
| [putChunk->[StoreChunk,PutChunk,Warnf],putChunks->[putChunk],Stop->[Stop],fetchChunkData->[decode,externalKey,GetChunk],updateIndex->[BatchWrite,calculateDynamoWrites],lookupMatchers->[GetReadEntriesForMetricLabel,Extract,GetReadEntriesForMetricLabelValue,ExtractMetricNameFromMatchers,GetReadEntriesForMetric,lookupEntries],RegisterFlags->[RegisterFlags],calculateDynamoWrites->[NewWriteBatch,externalKey,Observe,GetWriteEntries,ExtractMetricNameFromMetric,Add],writeBackCache->[BackgroundWrite,encode,externalKey],lookupEntry->[Errorf,QueryPages,Sort],Get->[Match,fetchChunkData,FetchChunkData,SplitFiltersAndMatchers,lookupMatchers,Warnf,writeBackCache,Sort,ErrStorage,Errorf],lookupEntries->[lookupEntry],Put->[putChunks,updateIndex,Extract,externalKey,encode],Match,Value,schemaFactory,NewHashBucketHistogram,Extract,Unmarshal,RangeValue,ExponentialBuckets,NewHistogram,Len,MustRegister,Debug] | fetchChunkData fetches the data from the storage and decodes it into chunks. | Need to sort & dedupe this set for the nWayIntersections above to work. |
@@ -42,6 +42,8 @@ class ElmoLstm(_EncoderBase):
:class:`~allennlp.modules.lstm_cell_with_projection.LstmCellWithProjection`.
num_layers : ``int``, required
The number of bidirectional LSTMs to use.
+ require_grad: ``bool``, optional
+ Compute gradients of model parameters?
recurrent_dropout_probability: ``float``, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
`A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
| [ElmoLstm->[load_weights->[FloatTensor,cached_path,copy,transpose,zip,enumerate,File,copy_],forward->[new,size,append,sort_and_run_forward,cat,_update_states,Variable,index_select],_lstm_forward->[forward_layer,list,state,append,getattr,backward_layer,initial_state,cat,len,pad_packed_sequence,stack,format,zip,enumerate,ConfigurationError],__init__->[append,add_module,LstmCellWithProjection,super,format,range]]] | Creates a new instance of the class. Initialize the ElmoLstm object. | "If `True`, computes gradient of ELMo parameters, which is useful for fine tuning" or something. |
@@ -267,9 +267,7 @@ getent group daos_admins >/dev/null || groupadd -r daos_admins
%{_bindir}/cart_ctl
%{_bindir}/self_test
%{_bindir}/dmg
-%{_bindir}/dmg_old
%{_bindir}/daosctl
-%{_bindir}/dcont
%{_bindir}/daos_agent
%{_bindir}/dfuse
%{_bindir}/daos
| [No CFG could be retrieved] | %(files server - server. yml server - server. yml server - server. yml server Dasagent - agent that provides all Dasos agents. | Changes such as this that will remove a binary from the RPM package should update the Release number (near the top of the file) and provide a corresponding %changelog entry lower in the file. |
@@ -78,4 +78,6 @@ def main(args=None):
os.remove(temp_obj)
if args.header:
- compiler.emit_header(args.output)
+ print('*********')
+ print('WARNING: pycc --header has been disabled in this release due to a known issue')
+ #compiler.emit_header(args.output)
| [main->[write_native_object,remove,emit_header,get_ending,find_args,gettempdir,Compiler,find_linker,debug,add_argument,setLevel,ArgumentParser,parse_args,split,splitext,basename,getLogger,add_mutually_exclusive_group,write_llvm_bitcode,check_call],get_ending->[find_shared_ending]] | Entry point for the compiler. Emit a sequence of bits from the given binary. | I would rather raise / print an error and abort here. If people pass `--header`, they certainly need the file. |
@@ -125,11 +125,9 @@ class Task(metaclass=SignatureValidator):
- cache_key (str, optional): if provided, a `cache_key` serves as a unique identifier for this Task's cache, and can
be shared across both Tasks _and_ Flows; if not provided, the Task's _name_ will be used if running locally, or the
Task's database ID if running in Cloud
- - checkpoint (bool, optional, DEPRECATED): if this Task is successful, whether to
- store its result using the `result_handler` available during the run; defaults to the value of
- `tasks.defaults.checkpoint` in your user config. Note that in the future, all tasks with result handlers
- will be checkpointed. Also note that checkpointing will only occur locally if `prefect.config.flows.checkpointing` is
- set to `True`
+ - checkpoint (bool, optional): if this Task is successful, whether to
+ store its result using the `result_handler` available during the run; Also note that
+ checkpointing will only occur locally if `prefect.config.flows.checkpointing` is set to `True`
- result_handler (ResultHandler, optional): the handler to use for
retrieving and storing state results during execution; if not provided, will default to the
one attached to the Flow
| [Task->[copy->[copy],__ror__->[set_dependencies],__or__->[set_dependencies],set_dependencies->[set_dependencies],map->[copy,bind],set_upstream->[set_dependencies],__call__->[copy],bind->[bind],set_downstream->[set_dependencies]],SignatureValidator->[__new__->[__new__,_validate_run_signature]]] | The Task constructor is used to create a new object from a list of A function with signature f. | Should the changelog reflect that this is no longer deprecated? |
@@ -1493,6 +1493,12 @@ func (p *pg) CreateProject(ctx context.Context, project *v2.Project) (*v2.Projec
return nil, p.processError(err)
}
+ if addPolicies {
+ if err := p.addSupportPolicies(ctx, project, tx); err != nil {
+ return nil, p.processError(err)
+ }
+ }
+
err = tx.Commit()
if err != nil {
return nil, storage_errors.NewTxCommitError(err)
| [AddPolicyMembers->[notifyPolicyChange,queryPolicy],Reset->[Reset],CreateRole->[notifyPolicyChange],RemovePolicyMembers->[notifyPolicyChange,queryPolicy],recordMigrationStatusAndNotifyPG->[notifyPolicyChange],Success->[recordMigrationStatusAndNotifyPG],UpdateRole->[notifyPolicyChange],ListPolicyMembers->[queryPolicy],Close->[Close],SuccessBeta1->[recordMigrationStatusAndNotifyPG],DeleteRole->[notifyPolicyChange],ReplacePolicyMembers->[notifyPolicyChange,queryPolicy]] | CreateProject creates a new project in the database. | Create supporting policies within the same DB transaction. |
@@ -177,6 +177,9 @@ public class HoodieDeltaStreamer implements Serializable {
@Parameter(names = {"--table-type"}, description = "Type of table. COPY_ON_WRITE (or) MERGE_ON_READ", required = true)
public String tableType;
+ @Parameter(names = {"--base-file-format"}, description = "File format for the base files. PARQUET (or) HFILE", required = false)
+ public String baseFileFormat;
+
@Parameter(names = {"--props"}, description = "path to properties file on localfs or dfs, with configurations for "
+ "hoodie client, schema provider, key generator and data source. For hoodie client props, sane defaults are "
+ "used, but recommend use to provide basic things like metrics endpoints, hive configs etc. For sources, refer"
| [HoodieDeltaStreamer->[DeltaSyncService->[startService->[isAsyncCompactionEnabled],onInitializingWriteClient->[isAsyncCompactionEnabled],close->[close],getConfig],main->[sync,Config]]] | Config for a single partition of a hoodie table. Specifies the field within the source record to break ties between records with same key in input. | Similar functionality needs to be done for Spark SQL Writer. See HoodieSparkSqlWriter.scala |
@@ -55,8 +55,12 @@ function computeDesktopVideoSize( // eslint-disable-line max-params
videoHeight,
videoSpaceWidth,
videoSpaceHeight) {
- const aspectRatio = videoWidth / videoHeight;
+ if (videoWidth === 0 || videoHeight === 0 || videoSpaceWidth === 0 || videoSpaceHeight === 0) {
+ // Avoid NaN values caused by devision by 0.
+ return [ 0, 0 ];
+ }
+ const aspectRatio = videoWidth / videoHeight;
let availableWidth = Math.max(videoWidth, videoSpaceWidth);
let availableHeight = Math.max(videoHeight, videoSpaceHeight);
| [No CFG could be retrieved] | Computes the size of a single . The array is used to filter the video by the dimensions of the video. | does it make sense for videoSpaceWidth or videoSpaceHeight to be 0 if videoWidth and videoHeight are not? |
@@ -413,7 +413,16 @@ class _UnpickledSideInput(AsSideInput):
@staticmethod
def _from_runtime_iterable(it, options):
- return options['data'].view_fn(it)
+ access_pattern = options['data'].access_pattern
+ if access_pattern == common_urns.side_inputs.ITERABLE.urn:
+ raw_view = it
+ elif access_pattern == common_urns.side_inputs.MULTIMAP.urn:
+ raw_view = collections.defaultdict(list)
+ for k, v in it:
+ raw_view[k].append(v)
+ else:
+ raise ValueError('Unknown access_pattern: %s' % access_pattern)
+ return options['data'].view_fn(raw_view)
def _view_options(self):
return {
 | [PCollection->[from_->[PCollection],to_runner_api->[PCollection],from_runner_api->[PCollection]],AsMultiMap->[_side_input_data->[SideInputData]],AsSideInput->[_side_input_data->[_view_options],to_runner_api->[_side_input_data],from_runner_api->[from_runner_api]],AsIter->[_side_input_data->[SideInputData]],SideInputData->[from_runner_api->[SideInputData]],_make_Row->[Row],PValue->[__or__->[apply],apply->[apply]],DoOutputsTuple->[__getitem__->[PCollection],__repr__->[_str_internal],__getattr__->[__getattr__],__str__->[_str_internal]],AsList->[_side_input_data->[SideInputData]],_UnpickledSideInput->[_view_options->[_windowed_coder]],AsDict->[_side_input_data->[SideInputData]]] | Return an object from the given iterable. | I'm not very familiar with this module; what's the rationale of putting the view construction logic here instead of the `view_fn` in `AsMultiMap._side_input_data()`? |
@@ -16,7 +16,7 @@ import javax.xml.stream.XMLStreamWriter;
* @since 9.0
*/
public final class XMLExtendedStreamWriterImpl implements XMLExtendedStreamWriter, XMLStreamConstants {
- private static final String NO_NAMESPACE = new String();
+ private static final String NO_NAMESPACE = "";
private final XMLStreamWriter delegate;
private final ArrayDeque<ArgRunnable> attrQueue = new ArrayDeque<>();
private int level;
| [XMLExtendedStreamWriterImpl->[writeCharacters->[runAttrQueue,indent,writeCharacters,nl],close->[close],writeStartDocument->[writeStartDocument,nl],setPrefix->[setPrefix],writeAttribute->[run->[writeAttribute],writeAttribute],writeEmptyElement->[writeEmptyElement,indent,nl],writeStartElement->[run->[writeStartElement],nestUnspecifiedNamespace,writeStartElement,indent,nl],writeDTD->[writeDTD,indent,nl],getNamespaceContext->[getNamespaceContext],writeEndElement->[writeEndElement,indent,run,nl],getPrefix->[getPrefix],setDefaultNamespace->[runAttrQueue,setDefaultNamespace],getProperty->[getProperty],runAttrQueue->[run],writeProcessingInstruction->[runAttrQueue,writeProcessingInstruction,indent,nl],writeDefaultNamespace->[run->[writeDefaultNamespace]],setNamespaceContext->[setNamespaceContext],writeCData->[runAttrQueue,writeCData],writeEndDocument->[writeEndDocument],writeEntityRef->[runAttrQueue,writeEntityRef],writeNamespace->[run->[writeNamespace]],flush->[flush]]] | Package private for testing purposes. Checks if there is a namespace in the list of namespaces. If so it is assumed that. | couple line below, `unspecifiedNamespaces` can be final. |
@@ -102,6 +102,17 @@ namespace Dynamo.Wpf.UI.GuidedTour
pointX3 = realWidth;
pointY3 = Height - PointerHeight / 2 - PointerVerticalOffset;
}
+ else if (direction == PointerDirection.BOTTOM_DOWN)
+ {
+ pointX1 = PointerHorizontalOffset;
+ pointY1 = Height + PointerDownHeight;
+
+ pointX2 = PointerDownWidth + PointerHorizontalOffset;
+ pointY2 = Height + PointerDownHeight;
+
+ pointX3 = PointerDownWidth / 2 + PointerHorizontalOffset;
+ pointY3 = realHeight - PointerHeight;
+ }
TooltipPointerPoints = new PointCollection(new[] { new Point(pointX1, pointY1),
new Point(pointX2, pointY2),
| [Tooltip->[DrawPointerDirection->[BOTTOM_RIGHT,TOP_LEFT,BOTTOM_LEFT,TOP_RIGHT],DrawPointerDirection,SetPointerDirection]] | Draws the pointer direction. TooltipPointerPoints - Tooltip pointer points. | HI @filipeotero Without the calculation, where the down pointer would be? |
@@ -36,7 +36,7 @@ var editorRemoveMarks = require( './decorator/tinyMCE' ).editorRemoveMarks;
* @param {string} editorID The ID of the tinyMCE editor.
*/
function isTinyMCEAvailable( editorID ) {
- if ( !isTinyMCELoaded() ) {
+ if ( !isTinyMCELoaded()) {
return false;
}
| [No CFG could be retrieved] | Gets the content of the TinyMCE element. Get the TinyMCE content from the editor. | please add a space: `if ( !isTinyMCELoaded() ) {` |
@@ -66,7 +66,11 @@ public class FlatMapNode extends PlanNode {
this.analysis = Objects.requireNonNull(analysis);
this.functionRegistry = functionRegistry;
this.finalSelectExpressions = buildFinalSelectExpressions();
- outputSchema = buildLogicalSchema(source.getSchema());
+ outputSchema = StreamFlatMapBuilder.buildSchema(
+ source.getSchema(),
+ analysis.getTableFunctions(),
+ functionRegistry
+ );
}
@Override
| [FlatMapNode->[buildFinalSelectExpressions->[getSelectExpressions],getPartitions->[getPartitions],getKeyField->[getKeyField]]] | Returns the output schema of the sequence. | This is just bridge code. Planning to drop this in a follow-up. We shouldn't need to get the schema from the logical plan nodes. |
@@ -15,6 +15,7 @@ module Idv
)
profile.encrypt_pii(pii_attributes, user_password)
profile.proofing_components = current_proofing_components_to_json
+ profile.reproof_at = reproof_at
profile.save!
profile
end
| [ProfileMaker->[save_profile->[save!,proofing_components,new,encrypt_pii],initialize->[user_password,user,pii_attributes,new_from_hash],current_proofing_components_to_json->[id,find_by,to_json],attr_reader,attr_writer,attr_accessor]] | Save a profile with a that can be found in the database. | High level flow step 5b: writing `reproof_at` to the Profile |
@@ -446,9 +446,13 @@ public abstract class AbstractTestQueryFramework
})
.findOnlyElement()
.getId();
- return runner.getCoordinator()
+
+ Supplier<QueryInfo> queryInfoSupplier = () -> runner.getCoordinator()
.getQueryManager()
- .getFullQueryInfo(queryId)
+ .getFullQueryInfo(queryId);
+ assertEventually(new Duration(5, SECONDS), () -> assertThat(queryInfoSupplier.get().isFinalQueryInfo()));
+
+ return queryInfoSupplier.get()
.getQueryStats()
.getOperatorSummaries()
.stream()
| [AbstractTestQueryFramework->[noJoinReordering->[noJoinReordering],computeActual->[getSession,computeActual],assertQueryFails->[getSession,assertQueryFails],assertTableColumnNames->[computeActual],assertQueryOrdered->[assertQueryOrdered,getSession,assertQuery],assertUpdate->[assertUpdate,getSession,assertQuery],getQueryExplainer->[getNodeCount],assertQueryEventually->[assertQueryEventually],assertAccessAllowed->[getSession,assertAccessAllowed],assertQuery->[getSession,assertQuery],assertQuerySucceeds->[getSession,assertQuerySucceeds],computeExpected->[getSession],assertQueryFailsEventually->[getSession,assertQueryFailsEventually],query->[query],searchScanFilterAndProjectOperatorStats->[getQueryRunner],getNodeCount->[getNodeCount],assertAccessDenied->[getSession,assertAccessDenied]]] | This method is used to retrieve the OperatorStats for a ScanFilterAndProjectOperator. | use `new Duration(5, SECONDS)`. |
@@ -426,12 +426,12 @@ public class HoodieTestDataGenerator {
/**
* Generates deduped updates of keys previously inserted, randomly distributed across the keys above.
*
- * @param commitTime Commit Timestamp
+ * @param instantTime Commit Timestamp
* @param n Number of unique records
* @return list of hoodie record updates
*/
- public List<HoodieRecord> generateUniqueUpdates(String commitTime, Integer n) {
- return generateUniqueUpdatesStream(commitTime, n).collect(Collectors.toList());
+ public List<HoodieRecord> generateUniqueUpdates(String instantTime, Integer n) {
+ return generateUniqueUpdatesStream(instantTime, n).collect(Collectors.toList());
}
/**
| [HoodieTestDataGenerator->[generateUpdates->[generateUpdateRecord],generateUpdateRecord->[generateRandomValue],generateInsertsWithHoodieAvroPayload->[generateAvroPayload],generateUpdatesWithHoodieAvroPayload->[generateAvroPayload],generateUniqueDeleteRecordStream->[generateRandomDeleteValue],generateUpdatesWithDiffPartition->[generateUpdateRecord],generateDeleteRecord->[generateDeleteRecord],generateRandomValue->[generateRandomValue],generateInserts->[generateInserts],generateUniqueUpdatesStream->[generateRandomValue],generateInsertsStream->[generateRandomValue],generateDeletes->[generateInserts],generateSameKeyInserts->[generateRandomValue],generateGenericRecord->[generateGenericRecord]]] | Generate a list of HoodieRecords that are unique for the given commit time. | here too.. can you make a second pass everywhere |
@@ -138,6 +138,13 @@ func resourceAwsApiGatewayIntegration() *schema.Resource {
Optional: true,
Computed: true,
},
+
+ "timeout_milliseconds": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ValidateFunc: validateApiGatewayIntegrationTimeout,
+ Default: 29000,
+ },
},
}
}
| [StringMap,Difference,GetChange,GetIntegration,UpdateIntegration,StringValueMap,StringInSlice,Set,Code,NonRetryableError,GetOk,HasChange,Errorf,SetId,Id,Get,Printf,Sprintf,Unmarshal,Print,List,PutIntegration,String,Replace,DeleteIntegration,Retry] | The create function that creates an integration resource based on the provided resource data. Allocate a unique identifier for the resource. | Minor nitpick: We have a helper function for simple integer validation: `validation.IntBetween(50, 29000)` |
@@ -88,6 +88,7 @@ public class RocketsFireHelper {
private void fireWW2V2(final IDelegateBridge bridge, final PlayerID player, final Set<Territory> rocketTerritories) {
final GameData data = bridge.getData();
final Set<Territory> attackedTerritories = new HashSet<>();
+ final LinkedHashMap<Territory,Territory> attackingTerritories = new LinkedHashMap<>();
final boolean oneAttackPerTerritory = !isRocketAttacksPerFactoryInfinite(data);
for (final Territory territory : rocketTerritories) {
final Set<Territory> targets = getTargetsWithinRange(territory, data, player);
| [RocketsFireHelper->[fireWW2V2->[isRocketAttacksPerFactoryInfinite],fireRocket->[rocketMatch,isLimitRocketDamagePerTurn,isPUCap,isDamageFromBombingDoneToUnitsInsteadOfTerritories,isWW2V2,isLimitRocketDamageToProduction],getTargetsWithinRange->[isRocketsCanFlyOverImpassables],fireRockets->[isWW2V2,isAllRocketsAttack]]] | Fire WW2V2. | Left hand side should be the interface, not concrete type, ie: `HashMap< .. >` |
@@ -308,5 +308,17 @@ public class VoidMuleEvent implements MuleEvent
{
throw new UnsupportedOperationException();
}
+
+ @Override
+ public MessagingExceptionHandler getExceptionHandler()
+ {
+ return null;
+ }
+
+ @Override
+ public void setExceptionHandler(MessagingExceptionHandler exceptionHandler)
+ {
+ }
+
}
| [VoidMuleEvent->[setTimeout->[UnsupportedOperationException],getMessage->[UnsupportedOperationException],getEncoding->[UnsupportedOperationException],getFlowVariable->[UnsupportedOperationException],getFlowVariableNames->[UnsupportedOperationException],getExchangePattern->[UnsupportedOperationException],getMessageSourceName->[UnsupportedOperationException],captureReplyToDestination->[UnsupportedOperationException],getSession->[UnsupportedOperationException],transformMessage->[UnsupportedOperationException],getSessionVariable->[UnsupportedOperationException],getMessageSourceURI->[UnsupportedOperationException],getProcessingTime->[UnsupportedOperationException],getProperty->[UnsupportedOperationException],removeSessionVariable->[UnsupportedOperationException],isSynchronous->[UnsupportedOperationException],getTimeout->[UnsupportedOperationException],getCredentials->[UnsupportedOperationException],setFlowVariable->[UnsupportedOperationException],setEnableNotifications->[UnsupportedOperationException],isNotificationsEnabled->[UnsupportedOperationException],removeFlowVariable->[UnsupportedOperationException],getMessageAsBytes->[UnsupportedOperationException],isTransacted->[UnsupportedOperationException],getFlowConstruct->[UnsupportedOperationException],getMuleContext->[UnsupportedOperationException],clearSessionVariables->[UnsupportedOperationException],transformMessageToBytes->[UnsupportedOperationException],getReplyToDestination->[UnsupportedOperationException],isStopFurtherProcessing->[UnsupportedOperationException],setSessionVariable->[UnsupportedOperationException],getOutputStream->[UnsupportedOperationException],getMessageAsString->[UnsupportedOperationException],getReplyToHandler->[UnsupportedOperationException],transformMessageToString->[UnsupportedOperationException],setStopFurtherProcessing->[UnsupportedOperationException],setMessage->[UnsupportedOperationException],getId->[UnsupportedOperationException],getSessionVariableNames->[UnsupportedOperationException]
,clearFlowVariables->[UnsupportedOperationException],VoidMuleEvent]] | Sets whether notifications should be enabled for this notification. | What would happen with a filter that throws an exception when event is null? |
@@ -11,7 +11,12 @@
</div>
<div class="form-group">
<%= label_tag(:filter, "Category") %>
- <%= select_tag(:filter, options_for_select([""] + ClassifiedListing.categories_available.keys, params[:filter])) %>
+ <%= select_tag(:filter, options_for_select(ClassifiedListing.categories_available.keys, params[:filter]), include_blank: true) %>
+ </div>
+ <div class="form-group">
+ <%= label_tag(:published, "Published only") %>
+ <%= hidden_field_tag(:published, 0) %>
+ <%= check_box_tag(:published, 1, true) %>
</div>
<div class="form-group">
<%= submit_tag("Filter") %>
| [No CFG could be retrieved] | Displays a list of all ClassifiedListing objects that can be found in the system. Displays a table of the possible tag IDs for a listing. | Changed this to use the more standard `include_blank` option. |
@@ -126,9 +126,9 @@ public class TrashPolicyOzone extends TrashPolicyDefault {
Path trashRoot = this.fs.getTrashRoot(path);
String key = path.toUri().getPath();
- LOG.debug("Key path to moveToTrash: "+ key);
+ LOG.debug("Key path to moveToTrash: {}"+ key);
String trashRootKey = trashRoot.toUri().getPath();
- LOG.debug("TrashrootKey for moveToTrash: "+ trashRootKey);
+ LOG.debug("TrashrootKey for moveToTrash: {}"+ trashRootKey);
if (!OzoneFSUtils.isValidName(key)) {
throw new InvalidPathException("Invalid path Name " + key);
| [TrashPolicyOzone->[Emptier->[run->[TrashPolicyOzone]],moveToTrash->[moveToTrash],initialize]] | Move the given path to the trash. | this'll still result in an string concantenation. So change it to: `LOG.debug("Key path to moveToTrash: {}",key);` |
@@ -118,10 +118,14 @@ public class Cli implements Closeable, AutoCloseable {
try {
RestResponse restResponse = restClient.makeRootRequest();
if (restResponse.isErroneous()) {
+ KsqlErrorMessage ksqlError = restResponse.getErrorMessage();
+ if ((ksqlError.getErrorCode() / Errors.HTTP_TO_ERROR_CODE_MULTIPLIER)
+ == NOT_ACCEPTABLE.getStatusCode()) {
+ writer.format("Current CLI version no longer supported: %s\n\n", ksqlError);
+ return;
+ }
writer.format(
- "Couldn't connect to the KSQL server: %s\n\n",
- restResponse.getErrorMessage().getMessage()
- );
+ "Couldn't connect to the KSQL server: %s\n\n", ksqlError.getMessage());
}
} catch (IllegalArgumentException exception) {
writer.println("Server URL must begin with protocol (e.g., http:// or https://)");
| [Cli->[setProperty->[setProperty],displayWelcomeMessage->[displayWelcomeMessage],readLine->[readLine],handlePrintedTopic->[close],unsetProperty->[printKsqlResponse,unsetProperty],RemoteServerSpecificCommand->[execute->[validateClient]],close->[close]]] | Validate the given node in the KSQL server. | Suggest that `(ksqlError.getErrorCode() / Errors.HTTP_TO_ERROR_CODE_MULTIPLIER)` should be a method call e.g. `Errors.toStatusCode(ksqlError.getErrorCode)` |
@@ -241,7 +241,6 @@ public class KnoxJwtRealm extends AuthorizingRealm {
/* return the groups as seen by Hadoop */
Set<String> groups = null;
try {
- hadoopGroups.refresh();
final List<String> groupList = hadoopGroups
.getGroups(mappedPrincipalName);
| [KnoxJwtRealm->[validateSignature->[parseRSAPublicKey],onInit->[onInit]]] | Get the groups that the user is mapped to. | Removed this so we could take advantage of hadoop's native group caching mechanisms that should be controllable via hadoop configurations |
@@ -485,6 +485,9 @@
border-radius: 0 3px 3px 3px;
box-shadow: 1px 3px 5px rgba(0, 0, 0, 0.25);
}
+.elgg-menu-hover.elgg-ajax-loader {
+ padding: 10px;
+}
.elgg-menu-hover-card-container {
display: flex;
| [No CFG could be retrieved] | Displays menu items for a specific menu item Displays hover menu items. | We use .elgg-menu-hover too generically. I'd rather put these props on .elgg-popup and define more specific selectors for our dropdown menus, user card, etc. |
@@ -1,11 +1,13 @@
+# :reek:TooManyMethods
module Idv
- class UspsController < ApplicationController
+ class UspsController < ApplicationController # rubocop:disable Metrics/ClassLength
include IdvSession
before_action :confirm_two_factor_authenticated
before_action :confirm_idv_needed
before_action :confirm_user_completed_idv_profile_step
before_action :confirm_mail_not_spammed
+ before_action :max_attempts_reached, only: [:update]
def index
@presenter = UspsPresenter.new(current_user)
| [UspsController->[create->[redirect_to,address_verification_mechanism,pending_profile_requires_verification?,create_user_event],confirm_user_completed_idv_profile_step->[redirect_to,pending_profile_requires_verification?,profile_confirmation],confirm_mail_not_spammed->[redirect_to,address_mechanism_chosen?,mail_spammed?],usps_mail_service->[new],resend_letter->[fetch,reveal_usps_code?,perform,new,otp,pending_profile],index->[new],before_action,include]] | index_nag - Index of node. | This controller has gotten to almost 160 lines, which to me looks like a signal that some of this should be moved into a service |
@@ -147,7 +147,7 @@ class Indexing_Notification_Integration implements Integration_Interface {
\add_action( 'admin_init', [ $this, 'maybe_cleanup_notification' ] );
}
- if ( $this->options_helper->get( 'indexing_reason' ) ) {
+ if ( $this->indexing_helper->get_reason() ) {
\add_action( 'admin_init', [ $this, 'maybe_create_notification' ] );
}
| [Indexing_Notification_Integration->[notification->[get_presenter,get],get_presenter->[get_unindexed_count],should_show_notification->[get,get_unindexed_count],maybe_create_notification->[restore_notification,should_show_notification,notification,add_notification],maybe_cleanup_notification->[should_show_notification,get_notification_by_id,remove_notification],register_hooks->[get,get_current_yoast_seo_page]]] | Register hooks for the admin_init action. | A `has_reason` would be more logic at this point. |
@@ -46,7 +46,8 @@ class GlobalOptionsRegistrar(Optionable):
register('-l', '--level', choices=['debug', 'info', 'warn'], default='info', recursive=True,
help='Set the logging level.')
register('-q', '--quiet', type=bool, recursive=True,
- help='Squelches most console output.')
+ help='Squelches most console output. NOTE: Some tasks default to behaving quietly: '
+ 'inverting this option supports making them noisier than they would be otherwise.')
# Not really needed in bootstrap options, but putting it here means it displays right
# after -l and -q in help output, which is conveniently contextual.
register('--colors', type=bool, default=sys.stdout.isatty(), recursive=True,
| [GlobalOptionsRegistrar->[register_options->[register_bootstrap_options]]] | Register bootstrap options. Register options that require a specific cache. Returns a sequence number that can be used to track the current state of a pants node Options that can be used by the command line. | Nice. Maybe mention here that --no-quiet is a good way to get more information in case of error? Also, maybe a TODO somewhere to add to the help for each task whether it's quiet or not? |
@@ -415,7 +415,7 @@ var (
// Test fails on platforms that use LoadBalancerService and HostNetwork endpoint publishing strategy
`\[Conformance\]\[Area:Networking\]\[Feature:Router\] The HAProxy router should set Forwarded headers appropriately`, // https://bugzilla.redhat.com/show_bug.cgi?id=1752646
-
+ `\[Conformance\]\[templates\] templateinstance impersonation tests should pass impersonation creation tests`, // https://bugzilla.redhat.com/show_bug.cgi?id=1752910
// requires a 1.14 kubelet, enable when rhcos is built for 4.2
"when the NodeLease feature is enabled",
"RuntimeClass should reject",
| [maybeRenameTest->[Text,MatchString,Contains,SetText,CodeLocation],CurrentGinkgoTestDescription,LiteralPrefix,CoreV1,RunSpecsWithCustomReporters,RoleBindings,NewForConfig,TempDir,Setenv,SecurityContextConstraints,RegisterCommonFlags,RetryOnConflict,SecurityV1,HasPrefix,Strings,CreateTestingNS,RunSpecsWithDefaultAndCustomReporters,RegisterClusterFlags,Errorf,NewNonInteractiveDeferredLoadingClientConfig,MustCompile,Skip,Logf,TrimSpace,Create,Join,AddFileSource,Infof,BeforeEach,Contains,V,RegisterFailHandler,Get,MkdirAll,WalkTests,Namespaces,ClientConfig,Update,Sprintf,CoreDump,NewJUnitReporter,String,IsNotFound,Getenv] | Test for cluster - related requirements Tests for the existence of a critical pod pod. | what happened to bug 1731222? it looks like it was fixed, so shouldn't we be re-enabling this test? |
@@ -39,7 +39,7 @@ abstract class BaseSwaggerGeneratorExampleTest extends AbstractSwaggerConnectorT
private final String specification;
public BaseSwaggerGeneratorExampleTest(final String connectorQualifier, final String name) throws IOException {
- specification = resource("/swagger/" + name + ".swagger.json");
+ specification = resource("/swagger/" + name + ".swagger.json", "/swagger/" + name + ".swagger.yaml");
expected = Json.mapper().readValue(resource("/swagger/" + name + "." + connectorQualifier + "_connector.json"), Connector.class);
}
| [BaseSwaggerGeneratorExampleTest->[without->[remove],shouldGenerateAsExpected->[getProperties,reformatJson,getActions,containsAllEntriesOf,getSpecification,isEqualToIgnoringGivenFields,replace,actionById,generate,isPresent,without,build,containsOnlyElementsOf,getConfiguredProperties,isEqualTo,get,hasSameSizeAs,keySet,getPropertyDefinitionSteps],readValue,resource]] | Checks that the generated connector has the same properties and actions as the expected one. Checks if the generated action input data shape differs from the expected one. | Supporting YAML definition is separate to this PR, but I'm OK to include it here or have it in separate PR - me likey granular PRs :) |
@@ -817,7 +817,11 @@ abstract class AbstractHttp2StreamChannel extends DefaultAttributeMap implements
if (flowControlledBytes != 0) {
int bytes = flowControlledBytes;
flowControlledBytes = 0;
- write0(parentContext(), new DefaultHttp2WindowUpdateFrame(bytes).stream(stream));
+ write0(parentContext(), new DefaultHttp2WindowUpdateFrame(bytes).stream(stream))
+ // Add a listener which will notify and teardown the stream
+ // when a window update fails.
+ // See https://github.com/netty/netty/issues/9663
+ .addListener(WINDOW_UPDATE_FRAME_LISTENER);
writeDoneAndNoFlush = true;
}
}
| [AbstractHttp2StreamChannel->[isActive->[isOpen],connect->[connect],write->[operationComplete->[],write],hashCode->[hashCode],eventLoop->[eventLoop],Http2ChannelUnsafe->[writeComplete->[closeForcibly],disconnect->[close],notifyReadComplete->[closeForcibly,flush],beginRead->[isActive],recvBufAllocHandle->[config,newHandle],write->[operationComplete->[decrementPendingOutboundBytes],size,stream,id,isActive,incrementPendingOutboundBytes,toString],doBeginRead->[recvBufAllocHandle,closeForcibly,pollQueuedMessage,config],closeForcibly->[close,voidPromise],updateLocalWindowIfNeeded->[stream],remoteAddress->[remoteAddress],close->[voidPromise,stream,id,isActive,write,flush],firstWriteComplete->[closeForcibly],validateStreamFrame->[toString,stream],register->[isActive],localAddress->[localAddress]],close->[close],decrementPendingOutboundBytes->[decrementPendingOutboundBytes],voidPromise->[voidPromise],Http2StreamChannelConfig->[setRecvByteBufAllocator->[setRecvByteBufAllocator,newHandle]],newSucceededFuture->[newSucceededFuture],compareTo->[id,compareTo],incrementPendingOutboundBytes->[incrementPendingOutboundBytes],flush0->[flush],write0->[write,newPromise],deregister->[deregister],bytesBeforeUnwritable->[isWritable],disconnect->[disconnect],newFailedFuture->[newFailedFuture],newProgressivePromise->[newProgressivePromise],bytesBeforeWritable->[isWritable],fireChildRead->[isActive],remoteAddress->[remoteAddress],newPromise->[newPromise],writeAndFlush->[writeAndFlush],bind->[bind],localAddress->[localAddress],fireChannelWritabilityChanged->[run->[fireChannelWritabilityChanged],fireChannelWritabilityChanged],FlowControlledFrameSizeEstimator->[FlowControlledFrameSizeEstimator],toString->[toString],read->[read],flush->[flush]]] | This method is called when a window update is needed. | Should the ChannelFuture be attached to the child channel? |
@@ -56,6 +56,9 @@ public interface OAuthConstants {
/** The bearer token. */
String BEARER_TOKEN = "Bearer";
+ /** The bearer token in lower case. */
+ String BEARER_TOKEN_IGNORE_CASE = BEARER_TOKEN.toLowerCase();
+
/** The OAUT h20_ callbackurl. */
String OAUTH20_CALLBACKURL = "oauth20_callbackUrl";
 | [No CFG could be retrieved] | This class is used to provide the basic OAuth constants. The authorize url. | I'd rather you call lowercase on the actual const. This isn't a real const. |
@@ -49,6 +49,6 @@ public class WorkItemKeySelector<K, V>
@Override
public TypeInformation<ByteBuffer> getProducedType() {
- return new GenericTypeInfo<>(ByteBuffer.class);
+ return new CoderTypeInformation<>(FlinkKeyUtils.ByteBufferCoder.of(), pipelineOptions.get());
}
}
| [WorkItemKeySelector->[getKey->[key,encodeKey]]] | Returns the type of the missing bytes. | Need to check Kryo internals whether this is breaking change |
@@ -41,6 +41,12 @@ import org.jboss.dmr.ModelNode;
import org.jboss.msc.service.ServiceBuilder;
import org.jboss.msc.service.ServiceName;
+import static org.infinispan.server.endpoint.subsystem.ModelKeys.HOTROD_CONNECTOR;
+import static org.infinispan.server.endpoint.subsystem.ModelKeys.MEMCACHED_CONNECTOR;
+import static org.infinispan.server.endpoint.subsystem.ModelKeys.REST_CONNECTOR;
+import static org.infinispan.server.endpoint.subsystem.ModelKeys.ROUTER_CONNECTOR;
+
+
/**
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
* @author Tristan Tarrant
| [EndpointSubsystemAdd->[populateModel->[populate],EndpointSubsystemAdd]] | Method to import the given model node. Populates the given target node with the empty objects. | @tristantarrant Do we have a consensus on where these static imports should go? I know that eclipse by default likes the top and intellij like the bottom (or something like that). Just would be nice if we could make sure we do the same spot each time. Maybe we should update the various code styles to be the same? |
@@ -2355,6 +2355,10 @@ ds_obj_rw_handler(crt_rpc_t *rpc)
dss_get_module_info()->dmi_xs_id, orw->orw_epoch,
orw->orw_map_ver, ioc.ioc_map_ver, DP_DTI(&orw->orw_dti));
+ if (obj_rpc_is_fetch(rpc) && !(orw->orw_flags & ORF_EC_RECOV) &&
+ (orw->orw_epoch != 0 && orw->orw_epoch != DAOS_EPOCH_MAX))
+ ioc.ioc_fetch_snap = 1;
+
rc = process_epoch(&orw->orw_epoch, &orw->orw_epoch_first,
&orw->orw_flags);
if (rc == PE_OK_LOCAL)
| [No CFG could be retrieved] | Get the object from the rpc. Internal function to process the object local object fetch. if rc == -DER_INPROGRESS and dth. dth_local_local. | is this the safe way to tell if this is snapshot epoch? Probably you can compare the orw_epoch with the most recent snapshot epoch above? |
@@ -371,7 +371,6 @@ public class AntClassLoader extends ClassLoader implements SubBuildListener, Clo
* load the a class through this loader.
*/
public AntClassLoader(final ClassLoader parent, final boolean parentFirst) {
- super(parent); // KK patch for JENKINS-21579
setParent(parent);
project = null;
this.parentFirst = parentFirst;
| [AntClassLoader->[getResourceURL->[log],getResourceAsStream->[getResourceAsStream,log],close->[cleanup],toString->[getClasspath],findClassInComponents->[getClassFilename,getClassFromStream,log,getResourceStream],log->[log],findClass->[log],getClassFromStream->[defineClassFromData],loadBaseResource->[getResourceAsStream],getResource->[isParentFirst,getRootLoader,log,getResource],getResourceStream->[log],findBaseClass->[loadClass],findResources->[isParentFirst,findResources,getRootLoader,ResourceEnumeration],forceLoadClass->[log],forceLoadSystemClass->[log],definePackage->[definePackage],subBuildFinished->[cleanup],newAntClassLoader->[AntClassLoader],getUrl->[log],loadClass->[isParentFirst,log],addPathFile->[addPathFile,log],buildFinished->[cleanup],defineClassFromData->[getClassFilename],getCertificates->[getCertificates]]] | Creates a class loader which can be used to load the given class. The class loader is Add system classpath elements to the list of paths that should be ignored. | No longer needed as of apache/ant#151. |
@@ -48,6 +48,7 @@ export const consentUiClasses = {
placeholder: 'i-amphtml-consent-ui-placeholder',
mask: 'i-amphtml-consent-ui-mask',
enableBorder: 'i-amphtml-consent-ui-enable-border',
+ screenReaderOnly: 'i-amphtml-consent-alertdialog',
};
export class ConsentUI {
| [No CFG could be retrieved] | AJAX - specific module that exports AMP - specific classes. A AMP Consent plugin. | Why "only"? What about `screenReaderDialog` or similar? |
@@ -45,7 +45,14 @@ def ui_thread(addr, frame_address):
pygame.font.init()
assert pygame_modules_have_loaded()
- if HOR:
+ disp_info = pygame.display.Info()
+ max_height = disp_info.current_h
+
+ hor_mode = os.getenv("HORIZONTAL") is not None
+ hor_mode = True if max_height < 960+300 else hor_mode
+ angle_scale = 5.0
+
+ if hor_mode:
size = (640+384+640, 960)
write_x = 5
write_y = 680
| [get_arg_parser->[ArgumentParser,add_argument],ui_thread->[plot_model,draw_lead_car,round,CalibrationTransformsForWarpMatrix,asarray,fill,set_caption,VehicleModel,enumerate,map,warpAffine,draw_mpc,frombuffer,init,sub_sock,recv_one,blit,set_mode,SysFont,copy,sm,flip,zeros,swapaxes,values,dot,blit_array,maybe_update_radar_points,list,circle,get_blank_lid_overlay,update,len,tuple,polygon,get_params,pygame_modules_have_loaded,init_plots,render,SubMaster,keys,get_camera_frame_from_model_frame,draw_plots,get,draw_lead_on,Surface,eye,str,warp_points,extract_model_data,array],Context,get_arg_parser,ui_thread,getenv] | Display a single carparams in a thread. plots a single in a 2D array. Plots the missing - block block chains. Compute the non - zero value of a specific node in the model. Plots all potential car - related data in the system. | No need to change angle_scale |
@@ -220,6 +220,9 @@ class RaidenAPI: # pragma: no unittest
if not is_binary_address(token_address):
raise InvalidBinaryAddress("token_address must be a valid address in binary")
+ if token_address == NULL_ADDRESS_BYTES:
+ raise ValueError("token_address must be non-zero")
+
# The following check is on prestate because the chain state does not
# change here.
# views.state_from_raiden() returns the same state again and again
| [transfer_tasks_view->[flatten_transfer,get_transfer_from_task],RaidenAPI->[get_raiden_events_payment_history_with_timestamps->[event_filter_for_payments],get_pending_transfers->[transfer_tasks_view,get_channel],start_health_check_for->[start_health_check_for],get_raiden_events_payment_history->[get_raiden_events_payment_history_with_timestamps],channel_open->[is_already_existing_channel],get_blockchain_events_channel->[get_channel_list]]] | Register a token in the token network. Return the token network address if it exists or null if it does not. | What does the renaming achieve here now?There is no other chainstate in the function. Better keep it as was if there is no benefit to the change. |
@@ -1518,12 +1518,15 @@ vlans:
def test_handlers(self):
valve_index = self.dot1x.dp_id_to_valve_index[self.DP_ID]
port_no = 1
+ vlan_name = 'student'
+ filter_id = 'block_http'
for handler in (
- self.dot1x.auth_handler,
self.dot1x.logoff_handler,
self.dot1x.failure_handler):
handler(
'0e:00:00:00:00:ff', faucet_dot1x.get_mac_str(valve_index, port_no))
+ self.dot1x.auth_handler(
+ '0e:00:00:00:00:ff', faucet_dot1x.get_mac_str(valve_index, port_no), vlan_name, filter_id)
class ValveChangePortTestCase(ValveTestBases.ValveTestSmall):
| [ValveGroupTestCase->[test_unknown_eth_dst_rule->[learn_hosts,verify_flooding],setUp->[setup_valve]],ValveReloadConfigTestCase->[setUp->[flap_port,update_config]],ValveChangePortTestCase->[test_delete_permanent_learn->[update_config,rcv_packet],setUp->[setup_valve]],ValveMirrorTestCase->[setUp->[setup_valve]],ValveRootStackTestCase->[test_stack_learn->[prom_inc],test_stack_flood->[verify_flooding],setUp->[setup_valve]],build_pkt->[serialize->[serialize],serialize],ValveStackProbeTestCase->[test_stack_probe->[rcv_lldp],setUp->[setup_valve],test_stack_miscabling->[rcv_lldp],rcv_lldp->[rcv_packet],test_stack_lost_lldp->[rcv_lldp]],ValveACLTestCase->[test_vlan_acl_deny->[flap_port,update_config],setUp->[setup_valve]],ValveFuzzTestCase->[test_fuzz_vlan->[rcv_packet],setUp->[setup_valve]],ValveWarmStartVLANTestCase->[test_warm_start->[update_config],setUp->[setup_valve]],ValveDeletePortTestCase->[test_port_delete->[update_config],setUp->[setup_valve]],ValveAddVLANTestCase->[test_add_vlan->[update_config],setUp->[setup_valve]],ValveIdleLearnTestCase->[test_known_eth_src_rule->[learn_hosts],setUp->[setup_valve]],ValveActiveLACPTestCase->[test_lacp->[verify_expiry,learn_hosts,get_prom,port_labels,rcv_packet,packet_outs_from_flows],setUp->[setup_valve]],ValveDot1xSmokeTestCase->[setUp->[setup_valve]],ValveEdgeStackTestCase->[test_stack_learn->[rcv_packet],test_stack_flood->[verify_flooding],setUp->[setup_valve]],ValveReloadConfigProfile->[test_profile_reload->[profile],setUp->[profile]],ValveStackGraphUpdateTestCase->[test_update_stack_graph->[up_stack_port->[rcv_lldp],down_stack_port->[up_stack_port],up_stack_port,verify_stack_learn_edges,all_stack_up,down_stack_port]],ValveTestTunnel->[setUp->[setup_valve],test_tunnel_flowmods->[update_all_flowrules,get_valve,all_stack_up],test_update_on_stack_link_up->[update_all_flowrules,all_stack_up],test_tunnel_flowmod_count->[update_all_flowrules,get_valve,all_stack_up],test_update_on_stack_link_down->[get_valve,update_all_tunnels,a
ll_stack_up,update_all_flowrules,down_stack_port]],ValveChangeACLTestCase->[test_change_port_acl->[update_config],setUp->[setup_valve]],RyuAppSmokeTest->[test_faucet->[_fake_dp]],ValveOFErrorTestCase->[setUp->[setup_valve]],ValveTestBases->[ValveTestSmall->[port_expected_status->[port_labels,get_prom],set_port_down->[port_expected_status],connect_dp->[get_prom],set_port_up->[port_expected_status],tearDown->[teardown_valve],flap_port->[set_port_up,set_port_down],update_config->[prom_inc,get_prom],verify_flooding->[_verify_flood_to_port],rcv_packet->[prom_inc,build_pkt],prom_inc->[get_prom]],ValveTestBig->[test_disconnect->[prom_inc,get_prom],test_ra_for_controller->[packet_outs_from_flows,rcv_packet],test_port_down_eth_src_removal->[flap_port],test_nd_for_controller->[packet_outs_from_flows,rcv_packet],test_host_ipv4_fib_route->[packet_outs_from_flows,verify_expiry,rcv_packet],test_unknown_eth_dst_rule->[learn_hosts,verify_flooding],test_port_acl_deny->[update_config],test_known_eth_src_rule->[learn_hosts],test_unknown_port->[set_port_up],test_add_del_route->[packet_outs_from_flows,rcv_packet],test_bogon_arp_for_controller->[packet_outs_from_flows,rcv_packet],test_lldp->[rcv_packet],test_icmp_ping_unknown_neighbor->[packet_outs_from_flows,rcv_packet],test_icmpv6_ping_controller->[packet_outs_from_flows,rcv_packet],test_arp_for_controller->[packet_outs_from_flows,rcv_packet],test_mac_learning_vlan_separation->[learn_hosts,rcv_packet],test_icmp_ping6_unknown_neighbor->[packet_outs_from_flows,rcv_packet],test_icmp_ping_controller->[packet_outs_from_flows,rcv_packet],test_nd_from_host->[packet_outs_from_flows,rcv_packet],test_dp_acl_deny->[flap_port,update_config],test_loop_protect->[rcv_packet],test_host_ipv6_fib_route->[packet_outs_from_flows,verify_expiry,rcv_packet],setUp->[setup_valve],test_unexpected_port->[prom_inc],test_move_port->[rcv_packet],test_known_eth_src_deletion->[rcv_packet],test_arp_reply_from_host->[packet_outs_from_flows,rcv_packet],test_known_eth_ds
t_rule->[verify_expiry,learn_hosts],test_known_eth_dst_rule_deletion->[rcv_packet]]],ValveLACPTestCase->[test_lacp_timeout->[port_labels,rcv_packet,get_prom],test_lacp->[verify_expiry,learn_hosts,get_prom,port_labels,rcv_packet],test_lacp_flap->[verify_expiry,learn_hosts,get_prom,port_labels,rcv_packet],setUp->[setup_valve]],ValveDeleteVLANTestCase->[test_delete_vlan->[update_config],setUp->[setup_valve]],ValveAddPortTestCase->[setUp->[setup_valve,_inport_flows],test_port_add->[update_config,_inport_flows],_inport_flows->[flowmods_from_flows]]] | Test handlers for the MAC - 1x. | Bad indentation here. `33% 4:8=8s /home/travis/build/faucetsdn/faucet/tests/unit/faucet/test_valve.py Couldn't run '/home/travis/build/faucetsdn/faucet/tests/unit/faucet/test_valve.py' as Python code: IndentationError: expected an indented block (test_valve.py, line 1526)` |
@@ -200,7 +200,9 @@ module Engine
# add initial corporation tokens
spec[:setup][:corporations].each do |corp|
corporation = game.corporation_by_id(corp[:name])
- initial_tile.cities[corp[:token]].place_token(corporation) unless corp[:token].nil?
+ unless corp[:token].nil?
+ initial_tile.cities[corp[:token]].place_token(corporation, corporation.next_token)
+ end
end
end
| [new,let,describe,corporation_by_id,lay,subject,first,it,to,nil?,tokened_by?,before,rotate!,select,exits,require,include,place_token,hex_by_id,each,cities,tile_by_id,have_attributes,context,tile,eq,reservations] | name of the context it contains each corp has a token and if it is not present in the tile it will be. | prefer to use if corp[:token] |
@@ -235,7 +235,9 @@ class QueryEngine {
dataSource.schema(),
null,
null,
- ksqlTopic
+ ksqlTopic,
+ quotedFieldNames
+
);
}
| [QueryEngine->[buildLogicalPlans->[info,add,clone,getLeft,getRight,buildQueryLogicalPlan],handleDdlStatement->[create,maybeAddFieldsFromSchemaRegistry,getLeft,getRight,execute],getResultDatasource->[schema,KsqlTopic,getSelectItems,get,KsqlStream,field,name],buildPhysicalPlans->[handleDdlStatement,buildQueryPhysicalPlan,size,get,getLeft,getRight,KsqlException,getClass],maybeAddFieldsFromSchemaRegistry->[getSchemaRegistryClient,getTopic,getElements,getKafkaTopicName,copyWith,containsKey,checkAndSetAvroSchema,getLeft,format,toUpperCase,toString,getRight,cleanQuotes,KsqlException,put,StringLiteral],buildQueryLogicalPlan->[getFunctionRegistry,analyzeAggregate,putTopic,getSchema,buildPlan,QueryAnalyzer,KsqlStream,putSource,getTimestampField,toString,getKsqlTopic,cloneWithTimeKeyColumns,getKeyField,analyze],buildQueryPhysicalPlan->[getFunctionRegistry,getSchemaRegistryClient,getTopicClient,add,clone,cloneWithPropertyOverwrite,buildPhysicalPlan,MetastoreUtil,getMetaStore,StreamsBuilder,PhysicalPlanBuilder],getLogger]] | get result datasource. if the left and right are both null return null. | nit: new line |
@@ -78,7 +78,7 @@ func Package() {
customizePackaging()
mg.Deps(Update, prepareModulePackaging)
- mg.Deps(CrossBuild, CrossBuildGoDaemon)
+ mg.Deps(CrossBuild, CrossBuildXPack, CrossBuildGoDaemon)
mg.SerialDeps(mage.Package, TestPackages)
}
| [GenerateFieldsYAML,Now,BuildGoDaemon,TestPackages,Rm,DefaultGolangCrossBuildArgs,Clean,DefaultBuildArgs,CrossBuild,UseElasticBeatPackaging,Errorf,Since,GoTest,CrossBuildGoDaemon,Execute,Deps,GolangCrossBuild,DefaultGoTestIntegrationArgs,DefaultGoTestUnitArgs,Build,SerialDeps,Println,Run] | Package packages the Beat for distribution and build artifacts. GoTestIntegration tests a package. | This magefile change is missing for the other beats. |
@@ -38,5 +38,6 @@ macros.register(
name='usd_to_aud',
label='Convert USD to AUD',
shortcut='c',
- callback=usd_to_aud
+ callback=usd_to_aud,
+ desks=['SPORTS DESK', 'POLITICS']
)
| [usd_to_aud->[convert->[float,group],get,sub,get_rate],get_rate->[float,get,json],register] | Define a function to convert a NIC to AUD. | Whey here are hardcoded desk names? |
@@ -261,6 +261,7 @@ class ConanClientConfigParser(ConfigParser, object):
("CONAN_MSBUILD_VERBOSITY", "msbuild_verbosity", None),
("CONAN_CACERT_PATH", "cacert_path", None),
("CONAN_DEFAULT_PACKAGE_ID_MODE", "default_package_id_mode", None),
+ ("CONAN_REQUIRED_CONAN_VERSION", "required_conan_version", None),
# ("CONAN_DEFAULT_PROFILE_PATH", "default_profile", DEFAULT_PROFILE_NAME),
],
"hooks": [
| [ConanClientConfigParser->[hooks->[_get_conf],logging_level->[get_item],request_timeout->[get_item],print_commands_to_output->[get_item],default_python_requires_id_mode->[get_item],cacert_path->[get_item],download_cache->[get_item],default_profile->[get_item],relax_lockfile->[get_item],storage_path->[_get_conf],retry->[get_item],log_run_to_output->[get_item],non_interactive->[get_item],generate_run_log_file->[get_item],parallel_download->[get_item],scm_to_conandata->[get_item],retry_wait->[get_item],revisions_enabled->[get_item],config_install_interval->[get_item],default_package_id_mode->[get_item],full_transitive_package_id->[get_item],proxies->[_get_conf]]] | Find all of the CMake configuration values. These values are defined in the CMake configuration Config properties for the conan client. | Use only conan.conf, not env-var |
@@ -150,6 +150,16 @@ public class StorageInfo {
}
}
+ private void verifyUpgradingToLayoutVersion()
+ throws InconsistentStorageStateException {
+ int upgradeMark = getUpgradingToLayoutVersion();
+ if (upgradeMark != INVALID_LAYOUT_VERSION) {
+ throw new InconsistentStorageStateException("Ozone Manager died during"
+ + "a LayoutFeature upgrade.");
+ //TODO add recovery steps here, or point to a recovery doc.
+ }
+ }
+
private void verifyCreationTime() {
Long creationTime = getCreationTime();
Preconditions.checkNotNull(creationTime);
| [StorageInfo->[verifyCreationTime->[getCreationTime],setProperty->[setProperty],getProperty->[getProperty],verifyClusterId->[getClusterID],setClusterId->[setProperty],verifyNodeType->[getNodeType]]] | Checks if the cluster ID and creation time are not empty. | Nit: Missing a whitespace before 'a LayoutFeature upgrade'. |
@@ -2805,12 +2805,10 @@ fs_copy_hdlr(struct cmd_args_s *ap)
* specified in the dst argument
*/
src_str_len = strlen(dname);
- path_length = snprintf(dst_dir, MAX_FILENAME, "%s/%s",
- dst_str, src_str + src_str_len);
- if (path_length >= MAX_FILENAME) {
- rc = ENAMETOOLONG;
- fprintf(stderr, "Path length is too long.\n");
- D_GOTO(out_disconnect, rc);
+ D_ASPRINTF(dst_dir, "%s/%s", dst_str, src_str + src_str_len);
+ if (dst_dir == NULL) {
+ fprintf(stderr, "Unable to allocate memory for destination path.\n");
+ D_GOTO(out_disconnect, rc = -DER_NOMEM);
}
/* set paths based on file type for source and destination */
if (src_file_dfs.type == POSIX && dst_file_dfs.type == DAOS) {
| [No CFG could be retrieved] | constructs the path of the n - ary file based on the source and destination paths. Copy the nacns from the source to the destination. | (style) line over 80 characters |
@@ -0,0 +1,16 @@
+'use strict'
+
+const {netLog, NetLog} = process.atomBinding('net_log')
+
+NetLog.prototype.stopLogging = function (callback) {
+ if (callback && typeof callback !== 'function') {
+ throw new Error('Invalid callback function')
+ }
+
+ const path = this.currentlyLoggingPath
+ this._stopLogging(() => {
+ if (callback) callback(path)
+ })
+}
+
+module.exports = netLog
| [No CFG could be retrieved] | No Summary Found. | Any reason why this check isn't performed in a similar way to `StartLogging` api using `mate::Arguments` ? |
@@ -104,6 +104,7 @@ namespace Microsoft.Extensions.Logging.EventSource
exceptionJson = ToJson(exceptionInfoData);
}
IReadOnlyList<KeyValuePair<string, string>> arguments = GetProperties(state);
+ string message = formatter(state, exception);
_eventSource.MessageJson(
logLevel,
_factoryID,
| [EventSourceLogger->[IDisposable->[IsEnabled],Log->[IsEnabled]]] | Log a message in the event source. | >formatter [](start = 33, length = 9) I am seeing this is done in line 64 too. Is it possible we can have a case having _eventSource enabled with FormattedMessage and JsonMessage in same time? I am asking this to see if we can optimize calling `string message = formatter(state, exception);` once |
@@ -748,6 +748,8 @@ class TestResourceLimitsAccount(cloudstackTestCase):
# should be denied to create more than 1 template.
# 3. Try to create 2 templates in account 2. Verify account 2 should be
# able to create template without any error
+ if self.hypervisor.lower() in ['lxc']:
+ self.skipTest("Template feature is not supported on LXC")
try:
apiclient_account1 = self.testClient.getUserApiClient(
| [TestMaxAccountNetworks->[setUpClass->[Services]],TestResourceLimitsAccount->[setUpClass->[Services]],TestResourceLimitsDomain->[setUpClass->[Services]]] | Test 5. Templates per account Create a template from the ROOTDISK Create a template from a specific node in the system. | I think template creation from volume is supported in lxc. |
@@ -1786,9 +1786,9 @@ static void print_hexdump(std::ostream &o, const std::string &data)
int i = i0 + di;
char buf[4];
if(di<thislinelength)
- snprintf(buf, 4, "%.2x ", data[i]);
+ porting::mt_snprintf(buf, 4, "%.2x ", data[i]);
else
- snprintf(buf, 4, " ");
+ porting::mt_snprintf(buf, 4, " ");
o<<buf;
}
o<<" ";
| [No CFG could be retrieved] | emerge a single object from the system. Activate all objects of a given block. | constant can be replaced with `sizeof` |
@@ -26,7 +26,7 @@ module ClientApi
def update_team!
raise ClientApi::CustomTeamError unless @params
- return if @team.update_attributes(@params)
+ return if @team.update(@params)
raise ClientApi::CustomTeamError, @team.errors.full_messages
end
| [TeamsService->[change_current_team!->[update_attribute,id],team_page_details_data->[distinct],teams_data->[datatables_teams],update_team!->[raise,full_messages,update_attributes],initialize->[fetch,include?,raise,find_by_id]],new] | Updates a custom team with a sequence number. | Layout/EmptyLineAfterGuardClause: Add empty line after guard clause. |
@@ -473,7 +473,7 @@ class Yoast_Notification_Center_Test extends WPSEO_UnitTestCase {
$a = new Yoast_Notification( 'a' );
$this->assertFalse( Yoast_Notification_Center::maybe_dismiss_notification( $a ) );
- $b = new Yoast_Notification( 'b', array( 'id' => uniqid( 'id' ) ) );
+ $b = new Yoast_Notification( 'b', array( 'id' => uniqid( 'id', true ) ) );
$this->assertFalse( Yoast_Notification_Center::maybe_dismiss_notification( $b ) );
}
| [Yoast_Notification_Center_Test->[test_construct->[assertTrue],test_notification_is_new->[get_new_notifications,get_notification_center,assertContains,assertInternalType,add_notification],test_get_sorted_notifications_by_type->[get_notification_center,get_sorted_notifications,assertEquals,add_notification],test_update_storage_non_persistent->[assertFalse,get_notification_center,update_storage,add_notification],test_update_storage->[to_array,get_notification_center,update_storage,assertEquals,assertInternalType,add_notification],test_dismiss_notification_is_per_site->[assertTrue,create,skipWithoutMultisite,get_dismissal_key,assertFalse],test_is_notification_dismissed_is_per_site->[assertTrue,create,skipWithoutMultisite,get_dismissal_key,assertFalse,markTestSkipped],test_retrieve_notifications_from_storage_strips_nonces->[get_sample_notifications,to_array,get_nonce,setup_current_notifications,get_id,get_notifications,assertSame],test_remove_notification_by_id_when_no_notification_is_found->[remove_notification_by_id,method,getMock],test_add_notification->[get_notifications,get_notification_center,assertEquals,add_notification],test_is_notification_dismissed->[assertTrue,get_notification_center,is_notification_dismissed],test_get_notification_count->[get_notification_center,assertEquals,add_notification,get_notification_count],test_remove_notification_by_id_when_notification_is_found->[returnValue,remove_notification_by_id,will,getMock,method],test_add_notification_twice_persistent->[get_notifications,get_notification_center,assertEquals,add_notification],test_clear_dismissal_empty_key->[get_notification_center,assertFalse,clear_dismissal],test_get_sorted_notifications->[get_notification_center,get_sorted_notifications,assertEquals,assertInternalType,add_notification],test_restore_notification_clears_user_meta->[assertTrue,get_dismissal_key],get_notification_center->[setup_current_notifications],test_update_storage_strips_nonces->[get_id,get_sample_notifications,upda
te_storage,assertSame],test_update_nonce_on_re_add_notification->[get_nonce,get_notification_center,get_notifications,assertNotEquals,assertInternalType,add_notification],setUp->[add_cap,create],test_resolved_notifications->[get_notification_center,get_resolved_notification_count,assertEquals],test_is_notification_dismissed_falls_back_to_user_meta->[assertTrue,assertEmpty,assertSame,get_dismissal_key],test_remove_storage_without_notifications->[assertFalse,remove_storage,has_stored_notifications],test_restore_notification_is_per_site->[assertTrue,create,skipWithoutMultisite,get_dismissal_key,assertFalse],test_display_notifications->[returnValue,expectOutput,display_notifications,get_notification_center,will,getMock,add_notification],test_maybe_dismiss_notification->[assertFalse],test_remove_storage_with_notifications->[assertTrue,setup_current_notifications,update_storage,remove_storage,add_notification,has_stored_notifications],test_display_dismissed_notification->[display_notifications,get_notification_center,add_notification,expectOutput],test_has_stored_notifications->[returnValue,will,getMock,assertEquals,has_stored_notifications],test_get_sorted_notifications_by_priority->[get_notification_center,get_sorted_notifications,assertEquals,add_notification],test_clear_dismissal_as_string->[assertTrue,get_notification_center,get_dismissal_key,is_notification_dismissed,assertFalse,clear_dismissal],test_clear_dismissal->[assertTrue,get_notification_center,get_dismissal_key,is_notification_dismissed,assertFalse,clear_dismissal],test_get_sorted_notifications_empty->[get_notification_center,get_sorted_notifications,assertInternalType,assertEquals],test_display_notifications_not_for_current_user->[returnValue,expectOutput,display_notifications,get_notification_center,will,getMock,add_notification],test_add_notification_twice->[get_notifications,get_notification_center,assertEquals,add_notification],tearDown->[deactivate_hook]]] | Test if there is a notification that 
should be dismissed. | Avoid variables with short names like $b. Configured minimum length is 3. |
@@ -20,10 +20,10 @@ module TwoFactorAuthenticatable
private
def apply_secure_headers_override
- return unless after_sign_in_path_for.start_with?(openid_connect_authorize_path)
+ return unless stored_url_for_user&.start_with?(openid_connect_authorize_path)
authorize_params = Rack::Utils.parse_nested_query(
- URI(after_sign_in_path_for).query
+ URI(stored_url_for_user).query
).with_indifferent_access
authorize_form = OpenidConnectAuthorizeForm.new(authorize_params)
| [otp_view_data->[recovery_code_unavailable?,unconfirmed_phone?],authenticator_view_data->[recovery_code_unavailable?],phone_view_data->[recovery_code_unavailable?,unconfirmed_phone?]] | If the after_sign_in_path_for is a path that starts with OpenID. | to check my understanding here, the reason we are using `stored_url_for_user` and accessing directly is because `after_sign_in_path_for` would delete the value? |
@@ -132,15 +132,15 @@ namespace Content.Client.GameObjects.Components.Access
};
vBox.AddChild(grid);
- foreach (var accessLevel in prototypeManager.EnumeratePrototypes<AccessLevelPrototype>())
+ foreach (var AccessLevel in prototypeManager.EnumeratePrototypes<AccessLevelPrototype>())
{
var newButton = new Button
{
- Text = accessLevel.Name,
+ Text = AccessLevel.Name,
ToggleMode = true,
};
grid.AddChild(newButton);
- _accessButtons.Add(accessLevel.ID, newButton);
+ _accessButtons.Add(AccessLevel.ID, newButton);
newButton.OnPressed += _ => SubmitData();
}
}
| [IdCardConsoleWindow->[UpdateState->[White,IsPrivilegedIdPresent,TargetIdName,IsPrivilegedIdAuthorized,Editable,Pressed,Contains,TargetIdJobTitle,Gray,PrivilegedIdName,GetString,Disabled,Modulate,TargetIdFullName,IsTargetIdPresent,Text],SubmitData->[ToList,SubmitData,Text],ButtonPressed,ID,ShrinkCenter,PrivilegedId,TargetId,FillExpand,Name,GetString,AddChild,Disabled,prototypeManager,OnPressed,OnTextEntered,OnTextChanged,SubmitData,Add,Text]] | Displays the UI for the given ID card. Updates the UI for the target ID. | This rename is bad, the file was already correctly capitalized. |
@@ -4635,7 +4635,7 @@ abstract class CommonObject
case "edit":
// GETPOST("options_" . $key) can be 'abc' or array(0=>'abc')
$getposttemp = GETPOST('options_'.$key, 'none'); // GETPOST can get value from GET, POST or setup of default values.
- if (isset($getposttemp)) {
+ if (isset($getposttemp) && $getposttemp != '') {
if (is_array($getposttemp)) {
// $getposttemp is an array but following code expects a comma separated string
$value = implode(",", $getposttemp);
| [CommonObject->[swapContactStatus->[update_contact],fetchCommon->[setVarsFromFetchObj,get_field_list,quote],updateCommon->[call_trigger,set_save_query,quote],getListContactId->[liste_contact],getBannerAddress->[getFullAddress,getFullName],line_ajaxorder->[updateRangOfLine],line_max->[getRangOfLine],copy_linked_contact->[add_contact],deleteCommon->[call_trigger],update_note_public->[update_note],createCommon->[call_trigger,set_save_query,quote],setVarsFromFetchObj->[isArray,isNull,isDate,isInt,isFloat],line_down->[line_order],line_up->[line_order],set_save_query->[isDate,isArray,isFloat,isInt]]] | Show the options of the extrafields This function dump the value of an attribute This function is used to display the options of a node. This function show the list of all options in the select. | What if getposttemp is an array ? Also which kind of bug does this fix ? |
@@ -84,8 +84,10 @@ public class GameSelectorModel extends Observable {
if (newData != null) {
load(newData, file.getName());
}
+ return (newData != null);
} catch (final Exception e) {
log.log(Level.SEVERE, "Error loading game file: " + file.getAbsolutePath(), e);
+ return false;
}
}
| [GameSelectorModel->[selectByName->[resetToFactoryDefault],loadDefaultGameSameThread->[load,loadDefaultGameSameThread],load->[load],setGameData->[getGameName]]] | load a game from the specified file. | I think these parentheses are uneccessary for such a simple expression |
@@ -437,8 +437,8 @@ class CollectionUpdateMeta(UpdateMetaBaseMutation):
description = "Update public metadata for collection."
permissions = (ProductPermissions.MANAGE_PRODUCTS,)
public = True
- error_type_class = ProductError
- error_type_field = "product_errors"
+ error_type_class = CollectionError
+ error_type_field = "collection_errors"
class CollectionClearMeta(ClearMetaBaseMutation):
| [CollectionCreate->[save->[save],Arguments->[CollectionCreateInput]],ProductImageDelete->[perform_mutation->[ProductImageDelete]],ProductImageReorder->[perform_mutation->[save,ProductImageReorder]],ProductImageCreate->[Arguments->[ProductImageCreateInput],perform_mutation->[ProductImageCreate]],VariantImageAssign->[perform_mutation->[VariantImageAssign]],CollectionReorderProducts->[perform_mutation->[CollectionReorderProducts]],ProductTypeUpdate->[Arguments->[ProductTypeInput]],ProductVariantReorder->[perform_mutation->[save,ProductVariantReorder]],ProductUpdate->[save->[save],Arguments->[ProductInput]],CollectionUpdate->[save->[save],Arguments->[CollectionInput]],VariantImageUnassign->[perform_mutation->[VariantImageUnassign]],AttributeAssignmentMixin->[save->[_pre_save_values],_validate_input->[_check_input_for_product,_check_input_for_variant],clean_input->[_resolve_attribute_global_id,_resolve_attribute_nodes,_validate_input]],ProductVariantDelete->[success_response->[save]],ProductSetAvailabilityForPurchase->[perform_mutation->[ProductSetAvailabilityForPurchase,save]],ProductImageUpdate->[Arguments->[ProductImageUpdateInput],perform_mutation->[save,ProductImageUpdate]],CategoryCreate->[save->[save],Arguments->[CategoryInput]],ProductVariantSetDefault->[perform_mutation->[save,ProductVariantSetDefault]],ProductVariantUpdate->[Arguments->[ProductVariantInput]],CollectionAddProducts->[perform_mutation->[CollectionAddProducts]],CategoryUpdate->[Arguments->[CategoryInput]],ProductCreate->[save->[save],Arguments->[ProductCreateInput],clean_attributes->[clean_input],clean_input->[clean_attributes]],ProductTypeCreate->[Arguments->[ProductTypeInput]],ProductVariantCreate->[save->[save],Arguments->[ProductVariantCreateInput],clean_attributes->[clean_input],clean_input->[clean_attributes,validate_duplicated_attribute_values]],CollectionRemoveProducts->[perform_mutation->[CollectionRemoveProducts]]] | This class will create a class which will remove products from a 
collection. A class that represents a single private metadata item. | All mutations that return collection should wrap it in `ChannelContext`. You should add it to all deprecated collection meta mutations in this file. |
@@ -105,6 +105,7 @@ class LocalAgent(Agent):
"github-flow-storage",
"webhook-flow-storage",
"gitlab-flow-storage",
+ "bitbucket-flow-storage",
]
for label in all_storage_labels:
if label not in self.labels:
| [LocalAgent->[deploy_flow->[TypeError,populate_env_vars,get_flow_run_command,error,debug,type,exists,getattr,add,format,isinstance,ValueError,Popen,info,RunConfigSchema,StorageSchema],populate_env_vars->[append,get,update,extend,copy,str,join,items,getcwd],heartbeat->[list,poll,info,super,format,remove],generate_supervisor_conf->[dirname,replace,read,format,join,items,open],__init__->[debug,append,gethostname,super,set,isinstance,gethostbyname]],LocalAgent] | Initialize a NestedNode object. | We're planning on removing these default labels in a near future (0.14.0) release so this can be left out since it will be gone soon anyway and we can avoid making it another breaking change |
@@ -118,7 +118,7 @@ class Jetpack_Pre_Connection_JITMs {
* @return array The array of pre-connection JITMs.
*/
public function add_pre_connection_jitms( $pre_connection_messages ) {
- $jetpack_messages = $this->get_raw_messages();
+ $jetpack_messages = array_merge( $this->get_raw_messages(), $this->maybe_get_raw_partnership_messages() );
if ( ! is_array( $pre_connection_messages ) ) {
// The incoming messages aren't an array, so just return Jetpack's messages.
| [Jetpack_Pre_Connection_JITMs->[add_pre_connection_jitms->[get_raw_messages]]] | Add the incoming messages to the list of incoming messages that are to be sent to the J. | Would it be an option to add the message directly into `get_raw_messages()`? Maybe that would make it easier for folks looking to quickly see the whole list of pre-connection JITMs? |
@@ -1,5 +1,5 @@
# -*- encoding : utf-8 -*-
-class MoveRawEmailToFilesystem < ActiveRecord::Migration
+class MoveRawEmailToFilesystem < !rails5? ? ActiveRecord::Migration : ActiveRecord::Migration[4.2] # 2.3
def self.up
RawEmail.find_each(:batch_size => 10) do |raw_email|
if !File.exists?(raw_email.filepath)
| [MoveRawEmailToFilesystem->[up->[dbdata,to_s,filepath,exists?,data,find_each,puts],down->[raise]]] | Look up a sequence of records that can be found in the database. | Line is too long. [104/80] |
@@ -2999,6 +2999,9 @@ class TestReviewPending(ReviewBase):
'Has too few consecutive human-approved updates.')
assert (
doc('.auto_approval li').eq(1).text() ==
+ 'Has too many daily users.')
+ assert (
+ doc('.auto_approval li').eq(2).text() ==
'Uses a custom CSP.')
| [QueueTest->[setUp->[login_as_editor,login_as_senior_editor],get_queue->[get_addon_latest_version],get_expected_addons_by_names->[generate_files],generate_file->[generate_files],_test_get_queue->[get_queue],_test_results->[get_addon_latest_version]],TestAutoApprovedQueue->[test_queue_count->[login_with_permission,generate_files],test_navbar_queue_counts->[login_with_permission,generate_files],test_results->[login_with_permission,_test_results,generate_files]],TestStatusFile->[test_other->[get_file,check_status],test_status_full_reviewed->[get_file,check_status],test_status_full->[get_file,check_status]],TestQueueSearchUnlistedAllList->[test_search_not_deleted->[search,named_addons,generate_files],test_not_searching->[search,named_addons,generate_files],test_search_deleted->[search,named_addons,generate_files],test_search_by_guid->[search,generate_file,named_addons]],TestReview->[test_unlisted_addon_action_links_as_admin->[login_as_admin],test_mixed_channels_action_links_as_admin_on_unlisted_review->[login_as_admin],test_data_value_attributes->[login_as_senior_editor],test_needs_unlisted_reviewer_for_only_unlisted->[login_as_senior_editor],test_dont_need_unlisted_reviewer_for_mixed_channels->[login_as_senior_editor],test_confirm_auto_approval_with_permission->[login_as_senior_editor],test_action_links_as_admin->[login_as_admin],test_mixed_channels_action_links_as_regular_reviewer->[login_as_editor],test_confirm_auto_approval_no_permission->[login_as_editor],test_admin_flagged_addon_actions_as_editor->[login_as_editor,get_addon,get_dict],test_admin_links_as_non_admin->[login_as_editor],test_unflag_option_notflagged_as_admin->[login_as_admin],test_item_history_deleted->[generate_deleted_versions],test_review_unlisted_while_a_listed_version_is_awaiting_review->[login_as_senior_editor],test_viewing_lock_admin->[login_as_admin],test_user_reviews->[login_as_senior_editor],test_no_auto_approval_summaries_since_everything_is_public->[login_as_senior_editor],test_abuse_repo
rts->[login_as_senior_editor],test_data_value_attributes_unreviewed->[login_as_senior_editor],test_viewing_queue->[login_as_admin],test_viewing_lock_limit->[login_as_admin],test_viewing->[login_as_admin],test_item_history_with_unlisted_review_page->[test_item_history,login_as_senior_editor],test_unadmin_flag_as_admin->[login_as_admin],test_mixed_channels_action_links_as_admin->[login_as_admin],test_admin_flagged_addon_actions_as_admin->[get_addon,get_dict,login_as_admin],test_abuse_reports_developers->[login_as_senior_editor],test_approvals_info->[login_as_senior_editor],test_unflag_option_forflagged_as_editor->[login_as_editor],test_unflag_option_forflagged_as_admin->[login_as_admin],test_item_history_with_unlisted_versions_too->[test_item_history],test_unadmin_flag_as_editor->[login_as_editor],test_no_versions->[login_as_senior_editor]],BaseTestQueueSearch->[test_not_searching->[search,named_addons,generate_files],test_search_by_supported_email_in_locale->[generate_file,named_addons],test_clear_search_hidden->[search],test_not_searching_with_param->[test_not_searching],test_search_by_nothing->[search,named_addons,generate_files],test_queue_counts->[search,generate_files],test_search_by_admin_reviewed_admin->[search,generate_files,named_addons,login_as_admin],test_queue_counts_admin->[search,generate_files,login_as_admin],generate_file->[generate_files],test_clear_search_visible->[search],test_search_by_addon_in_locale->[generate_file,named_addons],test_search_by_addon_author->[search,generate_file,named_addons],test_search_by_addon_name_admin->[search,generate_files,named_addons,login_as_admin],test_search_by_addon_name->[search,named_addons,generate_files],test_search_by_admin_reviewed->[search,named_addons,generate_files]],TestBetaSignedLog->[setUp->[login_as_editor]],TestEventLogDetail->[test_me->[make_review]],TestPerformance->[test_performance_other_user_as_senior_editor->[_test_performance_other_user_as_admin,setUpSeniorEditor],setUpAdmin->[login_as_admin],t
est_performance_chart_editor->[_test_chart,setUpEditor],setUpSeniorEditor->[login_as_senior_editor],test_performance_other_user_as_admin->[_test_performance_other_user_as_admin,setUpAdmin],_test_chart->[get_url],test_usercount_with_more_than_one_editor->[get_url,create_logs,setUpEditor],setUpEditor->[login_as_editor],test_performance_chart_as_senior_editor->[_test_chart,setUpSeniorEditor],test_performance_chart_as_admin->[_test_chart,setUpAdmin],test_performance_other_user_not_admin->[get_url,setUpEditor],_test_performance_other_user_as_admin->[get_url,get_user]],TestEventLog->[test_enddate_filter->[make_review],test_action_filter->[make_review],setUp->[login_as_editor]],TestNominatedQueue->[test_queue_count->[_test_queue_count],test_get_queue->[_test_get_queue],test_results->[_test_results],setUp->[get_expected_addons_by_names]],TestLimitedReviewerQueue->[test_queue_count->[_test_queue_count],test_get_queue->[_test_get_queue],test_results->[_test_results],setUp->[login_as_limited_reviewer,create_limited_user,generate_files]],TestQueueSearch->[test_search_by_many_addon_types->[search,named_addons,generate_files],test_search_by_addon_type_any->[search,generate_file,named_addons],test_search_by_app->[search,named_addons,generate_files],test_search_by_addon_type->[search,named_addons,generate_files],test_preserve_multi_apps->[search,named_addons,generate_files]],TestHome->[test_approved_review->[make_review],test_stats_user_position_unranked->[approve_reviews],test_stats_user_position_ranked->[approve_reviews],test_unlisted_queues_only_for_senior_reviewers->[login_as_senior_editor],test_stats_listed_unlisted->[login_as_senior_editor],setUp->[login_as_editor],delete_review->[make_review],test_undelete_review_admin->[delete_review,undelete_review,login_as_admin],test_stat_display_name->[approve_reviews],test_deleted_review->[delete_review],test_stats_total_admin->[login_as_admin],test_stats_monthly->[approve_reviews],test_undelete_review_own->[delete_review,undelete_revi
ew],test_undelete_review_other->[delete_review,undelete_review],test_stats_total->[approve_reviews]],ReviewBase->[setUp->[login_as_editor,generate_file]],TestLimitedReviewerReview->[test_new_addon_review_action_as_limited_editor->[get_addon,get_dict],test_old_addon_review_action_as_limited_editor->[get_addon,get_dict],setUp->[login_as_limited_reviewer,create_limited_user]],TestModeratedQueue->[test_queue_count->[_test_queue_count],test_keep_score->[setup_actions],test_remove_score->[setup_actions],test_remove_fails_for_own_addon->[get_logs,setup_actions],test_skip->[setup_actions],test_remove->[get_logs,setup_actions],test_skip_score->[setup_actions],test_keep->[get_logs,setup_actions]],TestQueueBasics->[test_paginator_when_many_pages->[generate_files],test_reviews_per_page->[generate_files],test_pending_bar->[get_review_data,generate_files],test_navbar_queue_counts->[generate_files],test_max_per_page->[generate_files],check_bar->[style],test_full_reviews_bar->[get_review_data,generate_files]],TestEditorMOTD->[test_require_editor_to_view->[get_url],test_editor_can_view_not_edit->[login_as_editor,get_url],test_form_errors->[get_url,login_as_admin],test_require_admin_to_change_motd->[login_as_editor],test_change_motd->[get_url,login_as_admin],test_motd_tab->[get_url,login_as_admin],test_motd_edit_group->[login_as_editor]],SearchTest->[setUp->[login_as_editor,login_as_senior_editor]],TestUnlistedAllList->[test_results->[_test_results],setUp->[get_expected_addons_by_names]],TestPendingQueue->[test_queue_count->[_test_queue_count],test_get_queue->[_test_get_queue],test_results->[_test_results],setUp->[get_expected_addons_by_names]],TestReviewLog->[test_super_review_logs->[make_an_approval],test_search_comment_exists->[make_an_approval],test_search_author_doesnt_exist->[make_approvals,make_an_approval],test_search_comment_case_exists->[make_an_approval],test_search_author_case_exists->[make_approvals,make_an_approval],test_end_filter->[make_approvals],make_an_approval->[g
et_user],test_addon_missing->[make_approvals],make_approvals->[get_user],test_search_author_exists->[make_approvals,make_an_approval],test_xss->[get_user],test_search_comment_doesnt_exist->[make_an_approval],test_comment_logs->[make_an_approval],test_search_addon_exists->[make_approvals],test_basic->[make_approvals,login_as_senior_editor],setUp->[login_as_editor],test_review_url->[get_user,login_as_admin],test_search_addon_case_exists->[make_approvals],test_request_info_logs->[make_an_approval],test_search_addon_doesnt_exist->[make_approvals],test_end_filter_wrong->[make_approvals]],TestLeaderboard->[test_leaderboard_ranks->[_award_points,get_cells],setUp->[login_as_editor]],TestReviewPending->[test_auto_approval_summary->[login_as_senior_editor],test_pending_to_public->[get_addon,pending_dict],pending_dict->[get_dict],test_display_only_unreviewed_files->[pending_dict],test_review_unreviewed_files->[pending_dict,login_as_admin]]] | Test auto - approval summary. | Test for it _not_ being displayed for non-auto-approvals? (The issue this pull is purporting to fix.) |
@@ -26,7 +26,7 @@
add_annotation_text(<%= annot.annotation_text.id %>,
'<%= simple_format(escape_javascript(annot.annotation_text.content.to_s)) %>');
add_annotation(<%= annot.id %>,
- { start: <%= annot.line_start %>, end: <%= annot.line_end %> },
+ { start: <%= annot.line_start %>, end: <%= annot.line_end %>, column_start: <%= annot.column_start %>, column_end: <%= annot.column_end %> },
'<%= annot.annotation_text.id %>');
<% end %>
<% end %>
| [No CFG could be retrieved] | Initializes the object. | This line is too long. |
@@ -49,6 +49,15 @@ static int test_sanity_enum_size(void)
return 1;
}
+static int test_sanity_int_size(void)
+{
+ if (!TEST_size_t_eq(sizeof(int), 4)
+ || !TEST_int_eq(CHAR_BIT, 8)
+ || !TEST_size_t_lt(sizeof(int), sizeof(int64_t)))
+ return 0;
+ return 1;
+}
+
static int test_sanity_twos_complement(void)
{
/* Basic two's complement checks. */
| [register_tests->[ADD_TEST],int->[TEST_int_eq,TEST_long_eq,memset,TEST_size_t_eq,TEST_mem_eq]] | Test sanity enum size. Tests if the sequence is empty. | Why did you keep `!TEST_size_t_eq(sizeof(int), 4)`? This line is meant to replace it, and it really makes no sense at all to keep both... |
@@ -54,7 +54,7 @@ public class KsqlConfig extends AbstractConfig implements Cloneable {
public static final String
KSQL_SERVICE_ID_CONFIG = "ksql.service.id";
public static final String
- KSQL_SERVICE_ID_DEFAULT = "ksql_";
+ KSQL_SERVICE_ID_DEFAULT = "default_";
public static final String
KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG = "ksql.persistent.prefix";
| [KsqlConfig->[applyStreamsConfig->[propertiesWithPrefix,commonConfigs],cloneWithPropertyOverwrite->[applyStreamsConfig,KsqlConfig],get->[get],put->[put],clone->[KsqlConfig],applyStreamsConfig]] | Creates a new configuration class that can be used to configure a Ksql instance. The schema registry url is the same as the schema registry url of the Avro schema. | Why `default_` and not just `default`? We're already concatenating `ksql.service.id` and the command topic suffix with an `_`. |
@@ -208,6 +208,11 @@ public class FileSplitter extends AbstractMessageSplitter {
};
+ String firstLineAsHeader =
+ this.firstLieHeaderName != null ?
+ bufferedReader.lines().findFirst().orElse(null)
+ : null;
+
Iterator<Object> iterator = new Iterator<Object>() {
boolean markers = FileSplitter.this.markers;
| [FileSplitter->[splitMessage->[close->[close],next->[hasNext],hasNext->[close],next,hasNext]]] | This method splits a message into two parts. Iterator that implements the Iterator interface. This method is used to build a message from a marker. | Need to be careful here; I think you'll find that the bufferedReader.lines() stream returns an empty string if the file contains just one newline. Probably easier to avoid the stream here. |
@@ -35,8 +35,9 @@ class PyScikitLearn(PythonPackage):
version('0.15.2', 'd9822ad0238e17b382a3c756ea94fe0d')
version('0.16.1', '363ddda501e3b6b61726aa40b8dbdb7e')
version('0.17.1', 'a2f8b877e6d99b1ed737144f5a478dfc')
+ version('0.13.1', 'acba398e1d46274b8470f40d0926e6a4')
depends_on('python@2.6:2.8,3.3:')
- depends_on('py-setuptools', type='build')
+ depends_on('py-setuptools', type='build', when='@0.17.1:')
depends_on('py-numpy@1.6.1:', type=('build', 'run'))
depends_on('py-scipy@0.9:', type=('build', 'run'))
| [PyScikitLearn->[depends_on,version]] | Returns version numbers of all packages that are available on the system. | Is this actually the first version that adds this dependency, or are you just guessing? |
@@ -1,5 +1,5 @@
<%= link_to path do %>
- <span class='hide'>
+ <span class='display-none'>
<%= name %>
</span>
<%= t('forms.buttons.edit') %>
| [No CFG could be retrieved] | Renders a link to the . | Not relevant for the purpose of the pull request, but I'm curious what purpose these hidden action elements are serving. As far as I can tell, they're never toggled visible. Maybe they were expected to be used as an accessible text to contextualize the "Edit" label ("Password Edit", etc)? But `display: none` are ignored by assistive technology, so this would not be happening correctly if that's the intention. |
@@ -319,7 +319,7 @@ func SearchRepository(opts *SearchRepoOptions) (RepositoryList, int64, error) {
func accessibleRepositoryCondition(user *User) builder.Cond {
var cond = builder.NewCond()
- if user == nil || !user.IsRestricted {
+ if user == nil || !user.IsRestricted || user.ID <= 0 {
orgVisibilityLimit := []structs.VisibleType{structs.VisibleTypePrivate}
if user == nil {
orgVisibilityLimit = append(orgVisibilityLimit, structs.VisibleTypeLimited)
| [LoadAttributes->[loadAttributes],loadAttributes,String] | countRepositoryCondition returns a condition for checking if a repository is accessible. Find all repositories that the user has access to. | Isn't it strange that `user ==nil` enables `VisibleTypeLimited` orgs three lines below? I was under the impression that user == nil (or user.ID <= 0) meant anonymous/unidentified. |
@@ -71,7 +71,9 @@ func CreateNewBranch(doer *models.User, repo *models.Repository, oldBranchName,
}
if !git.IsBranchExist(repo.RepoPath(), oldBranchName) {
- return fmt.Errorf("OldBranch: %s does not exist. Cannot create new branch from this", oldBranchName)
+ return models.ErrBranchDoesNotExist{
+ BranchName: oldBranchName,
+ }
}
basePath, err := models.CreateTemporaryPath("branch-maker")
| [RepoPath,GetBranch,IsErrPushRejected,IsErrPushOutOfDate,Error,OpenRepository,FullName,CreateBranch,Close,IsBranchExist,RemoveTemporaryPath,Errorf,Clone,GetBranchesByPath,PushingEnvironment,GetTag,Push,CreateTemporaryPath] | CreateNewBranch creates a new branch from a repository. if is a function that creates a new branch and pushes it to the base path. | Have you checked every call to CreateNewBranch and made sure they handle your new error correctly? |
@@ -403,11 +403,7 @@ qat_compress_impl(qat_compress_dir_t dir, char *src, int src_len,
}
/* we now wait until the completion of the operation. */
- if (!wait_for_completion_interruptible_timeout(&complete,
- QAT_TIMEOUT_MS)) {
- status = CPA_STATUS_FAIL;
- goto fail;
- }
+ wait_for_completion(&complete);
if (dc_results.status != CPA_STATUS_SUCCESS) {
status = CPA_STATUS_FAIL;
| [No CFG could be retrieved] | region Buffer mapping Reads the header and gzip data from the source and writes it to the destination buffer. | Can we remove `QAT_TIMEOUT_MS`? |
@@ -129,6 +129,7 @@ func (s *S2IBuilder) Build() error {
handleBuildStatusUpdate(s.build, s.client, nil)
return err
}
+
contextDir := ""
if len(s.build.Spec.Source.ContextDir) > 0 {
contextDir = filepath.Clean(s.build.Spec.Source.ContextDir)
| [Build->[Builder,Build,ValidateConfig],ValidateConfig->[ValidateConfig]] | Build builds the image This function is used to inject the secrets into a build This config is used to configure a build with a specific configuration for a specific build. PullIfNotPresent config. RuntimeImagePullPolicy = s2iapi. | shouldn't you add the stage info up here, before returning? |
@@ -348,7 +348,8 @@ func (data *resourceRowData) ColorizedColumns() []string {
func (data *resourceRowData) getInfoColumn() string {
step := data.step
- if step.Op == deploy.OpCreateReplacement || step.Op == deploy.OpDeleteReplaced {
+ switch step.Op {
+ case deploy.OpCreateReplacement, deploy.OpDeleteReplaced:
// if we're doing a replacement, see if we can find a replace step that contains useful
// information to display.
for _, outputStep := range data.outputSteps {
| [ColorizedColumns->[IsDone],ColorizedSuffix->[IsDone],RecordPolicyViolationEvent->[recordDiagEventPayload]] | getInfoColumn returns the column name of the diagnostic message that is displayed in the table. Prints out the worst diagnostic next to the tree - view status message. | I assume that there's some hidden invariant here that `data.outputSteps[0]` is correct. Should we be asserting something about `step.Op` at this point? Would it make more sense to write the above as a range like we have for Create/Replace? |
@@ -84,8 +84,14 @@ class DatasetReader(Registrable):
parameters you set for this DatasetReader!_
"""
- def __init__(self, lazy: bool = False, cache_directory: str = None) -> None:
+ def __init__(
+ self,
+ lazy: bool = False,
+ cache_directory: Optional[str] = None,
+ max_instances: Optional[int] = None,
+ ) -> None:
self.lazy = lazy
+ self.max_instances = max_instances
if cache_directory:
self._cache_directory = pathlib.Path(cache_directory)
os.makedirs(self._cache_directory, exist_ok=True)
| [_LazyInstances->[__init__->[super],__iter__->[write,instance_generator,serialize,deserialize,isinstance,exists,open,ConfigurationError]],DatasetReader->[serialize_instance->[dumps],_get_cache_location_for_file_path->[str,flatten_filename],deserialize_instance->[loads],_instances_from_cache_file->[deserialize_instance,open,strip],read->[tqdm,exists,_get_cache_location_for_file_path,getattr,_instances_from_cache_file,info,format,isinstance,_instances_to_cache_file,_LazyInstances,warning,_read,ConfigurationError],_instances_to_cache_file->[open,tqdm,write,serialize_instance],__init__->[Path,makedirs]],getLogger] | Initialize the object with a random identifier. | Can you add this to the docstring? |
@@ -1184,7 +1184,7 @@ function $ParseProvider() {
}
if (isAllDefined(value)) {
scope.$$postDigest(function () {
- if(isAllDefined(lastValue)) unwatch();
+ if (isAllDefined(lastValue)) unwatch();
});
}
}, objectEquality);
| [No CFG could be retrieved] | Adds watch delegates to parsedExpression and adds interceptor to it. Creates a function that intercepts the expression. | I'd rather `function` were trailed immediately by parentheses*, unless it's a named function --- can jscs enforce that? (I meant parentheses, not braces**) |
@@ -36,12 +36,15 @@ abstract class Abstract_Indexable_Tag_Presenter extends Abstract_Indexable_Prese
* @throws \InvalidArgumentException When a subclass does not define a key property. This should appear during development.
*/
public function present() {
+ $value = $this->get();
+
if ( $this->key === 'NO KEY PROVIDED' ) {
- throw new \InvalidArgumentException( \get_class( $this ) . ' is an Abstract_Indexable_Presenter but does not override the key property.' );
+ /**
+ * Required for backwards compatability with add-ons in which we override this class and define the key in the tag_format.
+ */
+ return \sprintf( $this->tag_format, $this->escape_value( $value ) );
}
- $value = $this->get();
-
if ( \is_string( $value ) && $value !== '' ) {
return \sprintf( $this->tag_format, $this->escape_value( $value ), $this->key );
}
| [Abstract_Indexable_Tag_Presenter->[present->[escape_value,get]]] | Presents the tag. | @Xyfi I think you need to check `if ( \is_string( $value ) && $value !== '' ) {` here too, before returning the `/sprintf ` |
@@ -305,8 +305,14 @@ namespace Dynamo.Tests
// Assert ZTTestPackage is still a package dependency
var packageDependencies = CurrentDynamoModel.CurrentWorkspace.NodeLibraryDependencies;
Assert.Contains(new PackageDependencyInfo("ZTTestPackage", new Version("0.0.1")), packageDependencies);
- Assert.IsTrue(packageDependencies.First().IsLoaded == false);
+ var package = packageDependencies.First();
+ if (package is PackageDependencyInfo)
+ {
+ var packageDependencyState = ((PackageDependencyInfo)package).State;
+ // Assert that the package is not loaded.
+ Assert.AreNotEqual(PackageDependencyState.Loaded, packageDependencyState);
+ }
}
[Test]
| [PackageDependencyTests->[PackageDependenciesUpdatedAfterPackageAndNodeAdded->[LoadPackage],PackageDependenciesClearedAfterWorkspaceCleared->[LoadPackage],GetLibrariesToPreload->[GetLibrariesToPreload],DependencyWithTypeLoads->[LoadPackage]]] | PackageDependenciesPreservedWhenPackagesNotLoaded is called. | so I totally get removing all these - BUT why not leave them so that the change you just made still has tests? Then we can remove the tests when we actually remove the property? |
@@ -47,7 +47,7 @@ def test_io_egi():
include = ['TRSP', 'XXX1']
with warnings.catch_warnings(record=True): # preload=None
raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname,
- include=include)
+ include=include, channel_naming='EEG %03d')
assert_equal('eeg' in raw, True)
| [test_io_egi->[loadtxt,_combine_triggers,catch_warnings,assert_raises,enumerate,simplefilter,assert_allclose,assert_equal,_test_raw_reader,assert_array_equal,unique,find_events,len,repr,open,assert_true,read_raw_egi,pick_types,array],run_tests_if_main,dirname,realpath,join,simplefilter] | Test import of EGI simple binary files. Check if there is a missing lease in the raw. event_id. | Do you need to make this change? `_test_raw_reader` shouldn't really impose channel naming requirements. |
@@ -65,9 +65,9 @@ func sprintDeleteRepo(request *pfs.DeleteRepoRequest) string {
force = " --force"
}
if request.All {
- return fmt.Sprintf("delete repo --all%s", request.Repo.Name, force)
+ return fmt.Sprintf("delete repo --all")
}
- return fmt.Sprintf("delete repo %s%s", request.Repo.Name, force)
+ return fmt.Sprintf("delete repo %s %s", request.Repo.Name, force)
}
func sprintStartCommit(request *pfs.StartCommitRequest, response *transaction.TransactionResponse) string {
| [Fprintf,Join,Execute,Sprintf,New,String,Parse,Funcs,Ago] | PrintDetailedTransactionInfo prints detailed information about a transaction. sprintCreateBranch returns a string that can be used to create or delete a branch given. | oh uh, I came across this as well in my upcoming PR - I think we still want the `force` here. |
@@ -84,9 +84,9 @@ func (gc *generateCmd) validate(cmd *cobra.Command, args []string) error {
return errors.Wrap(err, "error loading translation files")
}
- if gc.apimodelPath == "" {
+ if currentConfig.CLIConfig.Generate.APIModel == "" {
if len(args) == 1 {
- gc.apimodelPath = args[0]
+ currentConfig.CLIConfig.Generate.APIModel = args[0]
} else if len(args) > 1 {
cmd.Usage()
return errors.New("too many arguments were provided to 'generate'")
| [loadAPIModel->[LoadContainerServiceFromFile,Join,ReadFile,New,Wrap],mergeAPIModel->[Infoln,MapValues,Sprintf,Wrap,MergeValuesWithAPIModel],validate->[LoadTranslations,IsNotExist,Stat,New,Usage,Errorf,Wrap],run->[SetPropertiesDefaults,PrettyPrintArmTemplate,Infoln,Error,GenerateTemplate,Sprintf,WriteTLSArtifacts,Fatalf,BuildAzureParametersFile,Exit,InitializeTemplateGenerator],StringArrayVar,StringVar,Error,Sprintf,validate,loadAPIModel,StringVarP,Fatalf,run,mergeAPIModel,Flags,BoolVar] | validate validates that the command is valid. | These places can be re-assigned to `cfg` as well |
@@ -350,6 +350,16 @@ export class AccessService {
credentials: 'include',
requireAmpResponseSourceOrigin: true
}));
+ }).catch(error => {
+ this.analyticsEvent_('access-authorization-failed');
+ if (this.config_.authorizationFallbackResponse) {
+ // Use fallback.
+ setTimeout(() => {throw error;});
+ return this.config_.authorizationFallbackResponse;
+ } else {
+ // Rethrow the error.
+ throw error;
+ }
}).then(response => {
log.fine(TAG, 'Authorization response: ', response);
this.setAuthResponse_(response);
| [No CFG could be retrieved] | This method runs the authorization process. Gets the access READER_ID. | Please write a test for this. |
@@ -45,10 +45,15 @@ public final class DiscoveryStrategyFactory {
* @return the discovery strategy
*/
public static DiscoveryStrategy create(ResourceLoader resourceLoader, Bootstrap bootstrap, Set<Class<? extends Annotation>> initialBeanDefiningAnnotations) {
-
- if (Reflections.isClassLoadable(JANDEX_INDEX_CLASS_NAME, resourceLoader)) {
+ if (Reflections.isClassLoadable(resourceLoader, JANDEX_INDEX_CLASS_NAME)) {
CommonLogger.LOG.usingJandex();
- return Reflections.newInstance(resourceLoader, JANDEX_DISCOVERY_STRATEGY_CLASS_NAME, resourceLoader, bootstrap, initialBeanDefiningAnnotations);
+ try {
+ return Reflections.cast(SecurityActions.getConstructor(Reflections.classForName(resourceLoader, JANDEX_DISCOVERY_STRATEGY_CLASS_NAME),
+ ResourceLoader.class, Bootstrap.class, Set.class).newInstance(resourceLoader, bootstrap, initialBeanDefiningAnnotations));
+ } catch (Exception e) {
+ throw CommonLogger.LOG.unableToInstantiate(JANDEX_DISCOVERY_STRATEGY_CLASS_NAME,
+ Arrays.toString(new Object[] { resourceLoader, bootstrap, initialBeanDefiningAnnotations }), e);
+ }
}
return new ReflectionDiscoveryStrategy(resourceLoader, bootstrap, initialBeanDefiningAnnotations);
}
| [DiscoveryStrategyFactory->[create->[isClassLoadable,usingJandex,ReflectionDiscoveryStrategy,newInstance]]] | Creates a discovery strategy. | We should not need to use SecurityActions here as we are obtaining a public constructor of a public class. |
@@ -102,7 +102,7 @@ public class HoodieHFileDataBlock extends HoodieDataBlock {
FSDataOutputStream ostream = new FSDataOutputStream(baos, null);
HFile.Writer writer = HFile.getWriterFactory(conf, cacheConfig)
- .withOutputStream(ostream).withFileContext(context).create();
+ .withOutputStream(ostream).withFileContext(context).withComparator(new HoodieKVComparator()).create();
// Serialize records into bytes
Map<String, byte[]> sortedRecordsMap = new TreeMap<>();
| [HoodieHFileDataBlock->[createRecordsFromContentBytes->[createRecordsFromContentBytes]]] | Serialize the records into bytes. | Lets make sure bootstrap index writing does not use this code path. again problematic |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.