patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -259,8 +259,8 @@ export class BindExpression { return this.eval_(args[0], scope); case AstNodeType.INVOCATION: - // Built-in functions don't have a caller object. - const isBuiltIn = (args[0] === undefined); + // Built-in functions and macros don't have a caller object. + const hasCaller = (args[0] !== undefined); const caller = this.eval_(args[0], scope); const params = this.eval_(args[1], scope);
[No CFG could be retrieved]
Evaluates and returns number of nodes in the given AST. Checks if a function can be called with the given parameters.
Nit: `builtInOrMacro`. It'll avoid `!` in the conditional below which is a tad more readable.
@@ -0,0 +1,3 @@ +// For legacy purposes, preserve the UMD of the public API of Jitsi Meet +// external API (a.k.a. JitsiMeetExternalAPI). +module.exports = require('./external_api').default;
[No CFG could be retrieved]
No Summary Found.
why use module.exports here though?
@@ -64,4 +64,4 @@ using System.Runtime.InteropServices; // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] -[assembly: AssemblyFileVersion("2.1.0.4395")] +[assembly: AssemblyFileVersion("2.1.0.6957")]
[Satellite]
The Build and Revision Numbers are optional.
I don't think that this file is usually included in PRs.
@@ -981,8 +981,8 @@ class SerializedSQLiteStorage: partner_address=partner_address, ) return [ - TimestampedEvent(self.serializer.deserialize(event.wrapped_event), event.log_time) - for event in events + TimestampedEvent(self.serializer.deserialize(data), timestamp) + for data, timestamp in events ] def get_events_with_timestamps(
[_filter_from_dict->[_filter_from_dict],SQLiteStorage->[get_raiden_events_payment_history_with_timestamps->[_sanitize_limit_and_offset],write_state_changes->[_ulid_factory],_query_events->[_form_and_execute_json_query],close->[close],get_events->[_query_events],get_latest_event_by_data_field->[_query_to_string,EventEncodedRecord],batch_query_event_records->[_get_event_records],write_events->[_ulid_factory],write_state_snapshot->[_ulid_factory],get_events_with_timestamps->[_query_events],get_snapshot_before_state_change->[SnapshotEncodedRecord],_form_and_execute_json_query->[_sanitize_limit_and_offset],_get_state_changes->[StateChangeEncodedRecord,_form_and_execute_json_query],get_snapshots->[SnapshotEncodedRecord],batch_query_state_changes->[_get_state_changes],get_state_changes->[_get_state_changes],get_statechanges_records_by_range->[StateChangeEncodedRecord],__exit__->[close],get_latest_state_change_by_data_field->[StateChangeEncodedRecord,_query_to_string],_get_event_records->[_form_and_execute_json_query,EventEncodedRecord]],_query_to_string->[_filter_from_dict],SerializedSQLiteStorage->[get_latest_state_change_by_data_field->[get_latest_state_change_by_data_field,StateChangeRecord],write_events->[write_events],write_state_snapshot->[write_state_snapshot],log_run->[log_run],get_events_with_timestamps->[get_events_with_timestamps],get_version->[get_version],get_events->[get_events],get_raiden_events_payment_history_with_timestamps->[get_raiden_events_payment_history_with_timestamps],count_state_changes->[count_state_changes],__init__->[SQLiteStorage],write_state_changes->[write_state_changes],update_version->[update_version],get_snapshot_before_state_change->[get_snapshot_before_state_change,SnapshotRecord],close->[close],get_statechanges_records_by_range->[StateChangeRecord,get_statechanges_records_by_range],get_statechanges_by_range->[get_statechanges_records_by_range],get_latest_event_by_data_field->[EventRecord,get_latest_event_by_data_field],get_state_chang
es_stream->[get_state_changes]],Range]
Get the raiden events payment history with timestamps.
If we use `List[Tuple[str, datetime]]` above we could just as well use `List[Tuple[Event, datetime]]` here and get rid of the TimestampedEvent type. That would be more consistent.
@@ -368,9 +368,9 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV String partitionPath = formatPartitionKey(partitionStr); ensurePartitionLoadedCorrectly(partitionPath); return fetchHoodieFileGroup(partitionPath, fileId).map(fileGroup -> fileGroup.getAllBaseFiles() - .filter( - baseFile -> HoodieTimeline.compareTimestamps(baseFile.getCommitTime(), HoodieTimeline.EQUALS, instantTime)) - .filter(df -> !isBaseFileDueToPendingCompaction(df)).findFirst().orElse(null)); + .filter(baseFile -> HoodieTimeline.compareTimestamps(baseFile.getCommitTime(), HoodieTimeline.EQUALS, + instantTime)).filter(df -> !isBaseFileDueToPendingCompaction(df)).findFirst().orElse(null)) + .map(df -> addExternalBaseFileIfPresent(new HoodieFileGroupId(partitionPath, fileId), df)); } finally { readLock.unlock(); }
[AbstractTableFileSystemView->[getLatestFileSlicesBeforeOrOn->[ensurePartitionLoadedCorrectly],getBaseFileOn->[ensurePartitionLoadedCorrectly],fetchMergedFileSlice->[getPendingCompactionOperationWithInstant,mergeCompactionPendingFileSlices],getLatestBaseFile->[ensurePartitionLoadedCorrectly],getLatestBaseFilesBeforeOrOn->[ensurePartitionLoadedCorrectly],getLatestFileSlices->[ensurePartitionLoadedCorrectly],getAllFileSlices->[ensurePartitionLoadedCorrectly],reset->[init],getLatestMergedFileSlicesBeforeOrOn->[ensurePartitionLoadedCorrectly],runSync->[init,refreshTimeline,resetViewState],sync->[getTimeline],buildFileGroups->[getPartitionPathFromFilePath,buildFileGroups],filterBaseFileAfterPendingCompaction->[isFileSliceAfterPendingCompaction],getLatestBaseFiles->[ensurePartitionLoadedCorrectly],isBaseFileDueToPendingCompaction->[getPartitionPathFromFilePath],getAllFileGroups->[ensurePartitionLoadedCorrectly,formatPartitionKey],getLatestFileSlice->[ensurePartitionLoadedCorrectly],ensurePartitionLoadedCorrectly->[addFilesToView],getAllBaseFiles->[ensurePartitionLoadedCorrectly,isBaseFileDueToPendingCompaction],getLatestUnCompactedFileSlices->[ensurePartitionLoadedCorrectly]]]
Returns the base file on the given file id.
more readable line folding?
@@ -637,9 +637,7 @@ final class DocumentationNormalizer implements NormalizerInterface, CacheableSup $schema = new Schema($v3 ? Schema::VERSION_OPENAPI : Schema::VERSION_SWAGGER); $schema->setDefinitions($definitions); - $this->jsonSchemaFactory->buildSchema($resourceClass, $format, $type, $operationType, $operationName, $schema, $serializerContext, $forceCollection); - - return $schema; + return $this->jsonSchemaFactory->buildSchema($resourceClass, $format, $type, $operationType, $operationName, $schema, $serializerContext, $forceCollection); } private function computeDoc(bool $v3, Documentation $documentation, \ArrayObject $definitions, \ArrayObject $paths, array $context): array
[DocumentationNormalizer->[addRequestBody->[addSchemas],updatePutOperation->[addSchemas],addSubresourceOperation->[addSchemas,addPaginationParameters],updateGetOperation->[addSchemas],getPathOperation->[getPath],updatePostOperation->[addSchemas],normalize->[normalize]]]
Returns the JSON schema for the given parameters. Get the missing security keys.
This is a consequence of the fix
@@ -31,6 +31,16 @@ namespace Microsoft.Extensions.Hosting return ResolveFactory<THostBuilder>(assembly, CreateHostBuilder); } + public static Func<string[], IHostBuilder>? ResolveHostBuilderFactory(Assembly assembly) + { + if (assembly.EntryPoint is null) + { + return null; + } + + return args => new DeferredHostBuilder(args, assembly.EntryPoint); + } + private static Func<string[], T>? ResolveFactory<T>(Assembly assembly, string name) { var programType = assembly?.EntryPoint?.DeclaringType;
[HostFactoryResolver->[GetServiceProvider->[GetType,GetProperty],Build->[Array,GetMethod],IsFactory->[ParameterType,IsAssignableFrom,ReturnType,Length,Equals],ResolveServiceProviderFactory->[GetServiceProvider,Build,webHostBuilderFactory,hostBuilderFactory,webHostFactory],ResolveFactory->[Invoke,GetMethod],Public,nameof,NonPublic,DeclaredOnly,Static,Instance]]
Resolve host builder factory.
1. Does this need to be public? 2. If it does, it is a bit concerning that it has the same name as the method above, takes the same parameters, and returns the same type, but doesn't really do the same things. Can we rename it somehow to distinguish it from the existing method above?
@@ -508,9 +508,9 @@ func (g *BuildGenerator) resolveImageSecret(ctx kapi.Context, secrets []kapi.Sec } // getNextBuildName returns name of the next build and increments BuildConfig's LastVersion. -func getNextBuildName(bc *buildapi.BuildConfig) string { - bc.Status.LastVersion++ - return fmt.Sprintf("%s-%d", bc.Name, bc.Status.LastVersion) +func getNextBuildName(buildConfig *buildapi.BuildConfig) string { + buildConfig.Status.LastVersion++ + return fmt.Sprintf("%s-%d", buildConfig.Name, buildConfig.Status.LastVersion) } // For a custom build strategy, update base image env variable reference with the new image.
[Instantiate->[GetBuildConfig,UpdateBuildConfig],resolveImageStreamDockerRepository->[GetImageStreamImage,GetImageStream],Clone->[GetBuild],resolveImageStreamReference->[GetImageStreamImage,GetImageStreamTag],resolveImageSecret->[resolveImageStreamDockerRepository],generateBuildFromConfig->[FetchServiceAccountSecrets],createBuild->[CreateBuild,GetBuild]]
getNextBuildName returns the name of the next build in the build namespace. If there generateBuildFroorImageKey - generate base image key in environment.
Just curious, what was wrong with having 'bc' here?
@@ -109,7 +109,12 @@ class Command(object): options, args = self.parse_args(args) if options.quiet: - level = "WARNING" + if options.quiet == 1: + level = "WARNING" + if options.quiet == 2: + level = "ERROR" + else: + level = "CRITICAL" elif options.verbose: level = "DEBUG" else:
[Command->[main->[_build_session,parse_args],parse_args->[parse_args]]]
Entry point for pip - debug. A dict of dicts where the keys are the names of the modules and the values are the Checks if pip is using the latest version of the .
could be an `elif`
@@ -407,9 +407,10 @@ public abstract class ProcessTree implements Iterable<OSProcess>, IProcessTree, } public void killRecursively() throws InterruptedException { - LOGGER.finer("Killing recursively "+getPid()); - p.killRecursively(); - killByKiller(); + LOGGER.fine("Recursively killing pid="+getPid()); + for (OSProcess p : getChildren()) + p.killRecursively(); + kill(); } public void kill() throws InterruptedException {
[ProcessTree->[Unix->[killAll->[hasMatchingEnvVars,killRecursively],get->[kill->[],get]],Solaris->[SolarisProcess->[getEnvironmentVariables->[getFile],getArguments->[getFile],getParent->[get],getFile]],SerializedProcess->[readResolve->[get]],writeReplace->[Remote],Linux->[LinuxProcess->[getEnvironmentVariables->[getFile],getArguments->[getFile],getParent->[get],getFile]],Darwin->[DarwinProcess->[parse->[StringArrayMemory,peek,skip0,readString,readInt],getParent->[get]]],get->[kill->[killByKiller],get,OSProcess],OSProcess->[killByKiller->[getKillers,getPid,kill],hasMatchingEnvVars->[getEnvironmentVariables,get],getChildren->[getParent]],killAll->[killAll,get],Windows->[killRecursively->[killByKiller,getPid,killRecursively],getEnvironmentVariables->[getEnvironmentVariables],kill->[killByKiller,getPid,kill],get->[kill->[],get,getPid],killAll->[hasMatchingEnvVars,getPid,killRecursively],OSProcess,getPid],UnixProcess->[getFile->[getPid],killRecursively->[getChildren,getPid,killRecursively,kill],kill->[killByKiller,invoke,getPid]],Remote->[killAll->[killAll],RemoteProcess->[killRecursively->[killRecursively],getEnvironmentVariables->[getEnvironmentVariables],getArguments->[getArguments],kill->[kill],act->[act],getParent->[get,getPid,getParent],getPid]],iterator->[iterator]]]
Kills this process recursively.
The change is incorrect, because getChildren() always returns empty list for Windows
@@ -364,6 +364,13 @@ class Operator(object): Block. Users can use the build in instructions to describe their neural network. """ + OP_WITHOUT_KERNEL_SET = { + 'feed', 'fetch', 'save', 'load', 'recurrent', 'go', + 'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', 'recv', + 'listen_and_serv', 'parallel_do', 'save_combine', 'load_combine', + 'ncclInit', 'channel_create', 'channel_close', 'channel_send', + 'channel_recv', 'select' + } def __init__(self, block,
[get_var->[default_main_program,global_block],get_all_op_protos->[get_all_op_protos],dtype_is_floating->[convert_np_dtype_to_dtype_],Operator->[type->[type],output_arg_names->[output_arg_names],input_arg_names->[input_arg_names],rename_input->[rename_input],has_attr->[has_attr],attr_type->[attr_type],rename_output->[rename_output],to_string->[_debug_string_],attr_names->[attr_names],__init__->[type,instance,find_name],output_names->[output_names],block_attr->[block_attr],all_attrs->[block_attr,attr],output->[output],attr->[attr],input_names->[input_names],input->[input],__str__->[to_string]],Block->[sync_with_cpp->[has_var,type,Operator,name,create_var],remove_var->[remove_var],insert_op->[Operator,insert_op],clone_variable->[create_var],to_string->[_debug_string_,to_string],rename_var->[has_var,var,type,rename_var,shape,dtype,Variable],set_forward_block_idx->[set_forward_block_idx],append_op->[Operator,append_op],copy_param_info_from->[iter_parameters],var_recursive->[var],create_var->[Variable],remove_op->[remove_op],prepend_op->[Operator,prepend_op],__str__->[to_string]],Program->[sync_with_cpp->[sync_with_cpp,Block,num_blocks],parse_from_string->[sync_with_cpp,Program,Block],to_string->[_debug_string_,to_string],prune->[sync_with_cpp,Program,prune,Block],__init__->[Block],create_block->[block,Block,current_block],copy_param_info_from->[global_block],num_blocks->[num_blocks],inference_optimize->[sync_with_cpp,Program,has_attr,Block],clone->[sync_with_cpp,Program,Block,copy_param_info_from],rollback->[current_block],__str__->[to_string]],program_guard->[switch_main_program,switch_startup_program],Parameter->[__str__->[to_string],to_string->[to_string],__init__->[__init__]],Variable->[type->[type],lod_level->[lod_level],persistable->[persistable],to_string->[_debug_string_],shape->[shape],__init__->[convert_np_dtype_to_dtype_],dtype->[dtype],name->[name]],OpProtoHolder->[__init__->[get_all_op_protos]],Program]
Initialize a new object of the type of . Adds missing components to the network. if self. block. desc is None or self. block. desc is None or self.
This set is sad...Can you file a issue and assign to me to clean it up? So that I don't forget.
@@ -104,6 +104,14 @@ public class ObjectInDataStoreManagerImpl implements ObjectInDataStoreManager { // TODO: further investigate why an extra event is sent when it is // alreay Ready for DownloadListener stateMachines.addTransition(State.Ready, Event.OperationSuccessed, State.Ready); + // State transitions for data object migration + stateMachines.addTransition(State.Ready, Event.MigrationRequested, State.Migrating); + stateMachines.addTransition(State.Ready, Event.CopyRequested, State.Copying); + stateMachines.addTransition(State.Allocated, Event.MigrationRequested, State.Migrating); + stateMachines.addTransition(State.Migrating, Event.MigrationFailed, State.Failed); + stateMachines.addTransition(State.Migrating, Event.MigrationSucceeded, State.Destroyed); + stateMachines.addTransition(State.Migrating, Event.OperationSuccessed, State.Ready); + stateMachines.addTransition(State.Migrating, Event.OperationFailed, State.Ready); } @Override
[ObjectInDataStoreManagerImpl->[findObject->[findObject]]]
create a new object in the given data store check if the object is in the S3 system and if so update the object in the.
`Event.OperationSuccessed`? or should this be `Event.OperationSucceeded`?
@@ -130,7 +130,8 @@ public class PartitionedDataWriter<S, D> extends WriterWrapper<D> implements Fin this.state.setProp(WRITER_LATEST_SCHEMA, builder.getSchema()); } long cacheExpiryInterval = this.state.getPropAsLong(PARTITIONED_WRITER_CACHE_TTL_SECONDS, DEFAULT_PARTITIONED_WRITER_CACHE_TTL_SECONDS); - this.writeTimeoutInterval = cacheExpiryInterval / 3; + // Increase the timeout value to make it less sensitive to HDFS slow writer + this.writeTimeoutInterval = cacheExpiryInterval / 3 * 2; log.debug("PartitionedDataWriter: Setting cache expiry interval to {} seconds", cacheExpiryInterval); this.partitionWriters = CacheBuilder.newBuilder()
[PartitionedDataWriter->[bytesWritten->[bytesWritten],getFinalState->[getFinalState,bytesWritten,recordsWritten],writeEnvelope->[writeEnvelope,get],commit->[commit],cleanup->[cleanup],serializePartitionInfoToState->[getPartitionsKey],createPartitionWriter->[isDataWriterWatermarkCapable],getPartitionInfoAndClean->[getPartitionsKey],closeWritersInCache->[close],PartitionDataWriterMessageHandler->[handleMessage->[close,handleMessage]],close->[close],isDataWriterForPartitionSafe->[isSpeculativeAttemptSafe],recordsWritten->[recordsWritten]]]
Creates a new instance of PartitionedDataWriter which will clean up all records and bytes of Creates a cache that loads the .
Do we want to derive this from Cache TTL value or can this be an independent config? The reason being the right value for write timeout may vary depending on the destination being written to. And if the config exceeds the cache ttl, we can bound the value to the TTL.
@@ -14,6 +14,10 @@ from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld def load_personas(opt): print('[ loading personas.. ]') + if opt.get('include_personas', True): + print( + "\n [NOTE: In the BST paper both partner's have a persona.\n You can choose to ignore yours, the model never sees it.\n In the Blender paper, this was not used for humans.\n You can also turn personas off with --include-personas False]\n" + ) fname = raw_data_path(opt) with open(fname) as json_file: data = json.load(json_file)
[SelfChatWorld->[init_contexts->[load_personas]],InteractiveWorld->[init_contexts->[load_personas]]]
load personas from json file.
nit, i know this isn't your chagne but: if `not opt.get('include_personas', True)` we shouldn't even waste time loading the personas here.
@@ -89,7 +89,8 @@ func newWriter(ctx context.Context, client Client, memCache kv.GetPut, deduper * }, buf: &bytes.Buffer{}, stats: &stats{}, - chain: NewTaskChain(cancelCtx), + // TODO: Make task chain parallelism configurable? + chain: NewTaskChain(cancelCtx, 100), first: true, } WithRollingHashConfig(defaultAverageBits, defaultSeed)(w)
[createChunk->[resetHash],Annotate->[resetHash],Copy->[maybeDone],maybeCheapCopy->[splitAnnotations],writeData->[Write],flushDataRef->[roll],Close->[createChunk,Close,maybeDone,flushBuffer],flushBuffer->[Annotate]]
NewWriter returns a new instance of Writer. Annotate adds an annotation to the current chunk.
Don't we bottleneck on the rolling hash here? Might make sense to set this to `runtime.GOMAXPROCS(0)` if it's CPU bound.
@@ -39,17 +39,9 @@ import ( const ( // DeploymentSchemaVersionCurrent is the current version of the `Deployment` schema. // Any deployments newer than this version will be rejected. - DeploymentSchemaVersionCurrent = 1 + DeploymentSchemaVersionCurrent = 2 ) -// We alias the latest versions of the various types below to their friendly names here. - -type Checkpoint = CheckpointV1 -type Deployment = DeploymentV1 -type Manifest = ManifestV1 -type PluginInfo = PluginInfoV1 -type Resource = ResourceV1 - // VersionedCheckpoint is a version number plus a json document. The version number describes what // version of the Checkpoint structure the Checkpoint member's json document can decode into. type VersionedCheckpoint struct {
[No CFG could be retrieved]
2 ) Make any optional things required. CheckpointVersion is the version of the checkpoint. It contains a newer version of the checkpoint.
I removed these since these are major versioning footguns. Anybody using an API type should indicate which version they are using in the type.
@@ -779,7 +779,7 @@ public class PrecompiledContracts { } byte[] res = new byte[WORD_SIZE]; if (isConstantCall()) { - //for static call not use thread pool to avoid potential effect + //for constant call not use thread pool to avoid potential effect for (int i = 0; i < cnt; i++) { if (DataWord .equalAddressByteArray(addresses[i], recoverAddrBySign(signatures[i], hash))) {
[PrecompiledContracts->[BatchValidateSign->[doExecute->[RecoverAddrTask,getCPUTimeLeftInNanoSecond]],BN128Multiplication->[execute->[encodeRes]],ValidateMultiSign->[execute->[dataOne]],BN128Addition->[execute->[encodeRes]]]]
This method is called from the batch validation thread.
inconsistent with your ``isConstantCall`` to ``isStaticCall`` change.
@@ -8,8 +8,12 @@ import java.nio.file.Path; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import org.triplea.io.FileUtils; class LocalizeHtmlTest { + + private final Path path = Path.of("/does/not/exist"); + @Test void testLocalizeHtml() { final String testHtml =
[LocalizeHtmlTest->[testLocalizeHtml->[is,assertThat,localizeImgLinksInHtml,of],testAbsoluteUrl->[localizeImgLinksInHtml,of,equalTo,is,assertThat]]]
Tests the tag in HTML format.
nit, i'd recommend to just duplicate this. The 'rules' of how to write clean tests are different from otherwise production code. Having a test be more self contained is valuable, particularly so when it means less redirection.
@@ -0,0 +1,11 @@ +function parseVersion( versionNumberString ) { + const versionNumber = ( /(\d+).(\d+).?(\d+)?/g ).exec( versionNumberString ); + + return { + major: parseInt( versionNumber[ 1 ], 10 ), + minor: parseInt( versionNumber[ 2 ], 10 ), + patch: parseInt( versionNumber[ 3 ], 10 ) || 0, + }; +} + +module.exports = parseVersion;
[No CFG could be retrieved]
No Summary Found.
Please add some docs
@@ -160,11 +160,6 @@ class Rule return !$this->disabled; } - public function getLiterals() - { - return $this->literals; - } - public function isAssertion() { return 1 === count($this->literals);
[Rule->[getPrettyString->[getPrettyString],__toString->[isDisabled]]]
Returns true if the node is enabled.
This should maybe be kept (at least temporarily) to avoid BC breaks if a plugin rely on the method
@@ -154,14 +154,6 @@ public final class ParameterModelsLoaderDelegate { } } - private void parseDefaultEncoding(ExtensionParameter extensionParameter, ParameterDeclarer parameter) { - // TODO: MULE-9220 - Add a syntax validator which checks that the annotated parameter is a String - if (extensionParameter.getAnnotation(DefaultEncoding.class).isPresent()) { - parameter.getDeclaration().setRequired(false); - parameter.withModelProperty(new DefaultEncodingModelProperty()); - } - } - private boolean declaredAsGroup(HasParametersDeclarer component, ParameterDeclarationContext declarationContext, ExtensionParameter groupParameter)
[ParameterModelsLoaderDelegate->[declaredAsGroup->[declare],declare->[declare]]]
Parse config override. This method is called when a parameter group is not present in the model.
shouldn't delete this TODO. It should actually move the ModelValidator which verifies you didn't use `@DefaultEncoding` more than once
@@ -80,13 +80,11 @@ public final class ChannelOutboundBuffer { private static final AtomicLongFieldUpdater<ChannelOutboundBuffer> TOTAL_PENDING_SIZE_UPDATER = AtomicLongFieldUpdater.newUpdater(ChannelOutboundBuffer.class, "totalPendingSize"); - @SuppressWarnings({ "unused", "FieldMayBeFinal" }) private volatile long totalPendingSize; private static final AtomicIntegerFieldUpdater<ChannelOutboundBuffer> WRITABLE_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ChannelOutboundBuffer.class, "writable"); - @SuppressWarnings({ "unused", "FieldMayBeFinal" }) private volatile int writable = 1; private ChannelOutboundBuffer(Handle handle) {
[ChannelOutboundBuffer->[newObject->[ChannelOutboundBuffer],nioBuffers->[nioBuffers],remove->[decrementPendingOutboundBytes],close->[run->[close],isEmpty],failFlushed->[remove]]]
Creates a new instance of the channel outbound buffer. Assigns a promise to the last message in the buffer.
Why remove this ?
@@ -263,6 +263,12 @@ module Engine "hexes": [ "H4" ] + }, + { + "type": "exchange", + "corporation": "SC", + "owner_type": "player", + "from": "10%_par" } ] },
[No CFG could be retrieved]
Magic block of a company can be moved to a different place in a different place. Blocks hex G9 while owned by a player. Earns $10 every time a.
this is a very specific ability, i don't know if we should put it in the generic framework
@@ -314,6 +314,8 @@ func (pc *Client) CreateUpdate( } else { endpoint = "update" } + case UpdateKindRefresh: + contract.Failf("Refresh not yet supported for managed stacks [pulumi/pulumi#1081]") case UpdateKindDestroy: endpoint = "destroy" default:
[GetStack->[restCall],ExportStackDeployment->[restCall],GetStackLogs->[restCall],DeleteStack->[restCall],CreateUpdate->[restCall],ImportStackDeployment->[restCall],DownloadTemplate->[apiCall],DecryptValue->[restCall],DescribeUser->[restCall],GetStackUpdates->[restCall],RenewUpdateLease->[restCallWithOptions],DownloadPlugin->[apiCall],CompleteUpdate->[updateRESTCall],PatchUpdateCheckpoint->[updateRESTCall],StartUpdate->[restCall],CreateStack->[restCall],GetUpdateEvents->[restCall],InvalidateUpdateCheckpoint->[updateRESTCall],EncryptValue->[restCall],ListStacks->[restCall],AppendUpdateLogEntry->[updateRESTCall],ListTemplates->[restCall]]
CreateUpdate creates a new update object. This method is called by update to update the specified update.
I get that this is supposed to be a NYI assert but if we're going to check this in can we return an error instead of crashing?
@@ -432,7 +432,7 @@ export function applyStaticLayout(element) { if (layout == Layout.NODISPLAY) { // CSS defines layout=nodisplay automatically with `display:none`. Thus // no additional styling is needed. - applyNoDisplayLayout(element); + toggle(element, false); } else if (layout == Layout.FIXED) { setStyles(element, { width: dev().assertString(width),
[No CFG could be retrieved]
Adds a missing class to the element if layout is fixed - height or intrinSIC padding of the element.
@honeybadgerdontcare This change should be transparent since we always apply it at runtime (line 337), but probably should update the cache transform regardless.
@@ -7,6 +7,17 @@ #include <wchar.h> // wmemcpy_s #endif +int AssertCount = 0; +int AssertsToConsole = false; + +#if _WIN32 + // IsInAssert defined in header +#elif !defined(__IOS__) + __declspec(thread) int IsInAssert = false; +#else +int IsInAssert = false; +#endif + void __stdcall js_memcpy_s(__bcount(sizeInBytes) void *dst, size_t sizeInBytes, __in_bcount(count) const void *src, size_t count) { Assert((count) <= (sizeInBytes));
[No CFG could be retrieved]
- - - - - - - - - - - - - - - - - - region ethernet interface.
nit: extra two lines
@@ -502,7 +502,7 @@ static int error_check(DRBG_SELFTEST_DATA *td) * failure. */ t.entropylen = 0; - if (TEST_false(RAND_DRBG_generate(drbg, buff, td->exlen, 1, + if (!TEST_false(RAND_DRBG_generate(drbg, buff, td->exlen, 1, td->adin, td->adinlen)) || !uninstantiate(drbg)) goto err;
[No CFG could be retrieved]
Instantiate a non - zero - length non - zero - length non - zero - length non Generate output and check if the given has been requested for reseed.
This is an expected fail. I must say these !TEST_something are really confusing and hard to parse.
@@ -393,7 +393,10 @@ async function finishBundle( ); endBuildStep(logPrefix, `${destFilename} → ${latestName}`, startTime); } else { - endBuildStep(logPrefix, destFilename, startTime); + const loggingName = destFilename.startsWith('component-') + ? `${options.name} → ${destFilename}` + : destFilename; + endBuildStep(logPrefix, loggingName, startTime); } const targets = options.minify ? MINIFIED_TARGETS : UNMINIFIED_TARGETS;
[No CFG could be retrieved]
Finishes the final steps after a given JS file entry point with esbuild and babel. Creates a new file in the dest dir with the given options.
We'll eventually be building `custom-element`, too. Seems like we could just check if `!destFileName.startsWith('amp')`?
@@ -81,9 +81,13 @@ func TestExamples(t *testing.T) { Dir: path.Join(cwd, "dynamic-provider/multiple-turns-2"), Dependencies: []string{"@pulumi/pulumi"}, }, - { - Dir: path.Join(cwd, "compat/v0.10.0/minimal"), - Dependencies: []string{"@pulumi/pulumi"}, + } + + // The compat test only works on Node 6.10.X because its uses the old 0.10.0 pulumi package, which only supported + // a single node version, since it had the native runtime component. + if nodeVer, err := getNodeVersion(); err != nil && nodeVer.Major == 6 && nodeVer.Minor == 10 { + examples = append(examples, integration.ProgramTestOptions{ + Dir: path.Join(cwd, "compat/v0.10.0/minimal"), Config: map[string]string{ "name": "Pulumi", },
[IsProviderType,Join,Equal,ProgramTest,Contains,False,NoError,NotNil,String,Getwd,Run]
Dependencies is a list of dependencies for the given path.
The test only works on 6.10.X because that's the only version of node 0.10.0 supported. It was passing everywhere because it incorrectly linked in the version of the just built `@pulumi/pulumi` package. I changed things up such that we will run the test on 6.10.X, but on other nodes, we just skip it.
@@ -19,6 +19,8 @@ module CacheBuster # # https://github.com/fastly/fastly-ruby#efficient-purging return unless Rails.env.production? + return if ENV["FASTLY_API_KEY"].blank? + return if ENV["FASTLY_API_KEY"] == "foobarbaz" HTTParty.post("https://api.fastly.com/purge/https://#{ApplicationConfig['APP_DOMAIN']}#{path}", headers: { "Fastly-Key" => ApplicationConfig["FASTLY_API_KEY"] })
[bust_tag_pages->[bust],bust_podcast->[bust],bust_organization->[bust],bust_user->[bust],bust_tag->[bust],bust_events->[bust],bust_article->[bust],bust_comment->[bust],bust_podcast_episode->[bust],bust_listings->[bust],bust_page->[bust],bust_home_pages->[bust],bust_article_comment->[bust]]
This method will bust the cache if there is a block with the given name.
is there a reason we're hardcoding this `foobarbaz` string ?
@@ -86,8 +86,9 @@ def sign_file(file_obj): log.info('Not signing file {0}: addon is a hotfix'.format(file_obj.pk)) return - # We only sign files that have been reviewed. - if file_obj.status not in amo.REVIEWED_STATUSES: + # We only sign files that have been reviewed plus beta files. + # TODO: Does this make sense? Check repurcussions of this. + if file_obj.status not in amo.REVIEWED_STATUSES + (amo.STATUS_BETA,): log.info("Not signing file {0}: it isn't reviewed".format(file_obj.pk)) return
[sign_file->[get_endpoint,call_signing],call_signing->[SigningError]]
Sign a File object.
We might want another status for beta files that aren't reviewed yet. We could then simply add `amo.STATUS_BETA` to the list of `amo.REVIEWED_STATUSES`.
@@ -327,8 +327,17 @@ namespace System.Text.RegularExpressions.Tests yield return new object[] { @"(cat)(\c[*)(dog)", "asdlkcat\u00FFdogiwod", RegexOptions.None, 0, 15, false, string.Empty }; } - // Surrogate pairs splitted up into UTF-16 code units. + // Surrogate pairs split up into UTF-16 code units. yield return new object[] { @"(\uD82F[\uDCA0-\uDCA3])", "\uD82F\uDCA2", RegexOptions.CultureInvariant, 0, 2, true, "\uD82F\uDCA2" }; + + // Unicode text + foreach (RegexOptions options in new[] { RegexOptions.None, RegexOptions.RightToLeft, RegexOptions.IgnoreCase | RegexOptions.CultureInvariant }) + { + yield return new object[] { "\u05D0\u05D1\u05D2\u05D3(\u05D4\u05D5|\u05D6\u05D7|\u05D8)", "abc\u05D0\u05D1\u05D2\u05D3\u05D4\u05D5def", options, 3, 6, true, "\u05D0\u05D1\u05D2\u05D3\u05D4\u05D5" }; + yield return new object[] { "\u05D0(\u05D4\u05D5|\u05D6\u05D7|\u05D8)", "\u05D0\u05D8", options, 0, 2, true, "\u05D0\u05D8" }; + yield return new object[] { "\u05D0(?:\u05D1|\u05D2|\u05D3)", "\u05D0\u05D2", options, 0, 2, true, "\u05D0\u05D2" }; + yield return new object[] { "\u05D0(?:\u05D1|\u05D2|\u05D3)", "\u05D0\u05D4", options, 0, 0, false, "" }; + } } [Theory]
[RegexMatchTests->[Result->[Result],Match->[Match,VerifyMatch],Match_Timeout_Repetition_Throws->[Match],Match_DeepNesting->[Match],Match_Invalid->[Match],Synchronized->[Synchronized,Match],Match_Timeout_Loop_Throws->[Match],Match_SpecialUnicodeCharacters_enUS->[Match],Match_SpecialUnicodeCharacters_Invariant->[Match],Match_VaryingLengthStrings->[Match],Match_Timeout->[Match],Result_Invalid->[Result,Match]]]
Match_Basic_TestData yields regex test cases as object arrays: pattern, input string, options, start offset, length, expected success flag, and expected match text — including cases for surrogate pairs split into UTF-16 code units and Unicode (Hebrew) text under several RegexOptions.
anything special about `05D0` etc? just arbitrary surrogate pairs?
@@ -156,7 +156,7 @@ public final class ValueProviderMediator<T extends ParameterizedModel & Enrichab return valueSet.stream() .map(option -> cloneAndEnrichValue(option, parameters)) .map(ValueBuilder::build) - .collect(toSet()); + .collect(toCollection(LinkedHashSet::new)); } /**
[ValueProviderMediator->[resolveValues->[toSet,resolve,createValueProvider,collect],getValues->[getMessage,orElseThrow,getName,ValueResolvingException,get,getValues,getParameters,resolveValues,format,isEmpty,withRefreshToken],getParameters->[toList,collect]]]
Resolve all values in a chain of parameters.
I don't think this is correct. Yes, once the LinkedHashSet is created, the order will be preserved, but if the ValueProvider returns a plain HashSet, results will be in a different order every time. Shouldn't we sort the results from the valueSet too? Just in case the VP does not return an ordered set? Maybe this is like this by design and we want to keep the order of the results only if the VP wants them in order. Otherwise, we do not care.
@@ -196,13 +196,14 @@ type TableManager struct { maxChunkAge time.Duration bucketClient BucketClient metrics *tableManagerMetrics + extraTables []ExtraTables bucketRetentionLoop services.Service } // NewTableManager makes a new TableManager func NewTableManager(cfg TableManagerConfig, schemaCfg SchemaConfig, maxChunkAge time.Duration, tableClient TableClient, - objectClient BucketClient, registerer prometheus.Registerer) (*TableManager, error) { + objectClient BucketClient, registerer prometheus.Registerer, extraTables []ExtraTables) (*TableManager, error) { if cfg.RetentionPeriod != 0 { // Assume the newest config is the one to use for validation of retention
[partitionTables->[HasPrefix,ListTables],UnmarshalYAML->[Duration],deleteTables->[DeleteTable,Log,Err,Info,Set,Add],createTables->[CreateTable,Log,Err,Info,Set,Add],SyncTables->[partitionTables,deleteTables,createTables,calculateExpectedTables,SetToCurrentTime,Log,updateTables,Info],Validate->[Duration],bucketRetentionIteration->[Error,Now,DeleteChunksBefore,Log,Add],RegisterFlags->[Int64Var,RegisterFlags,DurationVar,Var,BoolVar],loop->[NewHistogramCollector,Stop,Error,NewTicker,SyncTables,Int63n,Background,Duration,CollectedRequest,Log,After,Done],calculateExpectedTables->[Time,Unix,periodicTables,TimeFromUnix,Now,Sort,After,Add],stopping->[StopAndAwaitTerminated,Background],MarshalYAML->[Duration],updateTables->[Info,DescribeTable,UpdateTable,Log,WithLabelValues,Set,Equals,Debug],starting->[StartAndAwaitRunning,NewTimerService],Strings,NewBasicService,DescribeTable,ListTables,New,NewGaugeVec,Sort,NewGauge,NewHistogramVec,Errorf,MustRegister,Equals]
NewTableManager creates a new TableManager instance; starting starts the TableManager service.
Can we keep the `prometheus.Registerer` at the end always?
@@ -884,7 +884,13 @@ def handle_init( return TransitionResult(iteration.new_state, events) -def handle_block(channelidentifiers_to_channels, state, state_change, block_number): +def handle_block( + mediator_state: MediatorTransferState, + state_change: Block, + channelidentifiers_to_channels: typing.ChannelMap, + pseudo_random_generator: random.Random, + block_number: typing.BlockNumber, +): """ After Raiden learns about a new block this function must be called to handle expiration of the hash time locks. Args:
[events_for_balanceproof->[is_safe_to_wait,get_payer_channel,get_payee_channel],next_transfer_pair->[next_channel_from_routes],state_transition->[handle_refundtransfer,handle_unlock,handle_block,clear_if_finalized,sanity_check,handle_secretreveal,handle_init],handle_init->[mediate_transfer],handle_refundtransfer->[handle_refundtransfer,mediate_transfer],handle_unlock->[handle_unlock],events_for_onchain_secretreveal->[is_safe_to_wait,get_payer_channel,events_for_onchain_secretreveal,get_pending_transfer_pairs],handle_block->[set_expired_pairs,events_for_onchain_secretreveal],set_expired_pairs->[get_pending_transfer_pairs],sanity_check->[is_send_transfer_almost_equal],secret_learned->[events_for_revealsecret,events_for_balanceproof,set_secret,set_onchain_secret,set_payee_state_and_check_reveal_order,events_for_unlock_if_closed],handle_secretreveal->[secret_learned],mediate_transfer->[filter_used_routes,next_transfer_pair,events_for_refund_transfer,get_payer_channel],events_for_unlock_if_closed->[get_payer_channel,get_pending_transfer_pairs],events_for_refund_transfer->[is_channel_usable],next_channel_from_routes->[is_channel_usable]]
Handles a single block.
please, add a predicate function for this in the channel module
@@ -44,7 +44,7 @@ class ProfileIndex end def totp_partial - if current_user.totp_enabled? + if decorated_user.totp_enabled? 'profile/disable_totp' else 'profile/enable_totp'
[ProfileIndex->[manage_personal_key_partial->[present?],password_reset_partial->[present?],header_personalization->[email,present?,first_name],personal_key_partial->[present?],totp_partial->[totp_enabled?],pending_profile_partial->[present?],pii_partial->[present?],attr_reader]]
Returns the name of the TOTP partial to render, depending on whether TOTP is enabled for the user.
this change won't work unless delegation is in play.
@@ -226,12 +226,7 @@ func getUDS(cmd *cobra.Command, uri *url.URL, iden string) (string, error) { if v, found := os.LookupEnv("PODMAN_BINARY"); found { podman = v } - run := podman + " info --format=json" - out, err := ExecRemoteCommand(dial, run) - if err != nil { - return "", err - } - infoJSON, err := json.Marshal(out) + infoJSON, err := ExecRemoteCommand(dial, podman+" info --format=json") if err != nil { return "", err }
[StringVar,ExactArgs,Port,Warnf,JoinHostPort,Close,Hostname,LookupEnv,LookupId,Username,Is,PublicKey,ReadCustomConfig,ParseDuration,Mode,Stat,Dial,UserPassword,Marshal,New,InsecureIgnoreHostKey,Errorf,NewClient,PublicKeysCallback,Type,Debugf,Match,Signers,Wrapf,Current,RegisterFlagCompletionFunc,User,PasswordCallback,ReadPassword,NewSession,Password,Write,Flag,Changed,Sprintf,Unmarshal,FingerprintSHA256,IntVarP,String,Parse,IsLevelEnabled,BoolVarP,Flags]
getUDS returns the UDS socket path for the given host. ValidateAndConfigure validates the identity key and returns a new ssh.Config object.
Question: Why do we run a plain command over ssh instead of using the bindings?
@@ -12,11 +12,11 @@ import ( ) var ( - numberRunsQueued = promauto.NewCounter(prometheus.CounterOpts{ + promNumberRunsQueued = promauto.NewCounter(prometheus.CounterOpts{ Name: "run_queue_runs_queued", Help: "The total number of runs that have been queued", }) - numberRunQueueWorkers = promauto.NewGauge(prometheus.GaugeOpts{ + promNumberRunQueueWorkers = promauto.NewGauge(prometheus.GaugeOpts{ Name: "run_queue_queue_size", Help: "The size of the run queue", })
[Run->[Done,Unlock,Execute,Errorw,Sprint,String,Lock,Inc,Set,Add],Stop->[Wait,Lock,Unlock],WorkerCount->[RUnlock,RLock],NewGauge,NewCounter]
Run processes queued runs and updates the worker metrics; Stop locks the queue and waits for in-flight workers to finish.
Should this be `NumberRunQueueWorkersStarted`, since it is only ever incremented?
@@ -51,6 +51,7 @@ async def maybe_extract(extractable: MaybeExtractable) -> ExtractedDigest: argv=("/bin/bash", "-c", f"{extraction_cmd_str}"), input_digest=digest, description=f"Extract {snapshot.files[0]}", + level=LogLevel.DEBUG, env={"PATH": "/usr/bin:/bin:/usr/local/bin"}, output_directories=(output_dir,), )
[maybe_extract->[ExtractedDigest,get_extraction_cmd]]
If the digest contains a single archive file, extract it; otherwise return the input digest unchanged.
Extracting an archive doesn't seem substantial enough to persist as a log to a user, imo. It will still show up in the dynamic UI.
@@ -275,7 +275,8 @@ def _ask_user_to_confirm_new_names(config, new_domains, certname, old_domains): new_domains, old_domains)) obj = zope.component.getUtility(interfaces.IDisplay) - if not obj.yesno(msg, "Update cert", "Cancel", default=True): + if not obj.yesno(msg, "Update cert", "Cancel", + default=True, force_interactive=True): raise errors.ConfigurationError("Specified mismatched cert name and domains.") def _find_domains_or_certname(config, installer):
[revoke->[revoke,_determine_account],setup_logging->[setup_log_file_handler,_cli_log_handler],install->[_init_le_client,_find_domains_or_certname],certificates->[certificates],run->[_init_le_client,_suggest_donation_if_appropriate,_auth_from_available,_find_domains_or_certname],main->[set_displayer,make_or_verify_needed_dirs,setup_logging,register],rollback->[rollback],_find_lineage_for_domains->[_handle_identical_cert_request,_handle_subset_cert_request],_csr_obtain_cert->[_report_new_cert],_find_lineage_for_domains_and_certname->[_handle_identical_cert_request,_find_lineage_for_domains],_init_le_client->[_determine_account],make_or_verify_needed_dirs->[make_or_verify_core_dir],obtain_cert->[_suggest_donation_if_appropriate,_auth_from_available,_find_domains_or_certname,_report_successful_dry_run,_csr_obtain_cert,_init_le_client],register->[_determine_account],main]
Ask user to confirm new names.
This is landing in 0.10.0; I'm not sure whether it should have `force_interactive=True`...
@@ -810,10 +810,6 @@ func (c *ClusterUpConfig) PostClusterStartupMutations(out io.Writer) error { return err } - err = c.OpenShiftHelper().SetupPersistentStorage(restConfig, c.HostPersistentVolumesDir) - if err != nil { - return err - } return nil }
[determineAdditionalIPs->[OpenShiftHelper],determineIP->[OpenShiftHelper,DockerHelper],CreateProject->[CreateProject],Login->[Login],ClusterAdminKubeConfigBytes->[GetKubeAPIServerConfigDir]]
PostClusterStartupMutations is called after the cluster is started.
You need to add it to the known and default enabled lists, right?
@@ -28,6 +28,10 @@ module FileStore not_implemented end + def upload_path + "uploads/#{RailsMultisite::ConnectionManagement.current_db}" + end + def has_been_uploaded?(url) not_implemented end
[BaseStore->[internal?->[external?],download->[download],get_from_cache->[get_cache_path_for],get_path_for->[get_depth_for],get_path_for_upload->[get_path_for],cache_file->[get_cache_path_for],get_path_for_optimized_image->[get_path_for]]]
Remove a file from the server.
I've carefully refactored `upload_path` into `base_store.rb` so that we can use it on both `s3_store.rb` and `local_store.rb`. I had to add a couple of backslashes in `local_store.rb` for this to work. No side effects for this, according to the specs
@@ -205,7 +205,7 @@ public class RunManager { "0", // number of initial workers String.valueOf(maximumStartupConcurrency), ResourceUtil.getResourcesStringFromMap(rayConfig.resources), - String.join(",", rayConfig.rayletConfigParameters), // The internal config list. + String.join("," , rayConfig.rayletConfigParameters), // The internal config list. buildPythonWorkerCommand(), // python worker command buildWorkerCommandRaylet() // java worker command );
[RunManager->[startObjectStore->[startProcess],startRayProcesses->[createTempDirs,cleanup],startRaylet->[startProcess],buildWorkerCommandRaylet->[concatPath],startRedisInstance->[startProcess]]]
Start a raylet with the specified parameters.
Remove the space after `","`?
@@ -1596,9 +1596,10 @@ class TestParforsVectorizer(TestPrangeBase): # packed dtype=double add and sqrt instructions. def will_vectorize(A): n = len(A) + acc = 0 for i in range(n): - A[i] += np.sqrt(i) - return A + acc += np.sqrt(i) + return acc arg = np.zeros(10)
[TestPrange->[test_prange_raises_invalid_step_size->[prange_tester],test_parfor_alias2->[prange_tester],test_prange15->[prange_tester],test_prange01->[prange_tester],test_prange07->[prange_tester],test_check_alias_analysis->[_get_gufunc_ir,generate_prange_func,compile_parallel_fastmath,prange_tester],test_prange04->[prange_tester],test_prange06->[prange_tester],test_prange02->[prange_tester],test_prange10->[prange_tester],test_prange20->[prange_tester],test_prange13->[prange_tester],test_prange17->[prange_tester],test_prange09->[prange_tester],test_prange22->[prange_tester],test_kde_example->[prange_tester],test_prange19->[prange_tester],test_prange11->[prange_tester],test_parfor_alias1->[prange_tester],test_prange_fastmath_check_works->[_get_gufunc_ir,generate_prange_func,compile_parallel_fastmath,prange_tester],test_parfor_alias3->[prange_tester],test_prange12->[prange_tester],test_prange18->[prange_tester],test_prange03->[prange_tester],test_prange24->[prange_tester],test_prange25->[prange_tester],test_prange21->[prange_tester],test_prange14->[prange_tester],test_prange23->[prange_tester],test_prange05->[prange_tester],test_prange08->[prange_tester],test_prange16->[prange_tester],test_prange08_1->[prange_tester],test_check_error_model->[compile_parallel,test_impl,generate_prange_func,compile_parallel_fastmath]],TestParfors->[test_np_func_direct_import->[check],test_simple14->[check],test_random_parfor->[countParfors],test_simple11->[check],test_cfg->[check],test_simple08->[check],test_mvdot->[check],test_kmeans->[check],test_simple19->[check],test_arange->[check],test_simple16->[check],test_min->[check],test_simple15->[check],test_randoms->[compile_parallel,test_impl,countParfors],test_simple02->[check],test_simple12->[check],test_fuse_argmin->[check,countParfors],test_simple24->[check],test_simple10->[check],test_arraymap->[check],test_np_random_func_direct_import->[countParfors],test_dead_randoms->[compile_parallel,test_impl,countParfors],test_simple09->[check]
,test_max->[check],test_simple21->[check],test_pi->[check,countParfors],check->[compile_all,check_parfors_vs_others],test_simple22->[check],test_simple03->[check],test_simple20->[check],test_reduce->[check,countParfors],test_simple01->[check],test_var->[check,countParfors],__init__->[__init__],test_2d_parfor->[check,countParfors],test_0d_broadcast->[check,countParfors],test_simple13->[check],test_simple18->[check],test_linspace->[check],test_mean->[check,countParfors],test_unsupported_combination_raises->[ddot],test_simple07->[check],test_test2->[countParfors],test_size_assertion->[check],test_argmax->[check],test_argmin->[check],test_simple04->[check],test_test1->[countParfors],test_simple17->[check],test_simple23->[check],test_std->[check,countParfors]],TestParforsVectorizer->[test_signed_vs_unsigned_vec_asm->[get_gufunc_asm,strip_instrs],get_gufunc_asm->[compile_parallel,_get_gufunc_asm,generate_prange_func,compile_parallel_fastmath],test_vectorizer_fastmath_asm->[get_gufunc_asm],test_unsigned_refusal_to_vectorize->[get_gufunc_asm]],TestPrangeBase->[prange_tester->[compile_parallel,assert_fastmath,compile_parallel_fastmath,generate_prange_func,check_parfors_vs_others,compile_njit],__init__->[__init__]],TestParforsSlice->[test_parfor_slice4->[check],test_parfor_slice7->[check],test_parfor_slice16->[check,countParfors],test_parfor_slice8->[check],test_parfor_slice14->[check],test_parfor_slice13->[check],test_parfor_slice9->[check],test_parfor_slice5->[check],test_parfor_slice3->[check],test_parfor_slice6->[check],test_parfor_slice2->[check],test_parfor_slice17->[check],test_parfor_slice11->[check],check->[compile_all,check_parfors_vs_others],test_parfor_slice1->[check],test_parfor_slice15->[check],test_parfor_slice12->[check],test_parfor_slice10->[check]],TestParforsBase->[_get_gufunc_modules->[_filter_mod],compile_parallel->[_compile_this],assert_fastmath->[_get_fast_instructions,_get_gufunc_ir,compile_parallel_fastmath,_assert_fast],compile_parallel_fastmath->[_c
ompile_this],_get_gufunc_asm->[_get_gufunc_info],_get_gufunc_ir->[_get_gufunc_info],check_parfors_vs_others->[copy_args],compile_all->[compile_parallel,compile_njit],compile_njit->[_compile_this],_get_gufunc_info->[_get_gufunc_modules]],TestParforsBitMask->[test_parfor_bitmask3->[check],test_parfor_bitmask4->[check],test_parfor_bitmask1->[check],test_parfor_bitmask6->[check],check->[compile_all,check_parfors_vs_others],test_parfor_bitmask2->[check],test_parfor_bitmask5->[check]],TestParforsOptions->[check->[compile_all,check_parfors_vs_others],test_parfor_options->[check,countParfors]]]
Tests if the vectorizer of the machine is suitable for use with fastmath.
Is this change needed because the new LLVM6 will always vectorize the code, and, reduction-loop will only be vectorized if fastmath is ON?
@@ -26,7 +26,10 @@ class Legion(CMakePackage): git = "https://github.com/StanfordLegion/legion.git" version('develop', branch='master') - version('ctrl-rep', commit='177584e77036c9913d8a62e33b55fa784748759c') + version('ctrl-rep', branch='control_replication') + version('ctrl-rep-2', commit='96682fd8aae071ecd30a3ed5f481a9d84457a4b6') + version('ctrl-rep-1', commit='a03671b21851d5f0d3f63210343cb61a630f4405') + version('ctrl-rep-0', commit='177584e77036c9913d8a62e33b55fa784748759c') version('19.06.0', sha256='31cd97e9264c510ab83b1f9e8e1e6bf72021a0c6ee4a028966fce08736e39fbf') version('19.04.0', sha256='279bbc8dcdab4c75be570318989a9fc9821178143e9db9c3f62e58bf9070b5ac') version('18.12.0', sha256='71f2c409722975c0ad92f2caffcc9eaa9260f7035e2b55b731d819eb6a94016c')
[Legion->[cmake_args->[append,join],variant,depends_on,version]]
Defines the Legion Spack package: the available versions (branches, commits, and release tarballs), variants, and dependencies.
You can rename this to master now that #1983 has been merged.
@@ -102,7 +102,15 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig) (*NodeConfig, error server := kapp.NewKubeletServer() server.Config = path server.RootDirectory = options.VolumeDirectory - server.HostnameOverride = options.NodeName + + // kubelet finds the node IP address by doing net.ParseIP(hostname) and if that fails, + // it does net.LookupIP(NodeName) and picks the first non-loopback address. + // Pass node IP as hostname to make kubelet use the desired IP address. + if len(options.NodeIP) > 0 { + server.HostnameOverride = options.NodeIP + } else { + server.HostnameOverride = options.NodeName + } server.AllowPrivileged = true server.RegisterNode = true server.Address = kubeAddress
[ExpandOrDie,Join,ParseIP,Resolve,NewKubeletServer,SplitHostPort,NewAggregate,Warningf,Duration,Env,CertPoolFromFile,Errorf,KubeletConfig,UseTLS,GetKubeClient,Atoi,NewDefaultImageTemplate]
BuildKubernetesNodeConfig builds the Kubernetes node (kubelet) configuration from the given node options, including the hostname override, TLS settings, and the containerized flag if set.
Should we also check for loopback here? If we do not then cli may end up showing loopback for hostnames (which may clash) because status update will pick the hostnameoverride anyway whether the IPaddress is properly calculated or not. Not critical.
@@ -34,12 +34,12 @@ export default class ScatterchartVisualization extends Nvd3ChartVisualization { }, { name: 'size', - tooltip: `<li>Size option is valid only when you drop numeric field here.</li> - <li>When data in each axis are discrete, - 'number of values in corresponding coordinate' will be used as size.</li> - <li>Zeppelin consider values as discrete when the values contain string value - or the number of distinct values are bigger than 5% of total number of values.</li> - <li>Size field button turns to grey when the option you chose is not valid.</li>` + tooltip: `This option is only valid for numeric fields. + When data in each axis is discrete, + 'number of values in corresponding coordinate' will be used. + Zeppelin considers values as discrete when input values contain a string + or the number of distinct values is greater than 5% of the total number of values. + This field turns grey when the selected option is invalid.` } ] this.columnselector = new ColumnselectorTransformation(config, this.columnselectorProps)
[No CFG could be retrieved]
Renders a scatter chart visualization and configures the column selectors (including the size option) for the chart's axes.
To me, it looked this message could be improved. Any suggestions, please let me know. I'm not sure my new sentence is clear as well :)
@@ -813,8 +813,12 @@ export class AmpList extends AMP.BaseElement { this.unlistenLoadMore_ = listen( loadMoreButtonClickable, 'click', () => this.loadMoreCallback_()); + }).then(() => { // Guarantees that the height accounts for the newly visible button - this.attemptToFit_(dev().assertElement(this.container_)); + if (!this.loadMoreShown_) { + this.attemptToFit_(dev().assertElement(this.container_)); + this.loadMoreShown_ = true; + } }); }
[AmpList->[ssrTemplate_->[setupJsonFetchInit,fetchOpt,dict,user,requestForBatchFetch,setupAMPCors,xhrUrl,setupInput],getPolicy_->[getSourceOrigin,ALL,OPT_IN],getLoadMoreButtonClickable_->[childElementByAttr],constructor->[templatesFor,HIGH,hasAttribute,isExperimentOn],changeToLayoutContainer_->[resolve,userAssert,CONTAINER,toggle,isExperimentOn],getLoadMoreFailedElement_->[htmlFor,childElementByAttr],updateBindings_->[scanAndApply,resolve,updateWith,bindForDocOrNull],doRenderPass_->[resolver,dev,append,data,rejecter,devAssert,scheduleNextPass,payload],addElementsToContainer_->[forEach,hasAttribute,setAttribute,appendChild],mutatedAttributesCallback->[dev,user,isArray,getMode],attemptToFit_->[CONTAINER,/*OK*/],buildCallback->[viewerForDoc,user,toggle,bindForDocOrNull],moveButtonsToBottom_->[appendChild],getLoadMoreButton_->[htmlFor,childElementByAttr],truncateToMaxLen_->[slice,length,parseInt],layoutCallback->[dev,isExperimentOn],render_->[removeChildren,dev,hasChildNodes,default,resetPendingChangeSize,DOM_UPDATE,KEY,diff,createCustomEvent,isExperimentOn],undoPreviousLayout_->[FLEX_ITEM,RESPONSIVE,FIXED_HEIGHT,FIXED,setStyles,INTRINSIC],fetchList_->[resolve,getValueForExpr,isArray,userAssert,user,catch],maybeRenderLoadMoreTemplates_->[all,resolve,push],scheduleRender_->[dev],isLayoutSupported->[isLayoutSizeDefined],maybeRenderLoadMoreElement_->[dev,removeChildren,resolve,appendChild],resetIfNecessary_->[dev,removeChildren],setupLoadMoreAuto_->[dev,height,bottom],getLoadMoreFailedClickable_->[childElementByAttr],setLoadMoreEnded_->[classList],getLoadMoreLoadingElement_->[htmlFor,childElementByAttr],setLoadMoreFailed_->[listen,classList],fetch_->[batchFetchJsonFor],toggleLoadMoreLoading_->[classList],createContainer_->[setAttribute],updateLoadMoreSrc_->[getValueForExpr],getLoadMoreEndElement_->[childElementByAttr],maybeSetLoadMore_->[dev,resolve,listen,classList],BaseElement],registerElement,extension]
Determines whether the load-more button should be shown and, if so, attempts to fit the container to account for it.
We should only ever have this extra `attemptToFit` call once.
@@ -219,6 +219,18 @@ namespace Microsoft.Xna.Framework.Input return index; } + private static int AddKeysToArrayConstrained(uint keys, int offset, Keys[] pressedKeys, int index) + { + for (int i = 0; i < 32; i++) + { + if (index >= pressedKeys.Length) break; + + if ((keys & (1 << i)) != 0) + pressedKeys[index++] = (Keys)(offset + i); + } + return index; + } + /// <summary> /// Returns an array of values holding keys that are currently being pressed. /// </summary>
[KeyboardState->[IsKeyUp->[InternalGetKey],GetPressedKeys->[CountBits,AddKeysToArray],IsKeyDown->[InternalGetKey],InternalGetKey,InternalSetKey]]
Adds pressed keys to the array.
Perhaps we should refactor the existing GetPressedKeys instead of having two different methods. the existing `Keys[] GetPressedKeys()` also needs a Constrain, because the count value is not always valid on platforms that run input on a 2nd thread (WinStoreApps, Android with runOnUIThread=false, WinForms resize fix/PR).
@@ -497,14 +497,15 @@ public class DeltaSync implements Serializable { * @param schemaProvider Schema Provider */ private HoodieWriteConfig getHoodieClientConfig(SchemaProvider schemaProvider) { + final boolean combineBeforeUpsert = true; + final boolean autoCommit = false; HoodieWriteConfig.Builder builder = - HoodieWriteConfig.newBuilder().withPath(cfg.targetBasePath).combineInput(cfg.filterDupes, true) + HoodieWriteConfig.newBuilder().withPath(cfg.targetBasePath).combineInput(cfg.filterDupes, combineBeforeUpsert) .withCompactionConfig(HoodieCompactionConfig.newBuilder().withPayloadClass(cfg.payloadClassName) // Inline compaction is disabled for continuous mode. otherwise enabled for MOR .withInlineCompaction(cfg.isInlineCompactionEnabled()).build()) .forTable(cfg.targetTableName) - .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.BLOOM).build()) - .withAutoCommit(false).withProps(props); + .withAutoCommit(autoCommit).withProps(props); if (null != schemaProvider && null != schemaProvider.getTargetSchema()) { builder = builder.withSchema(schemaProvider.getTargetSchema().toString());
[DeltaSync->[close->[close],registerAvroSchemas->[registerAvroSchemas],syncOnce->[refreshTimeline],startCommit->[startCommit]]]
Get the Hoodie configuration for the given schema.
@vinothchandar I think this fixed index type to BLOOM. Not sure any historical reason behind it.
@@ -327,8 +327,7 @@ public abstract class Cause { @Exported(visibility=3) public String getUserName() { - User u = User.get(authenticationName, false); - return u != null ? u.getDisplayName() : authenticationName; + return User.get(authenticationName, true).getDisplayName(); } @Override
[Cause->[print->[getShortDescription],UserCause->[hashCode->[hashCode],equals->[equals]],UserIdCause->[print->[getUserId,getUserName],getShortDescription->[getUserName],hashCode->[hashCode],equals->[equals]],UpstreamCause->[trim->[UpstreamCause,trim],equals->[equals],print->[print,indent],pointsTo->[pointsTo,equals],indent->[print],hashCode->[hashCode]],RemoteCause->[hashCode->[hashCode],equals->[equals]]]]
Gets the display name of the user that caused this cause.
Note that passing `, true` is unnecessary since that is the default overload.
@@ -158,10 +158,10 @@ if ( ! class_exists( 'WPSEO_Admin_Pages' ) ) { /** * Generates the header for admin pages * - * @param bool $form Whether or not the form start tag should be included. - * @param string $option The long name of the option to use for the current page. - * @param string $optionshort The short name of the option to use for the current page. - * @param bool $contains_files Whether the form should allow for file uploads. + * @param bool $form Whether or not the form start tag should be included. + * @param string $option The long name of the option to use for the current page. + * @param string $optionshort The short name of the option to use for the current page. + * @param bool $contains_files Whether the form should allow for file uploads. */ function admin_header( $form = true, $option = 'yoast_wpseo_options', $optionshort = 'wpseo', $contains_files = false ) { ?>
[WPSEO_Admin_Pages->[checkbox->[get_option],textinput->[get_option],admin_footer->[admin_sidebar],textarea->[get_option],radio->[get_option],media_input->[get_option],hidden->[get_option],file_upload->[get_option],select->[get_option]]]
Renders the admin header of the Yoast WordPress SEO page.
What's up with the spacing ? Doc alignment check missing ?
@@ -336,6 +336,9 @@ class Quant2Int8MkldnnPass(object): self._dequantize_op_weights(graph, op, "Filter", "Output") elif op.name() in self._mul_ops and _is_int8_weights(op, "Y"): self._dequantize_op_weights(graph, op, "Y", "Out") + elif op.name() in self._matmul_ops and _is_int8_weights(op, "Y"): + self._dequantize_op_weights(graph, op, "Y", "Out") + return graph def _dequantize_op_weights(self, graph, op_node, weight_name, output_name):
[Quant2Int8MkldnnPass->[_dequantize_weights->[_is_int8_weights->[_load_param],_is_int8_weights],_apply_pass->[apply],_quantize_fp32_graph->[_apply_pass,_find_avg_pooling_ids,_get_data_layout],_is_conv_quantized->[_is_any_of_op_types_quantized],_gather_output_scales_from_attr->[_convert_scale2tensor,_add_scale_for_vars],_optimize_fp32_graph->[_update_activations,_is_fc_quantized,_remove_ctrl_vars],_update_relu_output_scales->[_set_unsigned_scale->[_convert_scale2tensor],_set_unsigned_scale],_dequantize_op_weights->[_load_param],_get_data_layout->[_is_conv_quantized],_is_any_of_op_types_quantized->[_is_quantizing_all_ops,_is_any_of_op_types_in_graph],_is_fc_quantized->[_is_any_of_op_types_quantized],_gather_input_scales_from_fake->[_convert_scale2tensor,_add_scale_for_vars],_final_optimizations->[_apply_pass],_propagate_scales->[_update_scales->[_update_scale_op_in_scale],_update_scale_op_in_scale->[_convert_scale2tensor],_update_scales],_compute_weight_scales->[_compute_single_gru_weight_scales->[_load_param,_convert_scale2tensor],_compute_lstm_weight_scales->[_compute_single_lstm_weight_scales],_compute_gru_weight_scales->[_compute_single_gru_weight_scales],_compute_var_scales->[_load_param,_convert_scale2tensor],_compute_single_lstm_weight_scales->[_load_param,_convert_scale2tensor],_compute_gru_weight_scales,_compute_lstm_weight_scales,_compute_var_scales]]]
Dequantizes the weights of the network.
It looks like you can combine this part into one and just check `op.name() in [self._mul_ops, self._matmul_ops]`. What do you think?
@@ -182,6 +182,7 @@ class ExternalGitRepo: @contextmanager def open_by_relpath(self, path, mode="r", encoding=None, **kwargs): + """Opens a specified resource as a file descriptor""" try: abs_path = os.path.join(self.root_dir, path) with open(abs_path, mode, encoding=encoding) as fd:
[_clone_default_branch->[close],_git_checkout->[close],ExternalGitRepo->[close->[close]]]
Open file by relative path.
Note: Added this docstring since external_repo seems like an important module. Kind of like the Repo class. They're very similar from what I can see.
@@ -65,6 +65,18 @@ func ctimeIsStale(ctime keybase1.Time, currentMerkleRoot libkb.MerkleRoot) bool return currentMerkleRoot.Ctime()-ctime.UnixSeconds() >= KeyLifetimeSecs } +// If a teamEK is almost expired we allow it to be created in the background so +// content generation is not blocked by key generation. We *cannot* create a +// teamEK in the background if the key is expired however since the current +// teamEK's lifetime (and supporting device/user EKs) is less than the maximum +// lifetime of ephemeral content. This can result in content loss once the keys +// are deleted. +func backgroundKeygenPossible(ctime time.Time, currentMerkleRoot libkb.MerkleRoot) bool { + diff := keybase1.TimeFromSeconds(currentMerkleRoot.Ctime()).Time().Sub(ctime) + keygenInterval := time.Second * time.Duration(KeyGenLifetimeSecs) + return diff >= (keygenInterval-time.Hour) && diff < keygenInterval +} + func keygenNeeded(ctime keybase1.Time, currentMerkleRoot libkb.MerkleRoot) bool { return currentMerkleRoot.Ctime()-ctime.UnixSeconds() >= KeyGenLifetimeSecs }
[Error->[Sprintf],Time,GetFullSelfer,MakeByte32,Error,CDebugf,UnixSeconds,DeriveFromSecret,Duration,RandBytes,WithSelf,Errorf,Slice,MakeNaclDHKeyPairFromSecret,TimeFromSeconds,ToUserVersion,Ctime]
Error returns an error message describing the last unboxed or missing key. getCurrentUV returns a new udev - based key pair from the derived secret.
This would be easier for me to follow with some more concrete variable names. Maybe `keyAge` or something instead of `diff`? And maybe give the return value an intermediate name like `isOneHourFromExpiration`?
@@ -101,8 +101,12 @@ namespace Dynamo.Search entryDictionary[value] = keys; OnEntryAdded(value); } - foreach (var tag in tags.Select(x => x.ToLower())) - keys[tag] = weight; + + for (int i = 0; i < tags.Count(); i++) + { + var tag = tags.ElementAt(i).ToLower(); + keys[tag] = weights.ElementAt(i); + } } /// <summary>
[SearchDictionary->[Add->[Add,OnEntryAdded],Search->[MatchWithQueryString,SplitOnWhiteSpace],ContainsSpecialCharacters->[Contains],Remove->[Remove,OnEntryRemoved],ComputeWeightAndAddToDictionary->[Add]]]
Add a value to the cache.
`tags.Count()` will be called multiple times, which forces multiple enumeration on `tags` -- we can either cache the count value, or use a `foreach` (with help of another `int counter = 0` for indexing). Also, this assumes that both `tags` and `weights` are of the same length, we need to handle unequal sized list cases.
@@ -1,6 +1,7 @@ from django import forms from django.contrib import admin from django.contrib.admin.widgets import ForeignKeyRawIdWidget +from django.db.models import Q from django.utils import translation from django.utils.html import format_html from django.utils.safestring import mark_safe
[DiscoveryItemAdmin->[formfield_for_foreignkey->[SlugOrPkChoiceField],previews->[build_preview]]]
A field that shows a single object in the administration panel. This method returns a queryset of the items with the value of the n - th item in.
Is this unused?
@@ -325,6 +325,10 @@ public class UserVmJoinDaoImpl extends GenericDaoBaseWithTagInformation<UserVmJo if (vmDetails != null) { Map<String, String> resourceDetails = new HashMap<String, String>(); for (UserVmDetailVO userVmDetailVO : vmDetails) { + if (userVmDetailVO.getName().startsWith(VmDetailConstants.KEY_PAIR_NAMES)) { + s_logger.info(userVmDetailVO.getValue()); + userVmResponse.setKeyPairNames(userVmDetailVO.getValue()); + } if (!userVmDetailVO.getName().startsWith(ApiConstants.PROPERTIES) || (UserVmManager.DisplayVMOVFProperties.value() && userVmDetailVO.getName().startsWith(ApiConstants.PROPERTIES))) { resourceDetails.put(userVmDetailVO.getName(), userVmDetailVO.getValue());
[UserVmJoinDaoImpl->[newUserVmView->[searchByIds]]]
This method is called to populate the response object with the specified parameters. This method is called to populate the response object with details of the user VM. This method is used to populate the response object with details of the VMs.
We could improve this log with some context.
@@ -172,6 +172,11 @@ public class GlobalToolbar extends Toolbar addLeftWidget(deployButton); } + // go to project dir button + if (!StringUtil.isNullOrEmpty( + session_.getSessionInfo().getActiveProjectFile())) + addRightWidget(commands_.setWorkingDirAsProjectDir().createToolbarButton()); + // project popup menu ProjectPopupMenu projectMenu = new ProjectPopupMenu(sessionInfo, commands_);
[GlobalToolbar->[completeInitialization->[createMenuItem,setText,setTitle,ProjectPopupMenu,equals,addSeparator,getToolbarButton,ToolbarPopupMenu,addLeftSeparator,createToolbarButton,git,isVcsEnabled,svn,addLeftWidget,addRightWidget,getShinyappsAvailable,ToolbarButton,addItem],onCancel->[restore],focusGoToFunction->[setFocusDeferred,record],onCompleted->[clear],stock_new,createMenuItem,setTitle,globalToolbar,addSeparator,ToolbarPopupMenu,addLeftSeparator,get,FocusContext,addStyleName,createToolbarButton,getSearchWidget,Observer,addLeftWidget,setObserver,ToolbarButton,addItem]]
Complete the initialization of the UI.
This function is passed a SessionInfo object so you don't need to inject/retain the Session object.
@@ -29,7 +29,9 @@ func (r *JSON) decodeJSON(text []byte) ([]byte, common.MapStr) { err := unmarshal(text, &jsonFields) if err != nil || jsonFields == nil { - logp.Err("Error decoding JSON: %v", err) + if r.cfg.LogParseErrors { + logp.Warn("Error decoding JSON: %v", err) + } if r.cfg.AddErrorKey { jsonFields = common.MapStr{"error": createJSONError(fmt.Sprintf("Error decoding JSON: %v", err))} }
[Next->[AddFields,Next,decodeJSON],decodeJSON->[Sprintf,Err],Time,NewReader,Decode,TransformNumbers,WriteJSONKeys,NewDecoder,UseNumber]
decodeJSON decodes the given text into a slice of bytes and a common. MapStr.
This should still be an `Err` log message.
@@ -196,6 +196,13 @@ bool EncapsulationHeader::to_encoding( return true; } +static const int FOUR_BYTE_ALIGNMENT = 4; + +void EncapsulationHeader::set_padding_marker(char& options, size_t size) +{ + options |= ((FOUR_BYTE_ALIGNMENT - size % FOUR_BYTE_ALIGNMENT) & 0x03); +} + OPENDDS_STRING EncapsulationHeader::to_string() const { switch (kind_) {
[No CFG could be retrieved]
Get the encoding of a header. - - - - - - - - - - - - - - - - - -.
Could this be one of the existing constants in Serializer.h? Maybe `ALIGN_XCDR2`?
@@ -95,6 +95,14 @@ if (! @is_readable(AUTOLOAD_FILE)) { } require_once AUTOLOAD_FILE; +$route = Routing::getCurrentRoute(); + +if ($route === '/import-status') { + // phpcs:disable PSR1.Files.SideEffects + define('PMA_MINIMUM_COMMON', true); + // phpcs:enable +} + $containerBuilder = new ContainerBuilder(); $loader = new PhpFileLoader($containerBuilder, new FileLocator(__DIR__)); $loader->load('services_loader.php');
[setRequestStatus,enableBc,authenticate,setMinimal,selectServer,getVersion,isAjax,loadUserPreferences,checkServers,set,checkErrors,load,setThemeCookie,disableMenuAndConsole,activate,getScripts,checkTwoFactor,rememberCredentials,setGlobal,removeCookie,selectLanguage,showFailure,checkPermissions,setCookie,connect,get,addFile,setAlias,getParameter,addJSON,postConnectControl]
Creates a container for a given version of a PHP script. Configures the label with the given name.
@mauriciofauth the alternative was fd912545cc27e23d97c156316cf4cd0606046c72 Still okay ?
@@ -952,6 +952,16 @@ public class DefaultHeaders<K, V, T extends Headers<K, V, T>> implements Headers return (T) this; } + /** + * Returns a deep copy of this instance. + */ + public DefaultHeaders<K, V, T> copy() { + DefaultHeaders<K, V, T> copy = new DefaultHeaders<K, V, T>( + hashingStrategy, valueConverter, nameValidator, entries.length); + copy.set(this); + return copy; + } + private final class HeaderIterator implements Iterator<Map.Entry<K, V>> { private HeaderEntry<K, V> current = head;
[DefaultHeaders->[containsShort->[contains],contains->[contains,get],getLong->[getLong,get],getShort->[get,getShort],setInt->[set],equals->[size,equals,names,get,getAll],getShortAndRemove->[getShortAndRemove,getAndRemove],getTimeMillisAndRemove->[getTimeMillisAndRemove,getAndRemove],HeaderEntry->[toString->[toString]],getFloatAndRemove->[getFloatAndRemove,getAndRemove],hashCode->[size,names,hashCode,get,getAll],addObject->[addObject,add],get->[get],addFloat->[add],remove->[getAndRemove],addTimeMillis->[add],getInt->[get,getInt],getDouble->[get,getDouble],containsByte->[contains],addImpl->[add],getByte->[getByte,get],getBooleanAndRemove->[getBooleanAndRemove,getAndRemove],getChar->[get,getChar],getIntAndRemove->[getAndRemove,getIntAndRemove],containsTimeMillis->[contains],addShort->[add],getLongAndRemove->[getLongAndRemove,getAndRemove],toString->[size,toString,iterator],addChar->[add],names->[size,isEmpty],addByte->[add],setObject->[set,validateName],getByteAndRemove->[getByteAndRemove,getAndRemove],remove0->[remove,equals],setShort->[set],setAll->[names,addImpl],add0->[newHeaderEntry],addBoolean->[add],setBoolean->[set],addLong->[add],getDoubleAndRemove->[getDoubleAndRemove,getAndRemove],setTimeMillis->[set],containsObject->[contains],setFloat->[set],containsFloat->[contains],setByte->[set],containsDouble->[contains],addDouble->[add],setDouble->[set],getCharAndRemove->[getAndRemove,getCharAndRemove],containsBoolean->[contains],ValueIterator->[calculateNext->[equals],next->[hasNext],hashCode,index],containsChar->[contains],getFloat->[getFloat,get],containsInt->[contains],setChar->[set],set->[addImpl,validateName],getTimeMillis->[getTimeMillis,get],getBoolean->[get,getBoolean],getAndRemove->[getAndRemove],add->[validateName],addInt->[add],getAllAndRemove->[getAll],setLong->[set],containsLong->[contains]]]
Returns true if the current node is not the last node in the chain.
`set` will also do `clear` which will do some unnecessary work (array filling, pointer updating, assignments). We should be able to just use `addImpl` instead of `set`.
@@ -226,6 +226,8 @@ func (mod *modContext) typeName(t *schema.ObjectType, state, input, args bool) s } switch { + case input && args && mod.details(t).usedInFunctionOutputVersionInputs: + return name + "InputArgs" case input: return name + "Args" case mod.details(t).plainType:
[genResource->[propertyName,genInputType,isK8sCompatMode,genOutputType,tokenToNamespace,typeString],gen->[genResource,genType,genFunction,getImportsForResource,isTFCompatMode,isK8sCompatMode,genEnums,getImports,genHeader,details,tokenToNamespace,add,genConfig,genUtilities],genType->[genInputType,details,typeName,genOutputType],getTypeImportsForResource->[getTypeImports],typeName->[details,isTFCompatMode,isK8sCompatMode],genFunction->[genInputType,tokenToNamespace,genOutputType],genEnum->[typeString],getImportsForResource->[getTypeImports,getTypeImportsForResource],genInputType->[genInputProperty,isK8sCompatMode,propertyName],genOutputType->[isK8sCompatMode,typeString,propertyName],unionTypeString->[add,has],genInputProperty->[typeString,propertyName],getConfigProperty->[getConfigProperty,typeString],tokenToNamespace->[isK8sCompatMode],genConfig->[propertyName,genHeader,getConfigProperty,getDefaultValue,typeString],typeString->[unionTypeString,typeName,tokenToNamespace,typeString],gen,details,add]
typeName returns the name of the type that should be used for the given object type.
This approach to resolving the conflicting name LGTM.
@@ -1187,8 +1187,9 @@ class Share_Custom extends Sharing_Advanced_Source { } } - if ( isset( $settings['url'] ) ) + if ( isset( $settings['url'] ) ) { $this->url = $settings['url']; + } } public function get_name() {
[Share_Tumblr->[process_request->[get_share_url,get_share_title],get_display->[get_link,get_share_url,get_process_request_url,get_share_title],display_footer->[js_dialog]],Share_Reddit->[process_request->[http,get_share_url,get_share_title],get_display->[get_process_request_url,get_share_title,http,get_link,get_share_url]],Share_Twitter->[process_request->[sharing_twitter_via,get_share_url,get_related_accounts,get_share_title],get_display->[get_process_request_url,get_related_accounts,get_share_title,http,sharing_twitter_via,get_link,get_share_url],display_footer->[js_dialog]],Share_Custom->[process_request->[get_share_url,get_share_title],get_display->[get_link,get_process_request_url],__construct->[get_options],display_preview->[get_name,get_options]],Share_Facebook->[process_request->[http,get_share_url,get_share_title],get_display->[get_link,get_share_url,get_process_request_url],display_footer->[guess_locale_from_lang,js_dialog]],Share_PressThis->[process_request->[get_share_url,get_share_title],get_display->[get_link,get_process_request_url]],Share_Pocket->[process_request->[get_share_url,get_share_title],get_display->[get_link,get_share_url,get_process_request_url],display_footer->[js_dialog]],Share_Email->[get_display->[get_link,get_process_request_url]],Sharing_Source->[get_link->[get_class],display_preview->[get_name,get_class],get_total->[get_id],get_posts_total->[get_id]],Share_Print->[get_display->[get_link,get_process_request_url]],Share_LinkedIn->[process_request->[get_share_url],get_display->[get_link,get_share_url,get_process_request_url],display_footer->[js_dialog]],Share_Pinterest->[get_external_url->[get_image,get_share_url],process_request->[get_external_url],get_display->[get_external_url,get_link,get_widget_type,get_process_request_url],display_footer->[get_widget_type]],Share_GooglePlus1->[process_request->[get_share_url],get_display->[get_link,get_share_url,get_process_request_url],display_footer->[js_dialog],get_total->[get_id]]]
Constructor for the object.
I'd recommend using `home_url()` instead. `site_url()` returns the location of the WordPress files, not necessarily the site's home page.
@@ -343,8 +343,9 @@ def find_paddle_libraries(use_cuda=False): # pythonXX/site-packages/paddle/libs paddle_lib_dirs = [get_lib()] if use_cuda: - cuda_dirs = find_cuda_includes() - paddle_lib_dirs.extend(cuda_dirs) + cuda_lib_dirs = find_cuda_libraries() + print(cuda_lib_dirs) + paddle_lib_dirs.extend(cuda_lib_dirs) return paddle_lib_dirs
[find_paddle_libraries->[find_cuda_includes],parse_op_info->[instance,load_op_meta_info_and_register_op],_write_setup_file->[is_cuda_file,use_new_custom_op_load_method],_import_module_from_library->[load_op_meta_info_and_register_op],parse_op_name_from->[regex],load_op_meta_info_and_register_op->[load_op_meta_info_and_register_op],_get_api_inputs_str->[parse_op_info],custom_write_stub->[load_op_meta_info_and_register_op]]
Find all paddle libraries.
You can use `log_v` to log necessary log for users.
@@ -76,7 +76,7 @@ tuner_schema_dict = { 'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'), }, Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool), - Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999), + Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'), }, ('Evolution'): { 'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
[setNumberRange->[And],setType->[And],setChoice->[And,str],setPathCheck->[And],Or,int,split,setNumberRange,setType,setChoice,len,Optional,Schema,setPathCheck,And,Regex]
Schema for optional tokens. Returns a random number generator.
It's better not to remove gpuNum in this phase, and provide a warning message to let user know this breaking change.
@@ -741,7 +741,7 @@ func (c *Container) ContainerRestart(name string, seconds *int) error { } operation = func() error { - return c.containerStart(name, nil, false) + return c.containerStart(name, nil, true) } if err := retry.Do(operation, IsConflictError); err != nil { return InternalServerError(fmt.Sprintf("Start failed with: %s", err))
[ContainerExecStart->[Handle,TaskWaitToStart,TaskInspect],TaskWaitToStart->[Handle],findPortBoundNetworkEndpoint->[defaultScope],ContainerExecInspect->[TaskInspect],containerAttach->[Handle],ContainerExecCreate->[Handle,TaskInspect],TaskInspect->[Handle],containerStart->[Handle,cleanupPortBindings]]
ContainerRestart restarts a container.
I'm not sure this is viable - the stop path deliberately doesn't unbind the container so this will attempt to bind again. It may be that we're tolerant to this (we should be as everything should be idempotent, but I'd not assume it).
@@ -288,7 +288,12 @@ class DigSource(HasPrivateTraits): @cached_property def _get__info(self): - if self.file: + if not self.file: + return + elif self.file.endswith('.mff'): + raw = read_raw_egi(self.file) + return raw.info + else: info = None fid, tree, _ = fiff_open(self.file) fid.close()
[DigSource->[_get_rpa->[_cardinal_point],_get_lpa->[_cardinal_point],_get_nasion->[_cardinal_point]],MRISubjectSource->[create_fsaverage->[get_fs_home]],set_fs_home->[get_fs_home],SurfaceSource->[_surf_default->[Surf],read_file->[Surf]],SubjectSelectorPanel->[_create_fsaverage_fired->[create_fsaverage]]]
Get info about the n - cardinal points in the selected FIFF file. Error Reading FIFF File.
I would make the pattern `self.file.endswith('.fif') or self.file.endswith('.fif.gz'):` and in that case use `return read_info(...)`, else use `return read_raw(...).info`. This should make it work with any reader that supports digitization
@@ -2861,7 +2861,7 @@ def _make_flattening_iter_cls(flatiterty, kind): # where the strides are unknown at compile-time. def iternext_specific(self, context, builder, arrty, arr, result): - zero = context.get_constant(types.intp, 0) + _ = context.get_constant(types.intp, 0) ndim = arrty.ndim nitems = arr.nitems
[np_array->[assign_sequence_to_array,compute_sequence_shape,_empty_nd_impl,check_sequence_shape],array_shape->[make_array],get_array_memory_extents->[offset_bounds_from_strides,compute_memory_extents],numpy_empty_nd->[_parse_empty_args,_empty_nd_impl],normalize_index->[make_array,load_item],maybe_copy_source->[src_getitem->[load_item]],np_stack_axis->[_np_stack_common],_atleast_nd_transform->[transform->[expand_dims]],array_record_getattr->[populate_array,make_array],array_view->[_change_dtype,make_array],_do_concatenate->[_empty_nd_impl,load_item,store_item],np_atleast_2d->[_atleast_nd_transform,_atleast_nd],impl_shape_unchecked->[populate_array,make_array],array_sort->[get_sort_func],array_itemset->[make_array,store_item],compute_sequence_shape->[get_first_item->[_get_borrowing_getitem],get_first_item,_get_seq_size],basic_indexing->[fix_integer_index],numpy_arange_3->[arange->[arange]],numpy_empty_like_nd->[_parse_empty_like_args,_empty_nd_impl],iternext_numpy_nditer->[make_array_ndenumerate_cls,make_array,iternext_specific],array_transpose_vararg->[vararg_to_tuple,array_transpose_tuple],make_array_flatiter->[make_array_flat_cls,make_array,init_specific],array_transpose_tuple->[populate_array,make_array],array_ctypes->[make_array],IntegerIndexer->[get_index_bounds->[get_size]],iternext_numpy_ndindex->[make_nditer_cls,make_ndindex_cls,iternext_specific],fancy_getitem_array->[_getitem_array_generic,normalize_index,make_array,fancy_getitem],record_setitem->[impl],numpy_zeros_like_nd->[_parse_empty_like_args,_zero_fill_array,_empty_nd_impl],_getitem_array1d->[load_item],SliceIndexer->[get_shape->[get_size]],array_flags_f_contiguous->[_call_contiguous_check],numpy_linspace_2->[linspace->[linspace]],np_frombuffer->[populate_array,get_itemsize,make_array],check_sequence_shape->[check_seq_size->[check_seq_size,_get_borrowing_getitem,_get_seq_size,_fail],check_seq_size],make_ndindex_cls->[NdIndexIter->[iternext_specific->[mark_positive,_increment_indices]]],BooleanArrayInd
exer->[loop_head->[_getitem_array1d],get_shape->[get_size],get_size->[_getitem_array1d]],np_concatenate->[_np_concatenate],array_copy->[_array_copy],array_ascontiguousarray->[_as_layout_array],_np_concatenate->[_normalize_axis,make_array,_do_concatenate],_parse_empty_args->[_parse_shape],np_cfarray->[populate_array,get_itemsize,make_array],record_getattr->[populate_array,make_array],make_array->[ArrayStruct->[shape->[mark_positive]]],numpy_arange_2->[arange->[arange]],getitem_arraynd_intp->[_getitem_array_generic,make_array],_empty_nd_impl->[populate_array,get_itemsize,make_array],_array_copy->[_empty_nd_impl,make_array],as_strided->[as_strided_impl->[get_strides,reshape_unchecked,get_shape]],getitem_array_tuple->[_getitem_array_generic,normalize_indices,make_array],_make_flattening_iter_cls->[FlatIter->[getitem->[_ptr_for_index,load_item],iternext_specific->[load_item],setitem->[_ptr_for_index,store_item]],CContiguousFlatIter->[getitem->[load_item],iternext_specific->[_increment_indices_array,load_item],setitem->[store_item]]],iternext_array->[_getitem_array1d,make_array],np_concatenate_axis->[_np_concatenate],array_argsort->[get_sort_func],_increment_indices_array->[_increment_indices],_change_dtype->[populate_array,get_itemsize,update_array_info,make_array],array_flags_c_contiguous->[_call_contiguous_check],array_nbytes->[make_array],iternext_numpy_flatiter->[make_array_flat_cls,make_array,iternext_specific],make_nditer_cls->[NdIter->[_arrays_or_scalars->[make_array],_make_view->[populate_array,get_itemsize,compute_pointer,make_array],iternext_specific->[_increment_indices],_loop_continue->[loop_continue],init_specific->[init_specific],_loop_break->[loop_break]],FlatSubIter->[init_specific->[set_member_ptr]]],array_record_getitem->[array_record_getattr],expand_dims->[_insert_axis_in_shape,populate_array,make_array,_insert_axis_in_strides],numpy_eye->[impl->[_eye_none_handler]],_parse_empty_like_args->[make_array],make_array_nditer->[make_nditer_cls,init_specific]
,numpy_zeros_nd->[_parse_empty_args,_zero_fill_array,_empty_nd_impl],fancy_getitem->[begin_loops,prepare,store_item,end_loops,get_shape,FancyIndexer,load_item],array_strides->[make_array],array_item->[make_array,load_item],array_complex_attr->[make_array],make_array_ndindex->[make_ndindex_cls,init_specific],mark_positive->[set_range_metadata],_as_layout_array->[populate_array,_array_copy,make_array,_call_contiguous_check],np_column_stack->[_np_concatenate,expand_dims],np_hstack->[_np_stack_common],array_reshape->[populate_array,_attempt_nocopy_reshape,make_array],np_vstack->[np_vstack_impl->[expand_dims],_np_stack_common],_broadcast_to_shape->[_bc_adjust_dimension,_bc_adjust_shape_strides,make_array],_getitem_array_generic->[load_item,make_view,basic_indexing],record_getitem->[impl],np_atleast_1d->[_atleast_nd_transform,_atleast_nd],numpy_copy->[_array_copy],array_T->[populate_array,make_array],IntegerArrayIndexer->[loop_head->[_getitem_array1d,fix_integer_index]],array_itemsize->[make_array],array_size->[make_array],iternext_numpy_getitem->[getitem,make_array_flat_cls,make_array,setitem],np_dstack->[_np_stack_common,expand_dims],normalize_indices->[normalize_index],setitem_array->[normalize_indices,make_array,store_item,basic_indexing],array_reshape_vararg->[array_reshape,vararg_to_tuple],array_asfortranarray->[_as_layout_array],FancyIndexer->[prepare->[get_shape,prepare],end_loops->[loop_tail],__init__->[SliceIndexer,EntireIndexer,IntegerIndexer,BooleanArrayIndexer,fix_integer_index,IntegerArrayIndexer,make_array],begin_loops->[loop_head],get_offset_bounds->[get_index_bounds]],make_view->[populate_array,make_array],type_reshape_unchecked->[typer->[check_shape]],_np_stack->[_normalize_axis,make_array,_do_concatenate],array_len->[make_array],update_array_info->[get_itemsize],np_stack->[_np_stack_common],fancy_setslice->[extents_may_overlap,get_array_memory_extents,compute_memory_extents,prepare,store_item,_broadcast_to_shape,end_loops,get_shape,FancyIndexer,maybe_co
py_source,src_cleanup,src_getitem,make_array,begin_loops,get_offset_bounds],numpy_arange_1->[arange->[arange]],_call_contiguous_check->[make_array],array_astype->[_empty_nd_impl,make_array,load_item,store_item],make_array_ndenumerate->[make_array_ndenumerate_cls,make_array,init_specific],np_expand_dims->[expand_dims,_normalize_axis],assign_sequence_to_array->[assign->[_get_borrowing_getitem,assign,assign_item],assign_item->[store_item],assign],_np_stack_common->[_np_stack]]
Create a CContiguousFlatIter class for the given flattening kind. Yields the next object in the array. Initialize indices pointers and start values. Load and build a from the given indices and dimensions.
this is dead?
@@ -181,8 +181,9 @@ export function getHighestAvailableDomain(win) { * @param {time} expirationTime * @param {string|undefined} domain * @param {!SameSite=} sameSite + * @param {boolean} secure */ -function trySetCookie(win, name, value, expirationTime, domain, sameSite) { +function trySetCookie(win, name, value, expirationTime, domain, sameSite, secure) { // We do not allow setting cookies on the domain that contains both // the cdn. and www. hosts. // Note: we need to allow cdn.ampproject.org in order to optin to experiments
[No CFG could be retrieved]
Try to set a cookie with the given name value expirationTime and domain. Returns the cookie string to use for SameSite.
Nit: This signature means that you can either do `foo(sameSite)` or `foo()`. It should be `{!SameSite|undefined}`
@@ -3,12 +3,14 @@ import os import json +import shutil from .constants import NNICTL_HOME_DIR +from .command_utils import print_error class Config: '''a util class to load and save config''' - def __init__(self, file_path): - config_path = os.path.join(NNICTL_HOME_DIR, str(file_path)) + def __init__(self, file_path, home_dir = NNICTL_HOME_DIR): + config_path = os.path.join(home_dir, str(file_path)) os.makedirs(config_path, exist_ok=True) self.config_file = os.path.join(config_path, '.config') self.config = self.read_file()
[Experiments->[remove_experiment->[write_file],update_experiment->[write_file],add_experiment->[write_file],__init__->[read_file]]]
Initialize the object with the configuration file.
better to remove spaces around `=` for default values.
@@ -58,6 +58,9 @@ public class EventDrivenConsumer extends AbstractEndpoint implements Integration if (this.handler instanceof MessageProducer) { return ((MessageProducer) this.handler).getOutputChannel(); } + else if (this.handler instanceof AbstractMessageRouter) { + return ((AbstractMessageRouter) this.handler).getDefaultOutputChannel(); + } else { return null; }
[EventDrivenConsumer->[getOutputChannel->[getOutputChannel]]]
Gets the output channel.
I wonder if it may be proxy and therefore we need one more special interface... WDYT?
@@ -54,6 +54,9 @@ def setup_parser(subparser): subparser.add_argument( '--update', metavar='FILE', default=None, action='store', help='write output to the specified file, if any package is newer') + subparser.add_argument( + '-v', '--include-virtuals', action='store_true', default=False, + help='include virtual packages in list') arguments.add_common_arguments(subparser, ['tags'])
[version_json->[github_url,get_dependencies],list->[formatter,filter_by_name],html->[rows_for_ncols,github_url,head],filter_by_name->[match->[match],match]]
Setup the parser for the command.
`--virtuals` (to be consistent with other places like `find` help). Also need to run `spack commaands --update-completion`
@@ -5,7 +5,7 @@ * First Introduced: 2.9 * Sort Order: 29 * Recommendation Order: 9 - * Requires Connection: Yes + * Requires Connection: No * Auto Activate: No * Module Tags: Recommended * Feature: Engagement
[No CFG could be retrieved]
Provides a class that provides related posts. region RelatedPosts_Module methods.
Something to think about as we dev this out -- ideally this should still be yes-it does require a connection to work in any real way, even with this (from my initial look) -- but we want a development-friendly experience. Do we need to rework how "require connection" modules are loaded? Or have a new category "mockable" that represents modules that requires a connection, but have a built-in ability to do _something_ offline.
@@ -129,6 +129,12 @@ public final class HiveQueryRunner } } + private static void setupLogging() + { + Logging logging = Logging.initialize(); + logging.setLevel("org.apache.parquet.hadoop", WARN); + } + private static Database createDatabaseMetastoreObject(String name) { return Database.builder()
[HiveQueryRunner->[createQueryRunner->[createQueryRunner],main->[createQueryRunner]]]
Creates a DistributedQueryRunner with the given parameters. This method creates a metastore object that can be used to query the database.
I would setup this to `org.apache`, then I would move it to `DistributedQueryRunner`
@@ -246,6 +246,8 @@ public final class MethodArgumentResolverDelegate implements ArgumentResolverDel Object parameterValue = valueSupplier.get(); if (parameterValue == null) { return resolvePrimitiveTypeDefaultValue(parameterType); + } else if (parameterType.equals(InputStream.class) && parameterValue instanceof CursorStream) { + return resolveToUnclosableCursor((CursorStream) parameterValue); } else { return resolveCursor(parameterValue); }
[MethodArgumentResolverDelegate->[initialise->[initArgumentResolvers],resolve->[resolve]]]
Wrap the parameter resolution.
should this wrapping be done for other InputStream impls that are not ours?
@@ -847,14 +847,11 @@ public class BasicGameMenuBar<CustomGameFrame extends MainGameFrame> extends JMe data.releaseReadLock(); } try { - final FileWriter writer = new FileWriter(chooser.getSelectedFile()); - try { + try(final FileWriter writer = new FileWriter(chooser.getSelectedFile());) { writer.write(xmlFile); - } finally { - writer.close(); } } catch (final IOException e1) { - e1.printStackTrace(); + ClientLogger.logQuietly(e1); } } };
[BasicGameMenuBar->[getData->[getData],getLookAndFeelAvailableList->[isJavaGreatThan5,isJavaGreatThan6],dispose->[dispose],getLookAndFeelList->[getLookAndFeelAvailableList],getGame->[getGame],createHelpMenu->[addGameNotesMenu],addSaveMenu->[actionPerformed->[getSaveGameLocationDialog]]]]
Add the action which exports the game. xml file.
single line try statements please do not have semicolon in them
@@ -222,6 +222,7 @@ export class PreactBaseElement extends AMP.BaseElement { ...childrenInit, ...passthroughInit, ...templatesInit, + ...this.getAdditionalMutationObserverInitProperties(), }); this.mediaQueryProps_ = hasMediaQueryProps(Ctor)
[No CFG could be retrieved]
Initializes the component s default Preact props. Unblock rendering on first CanRender response.
So now the init from the amp layer is mixed with the init for the generic mutation observer. Hmm... does it still make sense to use the base-element Mutation Observer then?
@@ -33,8 +33,8 @@ class SublimeText(Package): homepage = "http://www.sublimetext.com/" url = "https://download.sublimetext.com/sublime_text_3_build_3126_x64.tar.bz2" - version('3126', 'acc34252b0ea7dff1f581c5db1564dcb') - version('2.0.2', '699cd26d7fe0bada29eb1b2cd7b50e4b') + version('3_build_3126', 'acc34252b0ea7dff1f581c5db1564dcb') + version('2.0.2', '699cd26d7fe0bada29eb1b2cd7b50e4b') # Sublime text comes as a pre-compiled binary. # Since we can't link to Spack packages, we'll just have to
[SublimeText->[install->[copy_tree],url_for_version->[up_to,format],depends_on,version]]
Creates a link to a sophisticated text editor for a single object. Download the SublimeText from the SublimeText build.
this one is ugly, `2` are called `2.0.2` and alike, but for `3` they use `3 Build 3126`.
@@ -120,9 +120,6 @@ class Options: # -- experimental options -- self.fast_parser = True - self.incremental = False - self.cache_dir = defaults.CACHE_DIR - self.debug_cache = False self.shadow_file = None # type: Optional[Tuple[str, str]] self.show_column_numbers = False # type: bool self.dump_graph = False
[Options->[select_options_affecting_cache->[getattr],clone_for_module->[Options,module_matches_pattern,update],__repr__->[pformat,'Options],module_matches_pattern->[match]]]
Initialize the object with default values. Initialize the object with default values.
``fast-parser`` is no more experimental, so that it also could be moved above.
@@ -121,14 +121,11 @@ def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1, else: data = sum(np.dot(e, e.T) for e in epochs) # compute data covariance event_id = epochs.event_id - if event_id is None: - event_id = 0 + if event_id is None or len(event_id.keys()) == 0: + event_id = '0' else: - if len(event_id.keys()) == 0: - event_id = 0 - else: - event_id = event_id[event_id.keys()[0]] - desc_prefix = "%-d-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax) + event_id = '_'.join([str(v) for v in event_id.values()]) + desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax) return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix)
[compute_proj_evoked->[_compute_proj],compute_proj_epochs->[_compute_proj],write_proj->[write_proj],read_proj->[read_proj],compute_proj_raw->[_compute_proj]]
Compute SSP and evoked vectors on Epochs and Event Events. Compute the projection vectors of the n - th n - th n - th n - th.
For backward compatibility ok. With the new feature, whatever you do, len(event_id) will never be 0 and event_id will never be None.
@@ -0,0 +1,17 @@ +import unittest +from conans.client.tools import environment_append +from conans.test.utils.tools import TestClient + +from conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR + + +class ConanV2ModeTestCase(unittest.TestCase): + + @staticmethod + def get_client(*args, **kwargs): + # TODO: Initialize with the default behavior for Conan v2 + return TestClient(*args, **kwargs) + + def run(self, *args, **kwargs): + with environment_append({CONAN_V2_MODE_ENVVAR: "1"}): + super(ConanV2ModeTestCase, self).run(*args, **kwargs)
[No CFG could be retrieved]
No Summary Found.
I don't see why this is necessary; nothing should be special about the client instance — it should be a totally normal one, the only difference being CONAN_V2_MODE_ENVVAR
@@ -252,4 +252,15 @@ def test_degenerate(): bad_epochs_fname) assert_equal(len(w), 1) + +@requires_version('scipy', '0.12') +@testing.requires_testing_data +def test_eeglab_annotations(): + """Test reading annotations in EEGLAB files""" + for fname in [raw_fname_onefile, raw_fname]: + annotations = read_annotations_eeglab(fname) + assert len(annotations) == 154 + assert set(annotations.description) == set(['rt', 'square']) + assert np.all(annotations.duration == 0.) + run_tests_if_main()
[test_degenerate->[_TempDir,catch_warnings,assert_equal,len,assert_raises,copyfile,savemat,join,loadmat,simplefilter],test_io_set->[any,_TempDir,catch_warnings,read_events_eeglab,assert_raises,filter,savemat,copyfile,join,loadmat,enumerate,simplefilter,assert_array_almost_equal,get_data,assert_equal,epochs,read_epochs_eeglab,_test_raw_reader,zeros,range,LooseVersion,SkipTest,assert_array_equal,find_events,len,zip,read_raw_eeglab,write_events,assert_true,str,random,Epochs,array],run_tests_if_main,simplefilter,data_path,join]
Test some degenerate conditions.
0.12 is our current minimum req so this can be removed alongside any others in the file
@@ -117,7 +117,9 @@ function jetpack_mobile_template( $theme ) { } function jetpack_mobile_available() { - echo '<div class="jetpack-mobile-link" style="text-align:center;margin:10px 0;"><a href="'. home_url( '?ak_action=accept_mobile' ) . '">' . __( 'View Mobile Site', 'jetpack' ) . '</a></div>'; + global $wp; + $current_url = home_url( add_query_arg( array( 'ak_action' => 'accept_mobile' ), add_query_arg( $_GET, $wp->request ) ) ); + echo '<div class="jetpack-mobile-link" style="text-align:center;margin:10px 0;"><a href="'. $current_url . '">' . __( 'View Mobile Site', 'jetpack' ) . '</a></div>'; } function jetpack_mobile_request_handler() {
[jetpack_mobile_customizer_controls->[add_setting,add_control]]
The function that handles the mobile request. This function is used to redirect to the mobile page.
I would recommend escaping the URL here.
@@ -46,10 +46,7 @@ public class GlueMetastoreModule protected void setup(Binder binder) { configBinder(binder).bindConfig(GlueHiveMetastoreConfig.class); - - newOptionalBinder(binder, GlueColumnStatisticsProvider.class) - .setDefault().to(DisabledGlueColumnStatisticsProvider.class).in(Scopes.SINGLETON); - + configBinder(binder).bindConfig(HiveConfig.class); newOptionalBinder(binder, Key.get(RequestHandler2.class, ForGlueHiveMetastore.class)); newOptionalBinder(binder, Key.get(new TypeLiteral<Predicate<Table>>() {}, ForGlueHiveMetastore.class))
[GlueMetastoreModule->[setup->[install,bindConfig,get,CachingHiveMetastoreModule,newOptionalBinder,in,RecordingHiveMetastoreModule,withGeneratedName],createExecutor->[newCachedThreadPool,BoundedExecutor,daemonThreadsNamed,getGetPartitionThreads,directExecutor]]]
Setup the bundle.
It feels to me that you should not need this one, as it is bound in `HiveModule`, which should always be present.
@@ -64,8 +64,8 @@ public class TestPortableRunner extends PipelineRunner<PipelineResult> { TestPortablePipelineOptions testPortablePipelineOptions = options.as(TestPortablePipelineOptions.class); String jobServerHostPort; - Object jobServerDriver; - Class<?> jobServerDriverClass = testPortablePipelineOptions.getJobServerDriver(); + JobServerDriver jobServerDriver; + Class<JobServerDriver> jobServerDriverClass = testPortablePipelineOptions.getJobServerDriver(); String[] parameters = testPortablePipelineOptions.getJobServerConfig(); try { jobServerDriver =
[TestPortableRunner->[run->[run,fromOptions],fromOptions->[TestPortableRunner]]]
Runs the pipeline and waits until it completes or the pipeline is complete.
We should watch out for #9872 because the method name/usage changes there.
@@ -162,6 +162,8 @@ ds_mgmt_tgt_pool_create_ranks(uuid_t pool_uuid, char *tgt_dev, D_DEBUG(DB_TRACE, "fill ranks %d idx %d "DF_UUID"\n", tc_out_ranks[i], idx, DP_UUID(tc_out_uuids[i])); } + D_FREE(tc_out_uuids); + D_FREE(tc_out_ranks); rc = DER_SUCCESS;
[No CFG could be retrieved]
find the target uuid in the DF pool and return it - - - - - - - - - - - - - - - - - -.
These are quite probably correct, although the error checking in this function wants looking at to ensure it handles all cases.
@@ -1575,7 +1575,16 @@ namespace Kratos } else if(KratosComponents<array_1d_component_type>::Has(variable_name)) { - ReadNodalDofVariableData(rThisNodes, static_cast<array_1d_component_type const& >(KratosComponents<array_1d_component_type>::Get(variable_name))); + bool has_been_added = rThisVariables.Has(KratosComponents<array_1d_component_type>::Get(variable_name)) ; + if( !has_been_added && mOptions.Is(IGNORE_VARIABLES_ERROR) ) { + KRATOS_WARNING("ModelPartIO") <<"WARNING: Skipping NodalData block. Variable "<<variable_name<<" has not been added to ModelPart '"<<rThisModelPart.Name()<<"'"<<std::endl<<std::endl; + SkipBlock("NodalData"); + } + else if (!has_been_added) + KRATOS_THROW_ERROR(std::invalid_argument,"The nodal solution step container deos not have this variable: ", variable_name) + else { + ReadNodalDofVariableData(rThisNodes, static_cast<array_1d_component_type const& >(KratosComponents<array_1d_component_type>::Get(variable_name))); + } } else if(KratosComponents<Variable<array_1d<double, 3> > >::Has(variable_name)) {
[No CFG could be retrieved]
Reads the value of the n - ary variable from the model part. Reads the next n - word from the word array and stores it in the ModelPart array.
Edit KRATOS_ERROR (not KRATOS_ERROR_IF_NOT)
@@ -94,6 +94,12 @@ obj_tls_fini(const struct dss_thread_local_storage *dtls, struct dss_module_key *key, void *data) { struct obj_tls *tls = data; + struct migrate_pool_tls *pool_tls; + struct migrate_pool_tls *tmp; + + d_list_for_each_entry_safe(pool_tls, tmp, &tls->ot_pool_list, + mpt_list) + migrate_pool_tls_destroy(pool_tls); if (tls->ot_echo_sgl.sg_iovs != NULL) daos_sgl_fini(&tls->ot_echo_sgl, true);
[int->[srv_profile_start,d_errstr,obj_ec_codec_fini,obj_utils_init,obj_tls_get,obj_ec_codec_init,srv_profile_stop,DP_RC,D_ERROR,obj_utils_fini,D_DEBUG],void->[daos_sgl_fini,srv_profile_destroy,D_FREE,D_ALLOC_PTR]]
dss_thread_local_storage_fini is called to free the obj_.
Force destroy without looking refcount? BTW, what if pool destroy is received while migration is in process? Any way to abort migration?
@@ -551,6 +551,13 @@ public class PutS3Object extends AbstractS3Processor { if (currentState.getUploadId().isEmpty()) { final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key, objectMetadata); + if (keyId != null) { + if (!context.getProperty(SIGNER_OVERRIDE).getValue().equals("AWSS3V4Signer")) { + getLogger().error("Uploading with AWS:KMS requires S3V4 signature, please enable it"); + return; + } + initiateRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(keyId)); + } initiateRequest.setStorageClass(currentState.getStorageClass()); final AccessControlList acl = createACL(context, ff); if (acl != null) {
[PutS3Object->[getLocalState->[getPersistenceFile],getLocalStateIfInS3->[localUploadExistsInS3],removeLocalState->[persistLocalState],MultipartState->[toString->[toString]],ageoffLocalState->[removeLocalState,getPersistenceFile],persistLocalState->[getPersistenceFile],getS3AgeoffListAndAgeoffLocalState->[ageoffLocalState],onTrigger->[process->[persistLocalState,getLocalStateIfInS3],removeLocalState]]]
On trigger. This method is used to upload the user - defined properties to S3. This method checks if there is a duplicate key in the user metadata and if so loads or This method is called to initiate a multipart upload or find the next available position in the file check if there is a sequence of bytes to upload and if so upload the next chunk.
Same issue with return routing the flowfile to success.
@@ -1101,6 +1101,8 @@ class RestAPI: status_code=HTTPStatus.CONFLICT, ) + secret = payment_status.payment_done.get() + payment = { 'initiator_address': self.raiden_api.address, 'registry_address': registry_address,
[endpoint_not_found->[api_error],RestAPI->[initiate_payment->[api_error,api_response],connect->[api_error,api_response],get_raiden_events_payment_history_with_timestamps->[api_error,api_response,get_raiden_events_payment_history_with_timestamps],_deposit->[api_error,api_response,get_channel],get_blockchain_events_token_network->[normalize_events_list,get_blockchain_events_token_network,api_error,api_response],get_partners_by_token->[api_error,api_response,get_channel_list],get_pending_transfers->[api_error,api_response,get_pending_transfers],get_token_network_for_token->[api_error,api_response],open->[api_error,api_response],leave->[api_response],register_token->[api_error,api_response],patch_channel->[_close,api_error,get_channel,_deposit],get_channel->[api_error,api_response,get_channel],get_our_address->[api_response],get_blockchain_events_network->[normalize_events_list,get_blockchain_events_network,api_response,api_error],get_blockchain_events_channel->[normalize_events_list,api_error,get_blockchain_events_channel,api_response],_close->[api_error,api_response,get_channel],get_tokens_list->[get_tokens_list,api_response],get_channel_list->[api_response,get_channel_list]],APIServer->[unhandled_exception->[api_error],stop->[stop],__init__->[restapi_setup_type_converters,restapi_setup_urls]],normalize_events_list->[hexbytes_to_str,encode_object_to_str,encode_byte_values]]
Initiate a payment for a token. Get a list of all payment node ids.
the secret should be accessible from the instance you have `payment_status.secret`... it doesn't need to be passed through in the `AsyncResult`
@@ -198,6 +198,7 @@ class WP_Test_Jetpack_Sync_Options extends WP_Test_Jetpack_Sync_Base { 'wordads_display_page' => '1', 'wordads_display_archive' => '1', 'wordads_custom_adstxt' => 'pineapple', + 'site_user_type' => json_encode( array( 0 => 'pineapple' ) ), 'site_segment' => 'pineapple', 'site_vertical' => 'pineapple', );
[WP_Test_Jetpack_Sync_Options->[test_don_t_sync_option_if_not_on_whitelist->[do_sync,get_option,assertEquals],test_sync_options_that_use_filter->[do_sync,get_option,assertEquals,update_options_whitelist],test_added_option_is_synced->[get_option,assertEquals],test_deleted_option_is_synced->[do_sync,get_option,assertEquals],test_updated_option_is_synced->[do_sync,get_option,assertEquals],assertOptionIsSynced->[get_option,assertEqualsObject],setUp->[do_sync,set_options_whitelist],test_sync_default_options->[assertTrue,setSyncClientDefaults,assertOptionIsSynced,do_sync,assertEquals,get_options_whitelist],test_sync_default_contentless_options->[assertTrue,setSyncClientDefaults,get_options_contentless,assertOptionIsSynced,do_sync,assertEquals],test_sync_initalize_Jetpack_Sync_Action_on_init->[assertEquals],test_add_whitelisted_option_on_init_89->[assertTrue,get_options_whitelist]]]
This method is used to set the default options for the sync client This function is used to create a network interface that can be used to create a network interface This function is used to provide a list of all the available packages.
Let's use `wp_json_encode()` here instead. Also, `0` isn't a valid user ID - lets use a positive integer instead.
@@ -375,3 +375,14 @@ class WebhookPlugin(BasePlugin): previous_value, **kwargs, ) + + @staticmethod + def _trigger_webhook_requests(payload, event_type): + webhooks = _get_webhooks_for_event(event_type) + deliveries_qs = create_event_delivery_list_for_webhooks( + webhooks=webhooks, + event_payload=payload, + event_type=event_type, + ) + for delivery in deliveries_qs: + send_webhook_request.delay(delivery.id)
[WebhookPlugin->[refund_payment->[__run_payment_webhook],capture_payment->[__run_payment_webhook],confirm_payment->[__run_payment_webhook],authorize_payment->[__run_payment_webhook],void_payment->[__run_payment_webhook],process_payment->[__run_payment_webhook]]]
Process a payment condition.
We could rename this function to `trigger_webhooks_async`, it would be consistent with naming for sync webhooks where we have `trigger_webhook_sync`. We could also move this function to `webhook/tasks.py` for now so both methods for sending webhooks are next to each other. Later I think we will refactor the `tasks.py` file to some smaller modules, but this can be done once we merge this PR.
@@ -153,6 +153,6 @@ func (loader KibanaLoader) statusMsg(msg string, a ...interface{}) { if loader.msgOutputter != nil { loader.msgOutputter(msg, a...) } else { - logp.Debug("dashboards", msg, a...) + loader.defaultLogger.Debugf("%s %v", msg, a) } }
[ImportIndexFile->[ImportIndex,Errorf,Unmarshal,ReadFile],Close->[Close],statusMsg->[msgOutputter,Debug],ImportIndex->[Wrapf,ImportJSON,Err,Wrap,Set],ImportDashboard->[ImportJSON,ReadFile,Unmarshal,Errorf,Set,Add],GetVersion,statusMsg,Enabled,String,Errorf,After,Done,NewKibanaClient]
statusMsg logs a message to the user if the messageOutputter is not nil.
the old implementation looks like "statusMsg" implements a Printf-like interface (better named would have been `statusMsgf`). `s/loader.defaultLogger.Debugf("%s %v", msg, a)/loader.defaultLogger.Debugf(msg, a...)/` Is msgOutputter used somewhere? It kind of looks like a 'duplicate' interface for logging.
@@ -138,9 +138,11 @@ public class ArchivedCommitsCommand implements CommandMarker { throws IOException { System.out.println("===============> Showing only " + limit + " archived commits <==============="); - String basePath = HoodieCLI.getTableMetaClient().getBasePath(); + HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient(); + String basePath = metaClient.getBasePath(); + Path archivePath = new Path(metaClient.getArchivePath() + "/.commits_.archive*"); FileStatus[] fsStatuses = - FSUtils.getFs(basePath, HoodieCLI.conf).globStatus(new Path(basePath + "/.hoodie/.commits_.archive*")); + FSUtils.getFs(basePath, HoodieCLI.conf).globStatus(archivePath); List<Comparable[]> allCommits = new ArrayList<>(); for (FileStatus fs : fsStatuses) { // read the archived file
[ArchivedCommitsCommand->[showCommits->[hasNext,Path,getBasePath,newReader,toList,addTableHeaderField,HoodieLogFile,println,close,getClassSchema,print,globStatus,getFs,addAll,collect,next,getPath,getRecords],readCommit->[size,toArray,get,printStackTrace,toString,add],showArchivedCommits->[Path,getBasePath,addAll,addTableHeaderField,globStatus,isEmpty,getRecords,hasNext,newReader,print,HoodieLogFile,println,getClassSchema,close,next,toList,getFs,collect,getPath]]]
Read archived commits and show details. Returns a String with the Hoodie header of the object that represents the type of commit.
This can affect reading the archives of all the old tables.
@@ -431,9 +431,9 @@ WRITE8_MEMBER(coco_ssc_device::ssc_port_c_w) m_spo->ald_w(space, 0, m_tms7000_portd); } - if( (data & C_BSY) == 0 ) +if( ((m_tms7000_portc & C_BSY) == 0) && ((data & C_BSY) == C_BSY) ) { - m_host_busy = false; + m_timer->adjust(attotime::from_usec(1800)); } if (LOG_SSC)
[No CFG could be retrieved]
read and write the specified NIC on the specified port read the specified number of bytes from the specified device Missing - Hosted Timeout.
Why is this now up against the left margin?
@@ -694,6 +694,10 @@ function createBaseCustomElementClass(win) { ); } + if (this.hasAttribute('disable-inline-width')) { + return; + } + // Sizes. if (this.sizeList_ === undefined) { const sizesAttr = this.getAttribute('sizes');
[No CFG could be retrieved]
Method to apply the sizes and media query to the element. Updates the size of the element based on the height list attribute.
This blocks `heights` behavior. Additionally, we'd need to set `sizeList_ = null`. I think this should just be handled as part of the ternary in the sizes block below.
@@ -118,7 +118,7 @@ class JobManagerTask(Model): :type authentication_token_settings: ~azure.batch.models.AuthenticationTokenSettings :param allow_low_priority_node: Whether the Job Manager task may run on a - low-priority compute node. The default value is false. + low-priority compute node. The default value is true. :type allow_low_priority_node: bool """
[JobManagerTask->[__init__->[super]]]
Creates a copy of an application package that is referenced by the given application package. If the TaskConstraints - A map of constraints to their respective properties.
Was it a doc issue?
@@ -79,9 +79,9 @@ class SuluCoreExtension extends Extension implements PrependExtensionInterface $config = $this->processConfiguration($configuration, $configs); $loader = new Loader\XmlFileLoader($container, new FileLocator(__DIR__ . '/../Resources/config')); - $container->setParameter('sulu_core.locales', array_keys($config['locales'])); + $container->setParameter('sulu_core.locales', array_unique(array_keys($config['locales']))); $container->setParameter('sulu_core.translated_locales', $config['locales']); - $container->setParameter('sulu_core.translations', $config['translations']); + $container->setParameter('sulu_core.translations', array_unique($config['translations'])); $container->setParameter('sulu_core.fallback_locale', $config['fallback_locale']); $container->setParameter('sulu.cache_dir', $config['cache_dir']);
[SuluCoreExtension->[load->[load],initCache->[load],initWebspace->[load],initContent->[load],initPhpcr->[load]]]
Loads the configuration for a node.
Why are these two changes necessary?