patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -97,11 +97,15 @@ public class DefaultFileRegion extends AbstractReferenceCounted implements FileR @Override protected void deallocate() { try { - file.close(); + if (file != null) { + file.close(); + } } catch (IOException e) { if (logger.isWarnEnabled()) { logger.warn("Failed to close a file.", e); } + } finally { + file = null; } } }
[DefaultFileRegion->[deallocate->[isWarnEnabled,warn,close],transferTo->[IllegalArgumentException,transferTo],NullPointerException,getInstance,IllegalArgumentException]]
Deallocate a from the file.
`file = null;` in the `finally` block is evaluated even when `file` is `null`. I would move the null-check out of the try block and return early when `file` is `null`.
@@ -3923,6 +3923,17 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv volPath = physicalDisk.getPath(); } + // check for disk activity, if detected we should exit because vm is running elsewhere + if (_diskActivityCheckEnabled && physicalDisk != null && physicalDisk.getFormat() == PhysicalDiskFormat.QCOW2) { + s_logger.debug("Checking physical disk file at path " + volPath + " for disk activity to ensure vm is not running elsewhere"); + try { + HypervisorUtils.checkVolumeFileForActivity(volPath, _diskActivityCheckTimeoutSeconds, _diskActivityInactiveThresholdMilliseconds, _diskActivityCheckFileSizeMin); + } catch (IOException ex) { + throw new CloudRuntimeException("Unable to check physical disk file for activity", ex); + } + s_logger.debug("Disk activity check cleared"); + } + // if params contains a rootDiskController key, use its value (this is what other HVs are doing) DiskDef.diskBus diskBusType = null; Map <String, String> params = vmSpec.getDetails();
[LibvirtComputingResource->[getInterfaces->[getInterfaces],isSnapshotSupported->[executeBashScript],getNetworkStats->[networkUsage],doPingTest->[execute],cleanupVMNetworks->[getAllVifDrivers],cleanupNetworkElementCommand->[vifHotUnPlug,VifHotPlug,getBroadcastUriFromBridge],getVPCNetworkStats->[VPCNetworkUsage],can_bridge_firewall->[execute],configure->[getDefaultNetworkScriptsDir,configure,getDeveloperProperties,getDefaultStorageScriptsDir,getDefaultKvmScriptsDir,getDefaultDomrScriptsDir],getVifDriverClass->[configure],createVMFromSpec->[getUuid],execute->[getNetworkStats,checkNetwork,getVPCNetworkStats,configure,createVMFromSpec,execute,getResizeScriptType,getAllVifDrivers,network_rules_vmSecondaryIp,default_network_rules_for_systemvm,configureTunnelNetwork,createVifs,startVM,handleVmStartFailure,findOrCreateTunnelNetwork,getVmState,VPCNetworkUsage,networkUsage,passCmdLine],vifHotUnPlug->[getAllVifDrivers],add_network_rules->[execute],post_default_network_rules->[getInterfaces,execute],network_rules_vmSecondaryIp->[execute],getGuestDiskModel->[isGuestPVEnabled],executeBashScript->[execute],default_network_rules_for_systemvm->[execute],checkBridgeNetwork->[matchPifFileInDirectory],configureTunnelNetwork->[findOrCreateTunnelNetwork,execute],getDisks->[getDisks],cleanup_rules->[execute],get_rule_logs_for_vms->[execute],rebootVM->[getPif,startVM],checkCgroups->[execute],attachOrDetachISO->[cleanupDisk],destroyTunnelNetwork->[execute],prepareNetworkElementCommand->[VifHotPlug,getBroadcastUriFromBridge],stopVM->[stopVM],destroy_network_rules_for_vm->[getInterfaces,execute],getIqn->[execute],findOrCreateTunnelNetwork->[checkNetwork],getVmState->[convertToPowerState],getVersionStrings->[KeyValueInterpreter,execute,getKeyValues],executeRequest->[executeRequest],attachOrDetachDisk->[getUuid],initialize->[getVersionStrings,getUuid],default_network_rules->[getInterfaces,execute],executeInVR->[executeInVR],getHostVmStateReport->[convertToPowerState,getHostVmStateReport],getVmDi
skStat->[getDomain,getDisks],VPCNetworkUsage->[execute],networkUsage->[execute],getDeveloperProperties->[getEndIpFromStartIp],getVncPort->[getVncPort],getVmStat->[getInterfaces,getDomain,VmStats,getDisks],createVbd->[getUuid],syncNetworkGroups->[get_rule_logs_for_vms],getBroadcastUriFromBridge->[matchPifFileInDirectory]]]
Creates a VBD. Private method to find the matching device in the system. find and add the data disk get data store.
Wouldn't this block best be refactored into a separate method?
@@ -220,7 +220,7 @@ int MainWrappers<Scalar,LocalOrdinal,GlobalOrdinal,Node>::main_(Teuchos::Command tm = Teuchos::null; if (solverName == "Belos") { - auto tm2 = TimeMonitor(*TimeMonitor::getNewTimer("Maxwell: 2 - Build Belos solver etc")); + auto tm2 = TimeMonitor::getNewTimer("Maxwell: 2 - Build Belos solver etc"); // construct preconditioner RCP<MueLu::RefMaxwell<SC,LO,GO,NO> > preconditioner
[No CFG could be retrieved]
Maxwell add function. Turns a Belos operator into a Belos linear problem.
I don't think this would work like you think -- getNewTimer doesn't start the retrieved Teuchos::Time object, it just gets a pointer to it. The result is that you wouldn't end up timing the thing. You would just get a zero time.
@@ -724,7 +724,14 @@ class ComposerRepository extends ArrayRepository implements ConfigurableReposito } /** - * @param array $packageNames array of package name => ConstraintInterface|null - if a constraint is provided, only packages matching it will be loaded + * @param array<string, ConstraintInterface|null> $packageNames array of package name => ConstraintInterface|null - if a constraint is provided, only packages matching it will be loaded + * @param int[]|null $acceptableStabilities + * @phpstan-param array<string, BasePackage::STABILITY_*>|null $acceptableStabilities + * @param int[]|null $stabilityFlags an array of package name => BasePackage::STABILITY_* value + * @phpstan-param array<string, BasePackage::STABILITY_*>|null $stabilityFlags + * @param array<string, mixed> $alreadyLoaded + * + * @return array{namesFound: array, packages: array} */ private function loadAsyncPackages(array $packageNames, array $acceptableStabilities = null, array $stabilityFlags = null, array $alreadyLoaded = array()) {
[ComposerRepository->[createPackages->[getRepoName,configurePackageTransportOptions,loadPackages],addPackage->[configurePackageTransportOptions],getPackageNames->[getPackages],initializePartialPackages->[loadRootServerFile],loadIncludes->[loadIncludes],loadRootServerFile->[getPackagesJsonUrl],loadProviderListings->[loadProviderListings],loadDataFromServer->[loadRootServerFile],search->[getPackageNames]]]
Loads packages asynchronously. Load packages from the repository. Returns an array of namesFound packages and metadataComplete when all promises are resolved.
this is `array{namesFound: array<string, true>, packages: array<string, BasePackage>}`
@@ -25,6 +25,17 @@ function getNewAudioOutputDevice(newDevices) { d.deviceId === selectedAudioOutputDeviceId)) { return 'default'; } + + const settings = APP.store.getState()['features/base/settings']; + const preferredAudioOutputDeviceId = settings.userSelectedAudioOutputDeviceId; + + // if the preferred one is not the selected and is available in the new devices + // we want to use it as it was just added + if (preferredAudioOutputDeviceId + && preferredAudioOutputDeviceId !== selectedAudioOutputDeviceId + && availableAudioOutputDevices.find(d => d.deviceId === preferredAudioOutputDeviceId)) { + return preferredAudioOutputDeviceId; + } } /**
[No CFG could be retrieved]
Determines if currently selected audio output device should be changed after the list of available devices has been if returns the label of the first available device.
Shouldn't we include this logic to the logic above somehow?
@@ -72,4 +72,11 @@ public interface Pool<T> { */ int getAllocatedCount(); + /** + * Close the pool; returned items will be destroyed. + * @since 4.3.23 + */ + default void close() { + } + }
[No CFG could be retrieved]
Returns the number of allocated memory in the pool.
The `default` is not going to work for Java 6, but we may not just introduce this method into the `Pool` interface for backward bytecode compatibility.
@@ -0,0 +1,15 @@ +package org.baeldung.conditionalflow.step; + +import org.baeldung.conditionalflow.model.NumberInfo; +import org.springframework.batch.core.StepExecution; +import org.springframework.batch.item.ItemProcessor; + +public class NumberInfoClassifierWithDecider + implements ItemProcessor<NumberInfo, Integer> { + private StepExecution stepExecution; + + @Override + public Integer process(NumberInfo numberInfo) throws Exception { + return Integer.valueOf(numberInfo.getNumber()); + } +}
[No CFG could be retrieved]
No Summary Found.
Formatting is off on this class
@@ -137,7 +137,12 @@ class GraphLock(object): total.extend(node.requires.values()) roots = set(self._nodes).difference(total) assert len(roots) == 1 - return self._nodes[roots.pop()] + root_node = self._nodes[roots.pop()] + if root_node.path: + return root_node.path + if not self.revisions_enabled: + return root_node.pref.ref.copy_clear_rev() + return root_node.pref.ref @staticmethod def from_dict(data):
[GraphLockNode->[from_dict->[loads,GraphLockNode],as_dict->[dumps]],GraphLock->[from_dict->[GraphLock,from_dict],update_check_graph->[_closure_affected],__init__->[GraphLockNode],find_consumer_node->[get_node],as_dict->[as_dict]],GraphLockFile->[dumps->[dumps],loads->[loads,GraphLockFile],save->[save],load->[load]]]
obtain the root node in the graph that is not depended by anyone else.
Idem about `copy_clear_revs()`
@@ -17,7 +17,11 @@ _LOGGER = logging.getLogger(__name__) class ServiceBusSessionReceiver(ServiceBusReceiver, SessionReceiverMixin): """The ServiceBusSessionReceiver class defines a high level interface for - receiving messages from the Azure Service Bus Queue or Topic Subscription. + receiving messages from the Azure Service Bus Queue or Topic Subscription + while utilizing a session for FIFO and ownership semantics. + + The two primary channels for message receipt are `receive()` to make a single request for messages, + and `for message in receiver:` to continuously receive incoming messages in an ongoing fashion. :ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace. The namespace format is: `<yournamespace>.servicebus.windows.net`.
[ServiceBusSessionReceiver->[__init__->[ServiceBusSession,super,_populate_session_attributes],from_connection_string->[super]],getLogger]
Creates a new ServiceBusSessionReceiver object that can be used to receive messages from the Service.
Are you getting tired of typing this yet? ;)
@@ -210,6 +210,17 @@ def message_about_scripts_not_on_PATH(scripts): else: msg_lines.append(last_line_fmt.format("these directories")) + # Add a note if any directory starts with ~ + warn_for_tilde = any([ + i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i + ]) + if warn_for_tilde: + tilde_warning_msg = ( + "NOTE: The current PATH contains path(s) starting with `~`, " + "which may not be expanded by all applications." + ) + msg_lines.append(tilde_warning_msg) + # Returns the formatted multiline message return "\n".join(msg_lines)
[_raise_for_invalid_entrypoint->[MissingCallableSuffix],install_unpacked_wheel->[record_installed->[normpath],clobber->[record_installed],open_for_csv,root_is_purelib,message_about_scripts_not_on_PATH,sorted_outrows,clobber,get_entrypoints,PipScriptMaker,get_csv_rows_for_installed],Wheel->[get_formatted_file_tags->[format_tag]],PipScriptMaker->[make->[_raise_for_invalid_entrypoint]],get_entrypoints->[_split_ep],get_csv_rows_for_installed->[normpath,rehash]]
Determines if any scripts are not on PATH and format a warning. Returns a formatted multiline message of the last n - tuples in the CSV file.
You can drop the list: ``` warn_for_tilde = any( i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i )
@@ -23,6 +23,8 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.locks.Lock; +import com.google.common.collect.ImmutableList; +import lombok.extern.slf4j.Slf4j; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path;
[ParallelRunner->[deletePath->[call->[deletePath]],renamePath->[call->[renamePath]],movePath->[call->[movePath]]]]
Creates a new instance of a n - ary type in a thread pool. This method is called to perform serialization of a single .
Order of imports.
@@ -165,8 +165,8 @@ public class PCollectionViews { throw new RuntimeException("Unexpected IOException: ", e); } } + return defaultValue; } - return defaultValue; } @Override
[PCollectionViews->[PCollectionViewBase->[fromIterableInternal->[fromElements],toString->[toString],equals->[getTagInternal,equals]],SingletonPCollectionView->[fromElements->[getDefaultValue]]]]
Returns the default value of the PCollection.
I think the prior code is also fine, as it is lazy init. But it makes no real difference so this is fine too.
@@ -65,7 +65,7 @@ func testTLSRequest(t *testing.T, testURL string, extraConfig map[string]interfa jobs, endpoints, err := create("tls", config) require.NoError(t, err) - job := jobs[0] + job := monitors.WrapCommon(jobs, "tls", "", "http")[0] event := &beat.Event{} _, err = job(event)
[CertToTempFile,Compose,HelloWorldHandler,AvailableTCP4Port,NewConfigFrom,Test,RespondingTCPChecks,NewUnstartedServer,Close,Hostname,StartTLS,URLFields,ReadOpen,ParseCertificate,MustCompile,NewTLSServer,BuildNameToCertificate,TLSChecks,Join,Equal,BaseChecks,Name,Strict,Remove,ErrorChecks,SizedResponseHandler,NoError,NewCertPool,NewServer,Sprintf,Parse,AppendCertsFromPEM,Getwd,ReadAll,Run,True]
testRequest tests the given request. testRequest returns server event and minimum response.
Ideally we wouldn't have this in tests, and the call to `create` above would be a different call that would do the wrapping. Unfortunately `newMonitor`, where this happens now would need a refactor for this to be clean. This PR has enough scope, so I'd like to defer that refactor.
@@ -173,7 +173,7 @@ func DeleteIssueLabel(ctx *context.APIContext) { label, err := models.GetLabelInRepoByID(ctx.Repo.Repository.ID, ctx.ParamsInt64(":id")) if err != nil { - if models.IsErrLabelNotExist(err) { + if models.IsErrRepoLabelNotExist(err) { ctx.Error(http.StatusUnprocessableEntity, "", err) } else { ctx.Error(http.StatusInternalServerError, "GetLabelInRepoByID", err)
[ReplaceLabels,Status,ClearLabels,IsErrIssueNotExist,GetIssueByIndex,Error,ParamsInt64,ToLabelList,CanWriteIssuesOrPulls,NotFound,GetLabelInRepoByID,IsErrLabelNotExist,GetLabelsInRepoByIDs,LoadAttributes,GetLabelsByIssueID,RemoveLabel,AddLabels,JSON]
Remove a label from an issue Replace an issue s labels.
I think org labels should be considered here as well?
@@ -653,10 +653,9 @@ public class IsmReaderImpl<V> extends IsmReader<V> { try (Closeable readerCloser = readCounter.enter()) { int shardId = coder.encodeAndHash( - ImmutableList.builder() - .addAll(keyComponents) - .addAll(additionalKeyComponents) - .build(), + Collections.unmodifiableList( + Stream.concat(keyComponents.stream(), additionalKeyComponents.stream()) + .collect(Collectors.toList())), keyBytes); return getBlock(keyBytes, shardId, readCounter).get(keyBytes); }
[IsmReaderImpl->[ShardAwareIsmPrefixReaderIterator->[getLast->[initializeForKeyedRead,getBlock],start->[start],getCurrent->[getCurrent],close->[close],advance->[start,advance],iterator],WithinShardIsmReaderIterator->[start->[advance],getCurrent->[get]],WithinShardIsmPrefixReaderIterator->[start->[advance],advance->[fetch,iterator],getCurrent->[get]],IsmShardKey->[hashCode->[hashCode],toString->[toString],equals->[equals]],LazyIsmPrefixReaderIterator->[start->[start,getKeyComponents,overKeyComponents],close->[close],advance->[advance],getCurrent->[getCurrent]],closeIfPresent->[close],IsmCacheLoader->[call->[call]],initializeBloomFilterAndIndexPerShard->[iterator],overKeyComponents->[iterator,overKeyComponents],openIfNeeded->[get],toString->[toString],IsmPrefixReaderIteratorImpl->[get->[get],getLast->[initializeFooterAndShardIndex,get,bloomFilterMightContain,fetch,initializeForKeyedRead]],initializeForKeyedRead->[IsmShardKey,initializeFooterAndShardIndex,initializeBloomFilterAndIndexPerShard,toString],position->[position],getBlock->[initializeFooterAndShardIndex,get,bloomFilterMightContain,fetch,initializeForKeyedRead],open->[open]]]
Get a windowed value from the cache.
Why is this change necessary?
@@ -33,6 +33,7 @@ class ComputeDragProcess(KratosMultiphysics.Process): "model_part_name" : "", "interval" : [0.0, 1e30], "write_drag_output_file" : true, + "output_folder" : "TimeBasedAsciiResults", "print_drag_to_screen" : false, "write_buffer_size" : -1, "print_format" : ""
[ComputeDragProcess->[ExecuteFinalize->[GetCommunicator,close],ExecuteFinalizeSolutionStep->[write,_PrintToScreen,str,format,GetCommunicator,_GetCorrespondingDragForce],_GetFileHeader->[Exception],_PrintToScreen->[Exception],_GetCorrespondingDragForce->[Exception],ExecuteInitialize->[_GetFileHeader,TimeBasedAsciiFileWriterUtility,GetCommunicator,Parameters,AddValue,file_handler_params,Vector,params],__init__->[Has,ValidateAndAssignDefaults,super,Parameters,params,Exception]],Factory->[Exception,ComputeDragProcess,type],CheckRegisteredApplications]
Initialize the object with the given model and parameters.
why not "DragResults"?
@@ -11,7 +11,9 @@ def read_invalid_file(): hdfs_path = 'hdfs:///user/jenkins/tests/invalid' h2o.import_file(hdfs_path) assert False, "Read of file, which does not exists was sucessfull. This is impossible" - except ValueError: + except ValueError as ve: + print(ve) + assert str(ve) == "ImportFiles of 'hdfs:///user/jenkins/tests/invalid' failed on [u\"Path does not exist: 'hdfs:/user/jenkins/tests/invalid'\"]" pass if __name__ == "__main__":
[read_invalid_file->[import_file],standalone_test,read_invalid_file,insert,join]
Read invalid file.
I guess the // gets removed on server side, just weird to have this server processing necessary to get the test passing.
@@ -117,6 +117,10 @@ namespace System.Xml.Schema Write(writer, null); } + [DynamicDependency(TrimmerConstants.PublicMembers, typeof(XmlSchema))] + [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", + Justification = "Supressing warning since the right members of XmlSchema will be preserved by the " + + "DynamicDependency attribute.")] public void Write(XmlWriter writer, XmlNamespaceManager? namespaceManager) { XmlSerializer serializer = new XmlSerializer(typeof(XmlSchema));
[XmlSchema->[GetExternalSchemasList->[GetExternalSchemasList],Read->[Read,XmlSchema],CompileSchemaInSet->[Compile],Write->[Write]]]
Writes this object to the given writer.
This is not enough unfortunately. Serialization is recursive - the dynamic dependency you added will only work on `XmlSchema` itself, not any of the other types the serializer will get to by recursively serialization data on the schema. @sbomer and/or @LakshanF are working on adding some support for `XmlSerializer` based on custom steps. I *think* it could make this work if fully implemented (the `XmlSchema` has `XmlRootAttribute` on it, so it's discoverable by the custom step). I don't know how far we've got though.
@@ -31,7 +31,11 @@ public class JacksonMessageBodyReader extends AbstractJsonMessageBodyReader { @Override public Object readFrom(Class<Object> type, Type genericType, Annotation[] annotations, MediaType mediaType, MultivaluedMap<String, String> httpHeaders, InputStream entityStream) throws IOException, WebApplicationException { - throw new IllegalStateException("Should never be called"); + try { + return doReadFrom(type, genericType, entityStream); + } catch (MismatchedInputException e) { + throw new WebApplicationException(Response.Status.BAD_REQUEST); + } } @Override
[JacksonMessageBodyReader->[doReadFrom->[readValue],readFrom->[abortWith,build,IllegalStateException,getInputStream,doReadFrom],reader]]
Override readFrom to handle a missing entity.
Looks like the original exception is lost here
@@ -581,7 +581,6 @@ public class QueueTest { * Make sure that the running build actually carries an credential. */ @Test public void accessControl() throws Exception { - r.configureUserRealm(); FreeStyleProject p = r.createFreeStyleProject(); QueueItemAuthenticatorConfiguration.get().getAuthenticators().add(new MockQueueItemAuthenticator(Collections.singletonMap(p.getFullName(), alice))); p.getBuildersList().add(new TestBuilder() {
[QueueTest->[queueApiOutputShouldBeFilteredByUserPermission->[equals],TestTask->[hashCode->[hashCode],createExecutable->[run->[doRun]]],pendingsConsistenceAfterErrorDuringMaintain->[equals,getName,getDisplayName],testBlockBuildWhenUpstreamBuildingLock->[save],permissionSensitiveSlaveAllocations->[getACL->[getACL]],shouldBeAbleToBlockFlyweightTaskAtTheLastMinute->[equals,getDisplayName],fileItemPersistence->[FileItemPersistenceTestServlet],BlockDownstreamProjectExecution->[canTake->[equals]],waitForStart->[waitForStart],waitForStartAndCancelBeforeStart->[waitForStart]]]
Test whether Jenkins has access control.
These code was no-op because it is ignoring the return value.
@@ -66,6 +66,15 @@ export function isCanary(win) { return !!(win.AMP_CONFIG && win.AMP_CONFIG.canary); } +/** + * Returns runtime type, e.g., canary, control, or production. + * @param {!Window} win + * @return {string} + */ +export function getRuntimeType(win) { + return (win.AMP_CONFIG && win.AMP_CONFIG.type) || 'unknown'; +} + /** * Enable experiments detailed in an origin trials token iff the token is
[No CFG could be retrieved]
Enables the experiment from a given token. - Encodes the token and returns a Promise.
I'm adding @erwinmombay and @rsimha-amp for review since they own this overall. My recommendation, however, is to call it something other than "runtime type" - we already have that concept for "single doc" vs "shadow mode" vs "in-a-box". Maybe "binary type"? Or "coarse version"? Or???
@@ -245,13 +245,16 @@ namespace System.IO } } - private unsafe void ParseEventBufferAndNotifyForEach(byte[] buffer) + private unsafe void ParseEventBufferAndNotifyForEach(byte[] buffer, uint numBytes) { Debug.Assert(buffer != null); Debug.Assert(buffer.Length > 0); + Debug.Assert(numBytes <= buffer.Length); fixed (byte* b = buffer) { + byte* pBufferLimit = b + numBytes; + Interop.Kernel32.FILE_NOTIFY_INFORMATION* info = (Interop.Kernel32.FILE_NOTIFY_INFORMATION*)b; ReadOnlySpan<char> oldName = ReadOnlySpan<char>.Empty;
[FileSystemWatcher->[Monitor->[IsHandleInvalid],ReadDirectoryChangesCallback->[IsHandleInvalid,Monitor]]]
This is the callback which is called when a file or directory change is received. This method is called when a file action is about to be handled. It asserts that no This method is called when a file or directory change has been detected.
This should be stronger than an Assert. Since we're guarding against Windows giving us garbage we should guard against this being invalid as well. We should take min of numBytes and buffer.Length below.
@@ -3,7 +3,7 @@ var $controllerMinErr = minErr('$controller'); -var CNTRL_REG = /^(\S+)(\s+as\s+(\w+))?$/; +var CNTRL_REG = /^(\S+)(\s+as\s+([\w$]+))?$/; function identifierForController(controller, ident) { if (ident && isString(ident)) return ident; if (isString(controller)) {
[No CFG could be retrieved]
Provides a controller provider for the action. A string which is used to retrieve the controller constructor.
Perhaps only allow `$` at the beginning or is that being too strict?
@@ -410,6 +410,7 @@ define([ */ CesiumMath.toRadians = function(degrees) { //>>includeStart('debug', pragmas.debug); + Check.defined('degrees', degrees); if (!defined(degrees)) { throw new DeveloperError('degrees is required.'); }
[No CFG could be retrieved]
Integrity number of radians in degrees and arc second. This function converts a negative value to a positive value in the range [ - Math. PI.
remove this developer error
@@ -43,16 +43,13 @@ public class FilebasedSchemaProvider extends SchemaProvider { private static final String TARGET_SCHEMA_FILE_PROP = "hoodie.deltastreamer.schemaprovider.target.schema.file"; } - private final FileSystem fs; - private final Schema sourceSchema; private Schema targetSchema; public FilebasedSchemaProvider(TypedProperties props) { - super(props); StreamerUtil.checkRequiredProperties(props, Collections.singletonList(Config.SOURCE_SCHEMA_FILE_PROP)); - this.fs = FSUtils.getFs(props.getString(Config.SOURCE_SCHEMA_FILE_PROP), StreamerUtil.getHadoopConf()); + FileSystem fs = FSUtils.getFs(props.getString(Config.SOURCE_SCHEMA_FILE_PROP), StreamerUtil.getHadoopConf()); try { this.sourceSchema = new Schema.Parser().parse(fs.open(new Path(props.getString(Config.SOURCE_SCHEMA_FILE_PROP)))); if (props.containsKey(Config.TARGET_SCHEMA_FILE_PROP)) {
[FilebasedSchemaProvider->[getTargetSchema->[getTargetSchema]]]
Provides a schema provider that reads off files on DFS. Returns the target schema if any.
Will we always reuse the config option from `deltastreamer`? Let us think about it with more insight?
@@ -33,7 +33,7 @@ class PodcastTag < LiquidTagBase end def render(_context) - ActionController::Base.new.render_to_string( + ApplicationController.render( partial: PARTIAL, locals: { episode: @episode,
[PodcastTag->[render->[render_to_string],parse_link->[gsub,count],fetch_podcast->[podcast_id,parse_link,find_by,last,id],raise_error->[raise],initialize->[overcast_url,android_url,new,itunes_url,to_sym,feed_url,cloudinary,fetch_podcast],attr_reader,freeze,include],register_tag]
Renders a with a partial and locals.
Had to make this change so that it can access `ApplicationHelper`. I'm going to make this change to the rest of liquid tag in a different PR.
@@ -106,6 +106,10 @@ export class AmpImg extends BaseElement { if (!IS_ESM) { guaranteeSrcForSrcsetUnsupportedBrowsers(this.img_); } + + if (AmpImg.V1() && !this.img_.complete) { + this.onReadyState(ReadyState.LOADING); + } } }
[No CFG could be retrieved]
This method is called when the element has a size attribute and has a width attribute. Create the actual image element and set up instance variables.
nonblocking nit: I explained the logic more fully in the design doc, but I think a slightly better `setReadyState`
@@ -0,0 +1,18 @@ +/* +Copyright (c) 2018 Uber Technologies, Inc. + +This source code is licensed under the MIT license found in the +LICENSE file in the root directory of this source tree. +*/ +// @flow +export {default as Slider} from './slider'; +export {default as StatefulSlider} from './stateful-slider'; +export {default as StatefulSliderContainer} from './stateful-slider-container'; +// Styled elements +export { + Root as StyledRoot, + Axis as StyledAxis, + Tick as StyledTick, + Thumb as StyledThumb, +} from './styled-components'; +export {} from './constants';
[No CFG could be retrieved]
No Summary Found.
`StyledTickBar` was added in the RFC but is not exported here
@@ -1206,6 +1206,7 @@ known_config_types = ( 'MNE_SKIP_TESTING_DATASET_TESTS', 'MNE_STIM_CHANNEL', 'MNE_USE_CUDA', + 'MNE_TESTING', 'SUBJECTS_DIR', )
[set_config->[get_config_path,warn,_load_config],get_config->[get_config_path,_load_config],estimate_rank->[warn],object_diff->[_sort_keys,object_diff],_load_config->[warn],_get_stim_channel->[get_config],deprecated->[_decorate_class->[deprecation_wrapped->[warn]],_decorate_fun->[deprecation_wrapped->[warn]]],_get_ftp->[ProgressBar],_chunk_write->[update_with_increment_value],warn->[warn],check_fname->[warn],object_hash->[object_hash,_sort_keys],md5sum->[update],_check_mayavi_version->[check_version],set_log_file->[warn,WrapStdOut],get_config_path->[_get_extra_data_path],requires_nibabel->[has_nibabel],ProgressBar->[update_with_increment_value->[update]],_get_http->[ProgressBar,update_with_increment_value],run_subprocess->[warn],random_permutation->[check_random_state],compute_corr->[_get_fast_dot],catch_logging->[__exit__->[set_log_file]],_TempDir->[__new__->[__new__]],pformat->[_FormatDict]]
This function is used to configure memmap of large arrays. Load a MNE - Python config file and return a dictionary.
We have others of the form `MNE_SKIP_*`, so maybe `MNE_SKIP_FLASH_CALL` or something would be better. Although in this case, we want the default behavior when running `nosetests mne/tests/test_bem.py` to skip the FreeSurfer call, since it takes forever. Your current code will still make the call to FreeSurfer by default (although you have made AppVeyor and Travis skip it, _developers_ will still be haunted by the slowness).
@@ -1269,3 +1269,8 @@ func templateSafeString(value string) error { _, err := strconv.Unquote(`"` + value + `"`) return err } + +func isPem(data []byte) bool { + block, _ := pem.Decode(data) + return block != nil +}
[loadIngresses->[Itoa,Join,GetService,Warn,shouldProcessIngress,HasPrefix,GetIngresses,Sprintf,FormatInt,Warnf,JoinHostPort,updateIngressStatus,Errorf,GetEndpoints,getWeight,addGlobalBackend,Debugf],Init->[Init],loadConfig->[Error,GetConfiguration],newK8sClient->[Infof,Sprintf,Parse,Errorf,Getenv],updateIngressStatus->[GetService,New,Errorf,UpdateIngressStatus,Split,Debugf],Provide->[loadIngresses,NewTimer,OperationWithRecover,loadConfig,Go,Duration,DeepEqual,WatchAll,newK8sClient,Errorf,RetryNotify,NewBackOff,NewExponentialBackOff,Get,Sleep,Set,Debugf],addGlobalBackend->[GetService,Sprintf,New,FormatInt,JoinHostPort,String,Errorf,GetEndpoints,HasPrefix],TrimRight,GetSecret,Warnf,HasCustomHeadersDefined,Strings,Has,Error,Unquote,New,Errorf,NewScanner,TrimSpace,Debugf,Text,Join,Contains,ToLower,FileOrContent,Scan,Split,TrimLeft,EqualFold,NewReader,Sprintf,Unmarshal,IntValue,Replace,String,HasSecureHeadersDefined]
Get the id of the node.
since according to RFC 7468 PEM blocks must begin with `-----BEGIN`, could you not first do a comparison to the first bytes in the slice to see if they contain a valid header, and fail fast without having to decode the whole dataset first?
@@ -224,11 +224,11 @@ if ($device['os'] === 'f5' && (version_compare($device['version'], '11.2.0', '>= } } -if ($config['enable_ports_etherlike']) { - echo 'dot3Stats '; - $port_stats = snmpwalk_cache_oid($device, 'dot3StatsEntry', $port_stats, 'EtherLike-MIB'); -} elseif ($device['os'] != 'asa') { +if ($device['os'] != 'asa') { echo 'dot3StatsDuplexStatus'; + if ($config['enable_ports_poe'] || $config['enable_ports_etherlike']) { + $port_stats = snmpwalk_cache_oid($device, 'dot3StatsIndex', $port_stats, 'EtherLike-MIB'); + } $port_stats = snmpwalk_cache_oid($device, 'dot3StatsDuplexStatus', $port_stats, 'EtherLike-MIB'); }
[addDataset]
Get the network - specific information for a given device. ADSL - LINE - MIB.
We don't use any of this table.
@@ -286,11 +286,13 @@ seeder.create_if_none(Broadcast) do } broadcast_messages.each do |type, message| + broadcastable = WelcomeNotification.create + Broadcast.create!( title: "Welcome Notification: #{type}", processed_html: message, - type_of: "Welcome", active: true, + broadcastable: broadcastable, ) end
[Seeder->[create_if_none->[none?,pluralize,puts,join]],email,create_with_users,sentence,create,new,max,bs,slug,to_a,current,at,create!,join,first,name,order,city,rand,freeze,hex_color,quote,chomp,concat,perform_async,paragraph_by_chars,username,last,company,sample,word,to_s,logo,with_index,times,paragraph,url,take,production?,open,sql,length,puts,id,each,title,zero?,create_if_none,pluck,random,say_something_smart,dev_account,add_to,add_role,find_each]
You can start a new tag page with your content. A function to create a welcome thread from a specific neccesary object.
any reason not to use `create!` here? I suspect if you _did_ use it here, then this would fail, since the `WelcomeNotification` would want/need the `Broadcast` to exist before the `WelcomeNotification` was created
@@ -763,6 +763,13 @@ def plot_trans(info, trans='auto', subject=None, subjects_dir=None, opacity=alphas[key], figure=fig) if key != 'helmet': surface.actor.property.backface_culling = True + if brain and 'lh' not in surfs: # one layer sphere + assert bem['coord_frame'] == FIFF.FIFFV_COORD_HEAD + center = bem['r0'].copy() + if coord_frame == 'mri': + center = apply_trans(head_mri_t, center) + mlab.points3d(*center, scale_factor=0.01, color=colors['lh'], + opacity=alphas['lh']) # plot points defaults = DEFAULTS['coreg']
[_plot_mpl_stc->[_handle_time,_limits_to_control_points,_smooth_plot],plot_source_estimates->[_plot_mpl_stc,_limits_to_control_points,_handle_time],_sensor_shape->[_make_tris_fan],_dipole_changed->[_plot_dipole],plot_trans->[_fiducial_coords,_create_mesh_surf]]
Plots the head sensor and source space alignment in 3D. Plot a single - part of a sequence of CNA - specific objects. Missing nanomagical index. Return a object if the source spaces have a non - zero n - node cycles.
You aren't using the standard system here -- you should apply `head_trans` here (with no conditional). The current code won't work, for example, when the user plots in the MEG device coord frame.
@@ -410,8 +410,12 @@ zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res) &child, &children); if (children == 0) { - char *path = zpool_vdev_name(g_zfs, zhp, nvroot, B_FALSE); - fnvlist_add_boolean(res, path); + char *path = zpool_vdev_name(g_zfs, zhp, nvroot, + VDEV_NAME_PATH); + + if (strcmp(path, VDEV_TYPE_INDIRECT) != 0) + fnvlist_add_boolean(res, path); + free(path); return; }
[zpool_do_clear->[usage],zpool_do_checkpoint->[usage],list_callback->[print_list_stats],zpool_do_detach->[usage],zpool_do_reopen->[usage],zpool_do_iostat->[print_cmd_columns,usage],zpool_do_split->[usage],zpool_do_add->[usage],zpool_do_create->[usage],zpool_do_online->[usage],zpool_do_resilver->[usage],zpool_do_export->[usage],main->[usage],zpool_do_offline->[usage],zpool_do_labelclear->[usage],zpool_do_initialize->[usage],zpool_do_reguid->[usage],print_list_stats->[print_list_stats],void->[print_iostat_labels,print_cmd_columns,usage],zpool_do_upgrade->[usage],zpool_do_status->[usage],zpool_do_remove->[usage],zpool_do_history->[usage],zpool_do_list->[usage],zpool_do_get->[usage],zpool_do_import->[usage],zpool_do_destroy->[usage],zpool_do_scrub->[usage],int->[usage],status_callback->[print_cmd_columns],print_iostat->[print_cmd_columns],zpool_do_set->[usage],zpool_do_events->[usage]]
Collects all leaves of the node in the zpool.
I think it is good that we are changing this function to not include indirect vdevs, at the same time I'm thinking if that's something that we should explicitly test though. Just thinking out loud.
@@ -727,7 +727,12 @@ function conversation_fetch_comments($thread_items, $pinned) { && ($row['thr-parent'] == $row['parent-uri']) && ($row['received'] > $received) && Contact::isSharing($row['author-id'], $row['uid'])) { $direction = ['direction' => 3, 'title' => DI::l10n()->t('%s reshared this.', $row['author-name'])]; - $actor = ['link' => $row['author-link'], 'avatar' => $row['author-avatar'], 'name' => $row['author-name']]; + + $author = ['uid' => 0, 'id' => $row['author-id'], + 'network' => $row['author-network'], 'url' => $row['author-link']]; + $url = '<a href="'. Contact::magicLinkByContact($author) .'">' . htmlentities($row['author-name']) . '</a>'; + + $actor = ['url' => $url, 'link' => $row['author-link'], 'avatar' => $row['author-avatar'], 'name' => $row['author-name']]; $received = $row['received']; }
[localize_item->[t,match,attributes],conversation_fetch_comments->[t,get],item_photo_menu->[t],builtin_activity_puller->[match],conv_get_blocklist->[get],conv_sort->[get],conversation_add_children->[get],format_like->[t],visible_activity->[match,isHidden],status_editor->[t,getQueryString,get],conversation->[determineCategoriesTerms,getCommand,get,registerStylesheet,registerFooterScript,addParent,getQueryString,remove,getTemplateData,t]]
Fetch comments from a conversation return array of comments.
The href value must be HTML encoded as well.
@@ -133,8 +133,7 @@ class GUFunc(object): # at this point we know the gufunc is a dynamic one ewise = self._get_ewise_dtypes(args) if not (self.ufunc and ufunc_find_matching_loop(self.ufunc, ewise)): - self._is_dynamic = True sig = self._get_signature(*args) self.add(sig) self.build_ufunc() - return self.ufunc(*args) + return self.ufunc(*args, **kwargs)
[GUFunc->[__call__->[_num_args_match,_get_signature,build_ufunc,add,_get_ewise_dtypes],_get_signature->[_get_ewise_dtypes],build_ufunc->[build_ufunc],add->[add]]]
Call the underlying gufunc with the given arguments.
This is odd. Is this fixing an oversight unrelated to kwarg processing?
@@ -1412,7 +1412,7 @@ Generator.prototype.generateKeyStore = function() { `-dname "CN=Java Hipster, OU=Development, O=${this.packageName}, L=, ST=, C="` , function(code) { if (code !== 0) { - parent.env.error(chalk.red(`\nFailed to create a KeyStore with \'keytool\'`), code); + parent.env.error(chalk.red('\nFailed to create a KeyStore with \'keytool\''), code); } else { parent.log(chalk.green(`\nKeyStore '${keyStoreFile}' generated successfully.\n`)); }
[No CFG could be retrieved]
Generates a new KeyStore.
we should have used `parent.error` here without needing to use chalk here. could you plz change it as part of this PR
@@ -19,11 +19,8 @@ use Sulu\Bundle\MediaBundle\Entity\MediaRepositoryInterface; use Sulu\Bundle\MediaBundle\Media\Exception\CollectionNotFoundException; use Sulu\Bundle\MediaBundle\Media\Exception\CollectionTypeNotFoundException; use Sulu\Bundle\MediaBundle\Media\FormatManager\FormatManagerInterface; -use Sulu\Component\Rest\ListBuilder\Doctrine\FieldDescriptor\DoctrineJoinDescriptor; use Sulu\Component\Rest\ListBuilder\Doctrine\FieldDescriptor\DoctrineFieldDescriptor; -use Doctrine\Common\Persistence\ObjectManager; -use Sulu\Bundle\MediaBundle\Entity\Collection as CollectionEntity; -use Sulu\Bundle\MediaBundle\Api\Collection; +use Sulu\Component\Rest\ListBuilder\Doctrine\FieldDescriptor\DoctrineJoinDescriptor; use Sulu\Component\Security\Authentication\UserRepositoryInterface; class DefaultCollectionManager implements CollectionManagerInterface
[DefaultCollectionManager->[modifyCollection->[getById]]]
Create a new object from a non-empty array. Provides access to the object manager and the user repository.
Would it be a lot of work to rename this to just to `CollectionManager`? We never use this Default-prefix anywhere else...
@@ -77,14 +77,16 @@ func (o *CreateRouteSubcommandOptions) Complete(f kcmdutil.Factory, cmd *cobra.C return err } - o.KubeClient, err = f.ClientSet() + clientConfig, err := f.ToRESTConfig() if err != nil { return err } - clientConfig, err := f.ToRESTConfig() + + o.CoreClient, err = corev1client.NewForConfig(clientConfig) if err != nil { return err } + o.Client, err = routev1client.NewForConfig(clientConfig) if err != nil { return err
[Complete->[ToRESTConfig,GetDryRunFlag,ToRawKubeConfigLoader,NewForConfig,ToRESTMapper,Namespace,ClientSet,ToPrinter,Complete],DefaultSubCommandRun,Sprintf,LongDesc,Errorf,WithTypeSetter,AddCommand,NewPrintFlags]
Complete completes the command line options for CreateRouteSubcommand.
How about using more narrow client? I'm shooting core (without looking at code), which should suffice here.
@@ -432,7 +432,7 @@ func hasOrgVisible(e Engine, org *User, user *User) bool { return true } - if org.Visibility == structs.VisibleTypePrivate && !org.isUserPartOfOrg(e, user.ID) { + if org.IsOrganization() && (org.Visibility == structs.VisibleTypePrivate || user.IsRestricted) && !org.isUserPartOfOrg(e, user.ID) { return false } return true
[RepoIDs->[cond],MirrorRepos->[MirrorRepoIDs],CountRepos->[cond],accessibleReposEnv->[getUserTeamIDs],GetUserTeamIDs->[getUserTeamIDs],MirrorRepoIDs->[cond],GetTeams->[getTeams],GetUserTeams->[getUserTeams],GetTeam->[getTeam],getOwnerTeam->[getTeam],GetOwnerTeam->[getOwnerTeam],Repos->[RepoIDs],RemoveOrgRepo->[removeOrgRepo],getOwnerTeam]
GetOrgByName returns organization by given name. Delete deletes the user and unit if it exists.
Why is `org.IsOrganization()` needed?
@@ -944,6 +944,10 @@ public class ExpressionAnalyzer windowFunctions.add(NodeRef.of(node)); } + if (node.isDistinct() && extractAggregateFunctions(List.of(node), metadata).isEmpty()) { + throw semanticException(NOT_SUPPORTED, node, "DISTINCT is not supported for non-aggregation functions"); + } + if (node.getFilter().isPresent()) { Expression expression = node.getFilter().get(); process(expression, context);
[ExpressionAnalyzer->[analyzeExpressions->[getTypeOnlyCoercions,getSubqueryInPredicates,getScalarSubqueries,getWindowFunctions,analyze,getExpressionTypes,getExpressionCoercions,getExistsSubqueries,getQuantifiedComparisons,getColumnReferences],Context->[getFunctionInputTypes->[isExpectingLambda],notInLambda->[Context],inLambda->[Context],getFieldToLambdaArgumentDeclaration->[isInLambda],expectingLambda->[Context],notExpectingLambda->[Context]],create->[ExpressionAnalyzer,create],analyzeExpression->[getTypeOnlyCoercions,getResolvedFunctions,getSubqueryInPredicates,getScalarSubqueries,getWindowFunctions,analyze,getReferencedFields,getExpressionTypes,getExpressionCoercions,getLambdaArgumentReferences,getExistsSubqueries,getQuantifiedComparisons,getTableColumnReferences,getColumnReferences],Visitor->[visitSearchedCaseExpression->[setExpressionType,process],getVarcharType->[process],visitExists->[setExpressionType],visitBinaryLiteral->[setExpressionType],visitSubscriptExpression->[setExpressionType,process],visitFunctionCall->[setExpressionType,getExpressionType,process],visitDereferenceExpression->[setExpressionType,handleResolvedField,process],visitLikePredicate->[setExpressionType,process],visitLogicalBinaryExpression->[setExpressionType],visitIsNotNullPredicate->[setExpressionType,process],visitDoubleLiteral->[setExpressionType],visitSymbolReference->[setExpressionType],visitTimeLiteral->[setExpressionType],visitInListExpression->[setExpressionType],visitCurrentUser->[setExpressionType],visitCoalesceExpression->[setExpressionType],visitParameter->[setExpressionType,process],visitStringLiteral->[setExpressionType],visitLambdaExpression->[setExpressionType,process],getCallArgumentTypes->[setExpressionType,ExpressionAnalyzer,getExpressionType],visitAtTimeZone->[setExpressionType,process],visitCharLiteral->[setExpressionType],visitBooleanLiteral->[setExpressionType],visitNullLiteral->[setExpressionType],visitFormat->[setExpressionType],visitGroupingOperation->[setExpressi
onType,process],visitIsNullPredicate->[setExpressionType,process],visitExtract->[setExpressionType,process],coerceType->[coerceType,process],visitArithmeticUnary->[setExpressionType,process],visitTryExpression->[setExpressionType,process],visitLongLiteral->[setExpressionType],visitSubqueryExpression->[setExpressionType,analyze],visitIfExpression->[setExpressionType,process],visitQuantifiedComparisonExpression->[setExpressionType,process],visitBindExpression->[setExpressionType,process],visitGenericLiteral->[setExpressionType],coerceCaseOperandToToSingleType->[process],visitIntervalLiteral->[setExpressionType],getOperator->[setExpressionType,process],visitCurrentTime->[setExpressionType],process->[process],visitSimpleCaseExpression->[setExpressionType,process],visitTimestampLiteral->[setExpressionType],visitCast->[setExpressionType,process],visitInPredicate->[setExpressionType,process],visitFieldReference->[handleResolvedField],visitCurrentPath->[setExpressionType],visitBetweenPredicate->[setExpressionType,process],visitDecimalLiteral->[setExpressionType],visitArrayConstructor->[setExpressionType],visitNullIfExpression->[setExpressionType,process],coerceToSingleType->[process],handleResolvedField->[setExpressionType],visitNotExpression->[setExpressionType],visitRow->[setExpressionType]],createWithoutSubqueries->[ExpressionAnalyzer,createWithoutSubqueries]]]
Determines if a function call has a window function or not. Resolves a function call node and checks if it can be found. DISTINCT can only be applied to comparable types.
The error should be `FUNCTION_NOT_AGGREGATE` instead of `NOT_SUPPORTED`
@@ -823,7 +823,7 @@ function photos_post(App $a) { $limit = service_class_fetch($a->data['user']['uid'],'photo_upload_limit'); if ($limit) { - $r = q("select sum(octet_length(data)) as total from photo where uid = %d and scale = 0 and album != 'Contact Photos' ", + $r = q("SELECT SUM(OCTET_LENGTH(`data`)) AS `total` FROM `photo` WHERE `uid` = %d AND `scale` = 0 AND `album` != 'Contact Photos'", intval($a->data['user']['uid']) ); $size = $r[0]['total'];
[photos_content->[set_pager_itemspage,set_pager_total],photos_post->[scaleImage,getExt,getWidth,rotate,getHeight,get_hostname,orient,imageString,store,is_valid]]
post a photo to the current user This function is called by the page_setup_photo_callback function. It checks if This function is used to delete a photo album and all its photos. It will also check remove the associated photos and remove the associated item This function is called by the administration functions to update the photo albums cache and the DELETE THE OBJECTS WHERE the user is the owner of the resource and the resource is the This function is called from the action GET to retrieve a photo object from the database.
Standards: Can you please add a space after commas?
@@ -82,14 +82,15 @@ public class DbTaskStorage implements TaskStorage { handle.createStatement( String.format( - "INSERT INTO %s (id, created_date, payload, status_code, status_payload) VALUES (:id, :created_date, :payload, :status_code, :status_payload)", + "INSERT INTO %s (id, created_date, datasource, payload, active, status_payload) VALUES (:id, :created_date, :datasource, :payload, :active, :status_payload)", dbConnectorConfig.getTaskTable() ) ) .bind("id", task.getId()) .bind("created_date", new DateTime().toString()) + .bind("datasource", task.getDataSource()) .bind("payload", jsonMapper.writeValueAsString(task)) - .bind("status_code", status.getStatusCode().toString()) + .bind("active", status.isRunnable() ? 1 : 0) .bind("status_payload", jsonMapper.writeValueAsString(status)) .execute();
[DbTaskStorage->[addAuditLog->[withHandle],getRunningTaskIds->[withHandle],getAuditLogs->[withHandle],removeLock->[withHandle],setStatus->[withHandle],getStatus->[withHandle],addLock->[withHandle],getTask->[withHandle],getLocksWithIds->[withHandle]]]
Inserts a new task in the task table.
"runnable" and "active" don't always mean the same thing to me (runnable means it could be run but isn't necessarily running. active often means that it is actively running). Is there more explanation of the semantics/a different (and hopefully better) set of namings that could convey what the state is?
@@ -0,0 +1,16 @@ +package io.quarkus.arc.deployment; + +import io.quarkus.runtime.annotations.ConfigGroup; +import io.quarkus.runtime.annotations.ConfigItem; + +@ConfigGroup +public class ArcTestConfig { + + /** + * If set to true then disable {@code StartupEvent} and {@code ShutdownEvent} observers declared on application bean classes + * during the tests. + */ + @ConfigItem(defaultValue = "false") + public boolean disableApplicationLifecycleObservers; + +}
[No CFG could be retrieved]
No Summary Found.
I am wondering if it makes sense to disable the `ShutdownEvent` as well
@@ -278,10 +278,17 @@ if ( ! class_exists( 'Jetpack_Simple_Payments_Widget' ) ) { $errors->add( 'post_title', __( "People need to know what they're paying for! Please add a brief title.", 'jetpack' ) ); } - if ( empty( $params['price'] ) || floatval( $params['price'] ) <= 0 ) { + if ( empty( $params['price'] ) || ! is_numeric( $params['price'] ) || floatval( $params['price'] ) <= 0 ) { $errors->add( 'price', __( 'Everything comes with a price tag these days. Please add a your product price.', 'jetpack' ) ); } + // Japan's Yen is the only supported currency with a zero decimal precision. + $precision = strcmp( $params['currency'], 'JPY' ) === 0 ? 0 : 2; + $price_decimal_places = $this->get_decimal_places( $params['price'] ); + if ( is_null( $price_decimal_places ) || $price_decimal_places > $precision ) { + $errors->add( 'price', __( 'Invalid price', 'jetpack' ) ); + } + if ( empty( $params['email'] ) || ! is_email( $params['email'] ) ) { $errors->add( 'email', __( 'We want to make sure payments reach you, so please add an email address.', 'jetpack' ) ); }
[Jetpack_Simple_Payments_Widget->[form->[defaults],widget->[defaults],get_product_from_post->[defaults],update->[defaults,get_product_from_post,get_latest_field_value]]]
Validates the ajax request parameters.
Is there a reason why we're using `strcmp` instead of `===`?
@@ -409,17 +409,7 @@ public class MetadataInfo implements Serializable { } public String toDescString() { - return this.getMatchKey() + getMethodSignaturesString() + new TreeMap<>(getParams()); - } - - private String getMethodSignaturesString() { - SortedSet<String> methodStrings = new TreeSet(); - - Method[] methods = ClassUtils.forName(name).getMethods(); - for (Method method : methods) { - methodStrings.add(method.toString()); - } - return methodStrings.toString(); + return this.getMatchKey() + name + group + version + path + protocol + new TreeMap<>(getParams()); } public void addParameter(String key, String value) {
[MetadataInfo->[ServiceInfo->[toDescString->[getParams,getMatchKey],equals->[equals,getName,getProtocol,getVersion,getGroup],getMethodSignaturesString->[toString],hashCode->[getProtocol,getName,getVersion,getGroup],getMethodParameter->[getMethodParameter],hasMethodParameter->[getMethodParameter],getParameters,getParameter],getParameter->[getParameter],MetadataInfo]]
This method returns a String that can be used to describe this consumer.
The following two parts have the same meaning `name + group + version + path + protocol` `getServiceKey()`
@@ -362,14 +362,14 @@ public class LocalExchange bufferCount = defaultConcurrency; checkArgument(partitionChannels.isEmpty(), "Arbitrary exchange must not have partition channels"); } - else if (partitioning.equals(FIXED_HASH_DISTRIBUTION)) { - bufferCount = defaultConcurrency; - checkArgument(!partitionChannels.isEmpty(), "Partitioned exchange must have partition channels"); - } else if (partitioning.equals(FIXED_PASSTHROUGH_DISTRIBUTION)) { bufferCount = defaultConcurrency; checkArgument(partitionChannels.isEmpty(), "Passthrough exchange must not have partition channels"); } + else if (partitioning.equals(FIXED_HASH_DISTRIBUTION) || partitioning.getConnectorId().isPresent()) { + // partitioned exchange + bufferCount = defaultConcurrency; + } else { throw new IllegalArgumentException("Unsupported local exchange partitioning " + partitioning); }
[LocalExchange->[LocalExchangeFactory->[getLocalExchange->[LocalExchange]],getBufferedBytes->[getBufferedBytes],LocalExchangeSinkFactory->[createSink->[createSink],close->[sinkFactoryClosed],duplicate->[createSinkFactory],noMoreSinkFactories->[noMoreSinkFactories]]]]
Compute the number of buffers to be used for the given partitioning.
nit: again is moving out of here and after FIXED_PASSTHROUGH_DISTRIBUTION case intentional?
@@ -463,6 +463,7 @@ public class PlatformLevel4 extends PlatformLevel { org.sonar.server.rule.ws.ShowAction.class, org.sonar.server.rule.ws.CreateAction.class, org.sonar.server.rule.ws.DeleteAction.class, + org.sonar.server.rule.ws.ListAction.class, TagsAction.class, RuleMapping.class, ActiveRuleCompleter.class,
[PlatformLevel4->[configureLevel->[add,newMetadata,get,addAll],start->[start,getContainer,getComponentByType,installExtensions]]]
Configures the level. This is a private method that can be called by the server side to provide a list of This method is called from the server side of the WsServerFactory. It is called by This method is called by the server code to register a bunch of actions that are not part This is a public method that can be accessed by the client.
it would be nice to put the rule actions in a module
@@ -8,8 +8,8 @@ describe Encryption::Encryptors::AttributeEncryptor do before do allow(AppConfig.env).to receive(:attribute_encryption_key).and_return(current_key) - allow(AppConfig.env).to receive(:attribute_encryption_key_queue).and_return( - [{ key: retired_key, cost: retired_cost }].to_json, + allow(IdentityConfig.store).to receive(:attribute_encryption_key_queue).and_return( + [{ 'key' => retired_key, 'cost' => retired_cost }], ) end
[to,context,let,to_not,describe,decrypt,to_json,before,encrypt,eq,it,require,raise_error,and_return]
A basic encryption algorithm that uses a single key to encrypt some text. Returns true if the old key was used to decrypt and we turn on new encryption.
should we set `options: { symbolize_names: true }` on this one?
@@ -16,6 +16,10 @@ class WPCOM_REST_API_V2_Endpoint_Podcast_Player extends WP_REST_Controller { * Constructor. */ public function __construct() { + if ( ! class_exists( 'Jetpack_Podcast_Helper' ) ) { + jetpack_require_lib( 'class-jetpack-podcast-helper' ); + } + $this->namespace = 'wpcom/v2'; $this->rest_base = 'podcast-player'; // This endpoint *does not* need to connect directly to Jetpack sites.
[WPCOM_REST_API_V2_Endpoint_Podcast_Player->[get_player_data->[get_player_data]]]
Initialize the object.
Just curious. Why did you remove checking the class existence?
@@ -56,7 +56,7 @@ def test_user_agent(): def test_request_url(authority): """the credential should accept an authority, with or without scheme, as an argument or environment variable""" - tenant_id = "expected_tenant" + tenant_id = "expected-tenant" access_token = "***" parsed_authority = urlparse(authority) expected_netloc = parsed_authority.netloc or authority # "localhost" parses to netloc "", path "localhost"
[test_request_url->[mock_send->[startswith,urlparse,mock_response],get_token,VisualStudioCodeCredential,dict,patch,Mock,urlparse],test_policies_configurable->[send->[build_aad_response,mock_response],get_token,Mock,VisualStudioCodeCredential,patch],test_credential_unavailable_error->[get_token,raises,VisualStudioCodeCredential,patch],test_mac_keychain_valid_value->[get_credentials,patch],test_no_obtain_token_if_cached->[get_token,VisualStudioCodeCredential,AccessToken,patch,Mock],test_segfault->[_get_refresh_token],test_mac_keychain_error->[get_token,VisualStudioCodeCredential,KeychainError,object,raises],test_redeem_token->[get_token,VisualStudioCodeCredential,assert_called_with,AccessToken,patch,Mock],test_adfs->[get_token,raises,lower,VisualStudioCodeCredential],test_no_scopes->[get_token,raises,VisualStudioCodeCredential],test_cache_refresh_token->[get_token,VisualStudioCodeCredential,AccessToken,patch,Mock],test_user_agent->[get_token,VisualStudioCodeCredential,Request,validating_transport,build_aad_response,mock_response,patch],skipif,startswith,parametrize]
test_request_url - test that the request URL is correct.
Does the constructor for `VisualStudioCodeCredential` need to validate the tenant ID as well, since it accepts a `tenant_id` keyword argument?
@@ -69,14 +69,14 @@ public class JdkVersionUtilsTestCase extends AbstractMuleTestCase assertTrue(JdkVersionUtils.isSupportedJdkVersion()); setJdkVersion("1.7"); assertTrue(JdkVersionUtils.isSupportedJdkVersion()); + setJdkVersion("1.8"); + assertTrue(JdkVersionUtils.isSupportedJdkVersion()); //not supported setJdkVersion("1.4.2"); assertFalse(JdkVersionUtils.isSupportedJdkVersion()); setJdkVersion("1.4.2_12"); assertFalse(JdkVersionUtils.isSupportedJdkVersion()); - setJdkVersion("1.8"); - assertFalse(JdkVersionUtils.isSupportedJdkVersion()); } @Test
[JdkVersionUtilsTestCase->[testRecommendedJdkVersion->[setJdkVersion],testValidateJdk5->[setJdkVersion],testValidateJdk->[setJdkVersion],testValidateJdk8->[setJdkVersion],testUndefinedJdkPreferences->[setJdkVersion],setJdkPreferences->[setManifest],testIsSupportedJdkVersion->[setJdkVersion]]]
Test if the specified JDK versions are supported.
this belongs in another PR
@@ -1339,12 +1339,12 @@ function PMA_showMessage($message, $sql_query = null, $type = 'notice', "//]]>\n" . "</script>"; } - echo $edit_link . $explain_link . $php_link . $refresh_link . $validate_link; - echo '</div>'; + $retval .= $edit_link . $explain_link . $php_link . $refresh_link . $validate_link; + $retval .= '</div>'; } - echo '</div>'; + $retval .= '</div>'; if ($GLOBALS['is_ajax_request'] === false) { - echo '<br class="clearfloat" />'; + $retval .= '<br class="clearfloat" />'; } // If we are in an Ajax request, we have most probably been called in
[PMA_showMessage->[getLevel,addMessage,display],PMA_getImage->[getPath],PMA_ajaxResponse->[getDisplay],PMA_selectUploadFile->[display],PMA_getSupportedDatatypes->[getTypeDescription,getColumns],PMA_showHint->[getLevel,getHash],PMA_getFunctionsForField->[getTypeClass,getAllFunctions,getFunctions]]
Display a message in the browser Checks if a node is missing a node and if so displays it parse the SQL query and return the base query of a node that can be used to display appends limit clause to the query base and appends limit clause to the query base Basic url query part of a node - type query Generates a link to the n - ary query box Print a single missing node in the UI Displays a link to the sql query and if necessary refreshes the link.
Reverted. Same for the above one.
@@ -339,6 +339,13 @@ func (m *MetricSet) Cleanup() { m.log.Warnf("Failed to remove KProbes on exit: %v", err) } } + if m.mountedFS != nil { + if err := m.mountedFS.unmount(); err != nil { + m.log.Errorf("Failed to umount %s: %v", m.mountedFS, err) + } else { + m.log.Debugf("Unmounted %s", m.mountedFS) + } + } } func (m *MetricSet) clockSyncLoop(interval time.Duration, done <-chan struct{}) {
[Cleanup->[Close,UninstallIf,Warnf],clockSyncLoop->[NewTicker,Stop],isKernelFunctionAvailable->[UninstallInstalled,Has,Install,Count],Run->[CreateProcess,Warn,Warnf,Wrap,Info,Done,Atoi,LostC,C,Error,clockSyncLoop,Errorf,Processes,Debug,AddUint64,Cleanup,Infof,User,Update,ErrC,String,Run],Setup->[WithTID,Wrap,WithBufferSize,WithRingSizeExponent,ApplyTemplate,WithErrBufferSize,NewPerfChannel,Strings,WithLostBufferSize,Errorf,NewTraceFS,Debugf,MonitorProbe,HasKey,Wrapf,Infof,NewTraceFSWithPath,GuessAll,UninstallIf,Update,WithTimestamp,Install,UninstallInstalled,String,isKernelFunctionAvailable],NewSystemMetricSet,DefaultMetricSet,Wrapf,IsDebug,PutUint64,UnixNano,WithNamespace,Beta,NewLogger,MustAddMetricSet,Now,Module,Uname,Err,KernelVersion,UnpackConfig,Setup]
Cleanup cleans up the metric set after the process exits.
If another application starts using kprobes after Auditbeat mounts this will it unmount? On one hand it's good to clean up something Auditbeat mounts, on the other hand we can't know if anything else is using it after Auditbeat mounts it. Not requesting any change here.
@@ -275,7 +275,14 @@ func (sg *stepGenerator) GenerateSteps(event RegisterResourceEvent) ([]Step, *re // Determine whether the change resulted in a diff. d, diffErr := sg.diff(urn, old.ID, oldInputs, oldOutputs, inputs, prov, allowUnknowns) if diffErr != nil { - return nil, result.FromError(diffErr) + // If the plugin indicated that the diff is unavailable, assume that the resource will be updated and + // report the message contained in the error. + if d.Changes == plugin.DiffUnavailable { + d.Changes = plugin.DiffSome + sg.plan.ctx.Diag.Warningf(diag.RawMessage(urn, diffErr.Error())) + } else { + return nil, result.FromError(diffErr) + } } diff = d }
[GenerateDeletes->[V,Infof],GenerateSteps->[Analyze,GetProvider,Diag,ParseReference,DependingOn,Analyzer,Errorf,Assert,Olds,issueCheckErrors,diff,IsProviderType,Infof,V,Bail,generateURN,GetAnalyzeResourceFailureError,GetDuplicateResourceURNError,FromError,Replace,Check,NewState,Goal],GenerateReadSteps->[ID,Infof,Provider,Name,generateURN,Dependencies,V,Properties,Parent,Olds,Type,NewState],ScheduleDeletes->[Infof,Res,V,DependenciesOf,Intersect],issueCheckErrors->[GetResourcePropertyInvalidValueError,GetResourceInvalidError,Name,Diag,Errorf],GeneratePendingDeletes->[V,Infof],diff->[Diff,DeepEquals]]
GenerateSteps generates the necessary steps for a checkpoint Check if a resource has a provider reference and if so return it. This function is called by the engine to analyze a resource and perform the necessary actions.
We have the URN of the provider from the provider reference, right? Can we issue the warning for that URN and not this resource? I think it would make more sense for this message to be attached to the provider since the message directly pertains to it.
@@ -83,7 +83,10 @@ class PkgConfigGenerator(Generator): ["-D%s" % d for d in cpp_info.defines]])) if cpp_info.public_deps: - public_deps = " ".join(cpp_info.public_deps) + pkg_config_names = [] + for public_dep in cpp_info.public_deps: + pkg_config_names += [self.deps_build_info[public_dep].get_name("pkg_config")] + public_deps = " ".join(pkg_config_names) lines.append("Requires: %s" % public_deps) return "\n".join(lines) + "\n"
[PkgConfigGenerator->[content->[single_pc_file_contents,lower],compiler->[get_safe],single_pc_file_contents->[get_safe,format_frameworks,append,_concat_if_not_empty,extend,format_framework_paths,replace,join,_generate_dir_lines,rpath_flags]],_concat_if_not_empty->[strip,join],_generate_dir_lines->[relpath,append,startswith,normpath,isabs,enumerate]]
Generate a single PC file. Returns a string representation of the .
I think that this will break previous behavior.
@@ -1248,6 +1248,13 @@ namespace System.Security.Cryptography.X509Certificates.Tests.RevocationTests chain.ChainPolicy.RevocationFlag = X509RevocationFlag.ExcludeRoot; chain.ChainPolicy.VerificationFlags |= X509VerificationFlags.IgnoreCertificateAuthorityRevocationUnknown; + if (PlatformDetection.IsAndroid) + { + // Android stops validation at the first failure, so the end certificate would + // end up marked with PartialChain and RevocationStatusUnknown + chain.ChainPolicy.VerificationFlags |= X509VerificationFlags.AllowUnknownCertificateAuthority; + chain.ChainPolicy.VerificationFlags |= X509VerificationFlags.IgnoreEndRevocationUnknown; + } chainBuilt = chain.Build(endEntity);
[DynamicRevocationTests->[BuildPrivatePki->[BuildPrivatePki]]]
RunWithInconclusiveIntermediateRevocation.
I believe the PartialChain part is out of date.
@@ -74,12 +74,12 @@ public class ObjectDecoder extends LengthFieldBasedFrameDecoder { * if the length of the received object is greater * than this value, {@link StreamCorruptedException} * will be raised. - * @param classLoader the {@link ClassLoader} which will load the class + * @param classResolver the {@link ClassResolver} which will load the class * of the serialized object */ - public ObjectDecoder(int maxObjectSize, ClassLoader classLoader) { + public ObjectDecoder(int maxObjectSize, ClassResolver classResolver) { super(maxObjectSize, 0, 4, 0, 4); - this.classLoader = classLoader; + this.classResolver = classResolver; } @Override
[ObjectDecoder->[decode->[decode,readObject],extractFrame->[slice]]]
Decodes a sequence number from a channel buffer.
Could we keep this constructor? It could simply call `this(maxObjectSize, ClassResolvers.weakCachingResolver(classLoader))`. And then let's backport it to 3.2. I'm tempted to include this in 3.2.7. :-) Thanks a lot for a quality patch!
@@ -613,7 +613,7 @@ public class DefaultJobBundleFactory implements JobBundleFactory { // These will be closed in the reverse creation order: try (AutoCloseable envCloser = environment; AutoCloseable provisioningServer = serverInfo.getProvisioningServer(); - AutoCloseable retrievalServer = serverInfo.getRetrievalServer(); + AutoCloseable retrievalServer = serverInfo.getLegacyRetrievalServer(); AutoCloseable stateServer = serverInfo.getStateServer(); AutoCloseable dataServer = serverInfo.getDataServer(); AutoCloseable controlServer = serverInfo.getControlServer();
[DefaultJobBundleFactory->[createServerInfo->[create],prepare->[PreparedClient],getTimerReceivers->[create],WrappedSdkHarnessClient->[unref->[close],wrapping->[WrappedSdkHarnessClient]],getOutputReceivers->[create],create->[DefaultJobBundleFactory],getEnvironmentExpirationMillis->[getEnvironmentExpirationMillis],createEnvironmentCaches->[EnvironmentCacheAndLock],SimpleStageBundleFactory->[getBundle->[getTimerReceivers->[getTimerReceivers],getId->[getId],split->[split],close->[close],getInputReceivers->[getInputReceivers],getOutputReceivers,prepare,getTimerReceivers],prepare],create]]
Closes the server which is currently running.
Shouldn't we also close `retrievalServer`?
@@ -138,7 +138,7 @@ public class HotRodTestingUtils { return startHotRodServer(manager, host, port, delay, false, builder); } - public static HotRodServer startHotRodServer(EmbeddedCacheManager manager, String host, int port, long delay, Boolean perf, HotRodServerConfigurationBuilder builder) { + public static HotRodServer startHotRodServer(EmbeddedCacheManager manager, String host, int port, long delay, boolean perf, HotRodServerConfigurationBuilder builder) { log.infof("Start server in port %d", port); HotRodServer server = new HotRodServer() { @Override
[HotRodTestingUtils->[startHotRodServerWithDelay->[startHotRodServer],k->[k],withClientListener->[withClientListener,assertStatus],assertHashTopology10Received->[assertHashTopology10Received],startHotRodServer->[createTopologyCacheConfig->[createTopologyCacheConfig],startHotRodServer,serverPort],v->[v,k],assertNoHashTopologyReceived->[assertHashTopology10Received],getDefaultHotRodConfiguration->[serverPort],assertHotRodEquals->[assertHotRodEquals],assertSuccess->[assertStatus,assertSuccess]]]
Start a Hot Rod server.
Thanks, darn scala conversion I missed
@@ -64,6 +64,13 @@ const getPastEvents = memoize( debug('Skipped loading event from IPFS cache.') } + // Paranoia check. + if (!instance.contract || !instance.contract.options.address) { + throw new Error( + `EventCache.getPastEvents failure. Contract ${this.prefix} missing address!` + ) + } + const requests = range(fromBlock, toBlock + 1, batchSize).map(start => limiter.schedule( args => instance.contract.getPastEvents('allEvents', args),
[No CFG could be retrieved]
Reads past events, adding them to the backend from the IPFS cache when available. EventCache defines the interface its backends implement.
There's no `this` in this function. Use `instance`
@@ -1,6 +1,11 @@ module Admin class OverviewController < Admin::ApplicationController layout "admin" - def index; end + def index + @open_abuse_reports_count = + FeedbackMessage + .where(status: "Open", feedback_type: "abuse-reports") + .count + end end end
[OverviewController->[layout]]
Renders the admin overview index, loading the count of open abuse reports.
could we make this a scope instead or a class method on Feedback message? `scope open_abuse_reports` or `def self.open_abuse_report_count` It's might easier to avoid duplication later if we don't keep this logic in the controller. The diff gets a little larger though :wink:
@@ -356,8 +356,8 @@ async function esbuildCompile(srcDir, srcFilename, destDir, options) { let map = result.outputFiles.find(({path}) => path.endsWith('.map')).text; if (options.minify) { - ({code, map} = await minify(code, map)); - map = await massageSourcemaps(map, options); + ({code, map: minifiedMap} = await minify(code)); + map = await massageSourcemaps(map, minifiedMap, options); } await Promise.all([
[No CFG could be retrieved]
Compiles a single bundle from the given input files with esbuild, optionally minifying and remapping source maps.
nit: why the rename?
@@ -436,6 +436,12 @@ class TestMultiScaleRoIAlign: ) assert repr(t) == expected_string + @pytest.mark.parametrize("device", cpu_and_gpu()) + def test_feature_extractor(self, device): + op_obj = TestMultiScaleRoIAlign.make_obj().to(device=device) + return_nodes = get_graph_node_names(op_obj)[1][-1:] + create_feature_extractor(op_obj, return_nodes=return_nodes) + class TestNMS: def _reference_nms(self, boxes, scores, iou_threshold):
[TestPSRoIPool->[expected_fn->[get_slice],test_boxes_shape->[_helper_boxes_shape]],TestBoxIou->[test_iou->[iou_check]],TestMasksToBoxes->[test_masks_box->[masks_box_check,_create_masks,_get_image]],TestBoxArea->[test_box_area->[area_check]],TestDeformConv->[test_forward->[get_fn_args,expected_fn],test_backward->[script_func_no_mask,get_fn_args,script_func],test_wrong_sizes->[get_fn_args],test_autocast->[test_forward],expected_fn->[bilinear_interpolate]],TestRoIAlign->[test_qroi_align_multiple_images->[_make_rois],test_qroialign->[_make_rois],test_autocast->[test_forward],expected_fn->[bilinear_interpolate],test_boxes_shape->[_helper_boxes_shape]],RoIOpTester->[_helper_boxes_shape->[func],test_autocast->[test_forward]],TestGenBoxIou->[test_gen_iou->[gen_iou_check]],TestNMS->[test_nms_cuda->[_create_tensors_with_iou],test_qnms->[_create_tensors_with_iou],test_autocast->[test_nms_cuda],test_nms_ref->[_reference_nms,_create_tensors_with_iou]],TestPSRoIAlign->[expected_fn->[bilinear_interpolate],test_boxes_shape->[_helper_boxes_shape]],TestBoxConversion->[_get_box_sequences],TestRoiPool->[expected_fn->[get_slice],test_boxes_shape->[_helper_boxes_shape]]]
Tests for the MultiScaleRoIAlign repr and feature extraction.
same here, unclear why we do static calls if the base-class defines `make_obj()` as a non-static method.
@@ -141,9 +141,8 @@ func (t *tether) stopReaper() { signal.Reset(syscall.SIGCHLD) // just closing the incoming channel is not going to stop the iteration - // so we use done channel to signal it - log.Debugf("Signalling the child reaper loop") - close(t.done) + // so we use the context cancellation to signal it + t.cancel() log.Debugf("Closing the reapers signal channel") close(t.incoming)
[stopReaper->[End,Reset,Debugf,Begin],childReaper->[Printf,handleSessionExit,RawSyscall,Unlock,Debug,removeChildPid,Warnf,Exited,Notify,ExitStatus,Lock,Errorf,Info,Wait4,Signaled,Debugf],Copy,SplitList,Done,Stat,IsDir,Begin,Sprintf,Contains,End,Start,TrimPrefix,Errorf,Debugf,HasPrefix,Add,Mode]
stopReaper stops the reaper loop.
I thought that was what the `cancel` function did -- close a channel?
@@ -25,6 +25,10 @@ func newEnvInitCmd() *cobra.Command { return errors.New("missing required environment name") } + if _, staterr := os.Stat(workspace.EnvPath(tokens.QName(args[0]))); staterr == nil { + return fmt.Errorf("environment '%v' already exists", args[0]) + } + return lumiEngine.InitEnv(args[0]) }), }
[InitEnv,New,RunFunc]
Initializes a new environment, failing if one with the same name already exists.
Random aside: as I am burning the ships, I keep running into tokens. It feels like a useless abstraction in a world where we don't have a runtime. And in fact, we freely cast (like this) everywhere and seldom check invariants like I had originally intended. It feels like unidiomatic Go. Do you vote to rip and remove those too?
@@ -16,7 +16,7 @@ class Foo < AbstractFoo end end -class SomeClass +class SomeClass # error: The super class `AnotherModule::NestedModule` of `SomeClass` does not derive from `Class` include HelperMethods # ^ usage: HelperMethods extend StaticHelperMethods
[SomeClass->[include,extend],type_alias]
The file filtering logic in LSPQueryBySymbol is used to filter out the type of.
Extremely slight bummer that this error locates here when the actual offense is down below when we reopen the class. (But I guess one sees it down there as well, and I dunno if there's any way to make this not happen.)
@@ -170,9 +170,4 @@ public interface Query<T> { return withOverriddenContext(ImmutableMap.of(QueryContexts.LANE_KEY, lane)); } - - default VirtualColumns getVirtualColumns() - { - return VirtualColumns.EMPTY; - } }
[withLane->[withOverriddenContext],withPriority->[withOverriddenContext]]
withLane returns a copy of the query with the given lane set in its context.
I'm assuming this change has nothing to do with the bug fix correct?
@@ -743,8 +743,13 @@ class MachineManager(QObject): if extruder_stacks: # Add an extra entry for the global stack. - result.append({"stack": global_container_stack, "quality": result[0]["quality"], - "quality_changes": empty_quality_changes}) + global_quality = quality_manager.findQualityByQualityType(quality_type, global_machine_definition, [], global_quality = "True") + + if not global_quality: + global_quality = self._empty_quality_container + + result.append({"stack": global_container_stack, "quality": global_quality, "quality_changes": empty_quality_changes}) + return result ## Determine the quality and quality changes settings for the current machine for a quality changes name.
[MachineManager->[createMachineManager->[MachineManager],getQualityVariantId->[getQualityDefinitionId],setActiveVariant->[setActiveMaterial],_updateQualityContainer->[getQualityDefinitionId,getQualityVariantId],_updateMaterialContainer->[getQualityDefinitionId,getQualityVariantId],renameMachine->[_createUniqueName]]]
Determines the quality and quality_changes for a given quality type.
I think we should add a log entry here. It could happen that we mess things up that all extruders do have a quality profile, but the global is empty.
@@ -253,4 +253,12 @@ public interface ChannelConfig { * to detect the size of a message. */ ChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator); + + /** + * Set the {@link WriteBufferWaterMark} which is used for set the high and low + * water mark of the write buffer. + */ + ChannelConfig setWriteBufferWaterMark(WriteBufferWaterMark writeBufferWaterMark); + + WriteBufferWaterMark getWriteBufferWaterMark(); }
[No CFG could be retrieved]
Sets the message size estimator.
maybe we could `@deprecated` the old methods and `ChannelConfig` ? I think users should always us the new one.
@@ -5882,7 +5882,7 @@ inline void gcode_M17() { KEEPALIVE_STATE(IN_HANDLER); } - static void resume_print(const float& load_length = 0, const float& initial_extrude_length = 0, int max_beep_count = 0) { + static void resume_print(const float& load_length = 0, const float& initial_extrude_length = 0, int8_t max_beep_count = 0) { bool nozzle_timed_out = false; if (!move_away_flag) return;
[No CFG could be retrieved]
Resumes a paused print, optionally loading filament and beeping, after checking whether the nozzle timed out.
Should the &s be moved to be next to the variable name since you made that change to pause_print?
@@ -410,6 +410,10 @@ var nodeProps = ` "deprecations_count": { "type": "integer", "null_value": 0 + }, + "created": { + "type": "date", + "format": "strict_date_optional_time||epoch_millis" } }`
[Sprintf]
Defines the Elasticsearch field mappings for node-state properties.
Adding the created date to be able to count how many nodes existed at a past time period.
@@ -22,6 +22,8 @@ namespace Dynamo.Interfaces List<string> RecentFiles { get; set; } List<string> BackupFiles { get; set; } List<string> PackageDirectoriesToUninstall { get; set; } + List<string> CustomNodeFolders { get; set; } + List<string> PackageFolders { get; set; } /// <summary> /// Call this method to serialize PreferenceSettings given the output
[No CFG could be retrieved]
This method serializes the PreferenceSettings instance into a file.
Is it not better to use `IEnumerable` in interface definitions?
@@ -94,6 +94,8 @@ public abstract class AbstractPort implements Port { private final Lock readLock = rwLock.readLock(); private final Lock writeLock = rwLock.writeLock(); + protected PublicPort publicPort; + public AbstractPort(final String id, final String name, final ProcessGroup processGroup, final ConnectableType type, final ProcessScheduler scheduler) { this.id = requireNonNull(id); this.name = new AtomicReference<>(requireNonNull(name));
[No CFG could be retrieved]
Creates a new instance of AbstractPort.
There exists a getter and setter for this already. Should keep the member variable private.
@@ -175,6 +175,7 @@ public class ExecRemoteInterpreterProcess extends RemoteInterpreterManagedProces @Override public void waitForReady(int timeout) { + this.waitForThread = Thread.currentThread(); synchronized (this) { long startTime = System.currentTimeMillis(); long timeoutTime = startTime + timeout;
[ExecRemoteInterpreterProcess->[isRunning->[isRunning],getErrorMessage->[getErrorMessage],stop->[stop],InterpreterProcessLauncher->[onProcessFailed->[onProcessFailed],onProcessRunning->[onProcessRunning]],processStarted->[processStarted]]]
Wait for the ready state.
Wouldn't it be better to be inside the synchronized block? And is it okay to not assign `this.waitForThread` to null again?
@@ -185,9 +185,12 @@ func (m *medianFetcher) Fetch() (decimal.Decimal, error) { } } - errorRate := float64(len(fetchErrors)) / float64(len(m.fetchers)) + fetchersCount := len(m.fetchers) + fetchErrorsCount := len(fetchErrors) + errorRate := float64(fetchErrorsCount) / float64(fetchersCount) if errorRate >= 0.5 { - return decimal.Decimal{}, errors.Wrap(multierr.Combine(fetchErrors...), "majority of fetchers in median failed") + err := errors.Wrap(multierr.Combine(fetchErrors...), fmt.Sprintf("at least 50%% of the fetchers in median failed (%d/%d)", fetchErrorsCount, fetchersCount)) + return decimal.Decimal{}, err } sort.Slice(prices, func(i, j int) bool {
[Fetch->[Slice,Wrap,Fetch,Set,Add,Error,Post,New,LessThan,Errorf,Combine,WithLabelValues,NewFromInt,NewDecoder,ErrorIfCalling,NewReader,Sprintf,Result,Decode,Debugw,Div,String,Float64],String->[Sprintf,Join,String],InstrumentRoundTripperDuration,Sprintf,Valid,New,Duration,NewID,TrimSpace]
Fetch fetches prices from all of the fetchers and returns the median, failing when at least half of the fetchers error.
Why not put the rate in the error message? Instead of saying `at least 50% of..` just say `67% of fetchers failed`
@@ -58,6 +58,15 @@ class NamespaceConfig(object): def __setattr__(self, name, value): setattr(self.namespace, name, value) + def __str__(self): + s = list() + for k, v in self.__dict__.items(): + s.append("{}='{}'".format(k, v)) + return "< NamespaceConfig {} >".format(" ".join(s)) + + def __repr__(self): + return self.__str__() + @property def server_path(self): """File path based on ``server``."""
[check_config_sanity->[,enforce_domain_sanity,ConfigurationError],NamespaceConfig->[server_path->[,urlparse],renewal_hooks_dir->[join],__getattr__->[getattr],in_progress_dir->[join],__init__->[check_config_sanity,abspath,__setattr__],renewal_configs_dir->[join],backup_dir->[join],key_dir->[join],renewal_pre_hooks_dir->[join],temp_checkpoint_dir->[join],renewal_post_hooks_dir->[join],__setattr__->[setattr],__deepcopy__->[deepcopy,type],live_dir->[join],accounts_dir->[accounts_dir_for_server_path],accounts_dir_for_server_path->[underscores_for_unsupported_characters_in_path,join],default_archive_dir->[join],renewal_deploy_hooks_dir->[join],csr_dir->[join]],implementer]
Set attribute on the object.
Mainly a choice of style and speed, `s = []` is faster. Generally, literals are better for speed and probably also more pythonic.
@@ -1755,6 +1755,10 @@ class Archiver: The version of borg, only major, minor and patch version, e.g.: 1.0.8 + If literal curly braces need to be used, double them for escaping:: + + borg create /path/to/repo::{{literal_text}} + Examples:: borg create /path/to/repo::{hostname}-{user}-{utcnow} ...
[main->[get_args,run,Archiver],with_repository->[decorator->[wrapper->[argument]]],Archiver->[do_prune->[print_error,write],do_mount->[print_error],do_check->[print_error],do_extract->[build_filter,print_warning,build_matcher],_list_archive->[_list_inner->[write],_list_inner,build_matcher],do_debug_get_obj->[write],run->[_setup_topic_debugging,prerun_checks,_setup_implied_logging],do_debug_dump_archive->[output->[write,do_indent],output],do_recreate->[print_error,write,build_matcher],_list_repository->[write],do_key_export->[print_error],do_upgrade->[write],do_benchmark_crud->[measurement_run,test_files],_info_archives->[format_cmdline],_process->[print_file_status,_process,print_warning],do_debug_dump_archive_items->[write],build_parser->[process_epilog],do_key_import->[print_error],do_diff->[contents_changed->[sum_chunk_size,fetch_and_compare_chunks],compare_archives->[update_hardlink_masters->[is_hardlink_master],compare_or_defer->[update_hardlink_masters,hardlink_master_seen,compare_items],hardlink_master_seen,print_output,update_hardlink_masters,compare_items,compare_or_defer],fetch_and_compare_chunks->[compare_chunk_contents],compare_content->[sum_chunk_size,contents_changed],compare_mode->[get_mode],compare_owner->[get_owner],compare_items->[compare_directory,get_mode,has_hardlink_master,compare_link,compare_content,compare_mode,compare_owner],build_matcher,compare_archives,print_warning],do_debug_dump_repo_objs->[write],parse_args->[parse_args,build_parser,preprocess_args],do_list->[write->[write]],do_change_passphrase_deprecated->[do_change_passphrase],do_create->[create_inner->[print_file_status,print_warning],create_inner],with_repository],main]
Documents the placeholder variables available to borg create (e.g. {hostname}, {version}), including how to escape literal curly braces.
hmm, does a (non-coding) reader understand "literal" and "escaping"?
@@ -88,6 +88,9 @@ def histogram_fixed_width_bins(values, # map tensor values within the open interval value_range to {0,.., nbins-1}, # values outside the open interval will be zero or less, or nbins or more. indices = math_ops.floor(nbins_float * scaled_values, name='indices') + + if value_range[0].numpy() == value_range[1].numpy(): + indices=tf.where(math_ops.is_nan(indices), tf.ones_like(indices), indices) # Clip edge cases (e.g. value = value_range[1]) or "outliers." indices = math_ops.cast(
[histogram_fixed_width_bins->[clip_by_value,cast,truediv,name_scope,reshape,convert_to_tensor,floor,shape],histogram_fixed_width->[name_scope,_histogram_fixed_width],tf_export]
Bins the given values for a histogram, mapping each element of values to a bin index in [0, nbins).
tensor.numpy() is only available when eager execution is enabled (not while building graph functions) So please write this using array_ops.where or control_flow_ops.cond
@@ -607,8 +607,10 @@ export class FixedLayer { if (!this.transfer_ || this.fixedLayer_) { return this.fixedLayer_; } - this.fixedLayer_ = this.ampdoc.win.document.createElement('body'); - this.fixedLayer_.id = '-amp-fixedlayer'; + const doc = this.ampdoc.win.document; + const bodyId_ = doc.getElementsByTagName('body')[0].getAttribute('id'); + this.fixedLayer_ = doc.createElement('body'); + this.fixedLayer_.id = bodyId_ || '-amp-fixedlayer'; setStyles(this.fixedLayer_, { position: 'absolute', top: 0,
[No CFG could be retrieved]
Lazily creates the fixed-layer body element used to host transferred fixed-position elements.
Remove `_`. It should be instead `const bodyId = doc.body.id;`
@@ -14,6 +14,10 @@ namespace Dynamo.Graph.Nodes /// </summary> public enum PortType { Input, Output }; + /// <summary> + /// PortModel represents Dynamo ports. Dynamo port is little square on node. + /// User can connect and disconnect ports and therefore change value of the node. + /// </summary> public class PortModel : ModelBase { #region private fields
[PortModel->[Disconnect->[ValidateConnections,Contains,Count,Remove,OnPortDisconnected],SetPortData->[RaisePropertyChanged,DefaultValue],Connect->[Add,OnPortConnected],DestroyConnectors->[Any,Delete],OnPortConnected->[RaisePortConnectedEvent],OnPortDisconnected->[RaisePortDisconnectedEvent],PortHeightInPixels,RaisePropertyChanged,X,Output,Height,Input,SetPortData,GetPortModelIndex,Empty,DefaultValue,NickName,GetPortVerticalOffset,Y,LineIndex,Abs,ToolTipString,DefaultValueDisabled,Width]]
PortModel represents a port on a Dynamo node: the small square users connect and disconnect to change the node's value.
PortModel represents Dynamo ports. Remove : Dynamo port is little square on node
@@ -397,12 +397,14 @@ func printComment(w io.Writer, comment, deprecationMessage, indent string) { fmt.Fprintf(w, "%s */\n", indent) } -func (mod *modContext) genPlainType(w io.Writer, name, comment string, properties []*schema.Property, input, readonly bool, level int) { +func (mod *modContext) genPlainType(w io.Writer, name, comment string, + properties []*schema.Property, input, readonly, plainType bool, level int) error { indent := strings.Repeat(" ", level) printComment(w, comment, "", indent) fmt.Fprintf(w, "%sexport interface %s {\n", indent, name) + defaults := make([]string, 0) for _, p := range properties { printComment(w, p.Comment, p.DeprecationMessage, indent+" ")
[sdkImports->[getRelativePath],typeAst->[resourceType,details,typeAst,objectType],getImportsForResource->[getTypeImports,getTypeImportsForResource],resourceType->[tokenToModName,namingContext],genResourceModule->[resourceFileName],getTypeImportsForResource->[getTypeImports],objectType->[tokenToModName,namingContext],genNamespace->[genType,details,genNamespace],hasEnums->[hasEnums],configGetter->[typeString],genConfig->[getImports,genHeader,getDefaultValue,configGetter,typeString],genFunctionOutputVersion->[genPlainType],typeString->[typeAst],genResource->[getConstValue,genAlias,getDefaultValue,genPlainType,String,typeString],gen->[genResource,genTypes,sdkImports,genFunction,getImportsForResource,isReservedSourceFileName,getImports,genHeader,add,genConfig,resourceFileName,String],genType->[details,genPlainType],genFunction->[genPlainType],genIndex->[genHeader,getRelativePath,String,sdkImports],genTypes->[genHeader,String,sdkImports,getImports],genPlainType->[typeString],genEnums->[hasEnums,genEnum],gen,details,add,String,typeString]
genPlainType generates the plain type definition for the given properties.
Would it be possible to factor out the `defaults` generation into a separate func? I'm squinting at this and I don't see how the `defaults` computation and code emission does not interact with the previous func much if at all. Also.. `genPlainType` had too many bool parameters before the change, but now after the change with `plainType: bool` parameter it does strike me as very odd, if you're doing `genPlainType`, what does `plainType: false` mean now?
@@ -16,14 +16,14 @@ from numba.core.compiler_lock import global_compiler_lock class _CFuncCompiler(_FunctionCompiler): def _customize_flags(self, flags): - flags.set('no_cpython_wrapper', True) - flags.set('no_cfunc_wrapper', False) + flags.no_cpython_wrapper = True + flags.no_cfunc_wrapper = False # Disable compilation of the IR module, because we first want to # add the cfunc wrapper. - flags.set('no_compile', True) + flags.no_compile = True # Object mode is not currently supported in C callbacks # (no reliable way to get the environment) - flags.set('enable_pyobject', False) + flags.enable_pyobject = False if flags.force_pyobject: raise NotImplementedError("object mode not allowed in C callbacks") return flags
[CFunc->[_compile_uncached->[compile],__init__->[_CFuncCompiler]]]
Customize flags.
At some point, I think some of this should be renamed so that double-negatives aren't present e.g. `flags.cpython_wrapper` is `True` if it's needed, `False` if it's not.
@@ -147,4 +147,14 @@ public class ClientCredentialsGrantType extends AbstractGrantType implements Ini { this.tokenManager = tokenManager; } + + public ProxyConfig getProxyConfig() + { + return proxyConfig; + } + + public void setProxyConfig(ProxyConfig proxyConfig) + { + this.proxyConfig = proxyConfig; + } }
[ClientCredentialsGrantType->[shouldRetry->[refreshAccessToken,getRefreshTokenWhen],refreshAccessToken->[refreshAccessToken],getRefreshTokenWhen->[getRefreshTokenWhen]]]
Sets the token manager.
This is what I mean.
@@ -61,7 +61,15 @@ export function createShadowRoot(hostElement) { // Native support. const shadowDomSupported = getShadowDomSupportedVersion(); if (shadowDomSupported == ShadowDomVersion.V1) { - return hostElement.attachShadow({mode: 'open'}); + const shadowRoot = hostElement.attachShadow({mode: 'open'}); + if (!shadowRoot.styleSheets) { + Object.defineProperty(shadowRoot, 'styleSheets', { + get: function() { + return new StyleSheetListImpl(shadowRoot); + }, + }); + } + return shadowRoot; } else if (shadowDomSupported == ShadowDomVersion.V0) { return hostElement.createShadowRoot(); }
[No CFG could be retrieved]
Creates a shadow root for the specified host element, polyfilling the styleSheets property when the native implementation lacks it.
Unfortunately, everywhere we use this property is via array access. E.g. `x.styleSheets[0]`. I'm thinking maybe let's use array instead? Or any other idea?
@@ -59,6 +59,8 @@ namespace Dynamo.DSEngine liveRunnerServices = new LiveRunnerServices(dynamoModel, this, geometryFactoryFileName); liveRunnerServices.ReloadAllLibraries(libraryServices.ImportedLibraries); + codeCompletionServices = new CodeCompletionServices(LiveRunnerCore); + astBuilder = new AstBuilder(dynamoModel, this); syncDataManager = new SyncDataManager();
[EngineController->[ImportLibrary->[ImportLibrary],UpdateGraph->[UpdateGraph],ShowRuntimeWarnings->[GetRuntimeWarnings],ShowBuildWarnings->[GetBuildWarnings],GetRuntimeWarnings->[GetRuntimeWarnings],LibraryLoaded->[GetFunctionGroups],ConvertNodesToCode->[ConvertNodesToCode],GetBuildWarnings->[GetBuildWarnings],Dispose->[Dispose],GetFunctionGroups->[GetFunctionGroups]]]
Initializes the EngineController's live runner services and reloads all imported libraries.
Just as we instantiate it here, I would recommend setting it to `null` in `EngineController.Dispose` method (this way if some code happens to access it after `EngineController` is disposed of, then things will crash and we'll find out problems in a more visible way.
@@ -389,9 +389,8 @@ public abstract class Label extends Actionable implements Comparable<Label>, Mod @Exported public List<AbstractProject> getTiedJobs() { List<AbstractProject> r = new ArrayList<>(); - for (AbstractProject<?,?> p : Jenkins.get().allItems(AbstractProject.class)) { - if(p instanceof TopLevelItem && this.equals(p.getAssignedLabel())) - r.add(p); + for (AbstractProject<?,?> p : Jenkins.get().allItems(AbstractProject.class, i -> i instanceof TopLevelItem && this.equals(i.getAssignedLabel()))) { + r.add(p); } r.sort(Items.BY_FULL_NAME); return r;
[Label->[contains->[contains],getDescription->[getNodes,getClouds],equals->[matches],hashCode->[hashCode],getTotalConfiguredExecutors->[getNodes],doChildrenContextMenu->[getNodes],isEmpty->[isEmpty],isSelfLabel->[getNodes],onBinary->[accept],isOffline->[getNodes,isOffline],getSortedNodes->[getNodes,NodeSorter],getSearchUrl->[getUrl],compareTo->[compareTo],isAssignable->[getNodes],onParen->[accept],matches->[equals,matches],getIdleExecutors->[getNodes],onNot->[accept],getBusyExecutors->[getNodes],getTotalExecutors->[getNodes],ConverterImpl->[marshal->[getExpression]],getNodes->[getNodes,matches],listAtoms->[accept],toString->[toString,getDisplayName],getTiedJobCount->[matches]]]
Get the tied jobs.
I don't think it improves performance that much, but you could stream the result from allItems() and collect it into a list, avoiding the for loop.
@@ -114,17 +114,6 @@ public final class TableElements implements Iterable<TableElement> { throwOnDuplicateNames(keyColumns, "KEY"); throwOnDuplicateNames(valueColumns, "non-KEY"); - final long numKeyColumns = keyColumns.size(); - - if (numKeyColumns > 1) { - throw new KsqlException("KSQL does not yet support multiple KEY columns"); - } - - if (numKeyColumns == 1 - && keyColumns.get(0).getType().getSqlType().baseType() != SqlBaseType.STRING) { - throw new KsqlException("KEY columns must be of type STRING: " + keyColumns.get(0).getName()); - } - final ImmutableList.Builder<TableElement> builder = ImmutableList.builder(); builder.addAll(keyColumns);
[TableElements->[stream->[stream],equals->[equals],splitByElementType->[of],build->[of,TableElements,build],toString->[toString],iterator->[iterator]]]
Build a TableElements from the given elements.
These checks here are not needed?
@@ -120,7 +120,7 @@ func (eb *ethBroadcaster) monitorEthTxs() { for { pollDBTimer := time.NewTimer(databasePollInterval) - keys, err := eb.store.Keys() + keys, err := eb.store.SendingKeys() if err != nil { logger.Error(errors.Wrap(err, "monitorEthTxs failed getting key"))
[monitorEthTxs->[Stop],tryAgainWithHigherGasPrice->[handleInProgressEthTx]]
monitorEthTxs polls the database for sending keys and monitors their transactions.
Did you also check EthConfirmer? Might be worth grepping the whole codebase for uses of `Keys`. Or, even better, remove `Keys()` method entirely and create two different methods to replace it - `AllKeys` and `SendKeys`. This seems safest.
@@ -485,7 +485,7 @@ ECPKPARAMETERS *EC_GROUP_get_ecpkparameters(const EC_GROUP *group, ECPARAMETERS_free(ret->value.parameters); } - if (EC_GROUP_get_asn1_flag(group)) { + if (EC_GROUP_get_asn1_flag(group) == OPENSSL_EC_NAMED_CURVE) { /* * use the asn1 OID to describe the elliptic curve parameters */
[No CFG could be retrieved]
Gets the elliptic curve parameters from an EC_GROUP.
Shouldn't this be `&` not `==` here and throughout? Existing code uses both forms: `==` once and `&` twice.
@@ -34,6 +34,11 @@ function addAdiantUnit(hostname, global, data) { ins.setAttribute('data-tag-type', 1); global.document.getElementById('c').appendChild(ins); + ins.parentNode.addEventListener( + 'eventAdbladeRenderStart', + global.context.renderStart() + ); + // run our JavaScript code to display the ad unit writeScript(global, 'https://' + hostname + '/js/ads/async/show.js'); }
[No CFG could be retrieved]
Adds an adiant unit to the page.
for better readability, could you indent extra two spaces for this line and the following line, that's what we do if they should originally fall in one line. aka, here we should make them indented 6 spaces here.
@@ -209,7 +209,16 @@ public class PageCursorProviderImpl implements PageCursorProvider { } page.open(); final long startedReadPage = System.nanoTime(); - List<PagedMessage> pgdMessages = page.read(storageManager); + List<PagedMessage> pgdMessages; + if (indexCache == null) { + pgdMessages = page.read(storageManager); + } else { + TreeMap<Integer, Integer> messageNumberToOffset = new TreeMap<>(); + pgdMessages = page.read(storageManager, messageNumberToOffset); + indexCache.setMessageIndex(messageNumberToOffset); + indexCache.setNumberOfMessages(pgdMessages.size()); + indexCache.setPage(page); + } final long elapsedReadPage = System.nanoTime() - startedReadPage; if (elapsedReadPage > PAGE_READ_TIMEOUT_NS) { logger.warnf("Page::read for pageNr=%d on cursor %s tooks %d ms to read %d bytes", pageId,
[PageCursorProviderImpl->[resumeCleanup->[scheduleCleanup],flushExecutors->[waitForFuture,flushExecutors],getMessage->[getMessage],onPageModeCleared->[onPageModeCleared],onDeletePage->[cloneSubscriptions,onDeletePage],stop->[stop],processReload->[processReload],getPageCache->[getMessage],finishCleanup->[close]]]
Reads a page's messages from the storage manager, populating the message-index cache when one is present.
If this has to stay, Can we use a primitive map variant.
@@ -128,6 +128,6 @@ class BlobStorageUpload(Task): client = blob_service.get_blob_client(container=container, blob=blob_name) - client.upload_blob(data) + client.upload_blob(data,overwrite=True) return blob_name
[BlobStorageDownload->[__init__->[super],run->[download_blob,get_blob_client,ValueError,Secret,from_connection_string],defaults_from_attrs],BlobStorageUpload->[__init__->[super],run->[upload_blob,uuid4,get_blob_client,str,ValueError,Secret,from_connection_string],defaults_from_attrs]]
Run method for the BlobStorageUpload task: uploads the given data to an Azure Blob Storage container.
Hey @4bhishek10 , given that the default behavior of Azure Storage is to not overwrite, I think we want the default to not, but be configurable by users. Could you try making the function accept this argument and then passing it to this call? And then add the relevant docstring? Also for any given task, we try to make the init and run methods match so `overwrite` would need to be added to the init method also.
@@ -295,11 +295,13 @@ class StaticRNN(object): """ StaticRNN class. - The StaticRNN can process a batch of sequence data. The length of each - sample sequence must be equal. The StaticRNN will have its own parameters - like inputs, outputs, memories. **Note that the first dimension of inputs - represents sequence length, and all the sequence length of inputs must be - the same. And the meaning of each axis of input and output are the same.** + The StaticRNN can process a batch of sequence data. The first dimension of inputs + represents sequence length, the length of each input sequence must be equal. + StaticRNN will unfold sequence into time steps, user need to define how to process + each time step during the :code:`with` step. + + Args: + name (str, optional): Please refer to :ref:`api_guide_Name`, Default None. Examples: .. code-block:: python
[Switch->[default->[ConditionalBlock,ConditionalBlockGuard],case->[ConditionalBlock,ConditionalBlockGuard]],IfElseBlockGuard->[__exit__->[__exit__],__enter__->[__enter__],__init__->[block]],DynamicRNN->[_parent_block_->[block],block->[array_write,block,increment,less_than,array_to_lod_tensor],static_input->[_assert_in_rnn_block_,shrink_memory],update_memory->[_assert_in_rnn_block_],__init__->[While],output->[_assert_in_rnn_block_,array_write],step_input->[_assert_in_rnn_block_,array_read],memory->[_assert_in_rnn_block_,memory,shrink_memory,array_read]],ConditionalBlock->[complete->[output,block],block->[ConditionalBlockGuard]],StaticRNN->[_complete_op->[output,_parent_block],step->[BlockGuardWithCompletion],output->[step_output],step_input->[_assert_in_rnn_block_],step_output->[_assert_in_rnn_block_],memory->[_assert_in_rnn_block_,StaticRNNMemoryLink,memory]],While->[_complete->[output,block],block->[WhileGuard]],IfElse->[_parent_block->[block],true_block->[IfElseBlockGuard],__init__->[ConditionalBlock],output->[_parent_block],__call__->[merge_lod_tensor],false_block->[IfElseBlockGuard],input->[_parent_block]]]
This class is used to create a link between two memory cells of a StaticRNN. StaticRNN unfolds the sequence into time steps and returns the result.
A tiny issue. "user need to" should be changed to "user needs to" or "users need to" because "need" is the primary verb here. (When "need" is an auxiliary verb, we can say "user need define xxx", but infinitive "need to define" has to be a third person form here)
@@ -133,6 +133,9 @@ func (e *loginProvision) Run(ctx *Context) error { e.G().KeyfamilyChanged(e.arg.User.GetUID()) + // check to make sure local files stored correctly + e.verifyLocalStorage() + return nil }
[gpgImportKey->[gpgClient],makeEldestDevice->[makeDeviceKeys,makeDeviceWrapArgs],matchingGPGKeys->[gpgPrivateIndex],tryPGP->[pgpProvision],tryGPG->[makeDeviceKeysWithSigner],chooseDevice->[paper,deviceWithType]]
Run executes the loginProvision command.
Shouldn't we error out if this doesn't work?
@@ -63,7 +63,7 @@ import static org.apache.nifi.processor.util.list.AbstractListProcessor.REL_SUCC public class ListedEntityTracker<T extends ListableEntity> { private final ObjectMapper objectMapper = new ObjectMapper(); - private volatile Map<String, ListedEntity> alreadyListedEntities; + protected volatile Map<String, ListedEntity> alreadyListedEntities; private static final String NOTE = "Used by 'Tracking Entities' strategy."; public static final PropertyDescriptor TRACKING_STATE_CACHE = new PropertyDescriptor.Builder()
[ListedEntityTracker->[trackEntities->[fetchListedEntities,persistListedEntities],fetchListedEntities->[getCacheKey],persistListedEntities->[getCacheKey],clearListedEntities->[getCacheKey]]]
Creates a ListedEntityTracker which tracks the list of entities, persisted as a gzipped JSON string.
The implementation is always a ConcurrentHashMap, which is thread-safe. Does it need to be volatile?
@@ -171,10 +171,10 @@ class Tag extends ApiEntity /** * Set creator * - * @param \Sulu\Component\Security\UserInterface $creator + * @param \Sulu\Component\Security\Authentication\UserInterface $creator * @return Tag */ - public function setCreator(\Sulu\Component\Security\UserInterface $creator = null) + public function setCreator(\Sulu\Component\Security\Authentication\UserInterface $creator = null) { $this->creator = $creator;
[No CFG could be retrieved]
Sets the creator of the tag.
This should be "used". Is this a deliberate omission?
@@ -4491,6 +4491,13 @@ def program_guard(main_program, startup_program=None): Layer functions in the Python `"with"` block will append operators and variables to the new main programs. + Args: + main_program(Program): New main program inside `"with"` statement. + startup_program(Program): New startup program inside `"with"` statement. + None means not changing startup program, default_startup_program + is still used. + Default: None. + Examples: .. code-block:: python
[cuda_places->[is_compiled_with_cuda,_cuda_ids],Program->[_construct_from_desc->[Program,_sync_with_cpp,Block],__repr__->[__str__],parse_from_string->[Program,_sync_with_cpp,Block],to_string->[_debug_string_,to_string],_version->[_version],_prune_with_input->[Program,_sync_with_cpp,Block],_copy_param_info_from->[global_block],__init__->[Block],_create_block->[block,Block,current_block],_rollback->[current_block],_copy_data_info_from->[global_block],num_blocks->[num_blocks],_prune->[Program,_sync_with_cpp,Block],_sync_with_cpp->[Block,_sync_with_cpp,num_blocks],_inference_optimize->[type,_remove_var,has_attr,Program,_set_attr,Block,name,op,_remove_op,_sync_with_cpp],clone->[Program,_sync_with_cpp,_copy_param_info_from,Block],__str__->[to_string]],program_guard->[switch_main_program,switch_startup_program],_dygraph_only_->[__impl__->[in_dygraph_mode]],name_scope->[parent,in_dygraph_mode,child],NameScope->[child->[NameScope]],IrNode->[var->[var],append_output->[append_output],node_type->[node_type],remove_output->[remove_output],id->[id],append_input->[append_input],is_ctrl_var->[is_ctrl_var],is_var->[is_var],remove_input->[remove_input],clear_inputs->[clear_inputs],name->[name],op->[op],inputs->[IrNode],outputs->[IrNode],is_op->[is_op],clear_outputs->[clear_outputs]],is_compiled_with_cuda->[is_compiled_with_cuda],IrGraph->[create_op_node->[create_op_node,set_type,_update_desc_attr,name,IrOpNode],update_output_link->[append_output,remove_output,rename_output,append_input,remove_input,name],create_var_node->[create_var_node,set_type,set_shape,IrVarNode],all_var_nodes->[is_var,IrVarNode],build_adjacency_list->[build_adjacency_list,IrNode],_update_desc_attr->[_set_attr],resolve_hazard->[resolve_hazard,is_op,op],_find_node_by_name->[name],all_persistable_nodes->[var,is_var,IrVarNode],link_to->[append_output,append_input],all_op_nodes->[is_op,IrOpNode],has_circle->[has_circle],update_input_link->[append_output,remove_output,append_input,remove_input,name,rename_input],creat
e_persistable_node->[set_type,set_shape,IrVarNode],create_var_node_from_desc->[create_var_node,IrVarNode],draw->[safe_remove_nodes,_convert_to_pdf,all_var_nodes,is_ctrl_var,all_op_nodes],create_op_node_from_desc->[create_op_node,IrOpNode],graph_num->[graph_num],topology_sort->[topology_sort,IrNode],all_nodes->[IrNode],clone->[IrGraph,clone]],Parameter->[__str__->[to_string],to_string->[to_string],__init__->[__init__]],Variable->[type->[type,in_dygraph_mode],lod_level->[lod_level,in_dygraph_mode],persistable->[in_dygraph_mode,persistable],_detectContinuesSlice->[_slice_indices],to_string->[_debug_string_,in_dygraph_mode],_sliceVar->[_cloneVar],shape->[in_dygraph_mode,shape],__init__->[convert_np_dtype_to_dtype_,_dygraph_tracer,in_dygraph_mode],stop_gradient->[in_dygraph_mode],backward->[_dygraph_tracer,in_dygraph_mode],_concatVar->[_cloneVar],set_value->[_current_expected_place,numpy,_var_base_to_np],dtype->[dtype,in_dygraph_mode],name->[name,in_dygraph_mode],__getitem__->[get_new_list_tensor->[fill_constant],get_new_list_tensor,contain_var,fill_constant],_sliceAndConcatVar->[_sliceVar,_cloneVar,_slice_indices,_concatVar],_reconstructSliceinfo->[_detectEllipsis],detach->[in_dygraph_mode]],OpProtoHolder->[update_op_proto->[get_all_op_protos],__init__->[get_all_op_protos]],_full_name_scope->[parent,name],get_all_op_protos->[get_all_op_protos],cuda_pinned_places->[is_compiled_with_cuda,cuda_pinned_places,_cpu_num],cpu_places->[_cpu_num],IrVarNode->[type->[var],persistable->[var],set_shape->[var],shape->[var],__init__->[is_var],dtype->[var]],_get_var->[default_main_program,global_block],_dygraph_not_support_->[__impl__->[in_dygraph_mode]],IrOpNode->[set_attr->[_update_desc_attr],output_arg_names->[op],set_type->[op],rename_output->[op],__init__->[is_op],output->[op],_update_desc_attr->[_set_attr,op],inputs->[IrVarNode],outputs->[IrVarNode],input_arg_names->[op],rename_input->[op],input->[op]],dtype_is_floating->[convert_np_dtype_to_dtype_],Operator->[attr_type->[attr_typ
e],to_string->[_debug_string_],_block_attr->[_block_attr_id],output->[output],_rename_input->[_rename_input],has_attr->[has_attr],_blocks_attr_ids->[_blocks_attr_ids],output_names->[output_names],all_attrs->[attr,attr_type,_block_attr,_blocks_attr],_update_desc_attr->[_set_attr],__str__->[to_string],type->[type,in_dygraph_mode],output_arg_names->[output_arg_names],__init__->[type,in_dygraph_mode,_full_name_scope,find_name,instance],attr->[attr],attr_names->[attr_names],input->[input],_block_attr_id->[_block_attr_id],_rename_output->[_rename_output],input_names->[input_names],input_arg_names->[input_arg_names]],Block->[var->[type],_remove_var->[_remove_var],create_parameter->[_is_inited_by],_find_var_recursive->[var],_clone_variable->[create_var],to_string->[_debug_string_,to_string],append_op->[Operator,_dygraph_tracer,append_op,in_dygraph_mode],_copy_param_info_from->[iter_parameters],_set_forward_block_idx->[_set_forward_block_idx],_rename_var->[has_var,var,type,shape,_rename_var,dtype,Variable],_insert_op->[Operator,_insert_op],_prepend_op->[Operator,_dygraph_tracer,in_dygraph_mode,_prepend_op],_sync_with_cpp->[has_var,type,Operator,name,create_var],_remove_op->[_remove_op],create_var->[Variable],_var_recursive->[_find_var_recursive],__str__->[to_string]],load_op_library->[instance,load_op_library],NameScope,Program]
Context manager for program guarding.
I guess this can be optional?
@@ -227,9 +227,7 @@ define([ */ Rectangle.fromCartographicArray = function(cartographics, result) { //>>includeStart('debug', pragmas.debug); - if (!defined(cartographics)) { - throw new DeveloperError('cartographics is required.'); - } + Check.typeOf.object(cartographics, 'cartographics'); //>>includeEnd('debug'); var west = Number.MAX_VALUE;
[No CFG could be retrieved]
Creates the smallest possible rectangle that encloses all positions in the provided cartographics array.
`cartographics` is an array
@@ -38,7 +38,7 @@ def sync(repo_name, importer_name): """ importer = models.Importer.objects.get(name=importer_name, repository__name=repo_name).cast() if not importer.feed_url: - raise ValueError_("An importer must have a 'feed_url' attribute to sync.") + raise ValueError(_("An importer must have a 'feed_url' attribute to sync.")) with storage.working_dir_context() as working_dir: importer.working_dir = working_dir
[sync->[working_dir_context,ValueError_,get,sync],delete->[filter],shared_task]
Syncs the given importer.
I would have sworn this was fixed.