patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -231,7 +231,7 @@ func (s *evictLeaderScheduler) scheduleOnce(cluster opt.Cluster) []*operator.Ope } op, err := operator.CreateTransferLeaderOperator(EvictLeaderType, cluster, region, region.GetLeader().GetStoreId(), target.GetID(), operator.OpLeader) if err != nil { - log.Debug("fail to create evict leader operator", zap.Error(err)) + log.Debug("failed", zap.Error(errs.ErrCreateOperator.FastGenByArgs("evict leader"))) continue } op.SetPriorityLevel(core.HighPriority)
[EncodeConfig->[EncodeConfig],Schedule->[uniqueAppend,scheduleOnce,GetName],ListConfig->[Clone],DeleteConfig->[Persist,mayBeRemoveStoreFromConfig],UpdateConfig->[getRanges,Persist,BuildWithArgs],ServeHTTP->[ServeHTTP],scheduleOnce->[GetName]]
scheduleOnce creates a new evict leader operator for each region in the cluster.
Original error should be wrapped by "cause" in zap.
@@ -16,6 +16,7 @@ // TODO(malteubl) Move somewhere else since this is not an ad. +import {dev} from '../src/log'; import {loadScript} from './3p'; import {setStyles} from '../src/style'; import {startsWith} from '../src/string';
[No CFG could be retrieved]
Produces a Twitter API object for a given window. The export function.
I don't think you can import `log` here, because this is running in a 3p window context. I would say just typecast.
@@ -123,12 +123,12 @@ public class UnitsDrawer implements IDrawable { null); } break; - case NONE: - // This Method draws the unit Image - graphics.drawImage(img.get(), placementPoint.x - bounds.x, placementPoint.y - bounds.y, null); - break; } } + else{ + // This Method draws the unit Image + graphics.drawImage(img.get(), placementPoint.x - bounds.x, placementPoint.y - bounds.y, null); + } // more then 1 unit of this category if (count != 1) { final int stackSize = mapData.getDefaultUnitsStackSize();
[UnitsDrawer->[draw->[getDefaultUnitsStackSize,getWidth,getHeight,drawImage,displayHitDamage,getSize,displayFactoryDamage,getImage,getSmallFlag,isDamageFromBombingDoneToUnitsInsteadOfTerritories,getUnitImageWidth,valueOf,setFont,getPlayerID,getUnitCounterOffsetHeight,isPresent,IllegalStateException,drawString,getFlag,getUnitType,fillRect,match,getName,getUnitCounterOffsetWidth,getPropertyMapFont,get,getUnitImageHeight,contains,setColor,getPropertyUnitCountColor],setUnitFlagDrawMode->[put,toString],displayFactoryDamage->[getPropertyUnitFactoryDamageColor,getPropertyMapFont,drawString,getUnitImageHeight,getUnitImageWidth,setColor,length,setFont,getSize],isDamageFromBombingDoneToUnitsInsteadOfTerritories->[getDamageFromBombingDoneToUnitsInsteadOfTerritories],getUnits->[getPlayerID,getMatches,of,getTerritory,getUnitType,unitIsOfType,add,unitIsOwnedBy],toString->[pluralize],displayHitDamage->[getPropertyMapFont,drawString,getUnitImageHeight,getUnitImageWidth,setColor,length,getPropertyUnitHitDamageColor,setFont,getSize],asList]]
Method draws a specific unit image. Method draws a unit of this category. Display a miss or damage if there are no misses.
nitpick, formatting is off. `} else {`
@@ -64,6 +64,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { storeHandler := newStoreHandler(svr, rd) router.HandleFunc("/api/v1/store/{id}", storeHandler.Get).Methods("GET") router.HandleFunc("/api/v1/store/{id}", storeHandler.Delete).Methods("DELETE") + router.HandleFunc("/api/v1/store/remove-tombstone", storeHandler.RemoveTombStone).Methods("DELETE") router.HandleFunc("/api/v1/store/{id}/state", storeHandler.SetState).Methods("POST") router.HandleFunc("/api/v1/store/{id}/label", storeHandler.SetLabels).Methods("POST") router.HandleFunc("/api/v1/store/{id}/weight", storeHandler.SetWeight).Methods("POST")
[Handle,Handler,Join,NewRouter,New,Subrouter,GetHandler,HandleFunc,PathPrefix,Methods]
Router for all configuration methods. Get - all - the - regions routes.
I think it's better to put it into `stores` instead of `store`?
@@ -50,8 +50,7 @@ type txdata struct { Price *big.Int `json:"gasPrice" gencodec:"required"` GasLimit uint64 `json:"gas" gencodec:"required"` ShardID uint32 `json:"shardID" gencodec:"required"` - ToShardID uint32 `json:"toShardID" rlp:"nil"` // for cross-shard tx's destination shard ID; nil means intra-shard tx - Recipient *common.Address `json:"to" rlp:"nil"` // nil means contract creation + Recipient *common.Address `json:"to" rlp:"nil"` // nil means contract creation Amount *big.Int `json:"value" gencodec:"required"` Payload []byte `json:"input" gencodec:"required"`
[MarshalJSON->[MarshalJSON],Shift->[Pop],UnmarshalJSON->[UnmarshalJSON],Pop->[Pop],Hash]
MISSING - PARAMS - FOR STORAGE - INFORMATION NewTransaction returns new transaction.
why remove ToShardID? we have discussed in discord.
@@ -141,6 +141,8 @@ namespace NServiceBus } var username = GetInstallationUserName(); + await CreateQueuesIfNecessary(receiveInfrastructure, username).ConfigureAwait(false); + foreach (var installer in builder.BuildAll<INeedToInstallSomething>()) { await installer.Install(username).ConfigureAwait(false);
[InitializableEndpoint->[Initialize->[Initialize]]]
Run Installers.
While this was run as an installer before, I'd prefer to have this as a separate method called from `Initialize` directly instead. That would eliminate the need to pass `receiveInfrastructure` into `Runstallers`, which otherwise doesn't need it. Having the call in `Initialize` would also mean the check for send-only could be moved up and be used to skip the call to `CreateQueuesIfNecessary` and `ConfigureReceiveInfrastructure`.
@@ -1108,8 +1108,10 @@ function suggestion_query($uid, $start = 0, $limit = 80) { intval($limit) ); - if(count($r) && count($r) >= ($limit -1)) + if(count($r) && count($r) >= ($limit -1)) { + Cache::set("suggestion_query:".$uid.":".$start.":".$limit, $r, CACHE_FIVE_MINUTES); return $r; + } $r2 = q("SELECT gcontact.* FROM gcontact INNER JOIN `glink` ON `glink`.`gcid` = `gcontact`.`id`
[poco_check->[get_baseurl],update_suggestions->[get_baseurl],poco_last_updated->[item,registerNamespace,query,loadXML],gs_fetch_users->[get_baseurl],poco_load->[get_curl_code]]
Query for suggestion of a user get a list of all non - contact records.
[standards] Please add a space after `if`.
@@ -53,11 +53,14 @@ public class Argon2SecureHasher implements SecureHasher { private final int iterations; private final int saltLength; - private final boolean usingStaticSalt; + private boolean usingStaticSalt; // A 16 byte salt (nonce) is recommended for password hashing private static final byte[] staticSalt = "NiFi Static Salt".getBytes(StandardCharsets.UTF_8); + // Upper boundary for several cost parameters + private static final double upperBoundary = Math.pow(2, 32) - 1; + /** * Instantiates an Argon2 secure hasher using the default cost parameters * ({@code hashLength = }{@link #DEFAULT_HASH_LENGTH},
[Argon2SecureHasher->[hash->[getSalt],getSalt->[isUsingStaticSalt]]]
A class which instantiates a SecureHasher which uses the default cost parameters for the N Argon2SecureHasher constructor.
Should be `long` rather than `double`.
@@ -1276,7 +1276,7 @@ public class InvokeHTTP extends AbstractProcessor { // create a new hashmap to store the values from the connection Map<String, String> map = new HashMap<>(); responseHttp.headers().names().forEach((key) -> { - if (key == null) { + if (key == null || key.trim().isEmpty()) { return; }
[InvokeHTTP->[convertAttributesFromHeaders->[csv],OverrideHostnameVerifier->[verify->[verify]]]]
convert the attributes from the headers to the CSV format.
You would want to use StringUtils.isBlank() see other parts of the code.
@@ -562,7 +562,7 @@ class Media extends ApiWrapper */ public function setCreator($creator) { - $this->entity->setChanger($creator); + $this->entity->setCreator($creator); return $this; }
[Media->[getDescription->[getDescription,getLocale],getCreated->[getCreated],getName->[getName],setStorageOptions->[setStorageOptions],getVersions->[getVersion],getTitle->[getTitle,getLocale],getChanged->[getChanged],getPublishLanguages->[getPublishLanguages,getLocale],toArray->[getDescription,getCreated,getName,getThumbnails,getTitle,getChanged,getPublishLanguages,getMimeType,getId,getDownloadCounter,getSize,getTags,getLocale,getVersion,getChanger,getCollection,getUrl,getCreator,getStorageOptions],setMimeType->[setMimeType],getMimeType->[getMimeType],getId->[getId],setChanger->[setChanger],getDownloadCounter->[getDownloadCounter],getContentLanguages->[getContentLanguages,getLocale],setName->[setName],setType->[setType],setDescription->[setDescription],getFileVersion->[getId,getVersion],setCollection->[setCollection],setSize->[setSize],getSize->[getSize],setChanged->[setChanged],getTags->[getName,getTags],getMeta->[getFileVersion,getMeta,getLocale],removeTags->[removeTags],getVersion->[getVersion],setCreated->[setCreated],getChanger->[getChanger],getCollection->[getCollection,getId],setTitle->[setTitle],addTag->[addTag],getType->[getType],getCreator->[getCreator],getStorageOptions->[getStorageOptions],setCreator->[setChanger]]]
Sets the creator of the node.
why only creator not both?
@@ -179,6 +179,7 @@ export class AmpPanZoom extends AMP.BaseElement { this.element.classList.add('i-amphtml-pan-zoom'); this.content_ = children[0]; this.content_.classList.add('i-amphtml-pan-zoom-child'); + this.setAsOwner(this.content_); this.maxScale_ = this.getNumberAttributeOr_('max-scale', DEFAULT_MAX_SCALE); this.initialScale_ = this.getNumberAttributeOr_('initial-scale', 1); this.initialX_ = this.getNumberAttributeOr_('initial-x', 0);
[No CFG could be retrieved]
Initializes the object with properties needed by the AMPHTMLPanel. Adds a transform to the container.
Ownership requires you to do `pauseCallback`, `resumeCallback`, `setInViewport` and a few others.
@@ -29,7 +29,7 @@ class ConsoleTask(QuietTaskMixin, Task): def __init__(self, *args, **kwargs): super(ConsoleTask, self).__init__(*args, **kwargs) - self._console_separator = self.get_options().sep.decode('unicode_escape') + self._console_separator = self.get_options().sep.encode('ascii').decode('unicode_escape') if self.get_options().output_file: try: self._outstream = safe_open(os.path.abspath(self.get_options().output_file), 'wb')
[ConsoleTask->[console_output->[NotImplementedError],__init__->[abspath,get_options,super,format,safe_open,TaskError],register_options->[register,super],execute->[targets,write,flush,console_output,tuple,get_options,close,encode,_guard_sigpipe]]]
Initialize the console task.
The order matters here. I originally had `encode('unicode_escape').decode('ascii')` and it resulted in Pants outputting `\\\\n` instead of `\n`.
@@ -34,7 +34,7 @@ class DataSource(PlotObject): # List of names of the fields of each tuple in self.data # ordering is incoporated here column_names = List(String) - selected = List(Int) # index of selected points + #selected = List(Int) # index of selected points def columns(self, *columns): """ Returns a ColumnsRef object that points to a column or set of
[CategoricalAxis->[__init__->[CategoricalTickFormatter,CategoricalTicker]],Plot->[add_glyph->[Glyph],column->[column],select->[_list_attr_splat],row->[row]],DatetimeAxis->[__init__->[DatetimeTicker,DatetimeTickFormatter]],LogAxis->[__init__->[LogTickFormatter,LogTicker]],LinearAxis->[__init__->[BasicTicker,BasicTickFormatter]],ColumnDataSource->[remove->[remove]],GridPlot->[select->[_list_attr_splat]],DataRange->[finalize->[ColumnsRef]]]
Returns a ColumnsRef object that points to a column or set of columns on this data.
@mattpap this is temporary.
@@ -5,14 +5,16 @@ package utils import ( - api "code.gitea.io/sdk/gitea" - "encoding/json" "net/http" + "strings" + + api "code.gitea.io/sdk/gitea" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/routers/api/v1/convert" + "code.gitea.io/gitea/routers/utils" "github.com/Unknwon/com" )
[GetWebhookByOrgID,Status,JSON,Error,CreateWebhook,GetWebhookByRepoID,Marshal,UpdateWebhook,UpdateEvent,IsErrWebhookNotExist,ToHookContentType,ToHookTaskType,HomeLink,IsValidHookTaskType,IsSliceContainsStr,ToHook,IsValidHookContentType]
GetOrgHook gets an organization s webhook. If there is an error it will write to CheckCreateHookOption checks if the given form contains valid options for a hook.
add an empty line between internal and external package.
@@ -1746,10 +1746,12 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C allocatedSizeWithTemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl); } } - - if (volumeVO.getState() != Volume.State.Ready) { - totalAskingSize += getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeVO, pool); - + // A ready state volume is already allocated in a pool. so the asking size is zero for it. + // In case the volume is moving across pools or is not ready yet, the asking size has to be computed + if (s_logger.isDebugEnabled()) { + s_logger.debug("pool id for the volume with id: " + volumeVO.getId() + " is " + volumeVO.getPoolId()); + } + if ((volumeVO.getState() != Volume.State.Ready) || (volumeVO.getPoolId() != pool.getId())) { if (ScopeType.ZONE.equals(poolVO.getScope()) && volumeVO.getTemplateId() != null) { VMTemplateVO tmpl = _templateDao.findByIdIncludingRemoved(volumeVO.getTemplateId());
[StorageManagerImpl->[getHypervisorType->[getHypervisorType],discoverImageStore->[getName],StorageGarbageCollector->[runInContext->[cleanupStorage]],getDataObjectSizeIncludingHypervisorSnapshotReserve->[getDataObjectSizeIncludingHypervisorSnapshotReserve],updateStoragePool->[enablePrimaryStoragePool,disablePrimaryStoragePool,updateStoragePool],createCapacityEntry->[createCapacityEntry,getStorageOverProvisioningFactor],storagePoolHasEnoughSpace->[checkUsagedSpace,storagePoolHasEnoughSpace,getStorageOverProvisioningFactor],createSecondaryStagingStore->[getName],setDiskProfileThrottling->[getDiskIopsReadRate,getDiskBytesWriteRate,getDiskIopsWriteRate,getDiskBytesReadRate],sendToPool->[getUpHostsInPool,sendToPool],getBytesRequiredForTemplate->[getBytesRequiredForTemplate],setVolumeObjectTOThrottling->[getDiskIopsReadRate,getDiskBytesWriteRate,getDiskIopsWriteRate,getDiskBytesReadRate],migrateToObjectStore->[discoverImageStore,migrateToObjectStore]]]
Checks if a storage pool has enough space to hold the given volumes. HypervisorSnapshotReserve - reserves a reservation of a volume on a given cluster Checks if there is enough free space on this pool.
Please wrap this `DEBUG` log in an `if (s_logger.isDebugEnabled)` check to prevent unnecessary/expensive string concatenation when `DEBUG` logging is not enabled. Minor nit: grammatically, the `:` character after `is` is unnecessary.
@@ -1274,6 +1274,10 @@ class PackageBase(with_metaclass(PackageMeta, object)): **kwargs ) + if self.try_install_from_binary_cache(explicit): + tty.msg('Installed %s from binary cache' % self.name) + return + tty.msg('Installing %s' % self.name) # Set run_tests flag before starting build.
[PackageBase->[do_install->[build_process->[do_stage,_stage_and_write_lock,do_fake_install,do_patch],remove_prefix,_update_explicit_entry_in_db,do_install,_process_external_package],sanity_check_prefix->[check_paths],do_uninstall->[uninstall_by_spec],build_log_path->[build_log_path],possible_dependencies->[possible_dependencies],_make_stage->[_make_resource_stage,_make_root_stage],_sanity_check_extension->[_check_extendable],stage->[_make_stage],do_patch->[do_stage],check_for_unfinished_installation->[remove_prefix],do_stage->[do_fetch],dependency_activations->[extends],do_activate->[_sanity_check_extension,do_activate],do_deactivate->[_sanity_check_extension],fetcher->[_make_fetcher],nearest_url->[version_urls],url_for_version->[nearest_url,version_urls],on_package_attributes],InstallPhase->[copy->[InstallPhase]],run_before->[register_callback],run_after->[register_callback],PackageMeta->[__new__->[_flush_callbacks->[copy],_flush_callbacks,InstallPhase]],Package->[run_after]]
Installs a package and its dependencies. Continuing from partial install of package. Installs a in the source directory. Add a new package to the system.
Does installation throw an exception if any of download, verification, or database modification fails? Should that be caught here or inside of `try_install_from_binary_cache`?
@@ -175,6 +175,11 @@ class StructuralMechanicsAnalysis(object): # TODO in the future this could deriv for process in self.list_of_processes: process.ExecuteInitialize() + ## Add the processes to the solver + self.solver.AddProcessesList(self.list_of_processes) + if (self.output_post is True): + self.solver.AddPostProcess(self.gid_output) + ## Solver initialization self.solver.Initialize()
[StructuralMechanicsAnalysis->[_ExecuteInitialize->[_InitializeIO,Initialize],_SolveSolutionStep->[_ExecuteBeforeSolve]],StructuralMechanicsAnalysis]
Initialize the Kratos analysis. This function initializes the list of processes and then performs the necessary actions. End of Analysis.
This is a serious blocker Not too long ago and after very long discussions with @KratosMultiphysics/technical-committee and others it was decided that the processes live in the stage, not the solver
@@ -128,7 +128,6 @@ namespace System.IO unsafe { byte* pointer = null; - RuntimeHelpers.PrepareConstrainedRegions(); try { buffer.AcquirePointer(ref pointer);
[No CFG could be retrieved]
Initializes the object with the given arguments. Initialize - This function initializes a stream over a byte*.
@akoeplinger, does mono still need these? If yes, anything under src/libraries/System.Private.Corelib should be reverted.
@@ -86,7 +86,10 @@ namespace System.IO // The Windows implementation uses WriteFile, which ignores the offset if the handle // isn't seekable. We do the same manually with PWrite vs Write, in order to enable // the function to be used by FileStream for all the same situations. - int result = handle.CanSeek ? + + // POSIX requires that pwrite should respect provided offset even for handles opened with O_APPEND. + // But Linux and BSD don't do that, moreover their behaviour is different. So we always use write for O_APPEND. + int result = handle.CanSeek && !handle.IsAppend ? Interop.Sys.PWrite(handle, bufPtr, buffer.Length, fileOffset) : Interop.Sys.Write(handle, bufPtr, buffer.Length); FileStreamHelpers.CheckFileCall(result, handle.Path);
[RandomAccess->[ReadAtOffset->[Path,CheckFileCall,PRead,CanSeek,GetReference,Read,Length],ReadScatterAtOffset->[Path,Length,PReadV,Count,CheckFileCall,Dispose,Pointer,Pin,GetReference],WriteAtOffset->[Write,Path,PWrite,CheckFileCall,CanSeek,GetReference,Length],ReadAtOffsetAsync->[ScheduleSyncReadAtOffsetAsync],WriteGatherAtOffset->[Path,PWriteV,Length,CheckFileCall,Count,Dispose,Pointer,Pin,GetReference],GetFileLength->[Path,CheckFileCall,Size,FStat],ReadScatterAtOffsetAsync->[ScheduleSyncReadScatterAtOffsetAsync],WriteAtOffsetAsync->[ScheduleSyncWriteAtOffsetAsync],WriteGatherAtOffsetAsync->[ScheduleSyncWriteGatherAtOffsetAsync]]]
Write the given buffer to the given file.
Doesn't this add yet another syscall in some cases (in particular for a `new SafeFileHandle(fd, ...)`-constructed handle? Is that really worth it?
@@ -461,8 +461,13 @@ public class InvokeScriptedProcessor extends AbstractSessionFactoryProcessor { // if there was existing validation errors and the processor loaded successfully if (currentValidationResults.isEmpty() && instance != null) { try { - // defer to the underlying processor for validation - final Collection<ValidationResult> instanceResults = instance.validate(context); + // defer to the underlying processor for validation, without the + // invokescriptedprocessor properties + final Set<PropertyDescriptor> innerPropertyDescriptor = new HashSet<PropertyDescriptor>(scriptingComponentHelper.getDescriptors()); + + ValidationContext innerValidationContext = new FilteredPropertiesValidationContextAdapter(context, innerPropertyDescriptor); + final Collection<ValidationResult> instanceResults = instance.validate(innerValidationContext); + if (instanceResults != null && instanceResults.size() > 0) { // return the validation results from the underlying instance return instanceResults;
[InvokeScriptedProcessor->[getRelationships->[getRelationships],customValidate->[customValidate,getLogger,setup],setup->[setup],onPropertyModified->[onPropertyModified],stop->[stop],reloadScript->[getIdentifier->[getIdentifier],getControllerServiceLookup->[getControllerServiceLookup],getNodeTypeProvider->[getNodeTypeProvider],getLogger],onTrigger->[getLogger,onTrigger]]]
Override validate method to defer to the underlying processor if there is a validation error and the script.
I think we should do the same in the `onTrigger` method too
@@ -80,7 +80,7 @@ public class ResourceLocatorHandler implements ServerRestHandler { requestContext.setRemaining(res.remaining); requestContext.setEndpointInstance(locator); requestContext.setResult(null); - requestContext.restart(res.value); + requestContext.restart(res.value, true); requestContext.setMaxPathParams(res.pathParamValues.length); for (int i = 0; i < res.pathParamValues.length; ++i) { String pathParamValue = res.pathParamValues[i];
[ResourceLocatorHandler->[findTargetRecursive->[findTargetRecursive]]]
This method is called when a request is received from a client. missing context.
Is there a reason to overwrite the `pathParamValue` for locators? Is it because they can declare new path parameters? Otherwise we can just keep the original `pathParamValue` and no need to add a field to save them.
@@ -1178,7 +1178,8 @@ public class VolumeServiceImpl implements VolumeService { try { DataObject volumeOnStore = store.create(volume); volumeOnStore.processEvent(Event.CreateOnlyRequested); - snapshot.processEvent(Event.CopyingRequested); + _volumeDetailsDao.addDetail(volume.getId(), SNAPSHOT_ID, Long.toString(snapshot.getId()), false); + CreateVolumeFromBaseImageContext<VolumeApiResult> context = new CreateVolumeFromBaseImageContext<VolumeApiResult>(null, volume, store, volumeOnStore, future, snapshot); AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
[VolumeServiceImpl->[createVolumeFromBaseImageCallBack->[getFuture],copyManagedTemplateCallback->[getVolume,getFuture],copyBaseImageCallback->[getVolume,getFuture],resizeVolumeOnHypervisor->[getPrimaryDataStore],copyVolumeCallBack->[getVolume,expungeVolumeAsync,destroyVolume],revokeAccess->[revokeAccess],expungeVolumeAsync->[canVolumeBeRemoved],registerVolume->[getVolume],updateHypervisorSnapshotReserveForVolume->[getVolume],createVolumeCallback->[getVolume],createManagedTemplateImageCallback->[getVolume,getFuture],copyTemplateToManagedTemplateVolume->[getChapInfo,revokeAccess,grantAccess],copyVolume->[copyVolumeFromPrimaryToImage,getVolume,duplicateVolumeOnAnotherStorage,copyVolumeFromImageToPrimary],resize->[resize],deleteVolumeCallback->[canVolumeBeRemoved,getVolume],migrateVolume->[getVolume],createManagedVolumeCloneTemplateAsync->[createVolumeFromBaseImageCallBack],handleVolumeSync->[createVolumeAsync,getVolume],takeSnapshot->[takeSnapshot],registerVolumeCallback->[getDataStore],destroyVolume->[getVolume],createVolumeAsync->[getVolume],managedCopyBaseImageCallback->[getFuture,getVolumeInfo],createManagedVolumeCopyTemplateAsync->[getVolume,grantAccess,revokeAccess,expungeVolumeAsync,createVolumeAsync,getChapInfo],createManagedStorageVolumeFromTemplateAsync->[createManagedVolumeCopyTemplateAsync,copyTemplateToManagedTemplateVolume,createManagedTemplateVolume,createManagedVolumeCloneTemplateAsync,getPrimaryDataStore],listVolume->[getTemplateInfo],grantAccess->[grantAccess],createVolumeFromTemplateAsync->[getPrimaryDataStore,createBaseImageAsync,createVolumeFromBaseImageAsync],getChapInfo->[getChapInfo]]]
Create a new volume from a snapshot.
Can you please explain the logic here, how removing the processEvent is doing the job.
@@ -47,6 +47,9 @@ type Node struct { cancel context.CancelFunc raftEngine *RaftEngine ioRate int64 + available *uint64 + ToCompactionSize *uint64 + sizeMutex sync.Mutex } // NewNode returns a Node.
[regionHeartBeat->[GetState],storeHeartBeat->[GetState]]
NewNode creates a new node from a Store. Node constructor.
Why we need it?
@@ -37,7 +37,7 @@ class TextField(SequenceField[Dict[str, torch.Tensor]]): def __init__(self, tokens: List[Token], token_indexers: Dict[str, TokenIndexer]) -> None: self.tokens = tokens self._token_indexers = token_indexers - self._indexed_tokens: Optional[Dict[str, TokenList]] = None + self._indexed_tokens: Optional[Dict[str, Any]] = None if not all([isinstance(x, (Token, SpacyToken)) for x in tokens]): raise ConfigurationError("TextFields must be passed Tokens. "
[TextField->[__str__->[sequence_length],count_vocab_items->[count_vocab_items],get_padding_lengths->[get_padding_lengths],empty_field->[TextField]]]
Initialize the object with a list of tokens and token indexer.
so I was actually imagining something like where a BPE indexer would generate two top-level arrays `f"{indexer_name}-byte-pairs"` and `f"{indexer_name}-offsets"` rather than a dict of them. that's clunkier in some ways but again it avoids the `isinstance` checks and having to have two sorts of logic. thoughts?
@@ -0,0 +1,18 @@ +/* + * Copyright (c) MuleSoft, Inc. All rights reserved. http://www.mulesoft.com + * The software in this package is published under the terms of the CPAL v1.0 + * license, a copy of which has been included with this distribution in the + * LICENSE.txt file. + */ +package org.mule.test.module.http.functional; + +import org.mule.extension.http.internal.request.validator.HttpRequesterConfig; +import org.mule.extension.http.internal.request.validator.HttpRequesterProvider; +import org.mule.functional.junit4.MuleArtifactFunctionalTestCase; +import org.mule.functional.junit4.runners.ArtifactClassLoaderRunnerConfig; + +@ArtifactClassLoaderRunnerConfig(exportClasses = {HttpRequesterProvider.class, HttpRequesterConfig.class}) +public abstract class AbstractHttpTestCase extends MuleArtifactFunctionalTestCase +{ + protected static final int DEFAULT_TIMEOUT = 1000; +}
[No CFG could be retrieved]
No Summary Found.
Why is required to export this classes?
@@ -217,7 +217,13 @@ public abstract class Proc { public LocalProc(String[] cmd,String[] env,InputStream in,OutputStream out,OutputStream err,File workDir) throws IOException { this( calcName(cmd), stderr(environment(new ProcessBuilder(cmd),env).directory(workDir), err==null || err== SELFPUMP_OUTPUT), - in, out, err ); + in, out, err, true ); + } + + public LocalProc(String[] cmd, String[] env, InputStream in, OutputStream out, OutputStream err, File workDir, boolean killWhenInterrupted) throws IOException { + this(calcName(cmd), + stderr(environment(new ProcessBuilder(cmd), env).directory(workDir), err == null || err == SELFPUMP_OUTPUT), + in, out, err, killWhenInterrupted); } private static ProcessBuilder stderr(ProcessBuilder pb, boolean redirectError) {
[Proc->[RemoteProc->[join->[isAlive,kill],kill->[isAlive]],joinWithTimeout->[run->[kill],join],LocalProc->[join->[join,isAlive],environment->[environment],kill->[join]]]]
Redirect the stderr stream to the original process builder if redirectError is true.
Would be preferable to just call the new constructor below to avoid the logic duplication
@@ -103,7 +103,11 @@ class OrderFilter extends AbstractContextAwareFilter implements OrderFilterInter } if (null !== $nullsComparison = $this->properties[$property]['nulls_comparison'] ?? null) { - $nullsDirection = self::NULLS_DIRECTION_MAP[$nullsComparison][$direction]; + if (\in_array($nullsComparison, [self::NULLS_ALWAYS_FIRST, self::NULLS_ALWAYS_LAST], true)) { + $nullsDirection = self::NULLS_ALWAYS_FIRST === $nullsComparison ? 'ASC' : 'DESC'; + } else { + $nullsDirection = self::NULLS_DIRECTION_MAP[$nullsComparison][$direction]; + } $nullRankHiddenField = sprintf('_%s_%s_null_rank', $alias, $field);
[OrderFilter->[filterProperty->[addSelect,getRootAliases,isPropertyMapped,addJoinsForNestedProperty,addOrderBy,normalizeValue,isPropertyEnabled,isPropertyNested],extractProperties->[all],apply->[filterProperty,denormalizePropertyName]]]
Filters a property in a query builder.
I think it would be simpler if you reused the direction map instead (clearer and no need to add specific code).
@@ -89,6 +89,17 @@ public class SmallRyeReactiveMessagingProcessor { LOGGER.debugf("Found mediator business method %s declared on %s", method, bean); } } + + for (FieldInfo field : bean.getTarget().get().asClass().fields()) { + if (annotationStore.hasAnnotation(field, NAME_STREAM)) { + if (field.type().name().equals( + DotName.createSimple(Emitter.class.getName()))) { + String name = annotationStore.getAnnotation(field, NAME_STREAM).value().asString(); + LOGGER.debugf("Emitter field '%s' detected, stream name: '%s'", field.name(), name); + emitterFields.produce(new EmitterBuildItem(bean, field, name)); + } + } + } } } }
[SmallRyeReactiveMessagingProcessor->[beans->[AdditionalBeanBuildItem],beanDeploymentValidator->[validate->[debugf,MediatorBuildItem,isClassBean,produce,methods,get,hasAnnotation],BeanDeploymentValidatorBuildItem,FeatureBuildItem,produce,BeanDeploymentValidator],removalExclusions->[UnremovableBeanBuildItem,asList,BeanClassAnnotationExclusion],build->[getValue,produce,ReflectiveClassBuildItem,registerMediators,toString,getIdentifier,containsKey,put],getLogger,getName,createSimple]]
Bean deployment validator.
We should create a constant for `Emitter` dot name.
@@ -175,6 +175,10 @@ free_io_params_cb(tse_task_t *task, void *data) while (io_list) { struct io_params *current = io_list; + if (current->iom.iom_recxs) { + D_FREE(current->iom.iom_recxs); + current->iom.iom_recxs = NULL; + } if (current->iod.iod_recxs) { D_FREE(current->iod.iod_recxs); current->iod.iod_recxs = NULL;
[No CFG could be retrieved]
- > array_hdl2ptr - > array_hdl This function is called from the DAOS daemon when a new array object is created.
D_FREE sets the pointer to NULL
@@ -39,10 +39,14 @@ public class ReflectionWindowFunctionSupplier<T extends WindowFunction> public ReflectionWindowFunctionSupplier(Signature signature, Class<T> type) { super(signature, getDescription(requireNonNull(type, "type is null"))); + this.canIgnoreNulls = ValueWindowFunction.class.isAssignableFrom(type); try { if (signature.getArgumentTypes().isEmpty()) { constructor = type.getConstructor(); } + else if (canIgnoreNulls) { + constructor = type.getConstructor(List.class, boolean.class); + } else { constructor = type.getConstructor(List.class); }
[ReflectionWindowFunctionSupplier->[newWindowFunction->[RuntimeException,newInstance,isEmpty],getDescription->[getAnnotation,value],RuntimeException,Signature,getConstructor,getDescription,getTypeSignature,requireNonNull,isEmpty,transform]]
Produces a supplier for a window function. Description - Get the description of the element.
This will break any existing plugins that implement `ValueWindowFunction`. Instead, if the type extends `ValueWindowFunction` (as you already check), let's check if the new constructor form exists, otherwise fallback to the old one.
@@ -1507,7 +1507,8 @@ obj_local_rw_internal(crt_rpc_t *rpc, struct obj_io_context *ioc, rc = dss_sleep(3100); } } else if (orw->orw_sgls.ca_arrays != NULL) { - rc = bio_iod_copy(biod, orw->orw_sgls.ca_arrays, orw->orw_nr); + rc = bio_iod_copy(biod , ioc->ioc_zc_fetch, + orw->orw_sgls.ca_arrays, orw->orw_nr); } if (rc) {
[No CFG could be retrieved]
END of DSS get the next object in the list of objects.
(style) space prohibited before that ',' (ctx:WxW)
@@ -18,8 +18,15 @@ from mypy.nodes import COVARIANT, CONTRAVARIANT, ArgKind from mypy.argmap import ArgTypeExpander from mypy.typestate import TypeState -SUBTYPE_OF: Final = 0 -SUPERTYPE_OF: Final = 1 + +@enum.unique +class SubType(enum.IntEnum): + SUBTYPE_OF = 0 + SUPERTYPE_OF = 1 + + +SUBTYPE_OF: Final = SubType.SUBTYPE_OF +SUPERTYPE_OF: Final = SubType.SUPERTYPE_OF class Constraint:
[ConstraintBuilderVisitor->[visit_callable_type->[infer_constraints],visit_typeddict_type->[infer_constraints],infer_against_overloaded->[infer_constraints],visit_instance->[infer_constraints],visit_tuple_type->[infer_constraints],visit_type_type->[infer_constraints],visit_overloaded->[infer_constraints],infer_constraints_from_protocol_members->[infer_constraints],infer_against_any->[infer_constraints]],infer_constraints_if_possible->[infer_constraints],_infer_constraints->[Constraint,infer_constraints],any_constraints->[any_constraints]]
A class to infer constraints for a given type variable. Infer type variable constraints for a callable and actual arguments.
If I recall correctly, mypyc currently only supports enum.Enum
@@ -112,7 +112,7 @@ namespace System.Xml.Linq /// <summary> /// Gets the XML declaration for this document. /// </summary> - public XDeclaration Declaration + public XDeclaration? Declaration { get { return _declaration; } set { _declaration = value; }
[XDocument->[ValidateString->[IsWhitespace],Save->[Save]]]
initializes a new instance of the XDocument class from an existing XDocument object. - A property that returns the node type for this node.
constructors taking declaration should take nullable declaration
@@ -1079,7 +1079,7 @@ void ByteCodeGenerator::DefineCachedFunctions(FuncInfo *funcInfoParent) auto fillEntries = [&](ParseNode *pnodeFnc) { Symbol *sym = pnodeFnc->AsParseNodeFnc()->GetFuncSymbol(); - if (sym != nullptr && (pnodeFnc->AsParseNodeFnc()->IsDeclaration())) + if (sym != nullptr && (pnodeFnc->AsParseNodeFnc()->IsDeclaration() || pnodeFnc->AsParseNodeFnc()->IsDefaultModuleExport())) { AssertMsg(!pnodeFnc->AsParseNodeFnc()->IsGenerator(), "Generator functions are not supported by InitCachedFuncs but since they always escape they should disable function caching"); Js::FuncInfoEntry *entry = &info->elements[slotCount];
[No CFG could be retrieved]
private int frameDisplayLoc = 0 ; EndEmitBlock - Ends emit block if it has no scope.
Why is the default module export allowed here now?
@@ -3225,7 +3225,9 @@ int dt_exif_xmp_attach(const int imgid, const char *filename) // last but not least attach what we have in DB to the XMP. in theory that should be // the same as what we just copied over from the sidecar file, but you never know ... - dt_exif_xmp_read_data(xmpData, imgid); + // the previous comment is not ok. First for exportation tags (and metadata) can be different + // second we could save time not reading the xmp file, just using the database data + dt_exif_xmp_read_data_export(xmpData, imgid); img->writeMetadata(); return 0;
[No CFG could be retrieved]
dt_image_path_append_version - appends version to input file read the neccesary sidecar file from disk.
Not visible in the diff here, can you tell me what is happening for dt_exif_xmp_read_data(). The header is removed, is the body still needed?
@@ -500,3 +500,10 @@ class OrderEvent(models.Model): def __repr__(self): return f"{self.__class__.__name__}(type={self.type!r}, user={self.user!r})" + + +class Invoice(models.Model): + order = models.ForeignKey(Order, null=True, on_delete=models.SET_NULL) + number = models.CharField(max_length=64) + created = models.DateTimeField(auto_now_add=True) + url = models.URLField(max_length=256)
[OrderLine->[is_digital->[is_digital]],Order->[can_capture->[can_capture,get_last_payment],get_payment_status_display->[get_last_payment],can_void->[can_void,get_last_payment],total_authorized->[get_last_payment],total_captured->[get_last_payment],can_refund->[can_refund,get_last_payment],get_payment_status->[get_last_payment]]]
Return a string representation of the object.
Are you sure that 256 is enough in any cases?
@@ -92,7 +92,7 @@ public class CompressedPools } ); - private static final NonBlockingPool<ByteBuffer> littleEndByteBufPool = new StupidPool<ByteBuffer>( + private static final NonBlockingPool<ByteBuffer> LITTLE_END_BYTE_BUF_POOL = new StupidPool<ByteBuffer>( "littleEndByteBufPool", new Supplier<ByteBuffer>() {
[CompressedPools->[getBufferRecycler->[take],getByteBuf->[take],get->[info,BufferRecycler,order,incrementAndGet],getOutputBytes->[take],AtomicLong,Logger]]
Get the output bytes from the pool.
Please call `BIG_ENDIAN_BYTE_BUF_POOL`.
@@ -90,7 +90,9 @@ def main(): print('# Epoch {} #'.format(epoch)) train(model, device, train_loader, optimizer) test(model, device, test_loader) + pruner.export_model('model.pth', 'mask.pth', 'model.onnx', [1, 1, 28, 28]) if __name__ == '__main__': main() + \ No newline at end of file
[train->[train,step,to,nll_loss,print,backward,len,format,item,enumerate,model,zero_grad],main->[manual_seed,train,Normalize,Compose,Mnist,parameters,device,print,pruner,AGP_Pruner,update_epoch,SGD,format,test,ToTensor,range,DataLoader,MNIST],Mnist->[forward->[conv1,fc2,fc1,log_softmax,max_pool2d,conv2,view,relu],__init__->[Linear,super,Conv2d]],test->[to,nll_loss,print,len,eval,argmax,no_grad,eq,format,model],main]
This is the main function of the algorithm.
What's this? Maybe some incorrect `CRLF` like stuff?
@@ -128,9 +128,9 @@ func NewResourceMetadataEnricher( cfg, _ := common.NewConfigFrom(&metaConfig) - metaGen := metadata.NewResourceMetadataGenerator(cfg) + metaGen := metadata.NewResourceMetadataGenerator(cfg, watcher.Client()) podMetaGen := metadata.NewPodMetadataGenerator(cfg, nil, watcher.Client(), nil, nil) - serviceMetaGen := metadata.NewServiceMetadataGenerator(cfg, nil, nil) + serviceMetaGen := metadata.NewServiceMetadataGenerator(cfg, nil, nil, watcher.Client()) enricher := buildMetadataEnricher(watcher, // update func(m map[string]common.MapStr, r kubernetes.Resource) {
[Start->[Start,Lock,Unlock,Warn],Stop->[Stop,Lock,Unlock],Enrich->[DeepUpdate,RLock,Clone,RUnlock,index],Value,Unlock,NewConfigFrom,GroupVersionKind,GetObjectMeta,NewServiceMetadataGenerator,ParseQuantity,Info,Set,GetKubernetesClient,NewPodMetadataGenerator,AddEventHandler,NewResourceMetadataGenerator,GetName,GetValue,Lock,MilliValue,Accessor,GetNamespace,Debugf,Join,IsInCluster,NewLogger,NewWatcher,Module,Err,UnpackConfig,GetObjectKind,Client,DiscoverKubernetesNode,Generate,String]
NewResourceMetadataEnricher creates a new enricher for the given metricset and resource if node has memory.
It doesn't seem good to have to add a client as a dependency of all these metadata generators that so far didn't need it, and already have many options. But I don't have a much better idea. Something that could be explored would be to have something like a single cluster metadata object, that is the only one querying this metadata, and then use it as a `FieldOption` in calls to `Generate`. This would have the benefit of not continuing to add dependencies and responsibilities to all generators, but it would add a parameter to basically all the calls to `Generate`, so not sure if it would be better.
@@ -7388,7 +7388,7 @@ void ModuleProtocol(EvalContext *ctx, char *command, const char *line, int print else if (StringMatchFullWithPrecompiledRegex(context_name_rx, content)) { Log(LOG_LEVEL_VERBOSE, "Module changed variable context from '%s' to '%s'", context, content); - strcpy(context, content); + strlcpy(context, content, context_size); } else {
[No CFG could be retrieved]
This function is called when a module protocol is given. Parse extended module protocol line.
'filename' is based on a parameter 'command', how do you know it's less than 50 chars? Perhaps you did not see that one..? For the second strlcpy() of context in this function it looks to be safe to make sure it's larger than 50 chars and use strcpy(), but what if someone changes the function in the future? Is it really worth risking? I think we should keep strlcpy() here, why are you so insistent on strcpy()? I added a ProgrammingError() in case < 51 char buffer is passed, though.
@@ -556,5 +556,6 @@ func (mt modeTest) QuotaReclamationMinHeadAge() time.Duration { } func (mt modeTest) DoLogObfuscation() bool { - return false + e := os.Getenv("KEYBASE_TEST_OBFUSCATE_LOGS") + return e != "" && e != "0" && strings.ToLower(e) != "false" }
[Sprintf]
DoLogObfuscation returns true if the mode is a test mode and false otherwise.
can we also check for "0" and "FALSE"?
@@ -557,7 +557,7 @@ public class SCMTrigger extends Trigger<Item> { if (actions == null) { additionalActions = new Action[0]; } else { - additionalActions = actions; + additionalActions = Arrays.copyOf(actions, actions.length); } }
[SCMTrigger->[getDescriptor->[getDescriptor],SCMTriggerCause->[onAddedTo->[BuildAction,getPollingLogFile]],SCMAction->[getLog->[getLogFile],getDisplayName->[getDisplayName,getDescriptor]],BuildAction->[getPollingLogText->[getPollingLogFile]],run->[run],Runner->[hashCode->[hashCode],runPolling->[getLogFile],run->[runPolling,getLogFile],getLogFile->[getLogFile]],DescriptorImpl->[isPollingThreadCountOptionVisible->[getPollingThreadCount],configure->[setPollingThreadCount],clogCheck->[isClogged],resizeThreadPool->[threadFactory],getItemsBeingPolled->[getRunners],threadFactory]]]
Provides a runner that will run in a background thread. poll job for changes and log them.
This technically causes an issue where if a modification to `actions` occurs outside of the ctor this class would no longer see that effect. But i don't think we support that case
@@ -88,7 +88,7 @@ function pt ($db, $sql, $date) print '<td class="nowrap">'.$obj->dm."</td>\n"; $total = $total + $obj->mm; - print '<td class="nowrap" align="right">'.price($obj->mm)."</td><td >&nbsp;</td>\n"; + print '<td class="nowrap" align="right">'.price(price2num($obj->mm,1))."</td><td >&nbsp;</td>\n"; print "</tr>\n"; $i++;
[pt->[free,query,trans,num_rows,fetch_object],executeHooks,trans,initHooks,close,idate,load]
prints a table of free free node ranks.
We should not make any rounding here. But here there is no reason to make rounding, rounding must be done before inserting data into table (rounding must be 'MT' if a unit price, it must be 'MU' before providing data to table).
@@ -94,7 +94,7 @@ test_run(d_rank_t my_rank) if (my_rank == 0) { rc = crt_group_config_remove(NULL); D_ASSERTF(rc == 0, - "crt_group_config_remove() failed. rc: %d\n", rc); + "crt_group_config_remove() failed. rc: %d\n", rc); } rc = crt_finalize();
[test_run->[pthread_join,d_fault_attr_lookup,D_DEBUG,DBG_PRINT,D_ASSERTF,pthread_create,sleep,tc_srv_start_basic,fprintf,d_log_fini,crt_group_config_remove,crt_finalize,crt_group_config_save,sem_init,sem_destroy,crt_proto_register,crt_context_create],main->[test_run,DBG_PRINT,getenv,atoi,test_parse_args,tc_test_init,fprintf]]
run the n - node protocol This function is called from the server side of the test. It is called from the server.
(style) line over 80 characters
@@ -41,12 +41,13 @@ def gc( used = NamedCache() for repo in all_repos + [self]: + has_scm = not isinstance(repo.scm, NoSCM) used.update( repo.used_cache( - all_branches=all_branches, + all_branches=not all_branches and has_scm, with_deps=with_deps, - all_tags=all_tags, - all_commits=all_commits, + all_tags=not all_tags and has_scm, + all_commits=not all_commits and has_scm, remote=remote, force=force, jobs=jobs,
[gc->[get_remote,update,Repo,_do_gc,NamedCache,enter_context,used_cache,ExitStack],_do_gc->[info,func,format],getLogger]
Garbage collect all of the objects in this cache.
I decided not to change the arguments name, as it makes sense to be more aggressive by setting the arguments here. and looks to be good name. The other idea I had was `scm_exclude=None` where we could pass *Collections* (set/tuple/list/set/dict, etc).
@@ -294,9 +294,12 @@ func TestMasterConfig(t *testing.T) { {Provider: runtime.EmbeddedObject{Object: &internal.OpenIDIdentityProvider{}}}, }, SessionConfig: &internal.SessionConfig{}, + Templates: &internal.OAuthTemplates{}, + }, + AssetConfig: &internal.AssetConfig{ + Extensions: []internal.AssetExtensionsConfig{{}}, }, - AssetConfig: &internal.AssetConfig{}, - DNSConfig: &internal.DNSConfig{}, + DNSConfig: &internal.DNSConfig{}, } serializedConfig, err := writeYAML(config) if err != nil {
[JSONToYAML,Fatal,Errorf,StringDiff,Encode]
TestMasterConfig tests that the serialized master config is the same as the serialized one. Get the node id from the node list.
I haven't seen this before. Reviewed in some other pull?
@@ -1612,6 +1612,10 @@ func (i *Ingester) compactionLoop(ctx context.Context) error { ticker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval) defer ticker.Stop() + // Apply positive jitter only to ensure that the minimum timeout is adhered to. + i.TSDBState.compactionIdleTimeout = util.DurationWithPositiveJitter(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout, compactionIdleTimeoutJitter) + level.Info(i.logger).Log("msg", "TSDB idle compaction timeout set", "timeout", i.TSDBState.compactionIdleTimeout) + for ctx.Err() == nil { select { case <-ticker.C:
[v2LabelValues->[Close,Querier],Head->[Head],Blocks->[Blocks],Close->[Close],getMemorySeriesMetric->[Head],createTSDB->[Compact,Head,updateCachedShippedBlocks,setLastUpdate],PreCreation->[Head],v2LifecyclerFlush->[shipBlocks,compactBlocks],closeAllTSDB->[Close],v2QueryStream->[Close,Querier],shipBlocks->[casState,getTSDBUsers,updateCachedShippedBlocks,getTSDB],getOldestUnshippedBlockTime->[getCachedShippedBlocks,Blocks],closeAndDeleteIdleUserTSDBs->[getTSDBUsers],StartTime->[StartTime],getOldestUnshippedBlockMetric->[getOldestUnshippedBlockTime],v2LabelNames->[Close,Querier],Appender->[Appender],shouldCloseTSDB->[Head,isIdle,getOldestUnshippedBlockTime],openExistingTSDB->[Close,createTSDB],Querier->[Querier],closeAndDeleteUserTSDBIfIdle->[casState,shouldClose,getTSDB,shouldCloseTSDB,Close],v2Query->[Close,Querier],v2Push->[setLastUpdate,Appender],getOrCreateTSDB->[getTSDB],compactBlocks->[getTSDB,Compact,Head,compactHead,getTSDBUsers,isIdle],compactHead->[casState,Head],Compact->[Compact],v2MetricsForLabelMatchers->[Close,Querier],Head,StartTime]
compactionLoop is the main loop that runs in a goroutine. It runs in a loop.
Up for discussion here: Should we change the timeout e.g. each time the timeout is hit? This would mean that if each ingester picked similar timeouts, then we we would diverge next time around. The tricky bit here, is that we would need a timeout per-user, as each user can hit the timeout at different times (depending on `lastUpdateTime`).
@@ -231,8 +231,11 @@ public class XceiverClientGrpc extends XceiverClientSpi { try { return sendCommandWithTraceIDAndRetry(request, null). getResponse().get(); - } catch (ExecutionException | InterruptedException e) { + } catch (ExecutionException e) { throw new IOException("Failed to execute command " + request, e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; } }
[XceiverClientGrpc->[reconnect->[connectToDatanode,isConnected],sendCommandAsync->[sendCommandAsync,onCompleted,onNext],checkOpen->[isConnected],isConnected->[isConnected]]]
Sends a command to the remote node that returns a .
Can we log an error here? Same apply to other places?
@@ -1871,7 +1871,10 @@ void Game::processKeyInput() } else if (wasKeyDown(KeyType::CMD)) { openConsole(0.2, L"/"); } else if (wasKeyDown(KeyType::CMD_LOCAL)) { - openConsole(0.2, L"."); + if (client->moddingEnabled()) + openConsole(0.2, L"."); + else + m_game_ui->showStatusText(wgettext("CSM Disabled")); } else if (wasKeyDown(KeyType::CONSOLE)) { openConsole(core::clamp(g_settings->getFloat("console_height"), 0.1f, 1.0f)); } else if (wasKeyDown(KeyType::FREEMOVE)) {
[No CFG could be retrieved]
This function processes key input and updates runData. jump_timer if necessary. Private functions - Check if the user has pressed the key in the flags and if so update.
possibly `CSM is disabled`, unlike other hotkeys, one can not simply switch it by pressing it.
@@ -29,6 +29,7 @@ $platforms = array( 'win95' => 'Windows 95', 'windows phone' => 'Windows Phone', 'windows' => 'Unknown Windows OS', + 'CrOS' => 'Chrome OS', 'android' => 'Android', 'blackberry' => 'BlackBerry', 'iphone' => 'iOS',
[No CFG could be retrieved]
This function defines the user agent class to help identify the browser platform robot and mobile device. Returns the name of the sub - type of the given browser type.
You're using spaces for indentation here ... we use tabs.
@@ -1014,7 +1014,7 @@ class PoolingTest(test.TestCase): output_sizes, x_init_value=x_init_value, delta=1e-2) - tf_logging.info("%s gradient error = " % func_name, err) + tf_logging.info("%s gradient error = %.4f" % (func_name, err)) self.assertLess(err, err_tolerance) def _ConstructAndTestSecondGradient(self,
[GetMaxPoolFwdTest->[Test->[_CompareMaxPoolingFwd]],GetMaxPoolGradTest->[Test->[_CompareMaxPoolingBk]],GetMaxPoolGradGradTest->[Test->[_CompareMaxPoolingGradBk]],PoolingTest->[_testMaxPoolSamePadding->[_VerifyValues],testMaxPoolGradDirect->[_testMaxPoolGradDirectWithNans2_2,_testMaxPoolGradDirect1_2,_testMaxPoolGradDirect1_1,_testMaxPoolGradDirect1_3,_testMaxPoolGradDirectWithNans2_1],_testAvgPoolGradValidPadding1_1->[_ConstructAndTestGradient],testKernelSmallerThanStrideSame->[_VerifyValues],_testMaxPoolSamePaddingPacket4->[_VerifyValues],_testMaxPoolGradDirect->[_MaxPoolGrad],_testMaxPoolGradGradSamePadding3_1->[_ConstructAndTestSecondGradient],testMaxPoolGrad->[_testMaxPoolGradValidPadding2_2,_testMaxPoolGradValidPadding1_2,_testMaxPoolGradValidPadding2_1_7,_testMaxPoolGradSamePadding2_2,_testMaxPoolGradValidPadding2_1_6,_testMaxPoolGradValidPadding1_1,GetTestConfigs,_testMaxPoolGradSamePadding2_1,_testMaxPoolGradSamePadding3_1,_testMaxPoolGradSamePadding1_1,_testMaxPoolGradSamePadding1_2],_testMaxPoolValidPaddingUnevenStride->[_VerifyValues],_testMaxPoolGradValidPadding2_1_6->[_ConstructAndTestGradient],testAvgPooling->[_testAvgPoolValidPadding,_testAvgPoolSamePadding4,_testAvgPoolValidPaddingUnevenStride,_testAvgPoolSamePaddingNonSquareWindowMultiBatch,_testAvgPoolEmptyInput,_testAvgPoolSamePaddingNonSquareWindow,_testAvgPoolSamePadding,_testAvgPoolSamePaddingPacket8,_testAvgPoolSamePaddingPacket4],testMaxPoolGradGrad->[_testMaxPoolGradGradValidPadding2_1_6,_testMaxPoolGradGradSamePadding3_1,_testMaxPoolGradGradValidPadding2_1_7,GetTestConfigs,_testMaxPoolGradGradSamePadding1_1,_testMaxPoolGradGradSamePadding2_2,_testMaxPoolGradGradValidPadding2_2,_testMaxPoolGradGradSamePadding2_1,_testMaxPoolGradGradValidPadding1_1],_testMaxPoolGradGradValidPadding2_2->[_ConstructAndTestSecondGradient],_testMaxPoolSamePaddingNonSquareWindow->[_VerifyValues],_testAvgPoolSamePaddingPacket8->[_VerifyValues],_testAvgPoolSamePaddingPacket4->[_VerifyValues],_testMaxPoolGradGradVali
dPadding1_1->[_ConstructAndTestSecondGradient],_testAvgPoolGradSamePadding2_2->[_ConstructAndTestGradient],_testMaxPoolGradValidPadding2_2->[_ConstructAndTestGradient],_testAvgPoolGradValidPadding2_2->[_ConstructAndTestGradient],_VerifyOneTest->[_VerifyOneType],testKernelSmallerThanStrideValid->[_VerifyValues],_VerifyValues->[GetTestConfigs,_VerifyOneTest],_testAvgPoolSamePadding4->[_VerifyValues],_testAvgPoolGradValidPadding2_1->[_ConstructAndTestGradient],_testAvgPoolGradSamePadding3_1->[_ConstructAndTestGradient],_testAvgPoolSamePaddingNonSquareWindowMultiBatch->[_VerifyValues],testMaxPoolingGradWithArgmax->[GetDeviceScope],_testMaxPoolGradGradValidPadding2_1_7->[_ConstructAndTestSecondGradient],testMaxPoolingWithArgmax->[GetDeviceScope],_testMaxPoolGradValidPadding1_1->[_ConstructAndTestGradient],_testMaxPoolGradGradSamePadding1_1->[_ConstructAndTestSecondGradient],_testMaxPoolGradSamePadding2_1->[_ConstructAndTestGradient],_testMaxPoolGradSamePadding3_1->[_ConstructAndTestGradient],_testMaxPoolGradGradSamePadding2_1->[_ConstructAndTestSecondGradient],_testMaxPoolSamePaddingPacket8->[_VerifyValues],testAvgPoolGrad->[GetTestConfigs],testDepthwiseMaxPool2x2DepthWindow3->[_VerifyValues],_testAvgPoolValidPadding->[_VerifyValues],testMaxPoolingGradGradWithArgmax->[GetDeviceScope],testMaxPooling->[_testMaxPoolValidPadding,_testMaxPoolSamePaddingPacket8,_testMaxPoolSamePadding,_testMaxPoolSamePaddingPacket4,_testMaxPoolValidPaddingUnevenStride,_testMaxPoolEmptyInput,_testMaxPoolSamePaddingNonSquareWindow],_testMaxPoolGradValidPadding1_2->[_ConstructAndTestGradient],_testAvgPoolValidPaddingUnevenStride->[_VerifyValues],_testAvgPoolEmptyInput->[_VerifyValues],_testMaxPoolGradDirectWithNans2_2->[_testMaxPoolGradDirect],_testMaxPoolEmptyInput->[_VerifyValues],_testAvgPoolGradSamePadding2_1->[_ConstructAndTestGradient],_testMaxPoolGradSamePadding1_1->[_ConstructAndTestGradient],_testMaxPoolGradSamePadding1_2->[_ConstructAndTestGradient],_testAvgPoolGradValidPadding1_2->[_Co
nstructAndTestGradient],_testAvgPoolGradSamePadding1_2->[_ConstructAndTestGradient],_testMaxPoolValidPadding->[_VerifyValues],testDepthwiseMaxPool1x1DepthWindow1->[_VerifyValues],testDepthwiseMaxPoolInvalidConfigs->[_testDepthwiseMaxPoolInvalidConfig],_testMaxPoolGradGradValidPadding2_1_6->[_ConstructAndTestSecondGradient],_testMaxPoolGradValidPadding2_1_7->[_ConstructAndTestGradient],_testMaxPoolGradSamePadding2_2->[_ConstructAndTestGradient],_testMaxPoolGradDirect1_2->[_testMaxPoolGradDirect],_testAvgPoolGradSamePadding1_1->[_ConstructAndTestGradient],_testMaxPoolGradDirect1_1->[_testMaxPoolGradDirect],_testMaxPoolGradGradSamePadding2_2->[_ConstructAndTestSecondGradient],_testAvgPoolSamePaddingNonSquareWindow->[_VerifyValues],_testMaxPoolGradDirect1_3->[_testMaxPoolGradDirect],_testAvgPoolSamePadding->[_VerifyValues],_testMaxPoolGradDirectWithNans2_1->[_testMaxPoolGradDirect]],GetMaxPoolGradTest,GetMaxPoolGradGradTest,GetMaxPoolFwdTest,GetShrunkInceptionMaxPoolShapes]
Constructs a new tensor and tests the gradient of the average pooling function. Creates a function that checks the second - order gradients of the pooling function. Compute the second - order pool.
@deven-amd please check if these logging changes are truly required in this PR
@@ -308,6 +308,11 @@ func newServerRunner(ctx context.Context, args serverRunnerParams) (*serverRunne if cfg.DataStreams.Enabled && args.KibanaConfig != nil { cfg.Kibana.ClientConfig = *args.KibanaConfig } + if args.Beat.Manager != nil && args.Beat.Manager.Enabled() { + // If we're running in managed mode we do not want to + // communicate with kibana. + cfg.Kibana.Enabled = false + } runServerContext, cancel := context.WithCancel(ctx) return &serverRunner{
[Create->[MustNewConfigFrom,NewIntegrationConfig,Merge],Stop->[Unlock,Infof,Seconds,Do,stopServer,Lock,Wait],start->[Unlock,Stop,NewRunnerList,AfterFunc,Close,Start,Enabled,Lock,MustRegisterList,Wait],Wait->[Wait],CheckConfig->[NewIntegrationConfig],ProcessBatch->[Value,reporter],Start->[Done,Add,run],registerPipelineCallback->[NewConnectedClient,New,IsEnabled,RegisterConnectCallback,ShouldOverwrite,RegisterPipelines,Info],run->[wrapRunServer,Stop,WithACKer,Enabled,NewPublisher,Open,NewDiscardUnsampledBatchProcessor,wrapRunServerWithPreprocessors],Run->[start,Background,WithCancel,Wait],Warn,NewConfigFrom,SetChild,Enabled,NewStore,NewConfig,Tracer,SetBlockProfileRate,Listener,Chained,New,Go,IsEnabled,Errorf,NewClient,MustCompile,Wait,serve,Named,Unpack,NewLogger,Name,WithContext,registerPipelineCallback,Config,ReplaceAll,Active,WithCancel,String,SetMutexProfileFraction,SetString,Load]
Creates a new serverRunner object. Stop starts the server.
For some reason I can't find the code again, but our default unmarshal behavior for bool type is to set it to true if it's not found. Even if it is set, apm-server doesn't communicate with kibana if running in managed mode.
@@ -194,6 +194,10 @@ void ParallelFillCommunicator::PrintData(std::ostream& rOStream) const void ParallelFillCommunicator::ComputeCommunicationPlan(ModelPart& rModelPart) { + KRATOS_TRY; + + KRATOS_ERROR_IF_NOT(rModelPart.HasNodalSolutionStepVariable(PARTITION_INDEX)) << "\"PARTITION_INDEX\" missing as solution step variable in ModelPart \"" << rModelPart.Name() << "\"!" << std::endl; + constexpr unsigned root_id = 0; Communicator::Pointer pnew_comm = Kratos::make_shared< MPICommunicator >(&rModelPart.GetNodalSolutionStepVariablesList(), DataCommunicator::GetDefault());
[No CFG could be retrieved]
Find all ghost nodes on this process and mark the corresponding neighbour process for communication. - - - - - - - - - - - - - - - - - -.
Note that DataCommunicator should have an OrAll option for bool (and, if it is not there yet, I probably should add it)
@@ -50,7 +50,8 @@ public class TestParquetTimestampUtils { Timestamp timestamp = Timestamp.valueOf(timestampString); Binary timestampBytes = getNanoTime(timestamp, false).toBinary(); - long decodedTimestampMillis = getTimestampMillis(timestampBytes); - assertEquals(decodedTimestampMillis, timestamp.toEpochMilli()); + DecodedTimestamp decodedTimestamp = decode(timestampBytes); + assertEquals(decodedTimestamp.getEpochSeconds() * MILLISECONDS_PER_SECOND + decodedTimestamp.getNanosOfSecond() / NANOSECONDS_PER_MILLISECOND, timestamp.toEpochMilli()); + assertEquals(decodedTimestamp.getNanosOfSecond(), timestamp.getNanos()); } }
[TestParquetTimestampUtils->[testGetTimestampMillis->[assertTimestampCorrect],assertTimestampCorrect->[getTimestampMillis,toEpochMilli,toBinary,assertEquals,valueOf],testInvalidBinaryLength->[getMessage,getTimestampMillis,toErrorCode,fromByteArray,assertEquals,getErrorCode]]]
Checks if the given timestamp string is correct.
convert `timestampString` to LDTime so that the assertion does not need multiplcation and division
@@ -83,7 +83,7 @@ abstract class AbstractEpollChannel extends AbstractChannel implements UnixChann } @Override - public final FileDescriptor fd() { + public final Socket fd() { return fileDescriptor; }
[AbstractEpollChannel->[modifyEvents->[isOpen],newDirectBuffer->[newDirectBuffer],doDisconnect->[doClose],doBeginRead->[setFlag],isOpen->[isOpen],doWriteBytes->[setFlag],AbstractEpollUnsafe->[epollOutReady->[flush0],shutdownInput->[isOpen,shutdown],checkResetEpollIn->[run->[epollInReady],isInputShutdown0],recvBufAllocHandle->[recvBufAllocHandle],epollRdHupReady->[isActive,epollInReady],flush0->[isFlagSet,flush0],clearEpollIn0->[clearFlag],clearEpollRdHup->[clearFlag]]]]
Returns the file descriptor of the file being polled.
I'm still not sure about this change...
@@ -782,6 +782,12 @@ public class KsqlConfig extends AbstractConfig { KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_DEFAULT, Importance.MEDIUM, KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_DOC + ).define( + KSQL_FOREIGN_KEY_JOINS_ENABLED, + Type.BOOLEAN, + false, + Importance.MEDIUM, + "Feature flag for foreign key joins, currently under development." ).define( KSQL_SHUTDOWN_TIMEOUT_MS_CONFIG, Type.LONG,
[KsqlConfig->[ConfigValue->[isResolved->[isResolved]],buildConfigDef->[defineLegacy,defineCurrent],getKsqlStreamConfigProps->[getKsqlStreamConfigProps],buildStreamingConfig->[applyStreamsConfig],streamTopicConfigNames->[getName],getAllConfigPropsWithSecretsObfuscated->[getKsqlConfigPropsWithSecretsObfuscated],overrideBreakingConfigsWithOriginalValues->[KsqlConfig],empty->[KsqlConfig],cloneWithPropertyOverwrite->[KsqlConfig,getKsqlStreamConfigProps,buildStreamingConfig],getKsqlStreamConfigPropsWithSecretsObfuscated->[convertToObfuscatedString],resolveStreamsConfig->[ConfigValue,empty],CompatibilityBreakingConfigDef->[defineCurrent->[define],defineLegacy->[define],define->[define]],configDef,buildStreamingConfig]]
Builds a config definition for the given generation. Protected members of the configuration object are not defined in the schema registry. Enable the security manager for UDFs. Default is true and will stop UDFs from executing region MetricsConfig methods This method defines all configuration options that are not specified in the schema.
Not sure if this should be `true` for now? And we only disable by default is we don't make it?
@@ -33,7 +33,7 @@ public final class RestTemplateInterceptor implements ClientHttpRequestIntercept private final Tracer tracer; - public RestTemplateInterceptor(Tracer tracer) { + public RestTemplateInterceptor(final Tracer tracer) { this.tracer = tracer; }
[RestTemplateInterceptor->[intercept->[withSpan,inject,onRequest,onResponse,current,end,getOrCreateSpan,execute]]]
Intercepts a request with a .
Think we're not supposed to have this per #921 (though we can delete at the same time so no worries)
@@ -68,7 +68,7 @@ module AlaveteliExternalCommand search_path = AlaveteliConfiguration::utility_search_path search_path.each do |d| program_path = File.join(d, program_name) - return program_name if File.file? program_path and File.executable? program_path + return program_path if File.file? program_path and File.executable? program_path end raise "Could not find #{program_name} in any of #{search_path.join(', ')}" end
[find_program->[file?,executable?,raise,join,each,utility_search_path],run->[status,new,empty?,is_a?,print,has_key?,err,find_program,message,out,run,join,exited,timed_out,last,puts],require]
Finds the program in the system if it exists.
Use && instead of and.<br>Line is too long. [90/80]
@@ -94,4 +94,12 @@ public class SalesforceConnector extends RestApiConnector { public String getFullUri(String resourcePath) { return StringUtils.removeEnd(getServiceBaseUrl(), "/") + StringUtils.removeEnd(resourcePath, "/"); } + + public String getAccessToken() { + return accessToken; + } + + public String getInstanceUrl() { + return instanceUrl; + } }
[SalesforceConnector->[getFullUri->[getServiceBaseUrl]]]
Get full uri.
The following two can be package-level access.
@@ -10,13 +10,17 @@ setuptools.setup( python_requires = '>=3.5', install_requires = [ + 'azureml', + 'azureml-sdk', 'requests', 'ruamel.yaml', 'psutil', 'astor', 'schema', 'PythonWebHDFS', - 'colorama' + 'colorama', + 'netifaces', + 'websockets' ], author = 'Microsoft NNI Team',
[find_packages,setup]
Creates a new object.
remove this azureml requirements too.
@@ -30,8 +30,11 @@ import {parseJson} from '../../../src/json'; import {setStyles} from '../../../src/style'; import {triggerAnalyticsEvent} from '../../../src/analytics'; -/** @const */ -const MIN_INTERVAL = 4; +/** @const {number} */ +const FIRST_AD_MIN = 7; + +/** @const {number} */ +const MIN_INTERVAL = 8; /** @const */ const TAG = 'amp-story-auto-ads';
[No CFG could be retrieved]
Creates an element with a specific unique identifier. The i - AMPHTML - specific class for the node.
does this mean the 8th page can be an ad?
@@ -18,6 +18,9 @@ from superdesk.errors import SuperdeskApiError logger = logging.getLogger(__name__) +subscriber_types = ['broadcast', 'digital', 'wire'] +SUBSCRIBER_TYPES = namedtuple('SUBSCRIBER_TYPES', ['BROADCAST', 'DIGITAL', 'WIRE'])(*subscriber_types) + class SubscribersResource(Resource): schema = {
[SubscribersService->[on_update->[_validate_seq_num_settings],generate_sequence_number->[update_key,format,get,set_key],on_create->[_validate_seq_num_settings],_validate_seq_num_settings->[badRequestError,get]],SubscribersResource->[rel],getLogger]
Create a new object that represents a single unique unique identifier in the system. The base schema for all the relations of a single node.
Just to be sure that I understand this WIRE - publish to wire clients DIGITAL - publish to digital clients, used mostly for takes/master files BROADCAST - same as digital but in the case of takes will receive the take stories beside the take package, the only broadcast subscriber we know of currently, is PublicAPI Are these assumptions correct?
@@ -448,13 +448,13 @@ public class Route implements Serializable, Iterable<Territory> { private Tuple<ResourceCollection, Boolean> getFuelCostsAndIfChargedFlatFuelCost( final Unit unit, final GameData data, final boolean ignoreFlat) { final ResourceCollection resources = new ResourceCollection(data); - boolean chargedFlatFuelCost = false; if (Matches.unitIsBeingTransported().test(unit)) { - return Tuple.of(resources, chargedFlatFuelCost); + return Tuple.of(resources, false); } final UnitAttachment ua = UnitAttachment.get(unit.getType()); resources.add(ua.getFuelCost()); resources.multiply(getMovementCost(unit).setScale(0, RoundingMode.CEILING).intValue()); + boolean chargedFlatFuelCost = false; if (!ignoreFlat && Matches.unitHasNotBeenChargedFlatFuelCost().test(unit)) { resources.add(ua.getFuelFlatCost()); chargedFlatFuelCost = true;
[Route->[getMiddleSteps->[numberOfSteps],getFuelCostsAndIfChargedFlatFuelCost->[add],equals->[equals],join->[Route,add],getScrambleFuelCostCharge->[getMovementFuelCostCharge,Route,add],getFuelChanges->[add],getTerritoryBeforeEnd->[getStart,getTerritoryAtStep],getSteps->[numberOfSteps],anyMatch->[anyMatch],hasNoSteps->[hasSteps],findMovementCost->[getMovementCost,add],getMatches->[getMatches],hasWater->[anyMatch],getFuelCostsAndUnitsChargedFlatFuelCost->[getStart,getMatches,add],isUnload->[hasSteps],isLoad->[hasSteps],hasNeutralBeforeEnd->[getMiddleSteps,equals],add->[add],getAllTerritories->[add],toString->[toString],iterator->[iterator]]]
Get the Fuel cost for a unit and if the unit has a charged flat f.
The more tuples I see, the worse they seem to get. No idea what kind of value this method actually returns
@@ -785,6 +785,12 @@ class CustomFormModelInfo(object): Date and time (UTC) when model training was started. :ivar ~datetime.datetime training_completed_on: Date and time (UTC) when model training completed. + :ivar display_name: Optional user defined model name (max length: 1024). + :vartype display_name: str + :ivar properties: Optional model properties. + :vartype properties: ~azure.ai.formrecognizer.CustomFormModelProperties + .. versionadded:: v2.1-preview + The *display_name* and *properties* properties """ def __init__(self, **kwargs):
[TrainingDocumentInfo->[_from_generated->[_from_generated]],FormField->[_from_generated->[get_field_value,_from_generated],_from_generated_unlabeled->[_from_generated_unlabeled]],FormWord->[_from_generated->[get_bounding_box]],FormLine->[_from_generated->[_from_generated,get_bounding_box]],FormPage->[_from_generated_receipt->[_from_generated]],CustomFormModel->[_from_generated->[_from_generated,_from_generated_unlabeled]],CustomFormSubmodel->[_from_generated_labeled->[_from_generated_labeled],_from_generated_unlabeled->[_from_generated_unlabeled]],FieldData->[_from_generated->[resolve_element,get_bounding_box],_from_generated_unlabeled->[resolve_element,get_bounding_box]],FormTableCell->[_from_generated->[resolve_element,get_bounding_box]]]
Initialize a Cluster object.
return False for is_composed if not returned
@@ -612,6 +612,12 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu } if i > 28 { log.Printf("Error while running kubectl top nodes:%s\n", err) + pods, err := pod.GetAllByPrefix("metrics-server", "kube-system") + Expect(err).NotTo(HaveOccurred()) + for _, p := range pods { + err = p.Logs() + Expect(err).NotTo(HaveOccurred()) + } log.Println(string(out)) } }
[ValidateOmsAgentLogs,ParseInput,GetAllByPrefix,Should,GetAll,Expose,New,CreateDeploymentHPA,MustCompile,RequiresDocker,ValidateResources,CreateIfNotExist,PrintCommand,CreateServiceFromFile,ScaleDeployment,RationalizeReleaseAndVersion,NotTo,Println,GetByPrefix,ReplaceContainerImageFromFile,WaitForExternalIP,DeleteNetworkPolicy,Now,HasZonesForAllAgentPools,CheckLinuxOutboundConnection,CombinedOutput,IsRBACEnabled,IsKubernetesVersionGe,Validate,HasWindowsAgents,FindString,UnixNano,CreateNetworkPolicyFromFile,Printf,ValidatePVC,Getwd,GetWindowsTestImages,ValidateCurlConnection,Pods,Sleep,GetSSHKeyPath,CreateWindowsDeploy,WaitForReplicas,ValidateAzureFile,HasLinuxAgents,HasNetworkPolicy,CreateLinuxDeployIfNotExist,To,CreatePersistentVolumeClaimsFromFile,CreateLinuxDeploy,WaitOnReady,CreateJobFromFile,Get,GetEnvironmentVariable,CheckWindowsOutboundConnection,NodeCount,GetServerName,NewSource,Sprintf,HasAddon,Intn,GetNodePort,CreatePodFromFile,Delete,RunCommandMultipleTimes,RunLinuxDeploy,ParseOutput,HasAvailabilityZones,HasNSeriesSKU,Join,Contains,ParseConfig,Remove,CreateStorageClassFromFile,Command,GetAddressByType,Version,ToNot]
98 - 15 - 15 NewSource returns a new source that captures the current time of the last run of the php.
I would argue we shouldn't return an error for logging debugging data in this "if not successful" block since that will hide the important error which is `Expect(success).To(BeTrue())` on line 624. The logs are bonus, not part of the actual test.
@@ -212,7 +212,7 @@ def main(): try: start_time = timeit.default_timer() subprocess.check_output(fixed_args + dev_arg + case_args, - stderr=subprocess.STDOUT, universal_newlines=True) + stderr=subprocess.STDOUT, universal_newlines=True, encoding='utf-8') execution_time = timeit.default_timer() - start_time except subprocess.CalledProcessError as e: print(e.output)
[main->[option_to_args->[resolve_arg],collect_result,option_to_args,temp_dir_as_path,prepare_models,parse_args],parse_args->[parse_args],main]
Entry point for the missing - nag - sequence command. Check if a missing key is found in the model_info. Exit if there is no n - ary case in the device.
FWIW, this should be unnecessary once I fix the CI environment (my MR is on review).
@@ -57,8 +57,9 @@ if ($_SESSION['userlevel'] == 11) { <label for="id" class="col-sm-2 control-label">Device:</label> <div class="col-sm-10"> <select name="id" class="form-control" id="id"> + <option>Please select</option> <?php - foreach (dbFetchRows("SELECT * FROM `devices` ORDER BY `hostname`") as $data) { + foreach (dbFetchRows("SELECT `device_id`, `hostname` FROM `devices` ORDER BY `hostname`") as $data) { echo("<option value='".$data['device_id']."'>".$data['hostname']."</option>"); }
[No CFG could be retrieved]
Displays a hidden input that selects all of the devices in the system and then displays a checkbox.
Worth adding `disabled="disabled"` here?
@@ -143,6 +143,14 @@ public abstract class AbstractX509PrincipalResolver extends PersonDirectoryPrinc } catch (final CertificateParsingException e) { LOGGER.warn("Error parsing subject alternative names to get rfc822 email [{}]", e.getMessage()); } + try { + val x509subjectUPN = X509UPNExtractor.extractUPNString(certificate); + if (x509subjectUPN != null) { + attributes.put("x509subjectUPN", CollectionUtils.wrapList(x509subjectUPN)); + } + } catch (final CertificateParsingException e) { + LOGGER.warn("Error parsing subject alternative names to get User Principal Name as an attribute [{}]", e.getMessage()); + } } return attributes; }
[AbstractX509PrincipalResolver->[retrievePersonAttributes->[retrievePersonAttributes,extractPersonAttributes]]]
Extract person attributes.
log the stack trace please, perhaps under debug or trace levels
@@ -275,6 +275,13 @@ class CMake(object): def test(self, args=None, build_dir=None, target=None, output_on_failure=False): if not self._conanfile.should_test: return + if cross_building(self._conanfile.settings): + settings = self._conanfile.settings + os_build, arch_build, _, _ = get_cross_building_settings(settings) + os_host = settings.get_safe("os") + arch_host = settings.get_safe("arch") + if (os_host and os_build and os_host != os_build) or (arch_host and arch_build and arch_host != arch_build): + return if not target: target = "RUN_TESTS" if self.is_multi_configuration else "test"
[CMake->[test->[_build],is_multi_configuration->[is_multi_configuration],install->[_build],configure->[_run,_get_dirs],_build->[_run],_get_dirs->[get_dir]]]
Test if a node is missing a configuration.
Is it possible that someone has test functionality in their CMakeLists that is able to run their test in some kind of emulator/virtualbox? Maybe able to run 32bits apps in 64bits environment (like Windows)? Just wondering if this should be explicitly disabled by users in the recipes, not internally, or at least possible to opt-in/out this behavior with an argument to the ``cmake.test()`` method.
@@ -120,4 +120,16 @@ class Result < ActiveRecord::Base end true end + + def create_marks + @assignment = self.submission.assignment + @marks_map = Hash.new + @assignment.get_criteria.each do |criterion| + mark = criterion.marks.find_or_create_by(result_id: self.id) + @marks_map[criterion.id] = mark + + mark.save(validate: false) + self.update_total_mark + end + end end
[Result->[unrelease_partial_results->[released_to_students],check_for_nil_marks->[add,t,find_by,count],get_negative_extra_points->[reduce],get_total_extra_percentage_as_points->[total_mark],unrelease_results->[released_to_students,save],get_subtotal->[reduce],mark_as_partial->[released_to_students,save,marking_state],student_marks_by_assignment->[group,pluck],get_total_extra_percentage->[reduce],get_positive_extra_points->[reduce],update_total_mark->[update_attributes,max],get_total_extra_points->[reduce],belongs_to,before_update,before_save,scope,not_eq,validates_numericality_of,arel_table,or,where,validates_presence_of,has_many,validates_inclusion_of,lambda,not,values]]
Checks if a is present in the current assignment and that it is not incomplete.
Don't use `@` here, all these variables should be local, not instance variables.
@@ -78,8 +78,12 @@ class LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha): class SignupForm(forms.ModelForm, FormWithReCaptcha): password = forms.CharField( - widget=forms.PasswordInput) + widget=forms.PasswordInput, + label=pgettext( + 'Password', 'Password')) email = forms.EmailField( + label=pgettext( + 'Email', 'Email'), error_messages={ 'unique': pgettext_lazy( 'Registration error',
[logout_on_password_change->[update_session_auth_hash],LoginForm->[__init__->[super,get],pgettext,EmailField],FormWithReCaptcha->[__new__->[super,ReCaptchaField]],PasswordResetForm->[get_users->[filter],send_mail->[delay]],ChangePasswordForm->[__init__->[super]],get_address_form->[AddressMetaForm,dict,address_form_class,get_address_form_class,country_code_for_region,format,is_valid],SignupForm->[save->[save,super,set_password],Meta->[pgettext_lazy],__init__->[fields,super],CharField,pgettext_lazy,EmailField]]
The address form and preview form are the same as the user s address form and the form The password reset form class.
Can we put this on one line, please? Same with the above's
@@ -181,8 +181,9 @@ class Address: :param generated_base_target_name: If this Address refers to a generated subtarget, this stores the target_name of the original base target. """ - self._spec_path = self.sanitize_path(spec_path) + self.validate_path(spec_path) self.check_target_name(spec_path, target_name) + self._spec_path = spec_path self._target_name = target_name self.generated_base_target_name = generated_base_target_name self._hash = hash((self._spec_path, self._target_name, self.generated_base_target_name))
[BuildFileAddress->[to_address->[Address]],Address->[sanitize_path->[InvalidSpecPath],check_target_name->[InvalidTargetName],parse->[parse_spec],__init__->[sanitize_path,check_target_name]],parse_spec->[prefix_subproject,normalize_absolute_refs]]
Initialize the object with the given parameters.
Should probably remove the return value from this method too.
@@ -406,7 +406,7 @@ plt.colorbar(im, ax=ax) # ========== # .. [1] Jean-Rémi King et al. (2018) "Encoding and Decoding Neuronal Dynamics: # Methodological Framework to Uncover the Algorithms of Cognition", -# in press. https://hal.archives-ouvertes.fr/hal-01848442/ +# 2018. The Cognitive Neurosciences VI. # .. [2] Zoltan J. Koles. The quantitative extraction and topographic mapping # of the abnormal components in the clinical EEG. Electroencephalography # and Clinical Neurophysiology, 79(6):440--447, December 1991.
[set_xlabel,set_ylabel,plot_patterns,plot_joint,filter,SlidingEstimator,StandardScaler,EvokedArray,axvline,make_pipeline,imshow,dict,get_data,set_title,read_raw_fif,axhline,Scaler,plot_filters,plot,LinearModel,print,diag,find_events,data_path,mean,get_coef,subplots,arange,cross_val_multiscore,LogisticRegression,legend,Vectorizer,CSP,GeneralizingEstimator,colorbar,Epochs,pick_types,fit]
Much larger than in the sensor space or can be much smaller than in the source space Additional details about the object.
why removing this link?
@@ -82,7 +82,14 @@ class Libxsmm(MakefilePackage): ] # JIT (AVX and later) makes MNK, M, N, or K spec. superfluous -# make_args += ['MNK=1 4 5 6 8 9 13 16 17 22 23 24 26 32'] + # make_args += ['MNK=1 4 5 6 8 9 13 16 17 22 23 24 26 32'] + + if '%aocc' in spec: + make_args += ['GNU=1'] + make_args += ['AVX=2'] + make_args += ['INTEL=0'] + make_args += ['MIC=0'] + make_args += ['LDFLAGS+=-Wl,-z,muldefs'] # include call trace as the build is already de-optimized if '+debug' in spec:
[Libxsmm->[build->[make,format],install->[install,join_path,basename,install_tree,rename,glob,join,mkdirp],libs->[find_libraries,len],variant,conflicts,depends_on,version]]
Builds a sequence of objects from the given specification.
I think this option should be conditional on the target.
@@ -182,9 +182,15 @@ public class QueryableIndexStorageAdapter implements StorageAdapter } @Override + @Nullable public String getColumnTypeName(String columnName) { final ColumnHolder columnHolder = index.getColumnHolder(columnName); + + if (columnHolder == null) { + return null; + } + try (final BaseColumn col = columnHolder.getColumn()) { if (col instanceof ComplexColumn) { return ((ComplexColumn) col).getTypeName();
[QueryableIndexStorageAdapter->[getMaxIngestedEventTime->[getMaxTime],getColumnCapabilities->[getCapabilities,getColumnCapabilities],analyzeFilter->[getNumRows],computeCursorInterval->[getMinTime,getMaxTime],getAvailableMetrics->[getAvailableDimensions],getMetadata->[getMetadata],getNumRows->[getNumRows],getAvailableDimensions->[getAvailableDimensions],makeVectorCursor->[canVectorize]]]
Gets the type name of the column.
Given this seems like new behavior, Can I suggest returning Optional to force the refactor to consider all the places where this method is called and ensure the proper null checks are in place?
@@ -386,6 +386,7 @@ class OrderAddNote(BaseMutation): app=info.context.app, message=cleaned_input["message"], ) + transaction.on_commit(lambda: info.context.plugins.order_updated(order)) return OrderAddNote(order=order, event=event)
[clean_refund_payment->[clean_payment],OrderVoid->[perform_mutation->[OrderVoid,clean_void_payment,try_payment_action]],clean_order_capture->[clean_payment],OrderLineUpdate->[clean_input->[validate_order]],OrderLinesCreate->[perform_mutation->[OrderLinesCreate,validate_order,validate_variants,validate_lines,add_lines_to_order]],OrderMarkAsPaid->[perform_mutation->[clean_billing_address,try_payment_action,OrderMarkAsPaid]],OrderAddNote->[Arguments->[OrderAddNoteInput],perform_mutation->[OrderAddNote,clean_input]],OrderCancel->[perform_mutation->[OrderCancel,clean_order_cancel]],clean_void_payment->[clean_payment],OrderLineDelete->[perform_mutation->[OrderLineDelete,validate_order]],OrderUpdateShipping->[Arguments->[OrderUpdateShippingInput],perform_mutation->[OrderUpdateShipping,save,clean_order_update_shipping,validate_order]],OrderConfirm->[perform_mutation->[get_instance,save,OrderConfirm]],OrderRefund->[perform_mutation->[clean_refund_payment,OrderRefund,try_payment_action]],OrderUpdate->[save->[save],Arguments->[OrderUpdateInput]],OrderCapture->[perform_mutation->[clean_order_capture,OrderCapture,try_payment_action]]]
Perform a mutation on a node.
I am not convinced, that adding a note to Order should trigger an event `order updated`. I wouldn't say that this is something which changes an order.
@@ -18,7 +18,16 @@ const fetch = require('node-fetch'); const fs = require('fs'); const path = require('path'); +const EXTRA_URL_PARAM = { + 'analytics': 'amp-analytics-performance-param', +}; +const ANALYTICS_PARAM = Object.keys(EXTRA_URL_PARAM) + .map((key) => `${key}=${EXTRA_URL_PARAM[key]}`) + .toString(); const CDN_URL = 'https://cdn.ampproject.org/'; +const AMP_JS_PATH = '/dist/amp.js'; +const LOCAL_PATH_REGEXP = /dist\/(v0\/amp-[A-Za-z\-0-9\.]+).max(.js)/; +const CDN_ANALYTICS_REGEXP = /https:\/\/cdn.ampproject.org\/rtv\/\d{15}\/v0\/analytics-vendors\/([\.\-\_0-9A-Za-z]+\.json)/; const CONTROL = 'control'; const EXPERIMENT = 'experiment'; const CACHE_PATH = path.join(__dirname, './cache');
[No CFG could be retrieved]
Creates a cache directory if it does not exist. Download a file from the cache and return the path to the cache file.
Could you explain when would it fetch non compiled script? My understanding is that this should never happen.
@@ -37,6 +37,9 @@ type Alias struct { Stack StringInput // The previous project of the resource. If not provided, defaults to `context.GetProject()`. Project StringInput + // There is no parent resource. We need to because go does not + // allow distinguishing if no parent is passed from passing `nil` to parent. + Unparent BoolInput } func (a Alias) collapseToURN(defaultName, defaultType string, defaultParent Resource,
[collapseToURN->[ToURNOutput,URN,New],HasPrefix,LastIndex,ApplyT,ToStringOutput]
collapseToURN collapses an alias into a URN.
This does not look very neat in the public API. Can we discuss a few alternatives ?
@@ -342,11 +342,11 @@ module AutomatedTestsHelper # Create TestResult object # (Build failures and errors will be logged and stored as well for diagnostic purposes) - TestResult.create(:filename => filename, - :file_content => data, - :submission_id => result.submission.id, - :status => status, - :user_id => @current_user.id) + TestResult.create(filename: filename, + file_content: data, + submission_id: result.submission.id, + status: status, + user_id: @current_user.id) end # Send output to parser(s) if any
[run_ant_file->[short_identifier,system,each_line,new,exists?,cd,create,id,raise,pwd,now,join,close,parse_test_output,exitstatus,t,open,l],delete_test_repo->[exists?,markus_config_automated_tests_repository,rm_rf,repo_name,join],add_parser_file_link->[new,render,link_to_function,escape_javascript,t],export_repository->[exists?,export,markus_config_automated_tests_repository,rm_rf,message,repo_name,join,mkdir],add_lib_file_link->[new,render,link_to_function,escape_javascript,t],add_test_file_link->[new,render,link_to_function,escape_javascript,t],copy_ant_files->[short_identifier,exists?,cd,student?,mv,markus_config_automated_tests_repository,filename,raise,pwd,glob,cp,join,mkdir,each,t,cp_r],parse_test_output->[system,each_line,new,cd,find_by_filetype,pwd,join,close,open],create_ant_test_files->[short_identifier,new,empty?,filetype,markus_config_automated_tests_repository,filename,join,reload,save,assignment,makedirs],process_test_form->[nil?,enable_test,test_files_attributes,size,original_filename,include?,raise,find_by_id,tokens_per_day,t,each,respond_to?,each_key,delete],can_run_test?->[admin?,tokens,token,student?,include?,raise,ta?,decrease_tokens,t],export_configuration_files->[short_identifier,write,group_name,markus_config_automated_tests_repository,touch,join,close,open,api_key]]
Runs Ant and returns the status of the last build in the system Parse the test output of an unknown node in the repository.
Align the elements of a hash literal if they span more than one line.
@@ -12,9 +12,11 @@ from pymongo.collection import Collection from pymongo.errors import AutoReconnect, OperationFailure from pymongo.son_manipulator import NamespaceInjector +from pulp.common import error_codes + from pulp.server import config from pulp.server.compat import wraps -from pulp.server.exceptions import PulpException +from pulp.server.exceptions import PulpCodedException, PulpException _CONNECTION = None
[get_collection->[PulpCollectionFailure,PulpCollection],PulpCollection->[__init__->[decorate_instance]]]
Creates a new object. Process the size of the if.
I think this gets grouped with the imports above since this module is not inside the same package (`pulp.server`) like the ones in this group.
@@ -23,13 +23,14 @@ from sklearn.svm import SVC import logging import numpy as np - LOG = logging.getLogger('sklearn_classification') + def load_data(): '''Load dataset, use 20newsgroups dataset''' digits = load_digits() - X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, random_state=99, test_size=0.25) + X_train, X_test, y_train, y_test = train_test_split( + digits.data, digits.target, random_state=99, test_size=0.25) ss = StandardScaler() X_train = ss.fit_transform(X_train)
[run->[score,report_final_result,debug,fit],load_data->[fit_transform,train_test_split,load_digits,StandardScaler,transform],get_model->[SVC,get],load_data,debug,exception,getLogger,update,get_model,get_next_parameter,get_default_parameters,run]
Load dataset with 20newsgroups dataset.
please remove empty lines
@@ -74,7 +74,7 @@ class Test3rdPartyContext(SerialMixin, unittest.TestCase): def tearDown(self): cuda.close() - def test_attached_primary(self): + def test_attached_primary(self, extra_work=lambda: None): # Emulate primary context creation by 3rd party the_driver = driver.driver hctx = driver.drvapi.cu_context()
[TestContextAPI->[test_forbidden_context_switch->[switch_gpu],test_accepted_context_switch->[switch_gpu]]]
Tear down the device.
It doesn't look like any of the callers use this `extra_work` argument. Is this left over from some experiment?
@@ -114,11 +114,12 @@ class Receiver: fragments. ''' self._protocol = protocol - self._current_consumer = self._HEADER + self._current_consumer = self._HEADER # type: ignore[assignment] # https://github.com/python/mypy/issues/2427 self._message = None + self._partial = None self._buf_header = None - async def consume(self, fragment: Fragment) -> Message[Any]: + async def consume(self, fragment: Fragment) -> Message[Any]|None: ''' Consume individual protocol message fragments. Args:
[Receiver->[_HEADER->[_assume_text],consume->[_current_consumer],_assume_text->[ValidationError,isinstance],_BUFFER_HEADER->[_assume_text],_CONTENT->[_assume_text,assemble,_check_complete,append],_METADATA->[_assume_text,append],_BUFFER_PAYLOAD->[assemble_buffer,_check_complete,_assume_binary],_assume_binary->[ValidationError,isinstance]],getLogger]
Configure a receiver with a specific Bokeh protocol object to assemble collected message fragments.
Same as `create`, we would need to make changes in the approach to have better experience with MyPy
@@ -290,6 +290,8 @@ function isExperimentOn_(id) { return [AMP_OPT_IN_COOKIE.BETA, _LEGACY_AMP_OPT_IN_COOKIE.BETA].includes( optInCookieValue ); + case NIGHTLY_CHANNEL_ID: + return optInCookieValue == AMP_OPT_IN_COOKIE.NIGHTLY; case RTV_CHANNEL_ID: return RTV_PATTERN.test(optInCookieValue); default:
[No CFG could be retrieved]
Updates the data - on attribute of a single experiment. The function that sets the cookie and reloads the window.
Curious: Can the legacy cookie stuff go away?
@@ -151,6 +151,16 @@ class Node(object): source_node = dependant.src return source_node.check_downstream_exists(down_require) + def check_loops(self, new_node): + if self.ref == new_node.ref: + return self + if not self.dependants: + return + assert len(self.dependants) == 1 + dependant = self.dependants[0] + source_node = dependant.src + return source_node.check_loops(new_node) + @property def package_id(self): return self._package_id
[DepsGraph->[_order_levels->[neighbors,inverse_neighbors],add_edge->[Edge,add_edge]],Node->[check_downstream_exists->[check_downstream_exists],propagate_downstream->[TransitiveRequirement,propagate_downstream]]]
Check if a node has a dependency that is downstream of a node.
Is this because iterating "Depth First"?
@@ -586,6 +586,11 @@ class CustomFormModel(object): List of any training errors. :ivar list[~azure.ai.formrecognizer.TrainingDocumentInfo] training_documents: Metadata about each of the documents used to train the model. + :ivar str display_name: Optional user defined model name (max length: 1024). + :ivar properties: Optional model properties. + :vartype properties: ~azure.ai.formrecognizer.CustomFormModelProperties + .. versionadded:: v2.1-preview + The *display_name* and *properties* properties. """ def __init__(self, **kwargs):
[TrainingDocumentInfo->[_from_generated->[_from_generated]],FormField->[_from_generated->[get_field_value,_from_generated],_from_generated_unlabeled->[_from_generated_unlabeled]],FormWord->[_from_generated->[get_bounding_box]],FormLine->[_from_generated->[_from_generated,get_bounding_box]],FormPage->[_from_generated_receipt->[_from_generated]],CustomFormModel->[_from_generated->[_from_generated,_from_generated_unlabeled]],CustomFormSubmodel->[_from_generated_labeled->[_from_generated_labeled],_from_generated_unlabeled->[_from_generated_unlabeled]],FieldData->[_from_generated->[resolve_element,get_bounding_box],_from_generated_unlabeled->[resolve_element,get_bounding_box]],FormTableCell->[_from_generated->[resolve_element,get_bounding_box]]]
Initialize a Cluster object.
Are this docstring the user can see? I would recommend to removing the size restriction because the service could change this at any point and forget to update the swagger. so better let them manage that.
@@ -400,7 +400,7 @@ def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, labels=None): exclude_labels = list() for ii in picks: if ii in exclude: - line_label = 'ICA %03d' % (ii + 1) + line_label = 'ICA %03d' % (ii) if labels is not None: annot = list() for this_label in labels_used:
[plot_ica_properties->[_create_properties_layout,set_title_and_labels]]
Plots average over epochs in ICA space. Plots the missing - block block number. Adds a button press event to the plot.
it was on purpose as channel names start at 1 not 0. Otherwise we write `"ICA #%d" % i` ie with the # sign. if you agree please close or suggest something to make it clearer.
@@ -358,11 +358,10 @@ def save(obj, filename=None, resources=None, title=None, state=None, validate=Tr ``/foo/myplot.html``) Args: - obj (Document or model object) : a plot object to save + obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display filename (str, optional) : filename to save document under (default: None) - If None, use the default state configuration, otherwise raise a - ``RuntimeError``. + If None, use the default state configuration. resources (Resources, optional) : A Resources config to use (default: None) If None, use the default state configuration, if there is one.
[output_server->[output_server],output_notebook->[output_notebook],push_notebook->[update],push->[_push_to_server],output_file->[output_file],_get_save_args->[_detect_filename],_show_notebook_with_state->[_CommsHandle]]
Saves an HTML file with the data for the current document.
This docstring is out-of-date. The implementation now will create a tempfile matching the `__file__` name instead of raising a RuntimeError.
@@ -32,9 +32,11 @@ public final class ReconConfigKeys { public static final String OZONE_RECON_DATANODE_ADDRESS_KEY = "ozone.recon.datanode.address"; + public static final String OZONE_RECON_ADDRESS_KEY = + "ozone.recon.address"; public static final String OZONE_RECON_DATANODE_BIND_HOST_KEY = "ozone.recon.datanode.bind.host"; public static final String OZONE_RECON_DATANODE_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_RECON_DATANODE_PORT_DEFAULT = 9865; + public static final int OZONE_RECON_DATANODE_PORT_DEFAULT = 9891; }
[No CFG could be retrieved]
region OZONE_RECON_DATANODE_ADDRESS OZONE_RECON_.
Suggest rename: ozone.recon.server.address
@@ -1,6 +1,8 @@ # frozen_string_literal: true Rails.application.configure do + # Verifies that versions and hashed value of the package contents in the project's package.json + config.webpacker.check_yarn_integrity = false # Settings specified here will take precedence over those in config/application.rb. # Code is not reloaded between requests.
[fetch,new,formatter,log_tags,lambda,consider_all_requests_local,asset_host,log_formatter,cache_classes,imgix,deprecation,r301,dump_schema_after_migration,logger,day,fallbacks,enabled,force_ssl,delivery_method,eager_load,default_url_options,service,present?,except,cache_store,perform_caching,split,custom_options,insert_before,proc,log_level,configure,slice,smtp_settings]
Configuration for a single application. Config for a single object.
Layout/IndentationWidth: Use 2 (not 4) spaces for indentation.
@@ -42,12 +42,14 @@ public class BloomDimFilter implements DimFilter private final BloomKFilter bloomKFilter; private final HashCode hash; private final ExtractionFn extractionFn; + private final FilterTuning filterTuning; @JsonCreator public BloomDimFilter( @JsonProperty("dimension") String dimension, @JsonProperty("bloomKFilter") BloomKFilterHolder bloomKFilterHolder, - @JsonProperty("extractionFn") ExtractionFn extractionFn + @JsonProperty("extractionFn") ExtractionFn extractionFn, + @JsonProperty("filterTuning") FilterTuning filterTuning ) { Preconditions.checkArgument(dimension != null, "dimension must not be null");
[BloomDimFilter->[hashCode->[hashCode],toString->[toString],equals->[equals]]]
PUBLIC METHODS ARE GENERATED BY DIMENSION - FILTER AND FUTURE This method is used to build a DimFilter from a DimensionFilter.
Are you missing a `@JsonProperty` getter for this?
@@ -1374,6 +1374,10 @@ class ProperSubtypeVisitor(TypeVisitor[bool]): def visit_union_type(self, left: UnionType) -> bool: return all([self._is_proper_subtype(item, self.orig_right) for item in left.items]) + def visit_type_guard_type(self, left: TypeGuardType) -> bool: + # TODO: What's the right thing to do here? + return False + def visit_partial_type(self, left: PartialType) -> bool: # TODO: What's the right thing to do here? return False
[are_args_compatible->[is_different],is_protocol_implementation->[build_subtype_kind,pop_on_exit,is_subtype],ProperSubtypeVisitor->[visit_union_type->[_is_proper_subtype],visit_callable_type->[is_callable_compatible,_is_proper_subtype],visit_typeddict_type->[_is_proper_subtype],visit_instance->[check_argument->[_is_proper_subtype],find_member,is_protocol_implementation,_is_proper_subtype,check_argument],visit_tuple_type->[_is_proper_subtype],visit_type_type->[_is_proper_subtype],__init__->[build_subtype_kind],_is_proper_subtype->[is_proper_subtype],visit_type_var->[_is_proper_subtype],visit_literal_type->[_is_proper_subtype]],is_more_precise->[is_proper_subtype],is_subtype_ignoring_tvars->[is_subtype],restrict_subtype_away->[restrict_subtype_away],is_callable_compatible->[_incompatible],SubtypeVisitor->[visit_union_type->[_is_subtype],visit_callable_type->[_is_subtype],visit_typeddict_type->[_is_subtype,is_equivalent],visit_instance->[check_argument->[],_is_subtype,check_type_parameter],visit_tuple_type->[_is_subtype],visit_type_type->[_is_subtype],visit_overloaded->[_is_subtype],visit_type_var->[_is_subtype],_is_subtype->[is_subtype],visit_literal_type->[_is_subtype]],_is_proper_subtype->[is_proper_subtype],is_equivalent->[is_subtype],is_proper_subtype->[pop_on_exit],_is_subtype->[is_subtype],non_method_protocol_members->[find_member]]
Check if the union type is a proper subtype of the type in the union.
I guess since we don't expect type guard types to be used for subtype checks, we can raise an error here as well?
@@ -49,6 +49,11 @@ func TestReplaceOrRollbackStore(t *testing.T) { require.True(t, bytes.Equal(writtenContent, replaceWith)) requireFilesCount(t, dir, 2) + + info, err := os.Stat(target) + require.NoError(t, err) + + require.Equal(t, perms, info.Mode()) }) t.Run("when save is not successful", func(t *testing.T) {
[RemoveAll,Dir,TempFile,Now,TempDir,Close,Error,Save,ReadFile,New,ReadDir,Unix,Equal,Join,Name,Remove,NoError,Write,True,NewReader,Sprintf,ReadAll,Run,Load]
TestReplaceOrRollbackStore verifies replacing a stored file — including that the replaced file keeps the expected permissions — with a subtest covering the case when saving fails.
please include windows fix in this cherry pick so we dont break 7.x build process
@@ -146,10 +146,6 @@ class DrqaAgent(Agent): saved_params = torch.load(fname, map_location=lambda storage, loc: storage) - if 'word_dict' in saved_params: - # for compatibility with old saves - self.word_dict.copy_dict(saved_params['word_dict']) - self.feature_dict = saved_params['feature_dict'] self.state_dict = saved_params['state_dict'] config.override_args(self.opt, saved_params['config'])
[DrqaAgent->[add_cmdline_args->[add_cmdline_args],save->[save],_find_target->[_positions],__init__->[dictionary_class]],SimpleDictionaryAgent->[add_cmdline_args->[add_cmdline_args]]]
Initialize the DocReaderModel from a saved file.
this section needs to stay
@@ -56,4 +56,11 @@ public class DecksTest extends RobolectricTest { } } } + + @Test + public void trim() { + assertThat(Decks.strip("A\nB C\t D"), is("A\nB C\t D")); + assertThat(Decks.strip("\n A\n\t"), is("A")); + assertThat(Decks.strip("Z::\n A\n\t::Y"), is("Z::A::Y")); + } }
[DecksTest->[ensureDeckList->[childMap,all,parents,getLong,save,notNull,addDeck,getDecks,byName,put],duplicateName->[assertThat,notNull,getString,checkIntegrity,is,getDecks,byName,load]]]
This method ensures that the deck list is populated.
I was referring to a test to ensure that a deck with a trailing newline in the parent won't break the system from a higher level Something along the lines of: Build: * `A::B\n` * `A::B\n::C` * `A::B\n::D\n` * Ensure that `deckDueTree` works And ensure that the deck list can be built.
@@ -157,6 +157,10 @@ export class IframeTransport { } ampDoc.body.removeChild(frameData.frame); delete IframeTransport.crossDomainIframes_[type]; + if (this.longTaskObserver_) { + this.longTaskObserver_.disconnect(); + delete this.longTaskObserver_; + } } /**
[No CFG could be retrieved]
Creates a unique value for a specific type of cross-domain iframe to differentiate messages, and sends an AMP Analytics trigger event to a vendor's cross-domain iframe (or queues it until the iframe is ready).
Let's set to null to be consistent with constructor
@@ -52,6 +52,10 @@ def interactive(opt, print_parser=None): opt = opt.parse_args() opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent' + if opt.get('eval_candidates', '') in ['inline', 'batch']: + print('[ Warning: using candidate type unavailable during interactive.' + ' Consider switching to "-ecands vocab" or "-ecands fixed". ]') + # Create model and assign it to the specified task agent = create_agent(opt, requireModelExists=True) world = create_task(opt, agent)
[setup_args->[ParlaiParser,add_cmdline_args,add_argument],interactive->[print_args,print,create_agent,parse_args,epoch_done,get,create_task,isinstance,parley,display],setup_args,parse_args,seed,interactive]
Run a model interactively against a local human agent, displaying each exchange.
nit i prefer spelling out long options in error messages `--eval-candidates` . It seems weird to have this exception for rankers in interactive. should we not have the error message in TorchRankerAgent?
@@ -69,6 +69,9 @@ export class ConfigurationPage extends React.Component<IConfigurationPageProps, }); }; + getContextList = contexts => Object.values(contexts).map(v => v['beans']).reduce((acc, e) => ({ ...acc, ...e })); + + render() { const { configuration } = this.props; const { filter } = this.state;
[No CFG could be retrieved]
A React component that renders a single configuration page.
why not just `v => v.beans`
@@ -1067,7 +1067,10 @@ public class InterpreterFactory implements InterpreterGroupFactory { public InterpreterSetting getDefaultInterpreterSetting(String noteId) { return getDefaultInterpreterSetting(getInterpreterSettings(noteId)); } - + + public boolean isBinded(String noteId, String replName) { + return getInterpreter(noteId, replName) != null; + } private InterpreterSetting getInterpreterSettingByGroup(List<InterpreterSetting> settings, String group) {
[InterpreterFactory->[getDefaultInterpreterSetting->[getDefaultInterpreterSetting,get,getInterpreterSettings],createRepl->[get],getInterpreterListFromJson->[getInterpreterListFromJson],remove->[saveToFile,remove],get->[get,remove,add],close->[add],removeRepository->[saveToFile],restart->[get],createOrGetInterpreterList->[createInterpretersForNote,get,getInterpreterInstanceKey],setInterpreters->[putNoteInterpreterSettingBinding],getDevInterpreter->[connectToRemoteRepl,createInterpreterGroup,add],createNewSetting->[createFromInterpreterSettingRef,saveToFile],closeNote->[removeInterpretersForNote,getInterpreterSettings],createInterpretersForNote->[add],getInterpreters->[getNoteInterpreterSettingBinding],createFromInterpreterSettingRef->[createFromInterpreterSettingRef],removeNoteInterpreterSettingBinding->[removeInterpretersForNote,get,remove],getNoteInterpreterSettingBinding->[get],getInterpreterSettings->[add,get,remove,getNoteInterpreterSettingBinding],setPropertyAndRestart->[loadInterpreterDependencies,saveToFile,get],addRepository->[saveToFile],putNoteInterpreterSettingBinding->[removeInterpretersForNote,saveToFile,get,add],getInterpreter->[getInterpreterClassFromInterpreterSetting,getDefaultInterpreterSetting,createOrGetInterpreterList,get,getInterpreter,getInterpreterSettingByGroup,getInterpreterSettings],add->[loadInterpreterDependencies,findDefaultInterpreter,add],recursiveBuildLibList->[recursiveBuildLibList]]]
get default interpreter setting by note id.
I think this can be removed now, in favor of `Note.isBinded()`, no?
@@ -1119,6 +1119,11 @@ CELERY_TASK_ROUTES = { 'olympia.scanners.tasks.run_customs': {'queue': 'devhub'}, 'olympia.scanners.tasks.run_wat': {'queue': 'devhub'}, 'olympia.scanners.tasks.run_yara': {'queue': 'devhub'}, + 'olympia.scanners.mark_yara_query_rule_as_completed': {'queue': 'devhub'}, + 'olympia.scanners.run_yara_query_rule': {'queue': 'devhub'}, + 'olympia.scanners.run_yara_query_rule_on_versions_chunk': { + 'queue': 'devhub' + }, # Activity (goes to devhub queue). 'olympia.activity.tasks.process_email': {'queue': 'devhub'},
[path->[join],get_raven_release->[fetch_git_sha,get,read,join,exists,open,loads],read_only_mode->[get,Exception],get_db_config->[update,db],,gethostname,join,env,dict,Env,r'^,bool,list,datetime,dirname,format,items,exists,float,read_env,Queue,path,keys,get,get_db_config,get_raven_release,lower]
Maps Celery task names to their queues; the scanner tasks added here are routed to the devhub queue.
Shouldn't we have a new queue for the code search (so that the regular devhub tasks are not impacted)?