patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -402,15 +402,16 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar String copyPasswd = _configDao.getValue("secstorage.copy.password"); setupCmd.setCopyPassword(copyPasswd); setupCmd.setCopyUserName(TemplateConstants.DEFAULT_HTTP_AUTH_USER); + Answer answer = _agentMgr.easySend(ssAHostId, setupCmd); if (answer != null && answer.getResult()) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully programmed http auth into " + secStorageVm.getHostName()); + s_logger.debug(String.format("Successfully created http auth into secondary storage VM [%s].", hostName)); } return true; } else { if (s_logger.isDebugEnabled()) { - s_logger.debug("failed to program http auth into secondary storage vm : " + secStorageVm.getHostName()); + s_logger.debug(String.format("Failed to create http auth into secondary storage VM [%s] due to [%s].", hostName, answer == null ? "answer null" : answer.getDetails())); } return false; }
[SecondaryStorageManagerImpl->[stopSecStorageVm->[stop],isPoolReadyForScan->[isZoneReady],rebootSecStorageVm->[startSecStorageVm],createSecStorageVmInstance->[getDefaultNetworkForCreation],allocCapacity->[assignSecStorageVmFromStoppedPool,startNew,isSecondaryStorageVmRequired,startSecStorageVm],assignSecStorageVmFromRunningPool->[getCurrentAllocator],stop->[stop],startNew->[isSecondaryStorageVmRequired],pickSsvmHost->[listUpAndConnectingSecondaryStorageVmHost],onScanStart->[getZoneHostInfo],expandPool->[allocCapacity]]]
Generates a command to program the HTTP authentication into the secondary storage VM.
I agree that `programmed` can be indeed changed to something that best describes the logged operation. `Created` is an option, but I think that `set` fits better. Additionally, I would replace `http` with `HTTP`. Something as: 1. `Successfully set HTTP auth into secondary storage VM [s-123-VM]` 2. or `Successfully configured HTTP auth into secondary storage VM [s-123-VM]`
@@ -763,6 +763,17 @@ function item_store($arr,$force_parent = false, $notify = false, $dontcache = fa return 0; } + // Check for already added items. + // There is a timing issue here that sometimes creates double postings. + // An unique index would help - but the limitations of MySQL (maximum size of index values) prevent this. + if ($arr["uid"] == 0) { + $r = qu("SELECT `id` FROM `item` WHERE `uri` = '%s' AND `uid` = 0 LIMIT 1", dbesc(trim($arr['uri']))); + if (dbm::is_result($r)) { + logger('Global item already stored. URI: '.$arr['uri'].' on network '.$arr['network'], LOGGER_DEBUG); + return 0; + } + } + // Store the unescaped version $unescaped = $arr;
[item_add_language_opt->[detect],tag_deliver->[get_baseurl],new_follower->[get_item_tags,get_baseurl],fix_private_photos->[scaleImage,get_baseurl,imageString,getType,is_valid],add_page_keywords->[get_baseurl],item_is_remote_self->[get_hostname],drop_item->[get_baseurl],item_body_set_hashtags->[get_baseurl],tgroup_check->[get_baseurl],subscribe_to_hub->[get_curl_code],add_page_info_data->[get_baseurl],item_store->[get_baseurl,format]]
store an item in the database Checks if an item is already in the item store. This function is a code parser that can handle with it - and it destroys posts with - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - This function is used to store a single item in the system.
I'm able to add a UNIQUE index on `uid` and `uri` of th `item` table. Of course the `uri` field still is in `utf8_general_ci` collation because we hopefully don't need emojis in item uri.
@@ -18,6 +18,13 @@ if ($config['enable_inventory']) { dbDelete('entPhysical', 'entPhysical_id = ?', array ($test['entPhysical_id'])); } } + + if (count($valid) > 0) { + $module_state = true; + } else { + $module_state = false; + } + unset( $sql, $test,
[No CFG could be retrieved]
Check if there are any neccesary neccesary neccesary n.
What's your reasoning for not doing: `$module_state = count($valid) > 0;
@@ -227,11 +227,7 @@ public class HoodieTableMetaClient implements Serializable { */ public String getArchivePath() { String archiveFolder = tableConfig.getArchivelogFolder(); - if (archiveFolder.equals(HoodieTableConfig.ARCHIVELOG_FOLDER.defaultValue())) { - return getMetaPath(); - } else { - return getMetaPath() + "/" + archiveFolder; - } + return getMetaPath() + "/" + archiveFolder; } /**
[HoodieTableMetaClient->[initializeBootstrapDirsIfNotExists->[getHadoopConf,initializeBootstrapDirsIfNotExists,getFs],Builder->[build->[HoodieTableMetaClient]],PropertyBuilder->[setPayloadClass->[setPayloadClassName],fromMetaClient->[setPayloadClassName],initTable->[initTableAndGetMetaClient,build],fromProperties->[setArchiveLogFolder,setTimelineLayoutVersion,setTableName,setPreCombineField,setPayloadClassName,setTableCreateSchema,setKeyGeneratorClassProp,setPartitionFields,setTableType,setRecordKeyFields,setBootstrapBasePath,setBootstrapIndexClass,setPopulateMetaFields,setBaseFileFormat],setTableType->[setTableType],build->[toString]],equals->[equals],getFs->[getFs],getMarkerFolderPath->[getTempFolderPath],getArchivePath->[getMetaPath],getCommitTimeline->[getTableType,getCommitTimeline],scanHoodieInstantsFromFileSystem->[scanHoodieInstantsFromFileSystem],initTableAndGetMetaClient->[getFs,getTableType],toString->[toString],getCommitsTimeline->[getTableType,getCommitsTimeline],getCommitActionType->[getTableType,getCommitActionType],getCommitsAndCompactionTimeline->[getTableType]]]
Returns the archive path.
@danny0405 this is more of a debt due to how we did this at uber originally. Since HDFS supports append, we started writing out these archived logs into a single log file (with rollover) under `.hoodie` itself. But on cloud storage, there is no append and it leads to explosion of small files. So we moved it to the `.archived` subfolder. My take would be leave this alone here and override it at the higher layers like cli, spark, flink for backwards compat reasons. No other reasons. cc @n3nash , do you have a preference ?
@@ -2837,11 +2837,12 @@ obj_shard_list_key_cb(struct shard_auxi_args *shard_auxi, } rc = merge_key(&merge_arg->merge_list, key, key_size); + /* free key first regardless of rc */ + if (alloc_key) + D_FREE(key); if (rc) return rc; - if (alloc_key) - D_FREE(key); } return 0;
[No CFG could be retrieved]
finds the next key in the shard list. This is the main entry point for the list - comp. It is called by the user.
this flag isn't actually necessary. If key != NULL, it is allocated and D_FREE sets it back to NULL
@@ -17,7 +17,7 @@ import org.infinispan.commons.util.Util; * @author Lukasz Moren * @author Sanne Grinovero */ -public final class ChunkCacheKey implements Serializable, IndexScopedKey { +public final class ChunkCacheKey implements IndexScopedKey { /** The serialVersionUID */ private static final long serialVersionUID = 4429712073623290126L;
[ChunkCacheKey->[generatedHashCode->[hashCode],Externalizer->[readObject->[ChunkCacheKey]],equals->[equals]]]
Package private for testing purposes. Returns the name of the object in the cache.
I guess serialVersionUID field should be removed too.
@@ -30,11 +30,6 @@ type encryptStream struct { func (es *encryptStream) Write(plaintext []byte) (int, error) { - if !es.didHeader { - es.didHeader = true - es.err = es.encoder.Encode(es.header) - } - if es.err != nil { return 0, es.err }
[Close->[encryptBlock],init->[checkReceivers],Write->[Write],writeFooter->[encryptBytes],Close,init,Write]
Write encrypts plaintext and writes it to the stream.
Writing the header here was causing a bug where we never wrote it in the case of an empty message.
@@ -45,6 +45,7 @@ export default function Sidebar( { settings, store, theme } ) { <StoreProvider store={ store }> <SeoAnalysis shouldUpsell={ settings.shouldUpsell } + shouldUpsellWordFormRecognition={ settings.shouldUpsellWordFormRecognition } location="sidebar" /> </StoreProvider>
[No CFG could be retrieved]
The default sidebar.
I'm not seeing the upsell notice in the sidebar any longer. I guess this should be `settings.isWordFormRecognitionActive`.
@@ -63,6 +63,13 @@ public class JavaScriptBufferAggregator implements BufferAggregator return (float)buf.getDouble(position); } + + @Override + public long getLong(ByteBuffer buf, int position) + { + return (long) buf.getDouble(position); + } + @Override public void close() { script.close();
[JavaScriptBufferAggregator->[close->[close],aggregate->[aggregate]]]
read float.
should this be getLong?
@@ -395,10 +395,16 @@ func hashPassword(passwd, salt, algo string) string { return fmt.Sprintf("%x", tempPasswd) } -// HashPassword hashes a password using the algorithm defined in the config value of PASSWORD_HASH_ALGO. -func (u *User) HashPassword(passwd string) { +// SetPassword hashes a password using the algorithm defined in the config value of PASSWORD_HASH_ALGO +// change passwd, salt and passwd_hash_algo fields +func (u *User) SetPassword(passwd string) (err error) { + if u.Salt, err = GetUserSalt(); err != nil { + return err + } u.PasswdHashAlgo = setting.PasswordHashAlgo u.Passwd = hashPassword(passwd, u.Salt, setting.PasswordHashAlgo) + + return nil } // ValidatePassword checks if given password matches the one belongs to the user.
[NewGitSig->[GetEmail],GetAccessRepoIDs->[GetRepositoryIDs,GetOrgRepositoryIDs],GetOrganizationCount->[getOrganizationCount],IsPasswordSet->[ValidatePassword],GenerateActivateCode->[GenerateEmailActivateCode],getOrganizationCount,HashPassword,EmailNotifications,IsOrganization,IsMailable,toConds]
HashPassword hashes the given password using the user s password hash algorithm and salt.
This doesn't appear to prevent empty passwords being set.
@@ -124,7 +124,16 @@ public interface StorageManager extends StorageService { "Storage", "60", "Timeout (in secs) for the storage pool client connection timeout (for managed pools). Currently only supported for PowerFlex.", - true, + false, + ConfigKey.Scope.StoragePool, + null); + + ConfigKey<Integer> STORAGE_POOL_CLIENT_MAX_CONNECTIONS = new ConfigKey<>(Integer.class, + "storage.pool.client.max.connections", + "Storage", + "100", + "Maximum connections for the storage pool client (for managed pools). Currently only supported for PowerFlex.", + false, ConfigKey.Scope.StoragePool, null);
[No CFG could be retrieved]
Creates a list of tags that can be used to create a new instance of an SSVM Returns a list of Strings with tags for the specified storage pool.
Just out of curiosity. Why is `STORAGE_POOL_CLIENT_TIMEOUT` changed from dynamic to static?
@@ -1664,6 +1664,10 @@ define([ var shadowsEnabled = scene.frameState.shadowHints.shadowsEnabled; var lightShadowsEnabled = shadowsEnabled && (scene.frameState.shadowHints.lightShadowMaps.length > 0); + if (scene._logDepthBuffer && defined(command.derivedCommands.logDepth)) { + command = command.derivedCommands.logDepth.logDepthCommand; + } + if (scene.debugShowCommands || scene.debugShowFrustums) { executeDebugCommand(command, scene, passState); } else if (lightShadowsEnabled && command.receiveShadows && defined(command.derivedCommands.shadows)) {
[No CFG could be retrieved]
Executes a debug command. Debug code to draw command bounding volume.
I don't know the complete context, but perhaps `logDepth.logDepthCommand` -> `logDepth.command`?
@@ -42,6 +42,12 @@ public class CreateNetworkCmdByAdmin extends CreateNetworkCmd implements AdminCm @Parameter(name=ApiConstants.HIDE_IP_ADDRESS_USAGE, type=CommandType.BOOLEAN, description="when true ip address usage for the network will not be exported by the listUsageRecords API") private Boolean hideIpAddressUsage; + @Parameter(name = ApiConstants.ROUTER_IP, type = CommandType.STRING, description = "IPV4 address to be assigned to a router in a shared network", since = "4.16") + private String routerIp; + + @Parameter(name = ApiConstants.ROUTER_IPV6, type = CommandType.STRING, description = "IPV6 address to be assigned to a router in a shared network", since = "4.16") + private String routerIpv6; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// /////////////////////////////////////////////////////
[CreateNetworkCmdByAdmin->[getLogger,getName]]
Get the vlan.
can add validator "_ApiArgValidator.NotNullOrEmpty_" for both args ?
@@ -86,7 +86,7 @@ class SecretClient(KeyVaultClientBase): Keyword arguments - **enabled** (bool): Whether the secret is enabled for use. - **tags** (dict[str, str]): Application specific metadata in the form of key-value pairs. - - **content_type** (str): An arbitrary string indicating the type of the secret, e.g. 'password' + - **content_type** (str): A descriptive string indicating the type of the secret, e.g. 'password' - **not_before** (:class:`~datetime.datetime`): Not before date of the secret in UTC - **expires_on** (:class:`~datetime.datetime`): Expiry date of the secret in UTC
[SecretClient->[set_secret->[set_secret],purge_deleted_secret->[purge_deleted_secret],delete_secret->[delete_secret],get_secret->[get_secret],get_deleted_secret->[get_deleted_secret],recover_deleted_secret->[recover_deleted_secret],backup_secret->[backup_secret]]]
Creates a new secret with the given name and value.
'Arbitrary' is apt because there are no illegal values. 'An arbitrary string describing'?
@@ -170,6 +170,15 @@ class ContentRouteProvider implements RouteProviderInterface } elseif (!$this->checkResourceLocator($resourceLocator, $prefix)) { return $collection; } else { + if ($document instanceof ExtensionBehavior) { + $documentSegmentKey = $document->getExtensionsData()['excerpt']['segment']; + $segment = $this->requestAnalyzer->getSegment(); + + if ($segment && $segment->getKey() !== $documentSegmentKey) { + $this->requestAnalyzer->changeSegment($documentSegmentKey); + } + } + // convert the page to a StructureBridge because of BC $metadata = $this->documentInspector->getStructureMetadata($document); if (!$metadata) {
[ContentRouteProvider->[getStructureRoute->[getPathInfo,getController,decodePathInfo,get],getRouteCollectionForRequest->[getResourceSegment,find,getStrategyByWebspaceKey,getQueryString,getStructureMetadata,checkResourceLocator,getRedirectRoute,getPathInfo,getRedirectType,getKey,getLocale,getStructureType,wrapStructure,getAttribute,loadByResourceLocator,getNewResourceLocator,getNewResourceLocatorUuid,getRequestFormat,findUrlByResourceLocator,getAlias,getRedirectExternal,decodePathInfo,get,getUuid,getStructureRoute,add,getWebspaceName,getTitle,setDocument],getRedirectRoute->[getPathInfo,decodePathInfo]]]
Returns a RouteCollection for the given request. Checks if a page has a specific node in the collection. add a new route to the collection if the exception was moved.
this have also to be done in the ArticleBundle - can we maybe add this behaviour to the RouteBundle?
@@ -2874,13 +2874,14 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, zfs_handle_t *zhp; /* - * Destination fs exists. Therefore this should either + * Destination fs exists (and we have not been asked to + * skip existing snapshots). Therefore this should either * be an incremental, or the stream specifies a new fs * (full stream or clone) and they want us to blow it * away (and have therefore specified -F and removed any * snapshots). */ - if (stream_wantsnewfs) { + if (stream_wantsnewfs && !flags->skip) { if (!flags->force) { zcmd_free_nvlists(&zc); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
[int->[zfs_prop_get_int,changelist_remove,nvpair_value_int32,estimate_ioctl,zfs_name_valid,VERIFY,printf,zfs_setprop_error,zfs_iter_snapshots_sorted,bzero,changelist_postfix,zfs_error_fmt,DMU_GET_STREAM_HDRTYPE,send_iterate_prop,make_dataset_handle,zfs_name_to_prop,zfs_iter_children,nvlist_add_boolean,strcpy,read,free,nvlist_lookup_string,P2ROUNDUP,assert,strlcat,gather_holds,recv_read_nvlist,zfs_spa_version,nvlist_add_string,fsavl_find,ZIO_CHECKSUM_EQUAL,zfs_open,dump_filesystem,fletcher_4_incremental_byteswap,created_before,recv_read,strrchr,fsavl_create,zfs_dataset_exists,strchr,pthread_cancel,changelist_free,zfs_receive_impl,nvlist_lookup_nvlist,zfs_standard_error,nvlist_add_uint64,gather_nvlist,BSWAP_64,ASSERT,zfs_error_aux,trunc_prop_errs,nvpair_value_nvlist,nvlist_free,zfs_get_type,zfs_strdup,strlcpy,zcmd_write_src_nvlist,zfs_nicenum,zfs_close,fsavl_destroy,fprintf,zfs_prop_to_name,strlen,malloc,dump_ioctl,nvlist_next_nvpair,zfs_iter_filesystems,zcmd_free_nvlists,send_iterate_fs,changelist_prefix,DMU_GET_FEATUREFLAGS,ioctl,nvlist_alloc,guid_to_name,dgettext,write,recv_incremental_replication,pthread_create,recv_rename,zfs_error,nvlist_lookup_uint64,strerror,recv_destroy,create_parents,strcat,strncmp,strcspn,zfs_alloc,nvlist_add_nvlist,time,nvlist_unpack,zfs_receive_package,strstr,sprintf,nvpair_name,DMU_STREAM_SUPPORTED,BSWAP_32,strcmp,changelist_rename,zfs_receive_one,pthread_join,nvlist_exists,nvlist_remove_all,zfs_ioctl,recv_skip,changelist_gather,getpid,nvpair_value_uint64,fflush,snprintf,nvlist_lookup_boolean,fletcher_4_incremental_native],size_t->[fread],nvlist_t->[avl_find],avl_tree_t->[free,avl_find,nvpair_name,malloc,nvpair_value_uint64,VERIFY,nvlist_lookup_nvlist,avl_add,avl_create,fsavl_destroy,nvlist_next_nvpair,nvpair_value_nvlist],zfs_send->[zfs_spa_version,nvlist_add_string,zfs_prop_get_int,strerror,fnvlist_alloc,zfs_nicenum,socketpair,VERIFY,fsavl_destroy,fprintf,bzero,dump_filesystems,nvlist_pack,pthread_cancel,nvlist_add_nvlist,cksum_and_wri
te,nvlist_add_boolean,close,fnvlist_free,nvlist_alloc,gather_nvlist,open,zfs_standard_error,free,dgettext,pthread_join,write,DMU_SET_STREAM_HDRTYPE,DMU_SET_FEATUREFLAGS,zfs_get_pool_handle,pthread_create,zfs_error_aux,zfs_error,getpid,zpool_get_prop_int,snprintf,zfs_hold_nvl,nvlist_free],boolean_t->[ddt_hash_append,BF64_GET,ZIO_CHECKSUM_EQUAL],zfs_send_one->[dgettext,strerror,lzc_send,zfs_error_aux,zfs_standard_error,zfs_error,snprintf,lzc_exists],void->[nvlist_add_string,nvlist_lookup_uint64,strlcpy,MAX,ZIO_CHECKSUM_EQUAL,ISP2,zfs_realloc,avl_destroy_nodes,zfs_nicenum,VERIFY,printf,fprintf,verify,assert,DMU_GET_STREAM_HDRTYPE,high_order_bit,localtime,calloc,nvlist_next_nvpair,zfs_alloc,avl_destroy,zfs_prop_readonly,zfs_name_to_prop,time,perror,sysconf,sleep,DMU_GET_FEATUREFLAGS,cksum_and_write,umem_cache_destroy,nvpair_name,ZIO_SET_CHECKSUM,strcmp,zfs_prop_get_type,BE_64,nvlist_add_uint64,ssread,free,dgettext,ferror,write,DMU_SET_FEATUREFLAGS,DRR_IS_DEDUP_CAPABLE,ASSERT,zfs_ioctl,fclose,zfs_error_aux,nvlist_lookup_string,zio_checksum_SHA256,umem_cache_alloc,fnvlist_add_string,P2ROUNDUP,fdopen,zfs_prop_user,umem_cache_create,ddt_update,nvpair_value_nvlist],zfs_receive->[fstat,changelist_postfix,perror,free,fscanf,close,S_ISFIFO,fclose,changelist_gather,changelist_free,zfs_receive_impl,VERIFY,zfs_close,fopen,open,zfs_open,fcntl]]
receive one object from zfs ZFS specific functions ZFS specific functions Determine the name of the destination snapshot store in zc_value. This function is used to create a log entry based on the contents of a log entry.
Let's update this error message to reference the optional `-s' option. Something like "destination '%s' exists\nmust specify -F to overwrite it or -s to skip it"
@@ -602,7 +602,7 @@ export class AmpStoryPlayer { // TODO(enriqe): sanitize URLs for matching. const storyIdx = findIndex(this.stories_, ({href}) => href === storyUrl); - // TODO(proyectoramirez): replace for add() once implemented. + // TODO(Enriqe): replace for add() once implemented. if (!this.stories_[storyIdx]) { throw new Error(`Story URL not found in the player: ${storyUrl}`); }
[No CFG could be retrieved]
Callback for the layout of the current story. Gets the current story state.
Prefer TODOs with issue numbers over TODOs with usernames
@@ -18,6 +18,10 @@ def get_list(text): def get_bool_from_env(name, default_value): if name in os.environ: value = os.environ[name] + # Enable Pythonic environment variables, + # i.e. DEBUG='' or DEBUG= is Falsy but would generate an error + if value == '': + return False try: return ast.literal_eval(value) except ValueError as e:
[get_list->[strip,split],get_bool_from_env->[literal_eval,ValueError,format],get_host->[get_current],get_currency_fraction,bool,int,pgettext_lazy,_,parse,append,get_list,dirname,insert,get,getenv,config,setdefault,join,normpath,get_bool_from_env,CACHES]
Get a boolean value from environment variables.
The value should be defaulting to whatever is `default_value`. If the value is \*not\* set. Which was the expected behavior from him. Otherwise it doesn't make sense (for me at least). I would suggest to replace `if name in os.environ: ...` by a `if value: ...` (where value is from a `environ.get`) which would handle the case where the value is fully unset (not in the environment) or partially unset (an empty placeholder in the environment). I also think that we probably should not use `ast.literal_eval(value)` for parsing a boolean, as we don't have any use to compile the argument to Python code. A simple string check that would say whether the value is considered to be the `True` or `False` literal would be enough. Maybe we could drop the case sensitivity.
@@ -1235,7 +1235,7 @@ func formatVideoDuration(ms int) string { return fmt.Sprintf("%d:%02d", minutes, seconds) } -func formatVideoSize(bytes int64) string { +func PresentBytes(bytes int64) string { const ( BYTE = 1.0 << (10 * iota) KILOBYTE
[showLog->[showVerbose],Debug->[showLog],Less->[Less],Trace->[showLog],Len,Debug,GetLog]
PresentThreadView returns a list of UIMessages that have a specific . PresentAttachmentAssetInfo returns an attachment asset url info if the message is valid.
could just use `humanize.Bytes` throughout
@@ -158,6 +158,7 @@ public class JdbcExtractionNamespace implements ExtractionNamespace result = 31 * result + table.hashCode(); result = 31 * result + keyColumn.hashCode(); result = 31 * result + valueColumn.hashCode(); + result = 31 * result + (filter != null ? filter.hashCode() : 0); result = 31 * result + (tsColumn != null ? tsColumn.hashCode() : 0); result = 31 * result + pollPeriod.hashCode(); return result;
[JdbcExtractionNamespace->[hashCode->[hashCode],toString->[toString],equals->[equals]]]
This method returns the hashCode of the missing values in the table.
Please migrate this hashCode() to use `Objects.hash()`
@@ -215,10 +215,8 @@ class GlobalOptions(Subsystem): default=[], advanced=True, help="Regexps matching warning strings to ignore, e.g. " - '["DEPRECATED: scope some_scope will be removed"]. The regexps will be matched ' - "from the start of the warning string, and will always be case-insensitive. " - "See the `warnings` Python module documentation for more background " - "on how these are used.", + '["DEPRECATED: the option `--my-opt` will be removed"]. The regex patterns will be ' + "matched from the start of the warning string, and are case-insensitive.", ) register(
[OwnersNotFoundBehavior->[to_glob_match_error_behavior->[GlobMatchErrorBehavior]],FilesNotFoundBehavior->[to_glob_match_error_behavior->[GlobMatchErrorBehavior]],GlobalOptions->[register_options->[register_bootstrap_options]],ExecutionOptions]
Register bootstrap options. Registers options for a single failure of a reserved key. Register a sequence of flags that are required to satisfy a specific constraint. Register a single missing block of configuration.
This seemed like a leak of the implementation.
@@ -814,9 +814,14 @@ func (g *generator) genApply(w io.Writer, expr *model.FunctionCallExpression) { isInput := false retType := g.argumentTypeName(nil, then.Signature.ReturnType, isInput) // TODO account for outputs in other namespaces like aws - typeAssertion := fmt.Sprintf(".(%sOutput)", retType) - if !strings.HasPrefix(retType, "pulumi.") { - typeAssertion = fmt.Sprintf(".(pulumi.%sOutput)", Title(retType)) + var typeAssertion string + if retType == "[]string" { + typeAssertion = ".(pulumi.StringArrayOutput)" + } else { + typeAssertion = fmt.Sprintf(".(%sOutput)", retType) + if !strings.HasPrefix(retType, "pulumi.") { + typeAssertion = fmt.Sprintf(".(pulumi.%sOutput)", Title(retType)) + } } if len(applyArgs) == 1 {
[rewriteThenForAllApply->[argumentTypeName],genApply->[genAnonymousFunctionExpression,argumentTypeName],argumentTypeName->[argumentTypeName],genTemplateExpression->[genLiteralValueExpression],GenUnaryOpExpression->[GetPrecedence],GenBinaryOpExpression->[GetPrecedence],literalKey->[GenTemplateExpression,GenLiteralValueExpression]]
genApply generates the. Apply function call.
Calling this out, something is really patchy/incomplete here even after the fix in the PR. Does it ring any bells?
@@ -206,7 +206,7 @@ func Test_UpkeepExecuter_PerformsUpkeep_Error(t *testing.T) { head := newHead() executer.OnNewLongestChain(context.TODO(), head) - g.Eventually(wasCalled).Should(gomega.Equal(atomic.NewBool(true))) + g.Eventually(wasCalled.Load()).Should(gomega.Equal(true)) cltest.AssertCountStays(t, db, bulletprooftxmanager.EthTx{}, 0) ethMock.AssertExpectations(t) }
[AssertCountStays,NewHash,Test,ItHappened,SetDB,MatchedBy,GWei,NewGomegaWithT,MustInsertKeeperRegistry,MockMatchedResponse,GetGasEstimator,Close,False,Eth,IntFrom,CreateProductionLogger,NewUpkeepExecuter,Eventually,NewChainSet,Error,MustGetDefaultChain,Should,NewContractMockReceiver,HeadBroadcaster,NewKeyStore,NewGormDB,Start,Hex2Bytes,NewTestGeneralConfig,KeeperRegistryPerformGasOverhead,NewBigI,Address,MustInsertUpkeepForRegistry,Len,On,Once,Store,NewAwaiter,Cleanup,Equal,NewORM,KeeperGasPriceBufferPercent,MockResponse,NewJobPipelineV2,Head,Parallel,NoError,HasErrors,Maybe,AssertExpectations,Config,AwaitOrFail,TODO,NewBool,Return,Background,TxManager,MockRevertResponse,NewInt,NewEthClientMockWithDefaultChain,Div,OnNewLongestChain,NewHead,Run,Mul,WaitForPipelineComplete]
head creates a new head object and asserts that the head object is not nil.
Doesn't the arg of an `Eventually(...)` need to be a closure returning `wasCalled.Load()` so that the `Eventually` can repeatedly call it until it succeeds? Otherwise it'd only call it once and retest the returned value over and over.
@@ -271,7 +271,17 @@ func base64EncodeNoPad(values ...string) string { return base64.RawStdEncoding.EncodeToString([]byte(data)) } -func hmacString(hmacType string, hmacKey string, data string) []byte { +func base64Decode(enc string) string { + dec, _ := base64.StdEncoding.DecodeString(enc) + return string(dec) +} + +func base64DecodeNoPad(enc string) string { + dec, _ := base64.RawStdEncoding.DecodeString(enc) + return string(dec) +} + +func hmacString(hmacType string, hmacKey []byte, data string) []byte { if data == "" { return nil }
[Unpack->[Option,New,Delims,Funcs,Parse],Execute->[cursorMap,lastResponseClone,Execute,Contains,templateValues,String,Clone,Debugf,lastEventClone,firstEventClone,Put],Index,ValueOf,In,Add,Int,Format,MatchString,Sprint,Sum,FindStringSubmatch,New,UTC,Len,MustCompile,Unix,Float,Join,TypeOf,LoadLocation,Write,NewRandom,Kind,Uint,String,Parse,EncodeToString,ParseFloat,ParseDuration]
Get the next unique identifier for a given float type string int64 int32 int32 int hmacString returns a random string with the given values.
Maybe emit and obviously incorrect string if err != nil? (also above)
@@ -1662,6 +1662,11 @@ class TorchAgent(ABC, Agent): def act(self): """Call batch_act with the singleton batch.""" + if self._replies_are_shared: + raise RuntimeError( + 'act() will misbehave in batching mode. Set batchsize to 1, or ' + '--interactive-mode true' + ) return self.batch_act([self.observation])[0] def batch_act(self, observations):
[History->[_update_vecs->[parse],update_history->[_update_raw_strings,_update_vecs,reset,_update_strings]],TorchAgent->[_vectorize_text->[_add_start_end_tokens],_set_text_vec->[get_history_str,_check_truncate,get_history_vec],vectorize->[_set_text_vec,_set_label_cands_vec,_set_label_vec],reset->[reset],build_history->[history_class],build_dictionary->[dictionary_class],_set_label_vec->[_vectorize_text,_check_truncate],batch_act->[batchify,match_batch],batchify->[is_valid,Batch],zero_grad->[zero_grad],backward->[backward],load->[load,load_state_dict],init_optim->[optim_opts],observe->[update_history,last_reply,vectorize],state_dict->[state_dict],_set_label_cands_vec->[_vectorize_text,_check_truncate],save->[save,state_dict],update_params->[_is_lr_warming_up],load_state_dict->[load_state_dict],_copy_embeddings->[_get_embtype,_project_vec],add_cmdline_args->[optim_opts,dictionary_class],receive_metrics->[_is_lr_warming_up]]]
Process a batch of observations and return a sequence of messages.
why are we raising a runtime error here??? am i misunderstanding something, or will this raise a runtime error every time we call act on an interactive agent
@@ -43,10 +43,14 @@ namespace Microsoft.Win32.SafeHandles { using (DisableMediaInsertionPrompt.Create()) { - SafeFileHandle fileHandle = new SafeFileHandle( - NtCreateFile(fullPath, mode, access, share, options, preallocationSize), - ownsHandle: true, - options); + // we don't use NtCreateFile as there is no public and reliable way + // of converting DOS to NT file paths (RtlDosPathNameToRelativeNtPathName_U_WithStatus is not documented) + SafeFileHandle fileHandle = CreateFile(fullPath, mode, access, share, options); + + if (FileStreamHelpers.ShouldPreallocate(preallocationSize, access, mode)) + { + Preallocate(fullPath, preallocationSize, fileHandle); + } fileHandle.InitThreadPoolBindingIfNeeded();
[SafeFileHandle->[InitThreadPoolBindingIfNeeded->[IO_BindHandleFailed,Dispose,BindHandle],IntPtr->[STATUS_FILE_TOO_LARGE,Format,Append,AsSpan,STATUS_DISK_FULL,IO_DiskFull_Path_AllocationSize,STATUS_INVALID_PARAMETER,Ordinal,Dispose,NtCreateFile,GetExceptionForWin32Error,StartsWith,IO_FileTooLarge_Path_AllocationSize,STATUS_SUCCESS,RtlNtStatusToDosError],GetFileType->[FILE_TYPE_PIPE,FILE_TYPE_CHAR,GetFileType,Assert,FILE_TYPE_DISK],FileOptions->[DeleteOnClose,FILE_DELETE_ON_CLOSE,NtQueryInformationFile,Asynchronous,FILE_RANDOM_ACCESS,None,FILE_SYNCHRONOUS_IO_ALERT,RandomAccess,FILE_SEQUENTIAL_ONLY,SequentialScan,FILE_SYNCHRONOUS_IO_NONALERT,GetExceptionForWin32Error,FileModeInformation,FILE_WRITE_THROUGH,FILE_NO_INTERMEDIATE_BUFFERING,STATUS_SUCCESS,RtlNtStatusToDosError,WriteThrough],SetHandle,FILE_TYPE_PIPE,Asynchronous,GetFileOptions,GetFileType,FILE_TYPE_DISK]]
Opens a file with the given path mode access share and options.
I assume this is all just reverting to what was there previously?
@@ -735,7 +735,7 @@ public abstract class Row implements Serializable { return this; } - public Builder addValues(Object... values) { + public Builder addValues(@Nullable Object... values) { return addValues(Arrays.asList(values)); }
[Row->[getBytes->[getBytes,getValue],equals->[getSchema,getFieldCount,getValue,equals],getIterable->[getIterable,getValue],hashCode->[getFieldCount,getValue],getDouble->[getValue,getDouble],getByte->[getByte,getValue],fromRow->[getSchema],getInt64->[getInt64,getValue],Builder->[withFieldValues->[withFieldValues],addArray->[addArray],attachValues->[attachValues],build->[getFieldCount],withFieldValue->[withFieldValue],addValues->[addValues]],nullRow->[build],Equals->[deepHashCode->[deepHashCode,hashCode],deepEqualsForMap->[getValue,deepEquals],deepEqualsForIterable->[deepEquals],deepHashCodeForIterable->[deepHashCode],deepEquals->[equals,deepEquals],deepHashCodeForMap->[deepHashCode,getValue]],getDateTime->[getDateTime,getValue],getValue->[getValue],getArray->[getArray,getValue],getMap->[getMap,getValue],toRow->[getFieldCount,build],getFloat->[getFloat,getValue],getInt16->[getInt16,getValue],getBaseValue->[getBaseValue,getValue],FieldValueBuilder->[withFieldValue->[withFieldValue,getSchema],withFieldAccessDescriptors->[getValue],build->[getSchema],withFieldValues->[getSchema,getValue]],getInt32->[getValue,getInt32],getBoolean->[getValue,getBoolean],getRow->[getValue,getRow],toString->[getValues],getDecimal->[getDecimal,getValue],getLogicalTypeValue->[getLogicalTypeValue,getValue],getString->[getValue,getString]]]
Add values to the list.
Can it be Nullable ?
@@ -123,6 +123,9 @@ def extract_sub_graph(graph_def, dest_nodes): if not isinstance(graph_def, graph_pb2.GraphDef): raise TypeError("graph_def must be a graph_pb2.GraphDef proto.") + if not isinstance(dest_nodes, list): + raise TypeError("dest_nodes must be a list.") + edges = {} # Keyed by the dest node name. name_to_node_map = {} # Keyed by node name.
[convert_variables_to_constants->[extract_sub_graph],must_run_on_cpu->[_is_variable_op],extract_sub_graph->[_node_name]]
Extract the subgraph that can reach any of the nodes in dest_nodes. out is a copy of out. library out. versions.
This will break existing code where dest_nodes is a non-list iterable, such as a tuple or set. Instead, you should raise an error if it's not an instance of a collections.Iterable, or is an instance of six.string_types as I described above.
@@ -28,8 +28,14 @@ class OgnDistributor { // Transfer NUM_TOKENS to the specified wallet. const value = this.token.toNaturalUnit(NUM_TOKENS) const contractAddress = this.token.contractAddress(networkId) - const receipt = await this.token.credit(networkId, wallet, value) - const txHash = receipt.transactionHash + const txHash = await this.token.credit(wallet, value) + const { status } = await this.token.waitForTxConfirmation(txHash, { + numBlocks: NumBlockConfirmation, + timeoutSec: ConfirmationTimeoutSec + }) + if (status !== 'confirmed') { + throw new Error(`Failure. status=${status} txHash=${txHash}`) + } logger.info(`${NUM_TOKENS} OGN -> ${wallet} TxHash=${txHash}`) // Send response back to client.
[No CFG could be retrieved]
OGN token credit process.
I'm not sure this needs to be dealt with now, but you might want to consider what happens in production if the tx takes longer than 2 minutes to get mined. For instance, will this record the error, assume it won't get mined (but it does), then retry later creating multiple transactions?
@@ -1353,8 +1353,12 @@ func TestAccAWSRDSCluster_GlobalClusterIdentifier_PrimarySecondaryClusters(t *te resourceNameSecondary := "aws_rds_cluster.secondary" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories(&providers), + PreCheck: func() { + testAccPreCheck(t) + testAccMultipleRegionPreCheck(t, 2) + testAccPreCheckAWSRdsGlobalCluster(t) + }, + ProviderFactories: testAccProviderFactoriesAlternate(&providers), CheckDestroy: testAccCheckAWSClusterDestroy, Steps: []resource.TestStep{ {
[ParallelTest,DeleteDBCluster,RandString,Code,TestCheckResourceAttrSet,RandInt,New,RemoveFromGlobalCluster,RootModule,ComposeTestCheckFunc,Errorf,Skip,MustCompile,Bool,DeleteDBClusterSnapshot,TestMatchResourceAttr,TestCheckTypeSetElemAttr,TimeValue,RandomWithPrefix,AddTestSweepers,TestCheckResourceAttrPair,Printf,StringValue,Meta,Sprintf,TestCheckResourceAttr,DescribeDBClustersPages,String,DescribeDBClusters]
TestAccAWSRDSCluster_GlobalClusterIdentifier_PrimarySecondaryClusters tests creating primary and secondary RDS clusters under a global cluster identifier.
Previously missing PreCheck for this configuration.
@@ -384,6 +384,7 @@ class ADMMPruner(IterativePruner): for i, wrapper in enumerate(self.get_modules_wrapper()): z = wrapper.module.weight.data + self.U[i] self.Z[i] = self._projection(z, wrapper.config['sparsity'], wrapper) + torch.cuda.empty_cache() self.U[i] = self.U[i] + wrapper.module.weight.data - self.Z[i] # apply prune
[IterativePruner->[compress->[_fresh_calculated]],ADMMPruner->[compress->[_projection],_projection->[calc_mask]],AGPPruner->[calc_mask->[calc_mask],compress->[update_epoch]]]
Compress the model with Adam. Returns the model with specified modules compressed.
could you briefly explain the reason of adding this line?
@@ -95,7 +95,7 @@ class UploadTest(unittest.TestCase): "my_lib/debug/libd.a": "//copy", "my_data/readme.txt": "//copy", "my_bin/executable": "//copy"}, path=reg_folder) - mkdir(self.client.cache.export_sources(self.ref)) + mkdir(self.client.cache.package_layout(self.ref).export_sources()) manifest = FileTreeManifest.create(reg_folder) manifest.time = '123123123' manifest.save(reg_folder)
[UploadTest->[upload_json_test->[_get_client],upload_error_test->[_get_client],setUp->[_get_client]]]
Set up the cache. Save the package folder.
`self._cache.export_sources(ref, short_paths=False)` vs `self._cache.package_layout(ref, short_paths=None).export_sources()`
@@ -292,8 +292,9 @@ func newCompactor( Help: "Number of tenants failed processing during the current compaction run. Reset to 0 when compactor is idle.", }), blocksMarkedForDeletion: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Name: "cortex_compactor_blocks_marked_for_deletion_total", - Help: "Total number of blocks marked for deletion in compactor.", + Name: "cortex_compactor_blocks_marked_for_deletion_total", + Help: "Total number of blocks marked for deletion in compactor.", + ConstLabels: prometheus.Labels{"reason": "compaction"}, }), garbageCollectedBlocks: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_garbage_collected_blocks_total",
[running->[Stop,compactUsers,NewTicker,Chan,DurationWithJitter,Wrap,Done],compactUsers->[RemoveAll,Warn,compactUserWithRetries,Inc,Info,Set,IsNotExist,Error,Stat,ownUser,discoverUsersWithRetries,listTenantsWithMetaSyncDirectories,metaSyncDirForUser,TenantDeletionMarkExists,Debug,SetToCurrentTime,Log,Err,IsDir,Shuffle],discoverUsers->[TrimSuffix,Iter],compactUserWithRetries->[NewBackoff,Wait,Ongoing,compactUser],Validate->[String,Errorf],ownUser->[Write,New32a,Sum32,Errorf,Get],RegisterFlags->[StringVar,Sprintf,RegisterFlags,BoolVar,DurationVar,Var,IntVar],discoverUsersWithRetries->[NewBackoff,Wait,Ongoing,discoverUsers],stopping->[StopAndAwaitTerminated,StopManagerAndAwaitStopped,Background],listTenantsWithMetaSyncDirectories->[ReadDir,HasPrefix,Name,IsDir],starting->[StartAndAwaitRunning,Warn,WatchManager,NewUsersScanner,NewNoopFlushTransferer,bucketClientFactory,BucketWithGlobalMarkers,Wrap,Info,WaitRingStability,StartManagerAndAwaitHealthy,StopAsync,ToLifecyclerConfig,New,NewManager,DurationWithJitter,NewFailureWatcher,Log,WaitInstanceState,String,NewLifecycler,blocksCompactorFactory],metaSyncDirForUser->[Join],compactUser->[NewDeduplicateFilter,NewConsistencyDelayMetaFilter,NewMetaFetcher,Join,NewIgnoreDeletionMarkFilter,WithUserID,Seconds,gatherThanosSyncerMetrics,blocksGrouperFactory,Duration,Compact,Wrap,NewRegistry,NewUserBucketClient,metaSyncDirForUser,NewSyncer,NewBucketCompactor],NewTSDBBasedPlanner,With,NewDefaultGrouper,NewPool,Info,Join,NewBasicService,ToMilliseconds,NewGauge,NewLeveledCompactor,Log,NewClient,NewCounter,Wrap,NewOp]
Metrics related to the current n - th tenant. c parses the national network element from the configuration and stores it in the disabledUsers.
Mention in the CHANGELOG that we've added the `reason` label to this metric and its values can be `compaction` or `retention`.
@@ -30,6 +30,11 @@ var ( Name: "cortex_ingester_ring_tokens_to_own", Help: "The number of tokens to own in the ring.", }) + shutdownDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "cortex_shutdown_duration_seconds", + Help: "Duration (in seconds) of cortex shutdown procedure (ie transfer or flush).", + Buckets: prometheus.DefBuckets, + }, []string{"op", "status"}) ) // LifecyclerConfig is the config to build a Lifecycler.
[ClaimTokensFor->[setTokens],loop->[GetState],changeState->[updateConsul,setState,GetState],autoJoin->[setTokens,setState,GetState],RegisterFlags->[RegisterFlags],initRing->[setTokens,setState,GetState],updateConsul->[getTokens,GetState]]
LifecyclerConfig is the config to build a Lifecycler. RegisterFlags registers the flags for the given FlagSet.
This tops out at 10 seconds? My ingesters take much longer than that to transfer or flush.
@@ -163,12 +163,6 @@ const EXPERIMENTS = [ spec: 'https://github.com/ampproject/amphtml/issues/6106', cleanupIssue: 'https://github.com/ampproject/amphtml/pull/6351', }, - { - id: 'amp-action-macro', - name: 'AMP extension for defining action macros', - spec: 'https://github.com/ampproject/amphtml/issues/19494', - cleanupIssue: 'https://github.com/ampproject/amphtml/pull/19495', - }, { id: 'ios-fixed-no-transfer', name: 'Remove fixed transfer from iOS 12.2 and up',
[No CFG could be retrieved]
A list of possible AMP - specific identifiers. A custom iframe that allows the browser to scroll to the current frame when the AMP loads.
This just removes it from the list on the experiments page. To actually launch the extension, you have to add `"amp-action-macro: 1` to prod-config.json and canary-config.json. I'd revert this change.
@@ -226,7 +226,8 @@ namespace R2RDump _writer.WriteLine(); } - if (!_options.HideTransitions && rtf.Method.GcInfo?.Transitions != null && rtf.Method.GcInfo.Transitions.TryGetValue(codeOffset, out List<BaseGcTransition> transitionsForOffset)) + BaseGcInfo gcInfo = (_options.HideTransitions ? null : rtf.Method?.GcInfo()); + if (gcInfo != null && gcInfo.Transitions.TryGetValue(codeOffset, out List<BaseGcTransition> transitionsForOffset)) { string[] formattedTransitions = new string[transitionsForOffset.Count]; for (int transitionIndex = 0; transitionIndex < formattedTransitions.Length; transitionIndex++)
[TextDumper->[DumpHeader->[WriteDivider,SkipLine],DumpMethod->[SkipLine,WriteSubDivider],DumpSectionContents->[DumpBytes],DumpQueryCount->[SkipLine],DumpBytes->[SkipLine],DumpEntryPoints->[WriteDivider],DumpRuntimeFunction->[SkipLine],DumpAllMethods->[WriteDivider,SkipLine],DumpSection->[SkipLine,WriteSubDivider]]]
Dumps the disassembly instructions for the given instruction and instruction offsets, with an adjustment for the next non-zero instruction.
GC info should be cached instead of recreating it on every instruction.
@@ -126,7 +126,7 @@ func (c clientMock) GetGateways() []*v1alpha1.Gateway { } func (c clientMock) GetHTTPRoutes(namespace string, selector labels.Selector) ([]*v1alpha1.HTTPRoute, error) { - httpRoutes := make([]*v1alpha1.HTTPRoute, len(c.httpRoutes)) + var httpRoutes []*v1alpha1.HTTPRoute for _, httpRoute := range c.httpRoutes { if httpRoute.Namespace == namespace && selector.Matches(labels.Set(httpRoute.Labels)) {
[UpdateGatewayStatus->[Errorf],GetHTTPRoutes->[Matches,Set],AddToScheme,ReadFile,Sprintf,FromSlash,MustParseYaml]
GetHTTPRoutes returns a list of HTTPRoutes that match the selector.
why this change? AFAICS, it was probably more efficient before. Yes, it may overallocate as maybe we don't want all of the c.httpRoutes, but so will append (most of the time), since AFAIR, it simply doubles the already allocated memory.
@@ -95,6 +95,13 @@ class AmrWind(CMakePackage, CudaPackage): self.define_from_variant('AMR_WIND_TEST_WITH_FCOMPARE', 'tests'), ] + if '+mpi' in self.spec: + args += [ + '-DCMAKE_C_COMPILER=%s' % self.spec['mpi'].mpicc, + '-DCMAKE_CXX_COMPILER=%s' % self.spec['mpi'].mpicxx, + '-DCMAKE_Fortran_COMPILER=%s' % self.spec['mpi'].mpifc, + ] + if '+cuda' in self.spec: amrex_arch = ['{0:.1f}'.format(float(i) / 10.0) for i in self.spec.variants['cuda_arch'].value]
[AmrWind->[cmake_args->[upper,append,format,float,define,define_from_variant],depends_on,conflicts,process_amrex_constraints,version,variant],process_amrex_constraints->[product,join]]
Get CMakePackage arguments.
We actually need to delete this block. This causes problems on Summit.
@@ -29,7 +29,7 @@ static const char errormagic[] PROGMEM = "Error:"; static const char echomagic[] PROGMEM = "echo:"; #if NUM_SERIAL > 1 - int8_t serial_port_index = SERIAL_PORT; + int8_t serial_port_index = 0; #endif void serialprintPGM(PGM_P str) {
[No CFG could be retrieved]
Prints a PGM string to the serial output.
Whatever value is set here will act as the default serial port for all output. 0 is `SERIAL_PORT`, 1 is `SERIAL_PORT_2` and 0x7F will cause all general output to go to both serial ports. This is guaranteed because all changes to `serial_port_index` are temporary; it is always reset back to this initial value after the current g-code command completes.
@@ -51,7 +51,7 @@ func (e *Event) SetID(id string) { if e.Meta == nil { e.Meta = common.MapStr{} } - e.Meta["id"] = id + e.Meta["_id"] = id } func (e *Event) GetValue(key string) (interface{}, error) {
[Delete->[Delete],GetValue->[GetValue],PutValue->[Time,Put],HasPrefix,New]
SetID sets the id of the event.
Given the special nature of this field name and the desire to keep it consistent in multiple places, do you think we should make it an exported const?
@@ -290,6 +290,9 @@ class ContractService { // Convert money object to correct units for blockchain async moneyToUnits(money) { + if (!money) { + return '0' + } if (money.currency === 'ETH') { return Web3.utils.toWei(money.amount, 'ether') } else {
[No CFG could be retrieved]
Get the timestamp from the blockchain.
Should this throw an exception rather than returning '0' ? Under which scenario would it be valid to call this method with an undefined money argument ?
@@ -323,6 +323,11 @@ func (o DeployOptions) retry(config *deployapi.DeploymentConfig) error { return nil } +// nowFn is extracted from the cancel method for unit testing. +var nowFn = func() time.Time { + return unversioned.Now().Time +} + // cancel cancels any deployment process in progress for config. func (o DeployOptions) cancel(config *deployapi.DeploymentConfig) error { if config.Spec.Paused {
[getLogs->[Copy,Stream,Get,Close,DeploymentLogs],Validate->[New],deploy->[Fprintf,LatestDeploymentNameForConfig,getLogs,DeploymentConfigs,ReplicationControllers,Errorf,IsNotFound,DeploymentStatusFor,Get,Update],retry->[NewDeleteOptions,Fprintf,Sprintf,LatestDeploymentNameForConfig,getLogs,List,Delete,ReplicationControllers,Errorf,IsNotFound,DeploymentStatusFor,Get,DeployerPodSelector,Pods,Update],RunDeploy->[Describe,Object,NamespaceParam,ResourceNames,Do,getLogs,Fprint,NewLatestDeploymentsDescriber,deploy,SingleResourceType,retry,Errorf,cancel,reenableTriggers],cancel->[Fprintf,Join,ByLatestVersionDesc,FormatInt,HumanDuration,Sort,ToLower,List,DeploymentVersionFor,ConfigSelector,ReplicationControllers,Errorf,IsTerminatedDeployment,Now,DeploymentStatusFor,IsDeploymentCancelled,Sub,Update],reenableTriggers->[Fprintf,Fprintln,Join,DeploymentConfigs,Update],Complete->[DefaultNamespace,Object,UniversalDecoder,ClientMapperFunc,New,NewBuilder,Clients],Flags,Error,Sprintf,Validate,UsageError,RunDeploy,CheckErr,Complete,BoolVar]
retry attempts to retry a deployment. cancel cancels any deployment process in progress for a given config and deletes its deployer pods.
i wish we have more formal version of this (i think I saw something similar in kube code)... something like an interface
@@ -175,12 +175,14 @@ public abstract class AbstractMessageChannel extends IntegrationObjectSupport this.managementOverrides.loggingConfigured = true; } - protected AbstractMessageChannelMetrics getMetrics() { + protected org.springframework.integration.support.management.AbstractMessageChannelMetrics getMetrics() { return this.channelMetrics; } @Override - public void configureMetrics(AbstractMessageChannelMetrics metrics) { + public void configureMetrics( + org.springframework.integration.support.management.AbstractMessageChannelMetrics metrics) { + Assert.notNull(metrics, "'metrics' must not be null"); this.channelMetrics = metrics; this.managementOverrides.metricsConfigured = true;
[AbstractMessageChannel->[getSendCountLong->[getSendCountLong],getInterceptors->[getInterceptors],ChannelInterceptorList->[postSend->[postSend],preReceive->[add,preReceive],afterReceiveCompletion->[afterReceiveCompletion],remove->[remove],preSend->[preSend,add],add->[add],afterSendCompletion->[afterSendCompletion],postReceive->[postReceive]],reset->[reset],getMeanErrorRatio->[getMeanErrorRatio],getTimeSinceLastSend->[getTimeSinceLastSend],getMeanSendRate->[getMeanSendRate],getErrorRate->[getErrorRate],getSendErrorCount->[getSendErrorCount],getStandardDeviationSendDuration->[getStandardDeviationSendDuration],getMaxSendDuration->[getMaxSendDuration],getSendRate->[getSendRate],send->[send],getMeanErrorRate->[getMeanErrorRate],getSendDuration->[getSendDuration],onInit->[onInit],getMeanSendDuration->[getMeanSendDuration],getSendErrorCountLong->[getSendErrorCountLong],getMinSendDuration->[getMinSendDuration],getSendCount->[getSendCount]]]
This method is used to configure the logging and metrics for this channel.
Shouldn't methods like this be deprecated as well? Kinda what is going to happen to them when we remove classes for their signature?..
@@ -275,7 +275,11 @@ func (i *Image) createNamesToPull() ([]*pullStruct, error) { } for _, registry := range searchRegistries { decomposedImage.registry = registry - srcRef, err := alltransports.ParseImageName(decomposedImage.assembleWithTransport()) + imageName := decomposedImage.assembleWithTransport() + if i.HasShaInInputName() { + imageName = fmt.Sprintf("%s%s/%s", decomposedImage.transport, registry, i.InputName) + } + srcRef, err := alltransports.ParseImageName(imageName) if err != nil { return nil, errors.Wrapf(err, "unable to parse '%s'", i.InputName) }
[getPullListFromRef->[getPullStruct],pullImage->[getPullListFromRef]]
createNamesToPull creates a list of names to pull from the input image. This is a helper function that returns the pullNames and nil .
There are exactly two callers to `assembleWithTransport`, and _both_ are now conditionalized with `HasShaInInputName`. I think this _very clearly_ shows that the digests should be the responsibility of the `decompose().assembleWithTransport()` function pair, and should be fixed in there instead of the callers fixing this up after them.
@@ -60,6 +60,11 @@ if sys.version_info < (3, 4): # pylint: disable=line-too-long CONSOLE_SCRIPTS = [ 'saved_model_cli = tensorflow.python.tools.saved_model_cli:main', + # We need to keep the TensorBoard command, even though the console script + # is now declared by the tensorboard pip package. If we remove the + # TensorBoard command, pip will inappropriately remove it during install, + # even though the command is not removed, just moved to a different wheel. + 'tensorboard = tensorboard.main:main', ] # pylint: enable=line-too-long
[InstallCommand->[finalize_options->[finalize_options]],InstallHeaders->[run->[mkdir_and_copy_file]],find_files]
Creates a class which can be used to install a single header. Creates a command that installs C ++ header files.
nit: I'd write either "the TensorBoard pip package" or "the tensorflow-tensorboard pip package" (because `tensorflow-tensorboard` is the pip package for the TensorBoard application).
@@ -180,9 +180,15 @@ func (t *UIThreadLoader) groupThreadView(ctx context.Context, uid gregor1.UID, t } if activeMap == nil && len(usernames) > 0 { - activeMap = make(map[string]struct{}) - for _, uid := range conv.Conv.Metadata.AllList { - activeMap[uid.String()] = struct{}{} + allList, err := t.G().ParticipantsSource.Get(ctx, uid, convID, dataSource) + if err == nil { + activeMap = make(map[string]struct{}) + for _, uid := range allList { + activeMap[uid.String()] = struct{}{} + } + } else { + t.Debug(ctx, "groupGeneric: failed to form active map, could not get participants: %s", + err) } }
[waitForOnline->[IsOffline],Load->[messageIDControlToPagination],LoadNonblock->[setUIStatus,singleFlightConv,applyPagerModeOutgoing,waitForOnline,isConsolidateMsg,dispatchOldPagesJob,mergeLocalRemoteThread,applyPagerModeIncoming,shouldIgnoreError,messageIDControlToPagination,groupThreadView],mergeLocalRemoteThread->[isConsolidateMsg],shouldIgnoreError->[shouldIgnoreError],groupThreadView->[groupGeneric]]
groupThreadView groups messages in a thread view. It is a helper that filters out messages that are not part of the group, and groups valid system messages from a single adder into one system message.
should break out of here or we can error on a nil map
@@ -238,8 +238,8 @@ static JsonElement* VarRefValueToJson(EvalContext *ctx, const FnCall *fp, const break; case RVAL_TYPE_CONTAINER: - convert = value; - *allocated = false; + convert = JsonCopy(value); + *allocated = true; break; case RVAL_TYPE_SCALAR:
[No CFG could be retrieved]
Reads a variable from the EvalContext and converts it to a JSON object. indices - the number of indices in the ref.
This will slow down all function calls that use this path. Why is it necessary to always copy?
@@ -5,6 +5,7 @@ using Xunit; namespace System.Net.Mail.Tests { + [PlatformSpecific(~TestPlatforms.Browser)] // SmtpClient is not supported on Browser public class EhloParseExtensionsTest { private SmtpConnection _smtpConnection;
[EhloParseExtensionsTest->[ParseExtensions_WithOnlyAuthGssapi_AuthTypesShouldBeCorrect->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithOnlyAuthEqualsLogin_ShouldSupportAuthLogin->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithOnlyAuth_ShouldSupportAllAuthTypesAdvertised->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithOnlyAuthLogin_AuthTypesShouldBeCorrect->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithRandomGarbage_ShouldNotFail_AndShouldNotSupportAnyAuthTypes->[ParseExtensions,DSNEnabled,False,AuthSupported],ParseExtensions_WithBothAuthEqualsLoginAndAuth_ShouldTakeSettingsFromAuth->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithBothAuthAndAuthEqualsLogin_ShouldTakeSettingsFromAuth->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithNoAuthSpecified_ShouldNotSupportAnyAuthentcation->[ParseExtensions,DSNEnabled,False,AuthSupported],ParseExtensions_WithOnlyAuthNtlm_AuthTypesShouldBeCorrect->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithBothAuthTypes_AndExtraExtensions_AuthTypesShouldBeCorrect->[ParseExtensions,AuthSupported,False,DSNEnabled,True],ParseExtensions_WithNoMechanismsAdvertised_ShouldNotSupportAnyAuthTypes->[ParseExtensions,ServerSupportsEai,AuthSupported,False,DSNEnabled,True]]]
Ehlo tests for the various authentication modules. Checks that SMTP authentication is supported by all modules.
Just clarifying, does SmtpClient come into play because of SmtpConnection, that there is only a SmtpConnection when there is a SmtpClient?
@@ -62,10 +62,10 @@ func (d *Dispatcher) NewVCHFromID(id string) (*vm.VirtualMachine, error) { d.op.Errorf("Failed to find VM %q: %s", moref, err) return nil, err } - vmm = vm.NewVirtualMachine(d.op, d.session, ovm.Reference()) + d.appliance = vm.NewVirtualMachine(d.op, d.session, ovm.Reference()) // check if it's VCH - if ok, err = d.isVCH(vmm); err != nil { + if ok, err = d.isVCH(d.appliance); err != nil { d.op.Error(err) return nil, err }
[FetchAndMigrateVCHConfig->[Reference,Begin,OptionValueMap,FetchExtraConfigBaseOptions,decryptVCHConfig,End,Errorf,MigrateApplianceConfig,Debugf],listResourcePools->[Join,Do,ResourcePoolList,IsNotFoundError,IsTransientError],GetVCHConfig->[Reference,Error,Begin,OptionValueMap,FetchExtraConfigBaseOptions,IsCreating,decryptVCHConfig,End,Errorf,SetMoref],search->[listResourcePools,searchResourcePools,Begin,Name,End,Errorf,ComputeResourceList],searchResourcePools->[findVCHs,Join,VirtualAppList,Warnf,Errorf],findVCHs->[Reference,GetChildrenVMs,NewResourcePool,Join,isVCH,Begin,Name,End,GetChildVM,Errorf,Debugf],NewVCHFromComputePath->[Reference,InitDiagnosticLogsFromVCH,NewResourcePool,Join,Error,isVCH,Begin,Sprintf,ResourcePool,End,ResourcePoolHelper,Errorf,GetChildVM,GetCluster,findVirtualApp,Debugf],GetNoSecretVCHConfig->[Reference,Error,Begin,OptionValueMap,FetchExtraConfigBaseOptions,Decode,IsCreating,End,Errorf,SetMoref,MapSource],NewVCHFromID->[Reference,InitDiagnosticLogsFromVCH,NewResourcePool,NewVirtualMachine,isVCH,Error,Begin,ResourcePool,End,Errorf,Debugf,ObjectReference,GetCluster,Debug],SearchVCHs->[Join,search,Begin,SetDatacenter,Name,Warnf,DatacenterList,End,Errorf]]
NewVCHFromID creates a new VCH from a given ID. Initialize diagnostic logs from VCH.
doubt it matters, but good practice would be to only assign to `d.appliance` after we've confirmed that it's a VCH.
@@ -152,6 +152,10 @@ exports.rules = [ { filesMatching: '{src,extensions}/**/*.js', mustNotDependOn: '3p/**/*.js', + whitelist: [ + 'extensions/amp-analytics/0.1/transport.js->' + + '3p/iframe-messaging-client.js', + ], }, // Rules for extensions.
[No CFG could be retrieved]
A list of dependency rules for AMP source files, specifying which files must not depend on which others.
This exception is added because the amp-analytics tag needs to send messages to the third-party iframe. It covers the import of the file that contains sendMessage().
@@ -134,10 +134,12 @@ class Emitter: The caller should ensure that the (id, module) pair cannot overlap with other calls to this method within a compilation - unit. + group. """ + lib_prefix = '' if not module else self.get_module_lib_prefix(module) + star_maybe = '*' if lib_prefix else '' suffix = self.names.private_name(module or '', id) - return '{}{}'.format(prefix, suffix) + return '{}{}{}{}'.format(star_maybe, lib_prefix, prefix, suffix) def type_struct_name(self, cl: ClassIR) -> str: return self.static_name(cl.name, cl.module_name, prefix=TYPE_PREFIX)
[Emitter->[emit_arg_check->[emit_line,c_error_value],c_error_value->[c_undefined_value],emit_dec_ref->[emit_line,emit_dec_ref],tuple_c_declaration->[ctype_spaced,ctype],emit_label->[label],type_struct_name->[static_name],emit_error_check->[emit_line,tuple_undefined_check_cond,c_error_value,emit_lines],tuple_undefined_check_cond->[tuple_undefined_check_cond],emit_tuple_cast->[emit_line,emit_cast,emit_label,new_label,emit_lines],emit_box->[emit_line,declare_tuple_struct,emit_box,emit_inc_ref,temp_name,emit_lines],emit_inc_ref->[emit_line,emit_inc_ref],emit_unbox->[emit_line,emit_arg_check,c_error_value,declare_tuple_struct,ctype,pretty_name,emit_cast,emit_tuple_cast,emit_unbox,emit_inc_ref,temp_name,emit_lines],emit_line->[dedent,indent],emit_gc_clear->[emit_gc_clear,emit_line,c_undefined_value,ctype],emit_cast->[emit_line,type_struct_name,ctype,pretty_name,emit_lines],tuple_undefined_value_helper->[c_undefined_value,tuple_undefined_value_helper],declare_tuple_struct->[tuple_c_declaration,HeaderDeclaration],emit_gc_visit->[ctype,emit_line,emit_gc_visit],pretty_name->[pretty_name],ctype_spaced->[ctype],emit_printf->[emit_line],emit_union_cast->[emit_line,c_error_value,emit_cast,emit_label,new_label],emit_lines->[emit_line]]]
Create a name for a C static variable.
It was unclear why we might have `*` in a name of a C static variable.
@@ -77,10 +77,6 @@ abstract class StructuredDataSource<K> implements DataSource { this.casTarget = casTarget; this.isSource = isSource; - if (schema.valueContainsAny(SystemColumns.systemColumnNames())) { - throw new IllegalArgumentException("Schema contains system columns in value schema"); - } - final Set<ColumnName> keyNames = schema.key().stream() .map(Column::name) .collect(Collectors.toSet());
[StructuredDataSource->[checkSchemas->[checkSchemas],toString->[getName],getKafkaTopicName->[getKafkaTopicName],getCompatMessage->[getName,toString]]]
Private constructor for the object. Returns the logical schema for a given node ID.
Noting this as another (potentially) functional change that ideally would not be lumped together with the non-functional feature flag changes in this PR.
@@ -18,6 +18,7 @@ namespace System.Text.Json // Support JSON Path on exceptions. public byte[]? JsonPropertyName; // This is Utf8 since we don't want to convert to string until an exception is thown. public string? JsonPropertyNameAsString; // This is used for dictionary keys and re-entry cases that specify a property name. + internal byte[]? DictionaryKeyName; // This will contain the Utf8 Json property name that represents a dictionary key; used to defer parsing on async/re-entry. // Validation state. public int OriginalDepth;
[ReadStackFrame->[Reset->[None,EndProperty],IsProcessingDictionary->[ClassType,Dictionary],IsProcessingEnumerable->[ClassType,Enumerable],InitializeReEntry->[ClassType,Invalid,PolicyProperty,Assert,GetOrAddClass],EndProperty->[None],EndElement->[None]]]
Contains information about a single object in the JSON format, including the property name.
I realized that I can re-use `JsonPropertyName` for this purpose, however, `JsonPropertyName` will no longer be exclusive for exceptions. @steveharter @layomia
@@ -14,7 +14,8 @@ class AssignmentsController < ApplicationController def show assignment = Assignment.find(params[:id]) @assignment = assignment.is_peer_review? ? assignment.parent_assignment : assignment - if @assignment.is_hidden + @section_hidden = SectionDueDate.find_by(assessment: params[:id], section: current_user.section)&.is_hidden + if @assignment.is_hidden || @section_hidden render 'shared/http_status', formats: [:html], locals: {
[AssignmentsController->[start_timed_assignment->[update],new->[new],create->[new],set_boolean_graders_options->[update]]]
Shows a single node in the system.
Make this a regular variable (not an instance variable) unless you intend to use this in a view later on.
@@ -98,6 +98,8 @@ public class FlowCatalog extends AbstractIdleService implements SpecCatalog, Mut MetricContext realParentCtx = parentMetricContext.or(Instrumented.getMetricContext(new org.apache.gobblin.configuration.State(), getClass())); this.metricContext = realParentCtx.childBuilder(FlowCatalog.class.getSimpleName()).build(); + putMeter = metricContext.contextAwareMeter("testGaas"); + this.metrics = new MutableStandardMetrics(this, Optional.of(config)); this.addListener(this.metrics); } else {
[FlowCatalog->[getSpecsWithTimeUpdate->[getSpecs],getSpec->[getSpec],getSpecs->[getSpecs],addListener->[addListener],exists->[exists],serialize->[serialize],registerWeakSpecCatalogListener->[registerWeakSpecCatalogListener],removeListener->[removeListener],remove->[remove],deserialize->[deserialize],put->[put],getSpecURIs->[getSpecURIs]]]
Creates a FlowCatalog object from the given configuration. Constructor for the SpecStore.
I don't think you are intending to use "testGaas" as the meter name.
@@ -1,6 +1,8 @@ require 'rails_helper' describe RequestPasswordReset do + let(:analytics) { FakeAnalytics.new } + describe '#perform' do context 'when the user is not found' do it 'sends the account registration email' do
[email,create,new,let,describe,instance_double,first,it,order,perform,to,find_with_email,before,with,t,last,require,default_scopes,receive,now,around,id,update!,context,hash_including,eq,run,and_return]
It creates a new user in the system and sends a password reset email. when the user is found and confirmed and the email address is not set then send a message.
Is this doing anything? Do we not have any coverage for throttled flows in these specs?
@@ -226,8 +226,8 @@ public class ProAI extends AbstractAI { } finally { data.releaseReadLock(); } - this.data = dataCopy; - s_battleCalculator.setGameData(dataCopy); + ProData.setData(dataCopy); + ProBattleUtils.setData(dataCopy); final PlayerID playerCopy = dataCopy.getPlayerList().getPlayerID(player.getName()); final IMoveDelegate moveDel = DelegateFinder.moveDelegate(dataCopy); final IDelegateBridge bridge = new ProDummyDelegateBridge(this, playerCopy, dataCopy);
[ProAI->[selectCasualties->[getGameData],selectAttackSubs->[getGameData],scrambleUnitsQuery->[scrambleUnitsQuery,getGameData],politicalActions->[politicalActions],stopGame->[stopGame],place->[place],getGameData->[getGameData],purchase->[purchase],retreatQuery->[retreatQuery,getGameData]]]
This method is called when a new game is created. Simulate the next phases until place or end of turn is reached. purchase.
See if you can avoid this. The way `cloneGameData` data works is by serialized the gameData object to disk and then reading it back. For a larger game file this can take seconds, a performance killer.
@@ -37,7 +37,5 @@ public interface MultimapAggregationState throw new UnsupportedOperationException(); } - long getEstimatedSize(); - int getEntryCount(); }
[reset->[UnsupportedOperationException],merge->[forEach]]
reset the cache to the initial state.
redundannt, part of `AccumulatorState` interface
@@ -95,7 +95,7 @@ func NewRegionSyncer(s Server) *RegionSyncer { tlsConfig: s.GetTLSConfig(), } syncer.mu.streams = make(map[string]ServerStream) - syncer.mu.closed = make(chan struct{}) + syncer.mu.clientCtx, syncer.mu.clientCancel = context.WithCancel(context.Background()) return syncer }
[broadcast->[Info,Error,Unlock,ZapError,RLock,Lock,Send,String,RUnlock],bindStream->[Lock,Unlock],Sync->[ClusterID,bindStream,syncHistoryRegion,WithStack,GetName,String,Errorf,GetMember,GetClientUrls,Info,Recv,GetClusterId,GetHeader],syncHistoryRegion->[ClusterID,Size,Warn,Now,ZapError,Duration,Info,Error,Int,GetStat,GetStartIndex,GetRegions,GetName,Since,Wait,Uint64,Name,Send,GetNextIndex,RecordsFrom,GetMeta,GetMember,String,GetLeader],RunServer->[ClusterID,Stop,broadcast,GetStat,NewTicker,GetMeta,GetNextIndex,Info,Record,GetLeader],GetRegionStorage,GetStorage,GetTLSConfig,NewBucketWithRate]
NewRegionSyncer returns a new region syncer. Index returns the last N N regions that have not yet been seen.
Just a question, why not create a sub-context from the server rather than using a `context.Background()`?
@@ -63,6 +63,10 @@ class DataUpdateScript < ApplicationRecord "#{self.class::NAMESPACE}::#{parsed_file_name.camelcase}".safe_constantize end + def save_error!(error) + update!(error: error) + end + private def parsed_file_name
[DataUpdateScript->[mark_as_finished!->[update!,current],scripts_to_run?->[to_sym,to_h,any?,size],filenames->[basename,map],insert_new_scripts->[map,insert_all,current],mark_as_failed!->[update!,current],parsed_file_name->[match],file_class->[safe_constantize],mark_as_run!->[update!,current],file_path->[class],scripts_to_run->[order],validates,freeze,enum]]
Returns the file_name_missing if the file_name_missing is not defined.
This might fail, I'm not entirely sure what Rails does here. If it calles `.to_s` on the exception object, it will only save the error message As it usually really help to save the exception class as well and the error message itself might not be very self explanatory, I propose to replace `error: error` with `error: "#{e.class}:#{e.message}` or something similar I also think this should be part of the `mark_as_failed!`, we probably don't need a separate method. Something like: `def mark_as_failed!(error)`
@@ -77,6 +77,11 @@ func runFlags(cmd *cobra.Command) { flags.StringVar(&runOpts.DetachKeys, detachKeysFlagName, containerConfig.DetachKeys(), "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or a comma separated sequence of `ctrl-<value>`, where `<value>` is one of: `a-cf`, `@`, `^`, `[`, `\\`, `]`, `^` or `_`") _ = cmd.RegisterFlagCompletionFunc(detachKeysFlagName, common.AutocompleteDetachKeys) + gpuFlagName := "gpus" + flags.String(gpuFlagName, "", "This is a Docker specific option and is a NOOP") + _ = cmd.RegisterFlagCompletionFunc(gpuFlagName, completion.AutocompleteNone) + _ = flags.MarkHidden("gpus") + if registry.IsRemote() { _ = flags.MarkHidden("preserve-fds") _ = flags.MarkHidden("conmon-pidfile")
[StringVar,DefineCreateFlags,ImageEngine,Fd,Warnf,SetExitCode,SetNormalizeFunc,SetInterspersed,MarkHidden,NewSpecGenerator,UintVar,IsFdInherited,Stat,MinimumNArgs,Errorf,GetContext,DefineNetFlags,NetFlagsToNetOptions,Wrapf,RegisterFlagCompletionFunc,ContainerRun,Remove,ToLower,FillOutSpecGen,DetachKeys,JoinErrors,IsRemote,Flag,Println,IsTerminal,ContainerEngine,BoolVarP,Flags,BoolVar]
runFlags is a function that can be used to run a container. run - command.
I wonder if it's worth printing a log message (Info level, probably) if this is actually set, notifying the user it has no effect
@@ -80,7 +80,7 @@ class Jetpack_Sync_Module_Terms extends Jetpack_Sync_Module { * * @param object the Term object */ - do_action( 'jetpack_sync_save_term', $term_object ); + do_action( 'jetpack_sync_save_term', $term_object, current_filter() ); } function set_taxonomy_whitelist( $taxonomies ) {
[Jetpack_Sync_Module_Terms->[enqueue_full_sync_actions->[prepare,get_col],estimate_full_sync_actions->[get_var,prepare]]]
Handler for the save_term action.
Lets split jetpack_sync_save_term into 2 events. One for added and the other for edited instead of passing the current_filter. We have been doing this in other places as well. It would be good to continuo this pattern. This will require a change to .com. I would keep jetpack_sync_save_term as the event when the term is edited.
@@ -80,15 +80,15 @@ class SeoTwigExtension extends \Twig_Extension ) { $template = 'SuluWebsiteBundle:Extension:seo.html.twig'; - @trigger_error( + @trigger_error(sprintf( 'This twig extension is deprecated and should not be used anymore, include the "%s".', $template - ); + )); $defaultLocale = null; $portal = $this->requestAnalyzer->getPortal(); if ($portal) { - $defaultLocale = $portal->getXDefaultLocalization()->getLocale(); + $defaultLocale = $portal->getDefaultLocalization()->getLocale(); } return $twig->render(
[SeoTwigExtension->[renderSeoTags->[getLocale,getPortal,render]]]
Renders SEO tags.
Why should we use the other default localization here? I thought the `x-default` was introduced especially for the SEO functionality.
@@ -19,8 +19,8 @@ from acme import messages logger = logging.getLogger(__name__) -# https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning -if six.PY2: +# Python does not validate certificates by default before version 2.7.9 +if sys.version_info < (2, 7, 9): requests.packages.urllib3.contrib.pyopenssl.inject_into_urllib3()
[ClientNetwork->[_get_nonce->[_add_nonce,head],post->[_check_response,_wrap_in_jws,_send_request,_add_nonce,_get_nonce],get->[_check_response,_send_request],head->[_send_request]],Client->[agree_to_tos->[update_registration],poll->[_authzr_from_response],request_challenges->[_authzr_from_response],check_cert->[_get_cert],update_registration->[_regr_from_response],refresh->[check_cert],fetch_chain->[_get_cert],poll_and_request_issuance->[retry_after,poll,request_issuance],request_domain_challenges->[request_challenges],register->[_regr_from_response]]]
Create an instance of a class. Creates a new registration resource.
Travis fails because this line is not covered. Could you please add `# pragma: no check` at the end of this line (note two prefix spaces)
@@ -154,4 +154,13 @@ abstract class WPSEO_Abstract_Post_Filter implements WPSEO_WordPress_Integration protected function get_post_types() { return array( 'post', 'page' ); } + + /** + * Checks if the current opened post type is supported. + * + * @return bool True when it is supported. + */ + protected function is_supported_post_type() { + return in_array( $this->get_current_post_type(), $this->get_post_types(), true ); + } }
[WPSEO_Abstract_Post_Filter->[add_filter_link->[get_query_val,get_label,get_post_total],render_hidden_input->[get_query_val],is_filter_active->[get_query_val],get_filter_url->[get_query_val]]]
Get post types.
This should receive a post type to check against as argument
@@ -144,6 +144,14 @@ var feedProps = ` }, "projects": { "type": "keyword" + }, + "parent_name": { + "type": "keyword", + "ignore_above": 256 + }, + "parent_type": { + "type": "keyword", + "ignore_above": 256 } } `
[Sprintf]
The fields of the verb object that are used in the verb - feed - index. DocType - > DocType.
Chef Actions have a parent type and parent name to allow a child-parent relationship.
@@ -641,6 +641,7 @@ public class DefaultMuleEvent implements MuleEvent, DeserializationPostInitialis public static MuleEvent copy(MuleEvent event) { DefaultMuleEvent eventCopy = new DefaultMuleEvent(event.getMessage(), event, new DefaultMuleSession(event.getSession())); eventCopy.flowVariables = ((DefaultMuleEvent) event).flowVariables.clone(); + eventCopy.error = event.getError(); return eventCopy; }
[DefaultMuleEvent->[transformMessageToString->[transformMessage],copy->[getSession,getMessage,DefaultMuleEvent],initAfterDeserialisation->[initAfterDeserialisation],setSecurityContext->[setSecurityContext],equals->[equals],getCorrelationId->[getCorrelationId,getParent],getSecurityContext->[getSecurityContext],getMessageAsString->[getMessageAsString],setFlowVariable->[setFlowVariable],getFlowVariableOrNull->[getFlowVariable],hashCode->[hashCode],transformMessage->[transformMessage],toString->[toString,getId],setEndpointFields->[isFlowConstructNonBlockingProcessingStrategy,resolveEventSynchronicity],getMuleContext->[getMuleContext]]]
Creates a copy of the given event.
You should copy in all copy constructors not here.
@@ -177,11 +177,17 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. + .. note:: + This norm API is different from `np.linalg.norm`. + This api supports high-order input tensors (rank >= 3), and certain axes need to be pointed out to calculate the norm. + But `np.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. + For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. + Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, - `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. + `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
[cross->[cross],bmm->[bmm],matmul->[__check_input],norm->[frobenius_norm->[frobenius_norm],frobenius_norm,vector_norm,p_matrix_norm,inf_norm],cholesky->[cholesky],histogram->[histogram],mv->[__check_input,mv]]
Normalizes the Frobenius or vector norm of a given tensor. Computes the frobenius and pnorm vectors along the last two dimensions. frobenius norm op is to calculate the frobenius norm of certain two This function creates a network with p_norm and frobenius_norm op.
and certain axes -> and certain axis
@@ -131,6 +131,12 @@ public class LivyHelper { } } + protected void initializeSpark(final InterpreterContext context, + final Map<String, Integer> userSessionMap) throws Exception { + interpret("val sqlContext = new org.apache.spark.sql.SQLContext(sc)\n" + + "import sqlContext.implicits._", context, userSessionMap); + } + public InterpreterResult interpretInput(String stringLines, final InterpreterContext context, final Map<String, Integer> userSessionMap,
[LivyHelper->[executeHTTP->[getRestTemplate],closeSession->[executeHTTP]]]
Creates a session. Interprets the input string and returns the session id. This method is called to parse the line l and check if the next line is a missing Interprets the input of the Spark Interpreter.
isn't Livy suppose to do this internally?
@@ -343,6 +343,11 @@ class File(graphene.ObjectType): required=False, description="Content type of the file." ) + def resolve_url(root, info): + return info.context.build_absolute_uri( + os.path.join(settings.MEDIA_URL, root.url) + ) + class PriceRangeInput(graphene.InputObjectType): gte = graphene.Float(description="Price greater than or equal to.", required=False)
[WishlistError->[WishlistErrorCode],IntRangeInput->[Int],LanguageDisplay->[LanguageCodeEnum,String],ExportError->[ExportErrorCode],ProductChannelListingError->[List,NonNull],StockError->[StockErrorCode],CollectionChannelListingError->[List,NonNull],Permission->[PermissionEnum,String],SeoInput->[String],Weight->[WeightUnitsEnum,Float],TaxType->[String],ShippingError->[ShippingErrorCode,NonNull,List],ProductWithoutVariantError->[List,NonNull],Error->[String],ShopError->[ShopErrorCode],AppError->[AppErrorCode,List,NonNull],ProductError->[List,ProductErrorCode,NonNull],BulkProductError->[List,NonNull,Int],File->[String],CollectionError->[CollectionErrorCode],GiftCardError->[GiftCardErrorCode],PluginError->[PluginErrorCode],StaffError->[List,NonNull],MenuError->[MenuErrorCode],UploadError->[UploadErrorCode],PermissionGroupError->[List,PermissionGroupErrorCode,NonNull],CheckoutError->[List,NonNull,CheckoutErrorCode],WarehouseError->[WarehouseErrorCode],OrderError->[ID,OrderErrorCode,NonNull,List],ChannelError->[ChannelErrorCode],WebhookError->[WebhookErrorCode],BulkStockError->[Int],PageError->[PageErrorCode,NonNull,List],AccountError->[AccountErrorCode],Job->[resolve_type->[get,type],DateTime,String,JobStatusEnum],DiscountError->[NonNull,List,DiscountErrorCode],DateRangeInput->[Date],AttributeError->[AttributeErrorCode],CountryDisplay->[String,Field],DateTimeRangeInput->[DateTime],Image->[get_adjusted->[build_absolute_uri,get_thumbnail,Image],String],MetadataError->[MetadataErrorCode],PaymentError->[PaymentErrorCode],TranslationError->[TranslationErrorCode],OrderSettingsError->[OrderSettingsErrorCode],InvoiceError->[InvoiceErrorCode],PriceRangeInput->[Float]]
Represents weight value in a specific weight unit. This is a helper function to create a new instance of the type specified in the tax gateway.
We should use `from urllib.parse import urljoin` instead of `os.path.join`.
@@ -52,14 +52,15 @@ def test_simulate_evoked(): # Generate times series for 2 dipoles stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times) - stc._data *= 1e-9 # Generate noisy evoked data iir_filter = [1, -0.9] - evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, snr, - tmin=0.0, tmax=0.2, iir_filter=iir_filter) + evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, + tmin=0.0, tmax=0.2, iir_filter=iir_filter, + nave=nave) assert_array_almost_equal(evoked.times, stc.times) assert_true(len(evoked.data) == len(fwd['sol']['data'])) + assert_true(evoked.nave == nave) # make a vertex that doesn't exist in fwd, should throw error stc_bad = stc.copy()
[test_simulate_evoked->[,max,read_cov,assert_raises,linspace,assert_almost_equal,assert_array_almost_equal,log10,read_forward_solution,simulate_evoked,copy,read_raw_fif,simulate_sparse_stc,assert_array_equal,mean,len,pick_types_forward,read_evokeds,assert_true,pick_types],run_tests_if_main,dirname,data_path,join,simplefilter]
Test simulation of evoked data. Simulate a series of evoked and a covariance matrix.
better to use `assert_equal` when possible because the error message is nicer
@@ -65,11 +65,14 @@ uint64_t crt_hlc_get(void) return ret; } -uint64_t crt_hlc_get_msg(uint64_t msg) +int crt_hlc_get_msg(uint64_t msg, uint64_t *hlc_out) { uint64_t pt = crt_hlc_localtime_get(); uint64_t hlc, ret, ml = msg & ~CRT_HLC_MASK; + if (ml > pt && ml - pt > crt_hlc_epsilon) + return -DER_HLC_SYNC; + do { hlc = crt_hlc; if ((hlc & ~CRT_HLC_MASK) < ml)
[No CFG could be retrieved]
Get the most significant 48 bits of time from the System Time. - - - - - - - - - - - - - - - - - -.
How is this error supposed to be handled? Does it indicate a 'fatal' condition or is it something we expect to happen intermittently with rpc being retried upon this error? Also, why is swim being isolated from those errors, and not other internal rpcs, for example uri lookup?
@@ -263,6 +263,12 @@ export class Viewer { || this.isWebviewEmbedded_ || !ampdoc.isSingleDoc()); + /** + * Whether the AMP document is embedded in a Chrome Custom Tab. + * @private @const {boolean} + */ + this.isCustomTab_ = this.win.location.search.indexOf('amp_agsa=1') != -1; + /** @private {boolean} */ this.hasBeenVisible_ = this.isVisible();
[Viewer->[replaceUrl->[origin,href,dev,removeFragment,parseUrl,getSourceOrigin],sendMessageInternal_->[findIndex,eventType,resolve,awaitResponse,getChannelError,responsePromise,data,reject],constructor->[documentStateFor,reportError,resolve,timerFor,parseInt,dev,isSingleDoc,getChannelError,isIframed,parseParams_,win,removeFragment,VISIBLE,assign,length,map],receiveMessage->[dev,responder,resolve,fire],setMessageDeliverer->[eventType,dev,data,responseResolver,forEach,awaitResponse],isTrustedViewerOrigin_->[some,protocol,test,parseUrl,hostname],hasCapability->[split],isTrustedReferrer_->[some,protocol,test,parseUrl,hostname],onMessage->[add],whenNextVisible->[resolve],onVisibilityChange_->[now],navigateToAmpUrl->[dict],toggleRuntime->[dev],isVisible->[VISIBLE],setVisibilityState_->[HIDDEN,INACTIVE,dev,PRERENDER,PAUSED,VISIBLE]],duplicateErrorIfNecessary,message,registerServiceBuilderForDoc,parseQueryString,setVisibilityState_]
A sequence of functions that can be called to initialize a sequence of functions. Private methods for handling the negative negative value of a negative value. Checks if a node is embedded in a viewer JS or a shadow doc.
This can also match with any prefix and suffix e.g. `?garbage_amp_agsa=12345`. Probably want to use `parseQueryString()` here.
@@ -137,7 +137,13 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { am.state = cfg.Peer } else if cfg.ShardingEnabled { level.Debug(am.logger).Log("msg", "starting tenant alertmanager with ring-based replication") - am.state = newReplicatedStates(cfg.UserID, cfg.ReplicationFactor, cfg.ReplicateStateFunc, cfg.GetPositionFunc, am.stop, am.logger, am.registry) + state := newReplicatedStates(cfg.UserID, cfg.ReplicationFactor, cfg.ReplicateStateFunc, cfg.GetPositionFunc, am.logger, am.registry) + + if err := state.Service.StartAsync(context.Background()); err != nil { + return nil, fmt.Errorf("failed to start state replication service: %v", err) + } + + am.state = state } else { level.Debug(am.logger).Log("msg", "starting tenant alertmanager without replication") am.state = &NilPeer{}
[mergePartialExternalState->[MergePartialState],StopAndWait->[Wait,Stop],Stop->[Close,Stop],ApplyConfig->[Set,Join,Stop,NewInhibitor,NewDispatcher,New,NewSilencer,NewRoute,Run,With,FromGlobs,Update],Handle,NewDispatcherMetrics,Duration,NewIntegration,With,WithRetention,Add,Done,NewAlerts,WithPrefix,Sum,New,WithLogger,Errorf,WithMaintenance,Len,NewMarker,NewPipelineBuilder,HasSuffix,Debug,Join,Uint64,NotFoundHandler,Log,Groups,SetBroadcast,Register,AddState,Sprintf,WithSnapshot,Background,NewGauge,WithMetrics,Maintenance]
New creates a new alertmanager instance. nflog is a helper function to create a notification log for a user.
This should never return an error given the start function for the service is `nil`, but I think it should be checked regardless, as one could easily decide to implement a start function in the future.
@@ -38,6 +38,11 @@ var interfaceConfig = { */ AUTHENTICATION_ENABLE: true, + /** + * If you are alone in the meeting, show an invite prompt in the header + */ + SHOW_INVITE_MORE_HEADER: true, + /** * The name of the toolbar buttons to display in the toolbar. If present, * the button will display. Exceptions are "livestreaming" and "recording"
[No CFG could be retrieved]
Displays a toolbar of the user s network blocks. Displays the settings for a specific .
Can you please reverse this and call it `HIDE_INVITE_MORE_HEADER` defaulting to `false`. Otherwise those updating but not changing interface_config.js will get different behavior.
@@ -156,6 +156,8 @@ internal class Xcode dllMap.AppendLine($" mono_dllmap_insert (NULL, \"{aFileName}\", NULL, \"__Internal\", NULL);"); } + dllMap.AppendLine($" mono_dllmap_insert (NULL, \"System.Globalization.Native\", NULL, \"__Internal\", NULL);"); + File.WriteAllText(Path.Combine(binDir, "runtime.m"), Utils.GetEmbeddedResource("runtime.m") .Replace("//%DllMap%", dllMap.ToString())
[Xcode->[BuildAppBundle->[Append,iOS,LogInfo,Sum,Combine,GetFileNameWithoutExtension,tvOS,RunProcess,GetDirectoryName,ToString,Length],GenerateXCode->[StartsWith,Add,Copy,NewLine,Append,GetFiles,Combine,Any,MacCatalyst,IsNullOrEmpty,Remove,GetFileNameWithoutExtension,RunProcess,GetFileName,ToString,ToArray,AppendLine,iOS,WriteAllText,Replace,GetEmbeddedResource],iOS,tvOS,RunProcess]]
Generate XCode for a given package. Adds a new library to the library list and adds the necessary libraries to the library list. Adds CMake files and CMake lists to the given binDir.
I did not realize we're doing this in `AppleAppBuilder`. That's a little unfortunate, and before net6 ships I'd like to completely get rid of (or at least simplify) the dllmap code.
@@ -614,14 +614,14 @@ class Jetpack_Likes_Settings { break; } - switch( $reblogs_new_state ) { - case 'off' : + switch ( (bool) $reblogs_new_state ) { + case false: if ( true == $reblogs_db_state && ! $this->in_jetpack ) { $g_gif = file_get_contents( 'https://pixel.wp.com/g.gif?v=wpcom-no-pv&x_reblogs=disabled_reblogs' ); } update_option( 'disabled_reblogs', 1 ); break; - case 'on' : + case true: default: if ( false == $reblogs_db_state && ! $this->in_jetpack ) { $g_gif = file_get_contents( 'https://pixel.wp.com/g.gif?v=wpcom-no-pv&x_reblogs=reenabled_reblogs' );
[Jetpack_Likes_Settings->[is_index_enabled->[get_options],is_likes_visible->[is_post_likeable,is_enabled_sitewide],admin_settings_callback->[reblogs_enabled_sitewide,is_enabled_sitewide],is_attachment_enabled->[get_options],meta_box_save->[is_enabled_sitewide],admin_settings_init->[is_enabled_sitewide],admin_settings_showbuttonon_init->[get_options],is_single_page_enabled->[get_options],is_post_likeable->[is_enabled_sitewide]]]
This is the callback for the admin settings page. on - disable - reblogs on - enable - comment - likes on - disable.
Same thing here, let's avoid typecasting the value directly.
@@ -1278,6 +1278,10 @@ class Contact } } + if (!empty($follow_link) || !empty($unfollow_link)) { + $contact_drop_link = ''; + } + /** * Menu array: * "name" => [ "Label", "link", (bool)Should the link opened in a new tab? ]
[Contact->[photoMenu->[t],addRelationship->[t],getPostsFromUrl->[isMobile,getItemsPerPage,renderMinimal,get,getQueryString,getStart],createFromProbe->[getUrlPath,getHostname,redirect,get,t],markForArchival->[get],getAccountType->[t]]]
Returns a menu of photos for a contact This function is used to create a link to a contact in a new tab. This function is used to display a menu of the contact s photos. This function is used to create a menuitem object that can be used to create a menu.
What is the case where you can't neither follow nor unfollow a contact?
@@ -181,7 +181,8 @@ func (s *IdentifyState) computeKeyDiffs(dhook func(keybase1.IdentifyKey) error) observedEldest := s.u.GetEldestKID() if s.track != nil { trackedEldest := s.track.GetEldestKID() - if observedEldest.NotEqual(trackedEldest) { + if observedEldest.NotEqual(trackedEldest) || + s.u.GetCurrentEldestSeqno() > s.track.GetTrackedLinkSeqno() { diff := TrackDiffNewEldest{tracked: trackedEldest, observed: observedEldest} s.res.KeyDiffs = append(s.res.KeyDiffs, diff) display(observedEldest, diff)
[computeKeyDiffs->[getLastDelegationSig],Precompute->[computeTrackDiffs,initResultList,computeRevokedProofs]]
computeKeyDiffs computes the differences between the current key and the eldest key. Keys - returns all keys in the system.
@patrickxb quick look at a change in how resets in tracked users are detected?
@@ -1002,6 +1002,7 @@ public abstract class IncrementalIndex<AggregatorType> extends AbstractIndex imp * {@link RollupFactsHolder} needs concurrent collections, that are not present in fastutil. */ private int rowIndex; + private long dimsKeySize; TimeAndDims( long timestamp,
[IncrementalIndex->[loadDimensionIterable->[isEmpty],addNewDimension->[size,add],getColumnNames->[getDimensionNames,getMetricNames],toTimeAndDims->[formatRow,add],MetricDesc->[getName],getMaxTime->[getMaxTimeMillis,isEmpty],getMinTimeMillis->[getMinTimeMillis],PlainFactsHolder->[putIfAbsent->[getTimestamp,setRowIndex,putIfAbsent,add],concat->[iterator,concat],keySet->[concat],clear->[clear],iterator->[iterator]],RollupFactsHolder->[getMaxTimeMillis->[getTimestamp],putIfAbsent->[setRowIndex,putIfAbsent],keySet->[keySet],getMinTimeMillis->[getTimestamp],clear->[clear],timeRangeIterable->[keySet,TimeAndDims],iterator->[iterator]],getDimensionIndex->[getDimension],TimeAndDimsComp->[compare->[compare,getIndexer]],FloatMetricColumnSelector->[isNull->[isNull],getFloat->[isNull,getMetricFloatValue]],makeColumnSelectorFactory->[IncrementalIndexInputRowColumnSelectorFactory->[makeDimensionSelector->[makeDimensionSelector],makeColumnValueSelector->[makeColumnValueSelector],getColumnCapabilities->[getColumnCapabilities]],IncrementalIndexInputRowColumnSelectorFactory,makeColumnSelectorFactory],getMaxTimeMillis->[getMaxTimeMillis],ObjectMetricColumnSelector->[getObject->[getMetricObjectValue]],DoubleMetricColumnSelector->[isNull->[isNull],getDouble->[isNull,getMetricDoubleValue]],iterableWithPostAggregations->[iterator->[getAggVal,iterator,getAggsForRow,getDimensions]],TimeAndDims->[hashCode->[getIndexer],equals->[getIndexer]],getInterval->[getMaxTimeMillis,isEmpty],LongMetricColumnSelector->[getLong->[isNull,getMetricLongValue],isNull->[isNull]],add->[addToFacts,add],getMinTime->[isEmpty,getMinTimeMillis],iterator->[iterator]]]
Creates a new object that represents a single column of a column - specific type. This class is used to store the data in the object.
`dimsKeySize` could be final. And anything that could be final, should be final.
@@ -475,11 +475,15 @@ func (i *Ingester) MetricsForLabelMatchers(ctx old_ctx.Context, req *client.Metr // UserStats returns ingestion statistics for the current user. func (i *Ingester) UserStats(ctx old_ctx.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, fmt.Errorf("no user id") + } i.userStatesMtx.RLock() defer i.userStatesMtx.RUnlock() - state, err := i.userStates.getOrCreate(ctx) - if err != nil { - return nil, err + state, ok := i.userStates.get(userID) + if !ok { + return &client.UserStatsResponse{}, nil } return &client.UserStatsResponse{
[Shutdown->[Shutdown],RegisterFlags->[RegisterFlags]]
UserStats returns statistics about the user.
Do you think it might make sense to make `get` take a context (like `getOrCreate`)?
@@ -730,3 +730,17 @@ func checkSeedAssociations(ctx context.Context, c client.Client, seedName string return nil } + +func mustEnableVPA(ctx context.Context, c client.Client, shoot *gardencorev1beta1.Shoot) (bool, error) { + if err := c.Get(ctx, kutil.Key(shoot.Status.TechnicalID, "vpa-admission-controller"), &appsv1.Deployment{}); err != nil { + if apierrors.IsNotFound(err) { + // VPA deployment in shoot namespace was not found, so we have to enable the VPA for this seed until it's + // being deployed. + return true, nil + } + return false, err + } + + // VPA deployment in shoot namespace was found, so we don't need to enable the VPA for this seed. + return false, nil +}
[Reconcile->[DeepCopy,ForGarden,Infof,Sprintf,ForShoot,Client,NewShootLogger,Errorf,GetClient,Event,TODO],reconcileShootedSeedRegistrationKey->[Reconcile,Shoots,Errorf,IsNotFound,Get,ReadShootedSeed,Debugf],seedRegistrationUpdate->[seedRegistrationAdd,DeepEqual],seedRegistrationAdd->[SplitMetaNamespaceKey,Add,MetaNamespaceKeyFunc],IsAlreadyExists,Before,DeepCopy,BuildBootstrapperName,Getenv,Apply,Values,Now,Delete,SetName,ParseQuantity,RESTConfig,Seeds,MarshalKubeconfigWithToken,BootstrapTokenFrom,AddToScheme,ReadFile,FindImage,WithKind,New,UTC,SetNamespace,Errorf,Key,Create,Join,ChartApplier,SetKind,IsNoMatchError,NewControllerRef,CreateOrUpdate,ComputeSHA256Hex,Get,BootstrapTokenSecretName,NewScheme,SetAPIVersion,MergeStringMaps,Sprintf,CoreV1beta1,ConvertToVersion,Client,GardenCore,String,Parse,IsNotFound,ComputeBootstrapToken,ShootWantsVerticalPodAutoscaler,IgnoreNotFound]
null - > null.
Shouldn't we check for a `deletionTimestamp` in case a deployment was found? Maybe the deployment is not gone, yet.
@@ -93,6 +93,9 @@ public class DictionaryBlock this.retainedSizeInBytes = INSTANCE_SIZE + dictionary.getRetainedSizeInBytes() + sizeOf(ids); if (dictionaryIsCompacted) { + if (dictionary instanceof DictionaryBlock) { + throw new IllegalArgumentException("compacted dictionary should not have dictionary base block"); + } this.sizeInBytes = this.retainedSizeInBytes; this.uniqueIds = dictionary.getPositionCount(); }
[DictionaryBlock->[getLong->[getLong],getLoadedBlock->[getLoadedBlock,getPositionCount,DictionaryBlock],getShort->[getShort],equals->[equals],getInt->[getInt],copyRegion->[DictionaryBlock],getByte->[getByte],getSlice->[getSlice],getRegionSizeInBytes->[getSizeInBytes,getPositionCount],getSingleValueBlock->[getSingleValueBlock],writePositionTo->[writePositionTo],writeBytesTo->[writeBytesTo],calculateCompactSize->[getPositionCount],compareTo->[compareTo],getObject->[getObject],getPositions->[DictionaryBlock,getPositionCount],compact->[DictionaryBlock,getPositionCount,getId,isCompact,copyPositions],bytesEqual->[bytesEqual],isCompact->[calculateCompactSize,getPositionCount],bytesCompare->[bytesCompare],getRegion->[DictionaryBlock],isNull->[isNull],getSliceLength->[getSliceLength],getEstimatedDataSizeForStats->[getEstimatedDataSizeForStats],isLoaded->[isLoaded],hash->[hash],getPositionsSizeInBytes->[getPositionsSizeInBytes,getPositionCount],retainedBytesForEachPart->[getRetainedSizeInBytes],getLogicalSizeInBytes->[getPositionCount],toString->[toString,getPositionCount],copyPositions->[copyPositions,DictionaryBlock]]]
Fields of the DictionaryBlock are not copied. Replies the long or slice of the object at the given position.
`checkArgument(!(dictionary instanceof DictionaryBlock), ...`
@@ -540,9 +540,7 @@ class Toolbox(QObject, Extension): @pyqtSlot(str, result = bool) def isEnabled(self, package_id: str) -> bool: - if package_id in self._plugin_registry.getActivePlugins(): - return True - return False + return package_id in self._plugin_registry.getActivePlugins() # Check for plugins that were installed with the old plugin browser def isOldPlugin(self, plugin_id: str) -> bool:
[Toolbox->[resetMaterialsQualitiesAndUninstall->[_resetUninstallVariables,closeConfirmResetDialog],launch->[_restart],_updateInstalledModels->[_convertPluginMetadata],install->[_updateInstalledModels],uninstall->[_updateInstalledModels],_fetchPackageUpdates->[_prepareNetworkManager],_onRequestFinished->[resetDownload,isLoadingComplete],checkPackageUsageAndUninstall->[_createDialog],_onNetworkAccessibleChanged->[resetDownload],_fetchPackageData->[_prepareNetworkManager],_onDownloadComplete->[install,openLicenseDialog]]]
Check if a plugin is enabled.
This is just refactor, doesn't relate to the ticket.
@@ -47,6 +47,7 @@ from tensorflow.python.ops.control_flow_ops import no_op from tensorflow.python.ops.control_flow_ops import tuple from tensorflow.python.ops.control_flow_ops import cond from tensorflow.python.ops.control_flow_ops import case +from tensorflow.python.ops.control_flow_ops import smart_cond from tensorflow.python.ops.control_flow_ops import while_loop from tensorflow.python.ops.data_flow_ops import * from tensorflow.python.ops.functional_ops import *
[remove_undocumented]
Imports a single node from the network. Imports all the variables from the system.
can you add this to tf.contrib.framework instead of to the core api?
@@ -0,0 +1,13 @@ +class AuthAppLoginOptionPolicy + def initialize(user) + @user = user + end + + def configured? + !user.otp_secret_key.nil? + end + + private + + attr_reader :user +end
[No CFG could be retrieved]
No Summary Found.
@monfresh These changes are all out of scope for this PR. I'm handling this refactoring in parallel work, as we already discussed.
@@ -36,6 +36,7 @@ import org.rstudio.core.client.widget.OperationWithInput; import org.rstudio.core.client.widget.ProgressOperationWithInput; import org.rstudio.core.client.widget.Toolbar; import org.rstudio.core.client.widget.ToolbarPopupMenu; +import org.rstudio.studio.client.application.events.EventBus; import org.rstudio.studio.client.common.FileDialogs; import org.rstudio.studio.client.common.GlobalDisplay; import org.rstudio.studio.client.common.filetypes.FileTypeRegistry;
[FilesPane->[getSelectedFiles->[getSelectedFiles],selectNone->[selectNone],onBeforeSelected->[onFileNavigation],createMainWidget->[DisplayObserverProxy],renameFile->[renameFile],setColumnSortOrder->[setColumnSortOrder],listDirectory->[onError->[onFileNavigation]],resetColumnWidths->[resetColumnWidths],selectAll->[selectAll],DisplayObserverProxy->[onFileNavigation->[onFileNavigation],onSelectAllValueChanged->[onSelectAllValueChanged],onColumnSortOrderChanaged->[onColumnSortOrderChanaged],onFileSelectionChanged->[onFileSelectionChanged]]]]
Imports a single version of the object that represents a command. Imports all the Filenames and Files in the Workbench panel.
nit: update copyright year in header
@@ -2,16 +2,14 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. -#nullable disable - namespace System.Windows.Forms { internal class MouseHoverTimer : IDisposable { - private Timer _mouseHoverTimer = new Timer(); + private Timer? _mouseHoverTimer = new Timer(); // Consider - weak reference? - private ToolStripItem _currentItem; + private ToolStripItem? _currentItem; public MouseHoverTimer() {
[MouseHoverTimer->[Dispose->[Dispose,Cancel],Cancel->[Cancel]]]
Creates an object that can be used to cancel a weak reference.
@gpetrou , the way this is initialized, we do not expect `_mouseHoverTimer` to be null in any practical scenarios here. I understand that `null` assignment statement in the `dispose ` method is forcing this. We think the right thing to do here is remove the `null `assignment statement in the dispose method and keep this non-nullable.
@@ -49,6 +49,7 @@ from .map_plots import * from .mappers import * from .markers import * from .math_text import * +from .plain_text import * from .plots import * from .ranges import * from .renderers import *
[getLogger]
Component which imports all of the non - standard non - standard non - standard non - standard.
would it make sense to combine these into a single `text.py` module? Since there are not associated implementation, we tend to condense the number of modules on the Python side (relative to BokehJS)
@@ -52,7 +52,8 @@ <% else %> <strong> <%= t('grade_entry_forms.students.detailed_marks_message') %> </strong> <% end %> - <% elsif grade_entry_form.date < Time.zone.now.to_date %> + <% elsif grade_entry_form.date.nil? or + grade_entry_form.date < Time.zone.now.to_date %> <%= t('grade_entry_forms.students.no_results') %> <% end %> </td>
[No CFG could be retrieved]
displays the hidden element of the .
In general, use `||` instead of `or` (the precedence rules are more intuitive).
@@ -38,7 +38,10 @@ public class XmlValidatingMessageSelectorTests { new XmlValidatingMessageSelector(resource, XmlValidatorFactory.SCHEMA_W3C_XML); } - @Test(expected=IllegalArgumentException.class) + /* + * Move XmlValidatorFactory.createValidator to afterPropertiesSet, so it will no longer throw IllegalArgumentException on constructor + */ +// @Test(expected=IllegalArgumentException.class) public void validateFailureInvalidSchemaLanguage() throws Exception{ Resource resource = new ByteArrayResource("<xsd:schema xmlns:xsd='http://www.w3.org/2001/XMLSchema'/>".getBytes()); new XmlValidatingMessageSelector(resource, "foo");
[XmlValidatingMessageSelectorTests->[validateCreationWithSchemaAndProvidedSchemaType->[ByteArrayResource,XmlValidatingMessageSelector,getBytes],validateFailureWhenNoSchemaResourceProvided->[XmlValidatingMessageSelector],validateFailureInvalidSchemaLanguage->[ByteArrayResource,XmlValidatingMessageSelector,getBytes],validateCreationWithSchemaAndDefaultSchemaType->[ByteArrayResource,XmlValidatingMessageSelector,getBytes]]]
This test tests if a resource is created with schema and provided schema type.
When you will revert the logic in the `XmlValidatingMessageSelector` this test will work again
@@ -776,7 +776,11 @@ namespace Microsoft.Xna.Framework } - + /// <summary> + /// Inverts values in the specified <see cref="Quaternion"/>. + /// </summary> + /// <param name="quaternion">Source <see cref="Quaternion"/> on the right of the sub sign.</param> + /// <returns>Result of the inversion.</returns> public static Quaternion operator -(Quaternion quaternion) { Quaternion quaternion2;
[Quaternion->[ToMatrix->[ToMatrix],ToString->[ToString],Equals->[Equals],GetHashCode->[GetHashCode]]]
- - - - - - - - - - - - - - - - - - DebugDisplayString - DebugDisplayString - string representation of a negative quaternion.
This is negation... not inversion... those are two very different things. Use the docs for `Negate` here.
@@ -13,6 +13,10 @@ class Cmake(Package): url = 'https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5.tar.gz' maintainers = ['chuckatkins'] + executables = ['cmake'] + + version('3.17.0', sha256='b74c05b55115eacc4fa2b77a814981dbda05cdc95a53e279fe16b7b272f00847') + version('3.16.5', sha256='5f760b50b8ecc9c0c37135fae5fbf00a2fef617059aa9d61c1bb91653e5a8bfc') version('3.16.2', sha256='8c09786ec60ca2be354c29829072c38113de9184f29928eb9da8446a5f2ce6a9') version('3.16.1', sha256='a275b3168fa8626eca4465da7bb159ff07c8c6cb0fb7179be59e12cbdfa725fd') version('3.16.0', sha256='6da56556c63cab6e9a3e1656e8763ed4a841ac9859fefb63cbe79472e67e8c5f')
[Cmake->[install->[make],build->[make],bootstrap->[bootstrap,Executable,bootstrap_args],test->[make],flag_handler->[append,any,ValueError],bootstrap_args->[append,format,str,satisfies],depends_on,conflicts,version,patch,on_package_attributes,variant,run_after]]
A cross - platform CMake - based application that uses CMake to create a single object Return a list of version numbers for all sequence numbers. Version of the Ethereum Ethereum Ethereum Ethereum Ethereum.
I'll try tomorrow to play a bit with this package and see how easy it is to detect variants based on `ccmake` and other executables being found.
@@ -50,6 +50,10 @@ type SecurityContextConstraints struct { // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` // AllowHostPorts determines if the policy allows host ports in the containers.
[No CFG could be retrieved]
Capabilities in this field maybe added at the Pod author s discretion. Run as user.
Is it OK for protobuf annotations to be out of order?
@@ -27,13 +27,13 @@ public class Rules { public static boolean eligibleForLoad(Interval src, Interval target) { - return src.contains(target); + return src.overlaps(target); } public static boolean eligibleForLoad(Period period, Interval interval, DateTime referenceTimestamp) { final Interval currInterval = new Interval(period, referenceTimestamp); - return currInterval.overlaps(interval) && interval.getStartMillis() >= currInterval.getStartMillis(); + return eligibleForLoad(currInterval, interval); } private Rules() {}
[Rules->[eligibleForLoad->[Interval,getStartMillis,overlaps,contains]]]
Checks if the given interval is eligible for load.
nit: better to call `eligibleForLoad(currInterval, interval)` for easier code maintenance.
@@ -74,8 +74,6 @@ namespace const command_line::arg_descriptor<bool, false> arg_testnet = {"testnet", genms::tr("Create testnet multisig wallets"), false}; const command_line::arg_descriptor<bool, false> arg_stagenet = {"stagenet", genms::tr("Create stagenet multisig wallets"), false}; const command_line::arg_descriptor<bool, false> arg_create_address_file = {"create-address-file", genms::tr("Create an address file for new wallets"), false}; - - const command_line::arg_descriptor< std::vector<std::string> > arg_command = {"command", ""}; } static bool generate_multisig(uint32_t threshold, uint32_t total, const std::string &basename, network_type nettype, bool create_address_file)
[bool->[,password,str,what,extra_info,push_back,wallets],char->[i18n_translate],main->[,generate_multisig,TRY_ENTRY,get_arg,CATCH_ENTRY_L0,sscanf]]
Generate multisig wallets. private key pkn sk name.
Not sure why you'd want to remove the command argument here, seems like a mistake.
@@ -133,6 +133,10 @@ def monkey_patch_varbase(): Args: backward_strategy( :ref:`api_fluid_dygraph_BackwardStrategy` ): The Backward Strategy to run backward + retain_graph(bool, optional): If False, the graph used to compute grads will be freed. After calling + this method(`backward`), if you still want to add more ops to the graph previously built, you have to + set the parameter `retain_graph` True, then the grads will be retained. Thus, seting it False is much + more memory-efficient. Defaults to False. Returns: NoneType: None
[monkey_patch_varbase->[gradient->[value,_grad_ivar,CPUPlace,array],__nonzero__->[bool,all,_is_initialized,value,__array__,prod],backward->[BackwardStrategy,_dygraph_tracer,_run_backward,ValueError,in_dygraph_mode],block->[default_main_program],_to_static_var->[getattr,ismethod,update,copy,startswith,isinstance,Variable,dir,Parameter],__bool__->[__nonzero__],__str__->[str,_is_initialized,value],set_value->[value,_current_expected_place,numpy,format,isinstance],monkey_patch_math_varbase,setattr]]
Monkey patches VarBase into a static Variable and a parameter. Set a new value for a node in the current graph. Returns the gradient of current Variable A property to provide a description of a object.
English issue: "you want", "I want" is not good manners in English. You can change the sentence as: "If you would like to add more ops to the built graph after calling this method(`backward`), set the ..."
@@ -296,4 +296,15 @@ zpool_feature_init(void) ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATASET, userobj_accounting_deps); } + + { + static const spa_feature_t encryption_deps[] = { + SPA_FEATURE_EXTENSIBLE_DATASET, + SPA_FEATURE_NONE + }; + zfeature_register(SPA_FEATURE_ENCRYPTION, + "com.datto:encryption", "encryption", + "Support for dataset level encryption", + 0, encryption_deps); + } }
[No CFG could be retrieved]
ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATA.
The encryption feature is is deactivated when it's no longer used, but it isn't `PER_DATASET`? I'll look for where this is implemented... it may be simpler to use the PER_DATASET flag here.
@@ -867,7 +867,15 @@ class BigQueryWrapper(object): @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) - def create_temporary_dataset(self, project_id): + def get_table_location(self, project_id, dataset_id, table_id): + table = self._get_table(project_id, dataset_id, table_id) + return table.location + + @retry.with_exponential_backoff( + num_retries=MAX_RETRIES, + retry_filter=retry.retry_on_server_errors_and_timeout_filter) + def create_temporary_dataset(self, project_id, location=None): + # TODO: make location required, once "query" locations can be determined dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix # Check if dataset exists to make sure that the temporary id is unique try:
[BigQuerySink->[display_data->[format],schema_as_json->[schema_list_as_object->[schema_list_as_object],schema_list_as_object],__init__->[RowAsDictJsonCoder,validate_write,validate_create,_parse_table_reference]],BigQueryWrapper->[get_or_create_table->[_is_table_empty,_create_table,_delete_table,_get_table],insert_rows->[_insert_all_rows],_get_temp_table->[_parse_table_reference],create_temporary_dataset->[get_or_create_dataset],convert_row_to_dict->[_convert_cell_value_to_dict],clean_up_temporary_dataset->[_get_temp_table,_delete_dataset],run_query->[_get_query_results,_start_query_job],_start_query_job->[_get_temp_table]],parse_table_schema_from_json->[_parse_schema_field->[_parse_schema_field],_parse_schema_field],BigQuerySource->[__init__->[RowAsDictJsonCoder,_parse_table_reference]],BigQueryWriter->[__exit__->[_flush_rows_buffer],Write->[_flush_rows_buffer]]]
Creates a temporary dataset.
do we need this function anymore?
@@ -40,11 +40,8 @@ feature 'Password Recovery' do user = create(:user, :unconfirmed) confirm_last_user reset_email - visit new_user_password_path - fill_in 'Email', with: user.email - click_button t('forms.buttons.continue') - open_last_email - click_email_link_matching(/reset_password_token/) + + trigger_reset_password_and_click_email_link(user.email) expect(current_path).to eq edit_user_password_path
[recovery_code_from_pii->[password,create,save!,new_from_hash,unlock_user_access_key,encrypt_pii],reset_password_and_sign_back_in->[fill_in,email,click_button,t,fill_in_credentials_and_submit],visit,email,create,text,let,feature,reset_password_sent_at,join,it,fill_in_credentials_and_submit,to,have_content,before,click_button,reset_password_within,click_link,scenario,t,require,click_email_link_matching,include,generate,signin,update,now,enter_correct_otp_code_for_user,each,fill_in,context,reset_password_and_sign_back_in,edit_user_password_path,have_button,recovery_code_from_pii,hour,not_to,eq,direct_otp]
redirects to forgot_password path and sends an email to the user user who has only confirmed email resends confirmation then resets password.
YES! Also, noticing that we define this method at the bottom of the file. I am used to that convention but noticed we were defining a helper method at the top of this file. Thoughts on which we should do, generally? Happy with either as long as we're consistent