patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -34,7 +34,8 @@ class Git(Storage): Args: - flow_path (str): A file path pointing to a .py file containing a flow - - repo (str): the name of a git repository to store this Flow + - repo (str, optional): The name of a git repository to store this Flow. + One of `repo` or `git_clone_url_secret_name` is required. - repo_host (str, optional): The site hosting the repo. Defaults to 'github.com' - flow_name (str, optional): A specific name of a flow to extract from a file. If not set then the first flow object retrieved from file will be returned.
[Git->[git_clone_url->[Secret],git_token_secret->[str,Secret],add_flow->[ValueError,format],__init__->[bool,split,any,sum,super,ValueError,warning],get_flow->[ValueError,TemporaryGitRepo,join,extract_flow_from_file]]]
This class represents a mapping of a flow name to a list of file paths contained in the - 1 value for branch_name tag.
I would maybe say: If not provided, the repo must be set using a secret. See `git_clone_url_secret_name`.
@@ -109,6 +109,17 @@ public class JAXBCoder<T> extends AtomicCoder<T> { return getJAXBClass().getName(); } + private static class CloseIgnoringInputStream extends FilterInputStream { + protected CloseIgnoringInputStream(InputStream in) { + super(in); + } + + @Override + public void close() { + // Do nothing. JAXB closes the underyling stream so we must filter out those calls. + + } + } //////////////////////////////////////////////////////////////////////////////////// // JSON Serialization details below
[JAXBCoder->[of->[of],asCloudObject->[asCloudObject]]]
Returns the encoding id of the current class.
remove newline. Also, I was wrong about flush -- no need to flush an input stream (duh)
@@ -0,0 +1,17 @@ +<?php +/** + * WP-Admin Posts list bootstrap file. + * + * @package Jetpack + */ + +/** + * Load the Posts_List_Notification. + */ +global $pagenow; + +// phpcs:ignore +if ( ( $pagenow === 'edit.php' && isset( $_GET['post_type'] ) && $_GET['post_type'] === 'page' ) || $pagenow === 'post.php' ) { + require_once __DIR__ . '/class-posts-list-page-notification.php'; + Automattic\Jetpack\Dashboard_Customizations\Posts_List_Page_Notification::init(); +}
[No CFG could be retrieved]
No Summary Found.
That is great, thanks for giving it a second chance!
@@ -410,6 +410,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati public final static ConfigKey<Long> IOPS_MAX_WRITE_LENGTH = new ConfigKey<Long>(Long.class, "vm.disk.iops.maximum.write.length", "Advanced", "0", "Maximum IOPS write burst duration (seconds). If '0' (zero) then does not check for maximum burst length.", true, ConfigKey.Scope.Global, null); + public static final ConfigKey<Boolean> ADD_HOST_ON_SERVICE_RESTART = new ConfigKey<Boolean>(Boolean.class, "add.host.on.service.restart", "Advanced", "true", + "Indicates whether the host will be added back to cloudstack after restarting agent service on host. If false it wont be added back even after service restart", + true, ConfigKey.Scope.Global, null); + private static final String IOPS_READ_RATE = "IOPS Read"; private static final String IOPS_WRITE_RATE = "IOPS Write"; private static final String BYTES_READ_RATE = "Bytes Read";
[ConfigurationManagerImpl->[updateConfiguration->[start,updateConfiguration],commitVlan->[doInTransaction->[createVlanAndPublicIpRange]],deletePod->[checkIfPodIsDeletable],checkPodCidrSubnets->[getCidrAddress,getCidrSize],createPod->[createPod,checkPodAttributes],editPod->[editPod,podHasAllocatedPrivateIPs,checkPodAttributes],releasePublicIpRange->[releasePublicIpRange],createNetworkOffering->[createNetworkOffering],createZone->[createZone,checkZoneParameters],createServiceOffering->[createServiceOffering],createPodIpRange->[getVlanNumberFromUri],createDiskOffering->[createDiskOffering],deleteVlanIpRange->[deleteVlanAndPublicIpRange],deleteZone->[checkIfZoneIsDeletable],savePublicIPRange->[doInTransaction->[savePublicIPRange]],createPortableIpRange->[checkOverlapPublicIpRange],editZone->[checkZoneParameters],hasSameSubnet->[checkIfSubsetOrSuperset],createVlanAndPublicIpRange->[checkOverlapPrivateIpRange],releaseDomainSpecificVirtualRanges->[doInTransactionWithoutResult->[releasePublicIpRange]],releaseAccountSpecificVirtualRanges->[doInTransactionWithoutResult->[releasePublicIpRange]]]]
Indicates whether to use local storage pools or shared storage pools for system VMs. This method is called to configure the VB VM.
If this is KVM specific can we add kvm in the global setting name @ravening ?
@@ -171,5 +171,11 @@ public class LdapAuthenticationConfiguration { handler.setAuthenticationControls(new PasswordPolicyControl()); return handler; } -} + private static PooledCompareAuthenticationHandler getPooledCustomCompareAuthenticationHandler(final LdapAuthenticationProperties l) { + final PooledCompareCustomAttributeAuthenticationHandler handler = new PooledCompareCustomAttributeAuthenticationHandler( + Beans.newPooledConnectionFactory(l)); + handler.setPasswordAttributeName(l.getPrincipalAttributePassword()); + return handler; + } +}
[LdapAuthenticationConfiguration->[getDirectBindAuthenticator->[getPooledBindAuthenticationHandler,FormatDnResolver,getBaseDn,Authenticator],getAuthenticatedOrAnonSearchAuthenticator->[setSubtreeSearch,newPooledConnectionFactory,getUserFilter,getPooledBindAuthenticationHandler,setUserFilter,Authenticator,PooledSearchDnResolver,isSubtreeSearch,getBaseDn,setConnectionFactory,setAllowMultipleDns,isAllowMultipleDns,setBaseDn],getPooledBindAuthenticationHandler->[PasswordPolicyControl,newPooledConnectionFactory,PooledBindAuthenticationHandler,setAuthenticationControls],ldapAuthorizationGenerator->[isAllowMultipleResults,getRolePrefix,setAllowMultipleResults,getRoleAttribute,UnsupportedAuthenticationMechanismException,LdapAuthorizationGenerator,setRolePrefix,setRoleAttribute],initLdapAuthenticationHandlers->[setPrincipalAttributeMap,setAuthenticationResponseHandlers,PasswordExpirationAuthenticationResponseHandler,getType,toString,setAuthenticator,put,setAllowMultiplePrincipalAttributeValues,forEach,getPrincipalAttributeId,setPasswordPolicyConfiguration,setServicesManager,isEmpty,isAllowMultiplePrincipalAttributeValues,getAdditionalAttributes,setPrincipalIdAttribute,ActiveDirectoryAuthenticationResponseHandler,setAdditionalAttributes,getAuthenticator,PasswordPolicyAuthenticationResponseHandler,putAll,LdapAuthenticationHandler,getPasswordWarningNumberOfDays,convert,getAttributes,isUsePasswordPolicy],getAuthenticator->[getType,getDirectBindAuthenticator,getAuthenticatedOrAnonSearchAuthenticator,getActiveDirectoryAuthenticator],getActiveDirectoryAuthenticator->[setSubtreeSearch,getUserFilter,getPooledBindAuthenticationHandler,getDnFormat,setEntryResolver,Authenticator,setUserFilter,SearchEntryResolver,FormatDnResolver,getBaseDn,isSubtreeSearch,setBaseDn]]]
Creates a PooledBindAuthenticationHandler.
If the principal attribute password is not defined, could we fallback to the pooledBindAuthnHandler?
@@ -126,6 +126,11 @@ module Repository raise NotImplementedError, "Repository.get_latest_revision: Not yet implemented" end + # Returns all revisions + def get_all_revisions + raise NotImplementedError, "Repository.get_all_revision: Not yet implemented" + end + # Return a Repository::AbstractRevision for a given revision_number # if it exists def get_revision(revision_number)
[AbstractRevision->[directories_at_path->[raise],path_exists?->[raise],changed_files_at_path->[raise],files_at_path->[raise],attr_reader],Conflict->[attr_reader],Transaction->[remove->[push],has_jobs?->[size],add_conflict->[push],conflicts?->[size],replace->[push],add_path->[push],add->[push],attr_reader],AbstractRepository->[create->[raise],closed?->[raise],set_bulk_permissions->[raise],initialize->[raise],stringify_files->[raise],expand_path->[raise],get_permissions->[raise],repository_exists?->[raise],get_revision->[raise],get_users->[raise],get_revision_by_timestamp->[raise],add_user->[raise],get_transaction->[raise],close->[raise],open->[raise],delete->[raise],delete_bulk_permissions->[raise],get_latest_revision->[raise],remove_user->[raise],set_permissions->[raise],commit->[raise],access->[raise]],Repository->[nil?,new,to_sym,include?,raise,each],RevisionFile->[attr_accessor],RevisionDirectory->[attr_accessor],dirname,require,join]
Get the latest revision number for a given revision number.
Prefer single-quoted strings when you don't need string interpolation or special symbols.
@@ -195,6 +195,9 @@ class FreqtradeBot(object): # Refresh whitelist self.pairlists.refresh_pairlist() self.active_pair_whitelist = self.pairlists.whitelist + if not self.active_pair_whitelist: + logger.warning('Whitelist is empty.') + return False # Calculating Edge positioning if self.edge:
[FreqtradeBot->[execute_buy->[get_target_bid,_get_min_pair_stake_amount],cleanup->[cleanup],process_maybe_execute_buy->[create_trade],handle_timedout_limit_buy->[handle_buy_order_full_cancel],handle_trade->[get_sell_rate],create_trade->[_get_trade_stake_amount],notify_sell->[get_sell_rate]]]
Checks if one or more open trades has been created or closed. This method is called when an exception occurs while processing a .
This will prevent all open trades from being processed, since `_process` is aborted here. Removing the return statement wont work either, since that'll duplicate the log-message
@@ -63,7 +63,7 @@ LANGUAGES = [ ('de', _('German')), ('en', _('English')), ('es', _('Spanish')), - ('fa-ir', _('Persian (Iran)')), + ('fa', _('Persian (Iran)')), ('fr', _('French')), ('hu', _('Hungarian')), ('it', _('Italian')),
[get_list->[strip,split],get_bool_from_env->[literal_eval,ValueError,format],get_host->[get_current],get_currency_fraction,bool,int,pgettext_lazy,_,parse,append,get_list,dirname,insert,get,getenv,config,setdefault,join,normpath,get_bool_from_env,CACHES]
PROJECT_ROOT = root path to saleor root_urlCONF = urlconf Check if a node is available and if so send it to email.
This is incorrect. `fa` is generic Farsi (Persian), not the Iranian variant.
@@ -13,11 +13,15 @@ from matrix_client.client import CACHE, MatrixClient from matrix_client.errors import MatrixHttpLibError, MatrixRequestError from matrix_client.room import Room as MatrixRoom from matrix_client.user import User +from requests import Response from requests.adapters import HTTPAdapter log = structlog.get_logger(__name__) +SHUTDOWN_TIMEOUT = 35 + + class Room(MatrixRoom): """ Matrix `Room` subclass that invokes listener callbacks in separate greenlets """
[GMatrixClient->[set_account_data->[set_account_data],modify_presence_list->[_send],set_presence_state->[_send],_handle_response->[_mkroom,call],get_user_presence->[_send],__init__->[GMatrixHttpApi],_mkroom->[Room,update_aliases],get_presence_list->[_send],search_room_directory->[Room,_send],typing->[_send],search_user_directory->[_send]]]
Initialize a object.
This seems quite long, what's the reason for this?
@@ -5631,7 +5631,7 @@ GAME( 1993, jparkj, jpark, sega_system32_analog, jpark, segas32_new_state, GAME( 1993, jparkja, jpark, sega_system32_analog, jpark, segas32_new_state, jpark, ROT0, "Sega", "Jurassic Park (Japan, Deluxe)", MACHINE_IMPERFECT_GRAPHICS ) GAME( 1993, jparkjc, jpark, sega_system32_analog, jpark, segas32_new_state, jpark, ROT0, "Sega", "Jurassic Park (Japan, Rev A, Conversion)", MACHINE_IMPERFECT_GRAPHICS ) -GAME( 1994, kokoroj2, 0, sega_system32_cd, kokoroj2, segas32_new_state, radr, ROT0, "Sega", "Soreike Kokology Vol. 2 - Kokoro no Tanteikyoku", MACHINE_IMPERFECT_GRAPHICS | MACHINE_IMPERFECT_SOUND | MACHINE_NODEVICE_PRINTER) /* uses an Audio CD */ +GAME( 1993, kokoroj2, 0, sega_system32_cd, kokoroj2, segas32_new_state, radr, ROT0, "Sega / IVS", "Soreike Kokology Vol. 2 - Kokoro no Tanteikyoku", MACHINE_IMPERFECT_GRAPHICS | MACHINE_IMPERFECT_SOUND | MACHINE_NODEVICE_PRINTER) /* uses an Audio CD */ GAME( 1990, radm, 0, sega_system32_analog, radm, segas32_new_state, radm, ROT0, "Sega", "Rad Mobile (World)", MACHINE_IMPERFECT_GRAPHICS ) /* Released in 02.1991 */ GAME( 1990, radmu, radm, sega_system32_analog, radm, segas32_new_state, radm, ROT0, "Sega", "Rad Mobile (US)", MACHINE_IMPERFECT_GRAPHICS )
[No CFG could be retrieved]
2016 - 02 - 2013 2016 - 02 - 191.
IVS is the Japanese television company that produced the Kokology show. Leave them out of this, please.
@@ -224,6 +224,12 @@ define([ } } + // GLSLModernizer inserts its own layout qualifiers + // at this position in the source + if (context.webgl2) { + result += '#define OUTPUT_DECLARATION\n\n'; + } + // append built-ins if (shaderSource.includeBuiltIns) { result += getBuiltinsAndAutomaticUniforms(combinedSources);
[No CFG could be retrieved]
Combines the given GLSL string into a single string. Sets the to false if this shader will become a source in another shader.
Minor, but I would suggest to use a token that is a bit more unique, e.g., prefix with `CZM_` like we do elsewhere.
@@ -12,10 +12,5 @@ using System.Runtime.InteropServices; [assembly: AssemblyTitle("Dnn.Modules.Console")] [assembly: AssemblyDescription("")] -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from -// COM, set the ComVisible attribute to true on that type. -[assembly: ComVisible(false)] - // The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("A7C9F51B-63D8-4D81-B59D-A5B78B7E9A7C")]
[No CFG could be retrieved]
Add name and description to an assembly file.
I don't believe this should have changed
@@ -106,11 +106,13 @@ func newDestroyCmd() *cobra.Command { } _, res := s.Destroy(commandContext(), backend.UpdateOperation{ - Proj: proj, - Root: root, - M: m, - Opts: opts, - Scopes: cancellationScopes, + Proj: proj, + Root: root, + M: m, + Opts: opts, + StackConfiguration: cfg, + SecretsManager: sm, + Scopes: cancellationScopes, }) if res != nil && res.Error() == context.Canceled { return result.FromError(errors.New("destroy cancelled"))
[StringVar,GetGlobalColorization,Error,RunResultFunc,New,StringSliceVar,IntVarP,StringVarP,FromError,Destroy,Wrap,Interactive,BoolVarP,PersistentFlags,BoolVar]
Magic flags for the environment. Flags for the given analyzers as part of this update.
Any reason these are passed in here instead of looked up off the `stack` inside `Destroy` and friends? (not requiring these concepts to bubble up all the way to the CLI code). As far as I can tell - the results of `getStackConfiguration(s)` are only ever used to pass in as inputs to methods on `s`.
@@ -248,8 +248,7 @@ def call_subprocess( elif on_returncode == 'ignore': pass else: - raise ValueError('Invalid value: on_returncode=%s' % - repr(on_returncode)) + raise ValueError('Invalid value: on_returncode={}'.format(repr(on_returncode))) return ''.join(all_output)
[make_subprocess_output_error->[format_command_args],call_subprocess->[reveal_command_args,make_subprocess_output_error,format_command_args],runner_with_spinner_message->[runner->[call_subprocess]]]
Calls subprocess. Popen with the given arguments and returns the result as a Text . This function logs the subprocess output at INFO or DEBUG level. Provides a subprocess_runner that provides a message to be used to call the pep5.
This should be split across multiple lines.
@@ -189,8 +189,11 @@ public class CLI implements AutoCloseable { rsp.write(ch); } String head = new BufferedReader(new StringReader(rsp.toString("ISO-8859-1"))).readLine(); - if (!head.startsWith("HTTP/1.0 200 ")) - throw new IOException("Failed to establish a connection through HTTP proxy: "+rsp); + if (!(head.startsWith("HTTP/1.0 200 ") || head.startsWith("HTTP/1.1 200 "))) { + s.close(); + System.err.println("Failed to tunnel the CLI port through the HTTP proxy. Falling back to HTTP."); + throw new IOException("Failed to establish a connection through HTTP proxy: " + rsp); + } // HTTP proxies (at least the one I tried --- squid) doesn't seem to do half-close very well. // So instead of relying on it, we'll just send the close command and then let the server
[CLI->[upgrade->[execute],connectViaCliPort->[close->[close]],computeVersion->[close],execute->[execute],loadKey->[loadKey],_main->[close,execute],flushURLConnection->[close],close->[close],hasCommand->[hasCommand],authenticate->[close,authenticate]]]
Connects via a TCP connection via a CLI port. Creates a connection to the Jenkins server and opens a new connection to the Jenkins server.
Don't print directly to stderr, i'm using CLI programmatically and don't need to have stderr on jvm.
@@ -29,6 +29,7 @@ export class AbstractWallet { this.preferredBalanceUnit = BitcoinUnit.BTC; this.chain = Chain.ONCHAIN; this.hideBalance = false; + this.id = undefined; } getTransactions() {
[No CFG could be retrieved]
The abstract wallet class Returns delta of unconfirmed balance.
you dont need to save wallet's id if you easily generate it on the fly. remove
@@ -98,7 +98,7 @@ class Tests_UserSite: expect_error=True, ) assert ( - "Can not perform a '--user' install. User site-packages are not " + "Can not perform a '--user' install. User site-packages is not " "visible in this virtualenv." in result.stderr )
[Tests_UserSite->[test_upgrade_user_conflict_in_globalsite->[_patch_dist_in_site_packages],test_install_user_conflict_in_globalsite_and_usersite->[_patch_dist_in_site_packages],test_install_user_conflict_in_globalsite->[_patch_dist_in_site_packages]]]
Test if install_user_venv_nositepkgs fails with message .
Cosmetic fix. Should be in a separate PR.
@@ -1352,10 +1352,10 @@ module.exports = class wavesexchange extends Exchange { symbol = market['symbol']; } const amountCurrency = this.safeCurrencyCode (this.safeString (assetPair, 'amountAsset', 'WAVES')); - const price = this.parseNumber (this.priceFromPrecision (symbol, priceString)); - const amount = this.parseNumber (this.currencyFromPrecision (amountCurrency, amountString)); - const filled = this.parseNumber (this.currencyFromPrecision (amountCurrency, filledString)); - const average = this.parseNumber (this.priceFromPrecision (symbol, this.safeString (order, 'avgWeighedPrice'))); + const price = this.priceFromPrecision (symbol, priceString).toString (); + const amount = this.currencyFromPrecision (amountCurrency, amountString).toString (); + const filled = this.currencyFromPrecision (amountCurrency, filledString).toString (); + const average = this.priceFromPrecision (symbol, this.safeString (order, 'avgWeighedPrice')).toString (); const status = this.parseOrderStatus (this.safeString (order, 'status')); let fee = undefined; if ('type' in order) {
[No CFG could be retrieved]
Get the order object that represents the order weighed price and the fee of the order Get a safe order object with a matcher fee.
Not sure if we need `.toString()` here, `priceFromPrecision` already returns a string.
@@ -258,6 +258,11 @@ func (c *Container) Commit(ctx context.Context, sess *session.Session, h *Handle c.State = *h.State commitEvent = events.ContainerStarted + + // refresh the struct with what propery collector provides + if err := c.Refresh(); err != nil { + return err + } } c.ExecConfig = &h.ExecConfig
[Commit->[Refresh],shutdown->[waitForPowerState],Signal->[startGuestProgram],stop->[shutdown],Error->[Error],Remove->[Remove],String,Error]
Commit commits the container This function is called by the init function to initialize the container and the VM.
The refresh happens after reconfigure. Does that mean if there is inconsistency between portlayer and VC, the operation will always fail? Currently, the VM power on will change the changeVersion, so out of band operation might cause failure with it.
@@ -137,12 +137,12 @@ func UpdateAvatarSetting(ctx *context.Context, form auth.AvatarForm, ctxUser *mo if err = ctxUser.UploadAvatar(data); err != nil { return fmt.Errorf("UploadAvatar: %v", err) } - } else { + } else if ctxUser.UseCustomAvatar && !com.IsFile(ctxUser.CustomAvatarPath()) { // No avatar is uploaded but setting has been changed to enable, // generate a random one when needed. if ctxUser.UseCustomAvatar && !com.IsFile(ctxUser.CustomAvatarPath()) { if err := ctxUser.GenerateRandomAvatar(); err != nil { - log.Error(4, "GenerateRandomAvatar[%d]: %v", ctxUser.ID, err) + log.Error("GenerateRandomAvatar[%d]: %v", ctxUser.ID, err) } } }
[IsFile,UpdateUserCols,UploadAvatar,Close,GenerateRandomAvatar,CustomAvatarPath,Redirect,IsImageFile,HTML,DeleteAvatar,Error,IsErrUserAlreadyExist,GetOwner,New,UpdateUserSetting,Errorf,IsErrNameReserved,Trace,HasError,IsErrEmailAlreadyUsed,IsLocal,Tr,ServerError,Written,GetBaseRepo,IsErrNamePatternNotAllowed,ChangeUserName,ToLower,SetCookie,GetOrgsByUserID,GetRepositories,EncodeMD5,Open,ReadAll,Success]
UpdateAvatarSetting updates user s avatar AvatarPost update user avatar settings and delete avatar page.
This line could be removed?
@@ -47,6 +47,7 @@ func fuzzInternalObject(t *testing.T, forVersion unversioned.GroupVersion, item obj.PolicyConfig.OpenShiftInfrastructureNamespace = bootstrappolicy.DefaultOpenShiftInfraNamespace } if len(obj.RoutingConfig.Subdomain) == 0 { + // TODO: use clusterDomain instead of cluster.local obj.RoutingConfig.Subdomain = "router.default.svc.cluster.local" }
[DeepCopy,NumElements,DeepEqual,ObjectGoPrintSideBySide,Encode,RandString,RandUint64,Rfc3339Copy,IsNotRegisteredError,UID,Fuzz,WithKind,New,NewCodecFactory,Funcs,Errorf,Logf,Unix,TypeAccessor,SetKind,RandSource,Int63,TypeOf,Name,FormatUint,KnownTypes,Convert,Log,Fatalf,DecodeInto,ObjectDiff,SetAPIVersion,NewSource,Decode,Elem,LegacyCodec,NilChance,Interface,FuzzNoCustom]
fuzzInternalObject fuzzes an object and returns a fuzzer object that can be used The ClientCA into that field is filled in by the fuzzer.
Remove this todo, this is setting to the default
@@ -42,7 +42,8 @@ class ModelTestCase(AllenNlpTestCase): def ensure_model_can_train_save_and_load(self, param_file: str, tolerance: float = 1e-4, - cuda_device: int = -1): + cuda_device: int = -1, + gradients_to_ignore: Set[str] = None): save_dir = self.TEST_DIR / "save_and_load_test" archive_file = save_dir / "model.tar.gz" model = train_model_from_file(param_file, save_dir)
[ModelTestCase->[assert_fields_equal->[assert_fields_equal]]]
This function is called from the train_and_load methods. Checks if model and loaded_model have the same keys and values.
Add a docstring for this field because its function is not obvious from its name.
@@ -38,7 +38,12 @@ import ( "github.com/pulumi/pulumi/pkg/util/contract" "github.com/pulumi/pulumi/pkg/util/httputil" - "github.com/pulumi/pulumi/pkg/workspace" +) + +const ( + // BookkeepingDir is the name of our bookkeeping folder, we store state here (like .git for git). + // Copied from workspace.BookkeepingDir to break import cycle. + BookkeepingDir = ".pulumi" ) // Asset is a serialized asset reference. It is a union: thus, only one of its fields will be non-nil. Several helper
[GetURIURL->[GetURI],readText->[GetText],archiveZIP->[Open,Close],Close->[Close],archiveTar->[Open,Close],Open->[IsAssets,readURI,readPath,IsPath,IsURI,HasContents],readURI->[GetURIURL],readPath->[Open,GetPath],Read->[IsURI,Read,IsText,IsPath],Equals->[EnsureHash],GetAssets->[IsAssets],HasContents->[IsURI,IsAssets,IsText,IsPath],ReadSourceArchive->[Open,openURLStream,GetPath,GetURIURL],archiveTarGZIP->[archiveTar],GetText->[IsText],readAssets->[GetAssets],GetURI->[IsURI],Serialize->[Serialize],Bytes->[GetText,Bytes],Next->[Open,Read,Next],openURLStream->[Open],IsUserProgramCode->[IsText],EnsureHash->[Read,ReadSourceArchive,Archive],GetPath->[IsPath],Size,IsUserProgramCode,EnsureHash,Next]
ComponentType of Asset is a union of all of the fields that are non - nil. NewTextAsset creates a new asset with a random type hash from the given text.
What caused the import cycle?
@@ -125,7 +125,9 @@ void plan_arc( // Vector rotation matrix values float raw[XYZE]; const float theta_per_segment = angular_travel / segments, - linear_per_segment = linear_travel / segments, + #if DISABLED(AUTO_BED_LEVELING_UBL) + linear_per_segment = linear_travel / segments, + #endif extruder_per_segment = extruder_travel / segments, sin_T = theta_per_segment, cos_T = 1 - 0.5f * sq(theta_per_segment); // Small angle approximation
[No CFG could be retrieved]
This function is used to calculate the in the current position. - - - - - - - - - - - - - - - - - -.
This ought to be getting revived soon. Arcs are supposed to have a linear axis that can move as the arc is drawn. For example, you should be able to make a circle in the XY plane and have a Z move at the same time. The change that removed this functionality for UBL is a temporary hack as a workaround for UBL not being fully integrated with the planner. I will study the problem and try to provide a proper fix soon.
@@ -60,6 +60,9 @@ var defaultConfig = harmonyconfig.HarmonyConfig{ KMSConfigSrcType: kmsConfigTypeShared, KMSConfigFile: "", }, + MMR: harmonyconfig.MmrConfig{ + DbDir: "./db/mmr", + }, TxPool: harmonyconfig.TxPoolConfig{ BlacklistFile: "./.hmy/blacklist.txt", },
[No CFG could be retrieved]
Get the default configuration for the given object. The configuration for the .
As far as I see, MMR data is part of chain data, so I think it would be better to make it along with chain data per shard directory. Furthermore, MMR is not an individual service that need to add a new config.
@@ -256,6 +256,10 @@ public class SelectByteBuddyHelpers { return store(currentArrayField++, valueToWrite); } + public int reserveSlot() { + return currentArrayField++; + } + public StackManipulation store(int arrayIndexToWrite, StackManipulation valueToWrite) { Preconditions.checkArgument(arrayIndexToWrite < arraySize); return new StackManipulation() {
[SelectByteBuddyHelpers->[getRowSelector->[of],ArrayManager->[createArray->[apply->[apply]],store->[apply->[apply]]],createRowSelector->[getSchema,getFieldAccessDecriptor],SelectInstruction->[selectIntoArray->[apply],processList->[selectIntoArrayHelper,ArrayManager,apply,loadFieldValue],loadFieldValue->[getCurrentRowFieldValue],processMap->[selectIntoArrayHelper,ArrayManager,apply,loadFieldValue],selectIntoArrayHelper->[selectIntoArray,apply,loadFieldValue],appender->[ArrayManager,apply]]]]
Append a value to the array.
nit: (slightly related to this PR) can we make `SelectByteBuddyHelpers` package private please
@@ -303,6 +303,7 @@ public class HoodieAppendHandle<T extends HoodieRecordPayload, I, K, O> extends .onParentPath(FSUtils.getPartitionPath(hoodieTable.getMetaClient().getBasePath(), partitionPath)) .withFileId(fileId).overBaseCommit(baseCommitTime) .withLogVersion(latestLogFile.map(HoodieLogFile::getLogVersion).orElse(HoodieLogFile.LOGFILE_BASE_VERSION)) + .withFileSize(latestLogFile.map(HoodieLogFile::getFileSize).orElse(0L)) .withSizeThreshold(config.getLogFileMaxSize()).withFs(fs) .withLogWriteToken(latestLogFile.map(x -> FSUtils.getWriteTokenFromLogPath(x.getPath())).orElse(writeToken)) .withRolloverLogWriteToken(writeToken)
[HoodieAppendHandle->[write->[init],close->[doAppend,close],writeToBuffer->[getIndexedRecord],doAppend->[doAppend,init],flushToDiskIfRequired->[doAppend]]]
Creates a log writer for the given file slice.
why is this change needed?
@@ -75,7 +75,10 @@ class LegacyAddressMapper(AddressMapper): addresses = set() for (spec, _), state in root_entries.items(): - if missing_is_fatal and not state.value.dependencies: + if isinstance(state, Throw) and missing_is_fatal: + raise self.BuildFileScanError( + 'Spec `{}` does not match any targets.\n{}'.format(spec.to_spec_string(), str(state.exc))) + elif missing_is_fatal and not state.value.dependencies: raise self.BuildFileScanError( 'Spec `{}` does not match any targets.'.format(spec.to_spec_string())) addresses.update(state.value.dependencies)
[LegacyAddressMapper->[scan_addresses->[_internal_scan_specs]]]
Internal method to scan specs.
This isn't a specific enough match... an exception could be thrown for multiple reasons. Might need to do an exception type match here?
@@ -370,7 +370,9 @@ class Command(object): try: reference = ConanFileReference.loads(args.path_or_reference) except ConanException: + name, version, user, channel = get_reference_fields(args.reference) info = self._conan.install(path=args.path_or_reference, + name=name, version=version, user=user, channel=channel, settings=args.settings, options=args.options, env=args.env, remote_name=args.remote,
[Command->[upload->[upload],copy->[copy],package->[package],test->[test],export->[export],info->[info],_show_help->[check_all_commands_listed],install->[install],download->[download],run->[_commands,_warn_python2,_show_help],create->[create],source->[source],remove->[remove,info],new->[new],build->[build],export_pkg->[export_pkg],imports->[imports],inspect->[inspect]],main->[Command,run]]
Installs the specified requirements specified in a recipe or a file containing a conanfile. Installs or updates a single missing key.
Check that `args.reference` is None or raise. (Two references makes no sense)
@@ -23,6 +23,8 @@ import threading from apache_beam.io import iobase +__all__ = ['ConcatSource', 'ConcatRangeTracker'] + class ConcatSource(iobase.BoundedSource): """For internal use only; no backwards-compatibility guarantees.
[ConcatRangeTracker->[sub_range_tracker->[get_range_tracker]],ConcatSource->[split->[split],estimate_size->[estimate_size]]]
Creates a new object containing the contents of a single non - empty object. Expect start and stop positions to be None. Received start and stop positions to be None.
Do we want users using this? (Maybe...if so should we mark as experimental?)
@@ -89,6 +89,13 @@ type JobSchedulerConfig struct { Running bool `toml:"running"` } +// ProjectUpdateConfig - the config for project updating +type ProjectUpdateConfig struct { + State string `toml:"state"` + ProjectUpdateID string `toml:"project_update_id"` + EsJobID string `toml:"es_job_id"` +} + func defaultConfig() aggregateConfig { return aggregateConfig{ JobSchedulerConfig: JobSchedulerConfig{
[GetNodesMissingSchedulerConfig->[GetJobConfig],GetDeleteNodesSchedulerConfig->[GetJobConfig],GetMissingNodesForDeletionSchedulerConfig->[GetJobConfig]]
- the name of the job to be created. type Manager - the type of data that is returned from the Get method.
Adding a new config section to store.
@@ -38,9 +38,9 @@ class TestDataset(AllenNlpTestCase): dataset.index_instances(self.vocab) padding_lengths = dataset.get_padding_lengths() arrays = dataset.as_arrays(padding_lengths) + text1 = arrays["text1"]["words"] + text2 = arrays["text2"]["words"] - text1 = arrays["text1"][0] - text2 = arrays["text2"][0] numpy.testing.assert_array_almost_equal(text1, numpy.array([[2, 3, 4, 5, 6], [1, 3, 4, 5, 6]])) numpy.testing.assert_array_almost_equal(text2, numpy.array([[2, 3, 4, 1, 5, 6],
[TestDataset->[get_dataset->[Dataset,Instance,TextField],test_instances_must_have_homogeneous_fields->[LabelField,raises,Instance,Dataset,TextField],setUp->[Vocabulary,super,add_token_to_namespace],test_padding_lengths_uses_max_instance_lengths->[get_padding_lengths,index_instances,get_dataset],test_as_arrays->[assert_array_almost_equal,get_padding_lengths,index_instances,get_dataset,as_arrays,array]]]
Test if the data is in the same order as the padding lengths.
Why did you change the namespace from "tokens" to "words" in the test? I think it'd be better to have it use the defaults - reading through this test might give you the wrong impression about how you should access the arrays.
@@ -189,6 +189,11 @@ export class VariableService { /** @const @private {!./linker-reader.LinkerReader} */ this.linkerReader_ = linkerReaderServiceFor(this.ampdoc_.win); + /** @const @private {!../../../src/service/url-replacements-impl.UrlReplacements} */ + this.urlReplacementService_ = Services.urlReplacementsForDoc( + this.ampdoc_.getHeadNode() + ); + this.register_('$DEFAULT', defaultMacro); this.register_('$SUBSTR', substrMacro); this.register_('$TRIM', value => value.trim());
[VariableService->[expandTemplateSync->[encodeVars,iterations,user,vars,replace,noEncode,getNameArgs,getVar,freezeVars],getMacros->[dev,getConsentStateStr,assign,cookieReader],constructor->[trim,dict,String,toUpperCase,stringToBool,toLowerCase,linkerReaderServiceFor,base64UrlEncodeFromString],expandTemplate->[tryResolve],hashMacro_->[cryptoFor],register_->[devAssert]],substr,getConsentPolicyState,isArray,getServiceForDoc,getServicePromiseForDoc,parseInt,userAssert,user,match,map,replace,String,getNameArgs,registerServiceBuilderForDoc,isFiniteNumber,encodeURIComponent,length,Number]
Registers the URL replacements service for the ampdoc in the VariableService constructor.
We can't do this. Instead we'll have to call `Services.urlReplacementsForDoc(element)` for every element we want to expand. Because FIE doesn't have its own ampdoc, but it has its own urlReplacementService.
@@ -290,12 +290,10 @@ func (node *Node) AddPendingTransaction(newTx *types.Transaction) { // AddPendingReceipts adds one receipt message to pending list. func (node *Node) AddPendingReceipts(receipts *types.CXReceiptsProof) { - if node.NodeConfig.GetNetworkType() != nodeconfig.Mainnet { - node.pendingCXMutex.Lock() - node.pendingCXReceipts = append(node.pendingCXReceipts, receipts) - node.pendingCXMutex.Unlock() - utils.Logger().Error().Int("totalPendingReceipts", len(node.pendingCXReceipts)).Msg("Got ONE more receipt message") - } + node.pendingCXMutex.Lock() + node.pendingCXReceipts = append(node.pendingCXReceipts, receipts) + node.pendingCXMutex.Unlock() + utils.Logger().Error().Int("totalPendingReceipts", len(node.pendingCXReceipts)).Msg("Got ONE more receipt message") } // Take out a subset of valid transactions from the pending transaction list
[AddPendingTransaction->[tryBroadcast,addPendingTransactions],addPendingTransactions->[reducePendingTransactions],countNumTransactionsInBlockchain->[Blockchain],GetInitShardState->[Blockchain],getTransactionsForNewBlock->[Blockchain,reducePendingTransactions],Blockchain,Beaconchain]
AddPendingReceipts adds one CXReceiptsProof message to the node's pending receipts list.
Please change Error to Info/Debug.
@@ -170,7 +170,7 @@ namespace Dnn.Modules.ResourceManager.Services IsDefault = m.MappingName == "Standard" || m.MappingName == "Secure" || m.MappingName == "Database", editUrl = this.UserInfo.IsAdmin ? - moduleContext.EditUrl( + this.GetModuleContext().EditUrl( "ItemID", m.FolderMappingID.ToString(), "EditFolderMapping",
[ItemsController->[GetItemViewModel->[GetFileIconUrl,GetFolderIconUrl]]]
Get folder mappings.
This is going to call `GetModuleContext` for each item in `mappings`, was this change intentional?
@@ -757,6 +757,7 @@ namespace System.Net.Sockets public byte[] Receive([System.Diagnostics.CodeAnalysis.NotNullAttribute] ref System.Net.IPEndPoint? remoteEP) { throw null; } public System.Threading.Tasks.Task<System.Net.Sockets.UdpReceiveResult> ReceiveAsync() { throw null; } public int Send(byte[] dgram, int bytes) { throw null; } + public int Send(ReadOnlySpan<byte> dgram) {throw null; } public int Send(byte[] dgram, int bytes, System.Net.IPEndPoint? endPoint) { throw null; } public int Send(byte[] dgram, int bytes, string? hostname, int port) { throw null; } public System.Threading.Tasks.Task<int> SendAsync(byte[] datagram, int bytes) { throw null; }
[SocketTaskExtensions->[ReceiveFromAsync->[Never],ReceiveMessageFromAsync->[Never],ReceiveAsync->[Never],AcceptAsync->[Never],SendToAsync->[Never],SendAsync->[Never],ConnectAsync->[Never]],Socket->[Never],Never]
This method is used to send a datagram through the network. It is a blocking call.
The param name as approved by API review is "datagram".
@@ -21,6 +21,7 @@ var _ = g.Describe("[imageapis][registry] image signature workflow", func() { ) g.It("can push a signed image to openshift registry and verify it", func() { + g.Skip("FIXME: fix oadm verify-image-signature to work with secured registry") g.By("building a signer image that knows how to sign images") output, err := oc.Run("create").Args("-f", signerBuildFixture).Output() if err != nil {
[By,Exec,Expect,HaveOccurred,FixturePath,It,Username,Args,AsAdmin,To,Namespace,Logf,ContainSubstring,Output,NewCLI,NotTo,Describe,Fprintf,GetDockerRegistryURL,WaitForAnImageStreamTag,Sprintf,GinkgoRecover,NewPodExecutor,Run,KubeConfigPath]
Integration test for the image signature workflow: pushes a signed image to the OpenShift registry and verifies it.
same here, please open an issue to track fixing this if we don't have one.
@@ -121,5 +121,13 @@ func (o *NewProjectOptions) Run() error { } } + fmt.Fprintf(o.Out, ` +You can add applications to this project with the 'new-app' command. For example, try: + + $ %[1]s new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-hello-world.git + +to build a new hello-world application in Ruby. +`, o.Name) + return nil }
[Run->[Create,ProjectRequests,RunProject,List,Everything],complete->[Args,Help,New,Complete,NewPathOptions,Flags],StringVar,Sprintf,complete,CheckErr,Run,Clients,Flags]
Run creates a new project with the given options.
Not required but could also make a reference to `oc new-app -h`.
@@ -56,7 +56,7 @@ class WPSEO_Gutenberg_Compatibility { * @return bool True if the currently installed version is the latest known version. False otherwise. */ public function is_latest_version() { - return $this->get_major_minor_version( $this->current_version ) === $this->get_major_minor_version( $this->get_latest_release() ); + return version_compare( $this->current_version, $this->get_latest_release(), '==' ); } /**
[WPSEO_Gutenberg_Compatibility->[is_fully_compatible->[is_below_minimum,is_latest_version]]]
Checks if the current version is the latest version of the release.
This is no longer used.
@@ -44,8 +44,12 @@ func (node *Node) UpdateStakingList(stakeInfoReturnValue *structs.StakeInfoRetur } // True if the token is still staked within the locking period. if curEpoch-startEpoch <= lockPeriodCount.Uint64()*lockPeriodInEpochs { + blsPubKey := types.BlsPublicKey{} + copy(blsPubKey[:32], stakeInfoReturnValue.BlsPubicKeys1[i][:]) + copy(blsPubKey[32:64], stakeInfoReturnValue.BlsPubicKeys2[i][:]) + copy(blsPubKey[64:96], stakeInfoReturnValue.BlsPubicKeys2[i][:]) node.CurrentStakes[addr] = &structs.StakeInfo{ - stakeInfoReturnValue.BlsAddresses[i], + blsPubKey, blockNum, lockPeriodCount, stakeInfoReturnValue.Amounts[i],
[printStakingList->[Info,GetLogInstance],UpdateStakingList->[CurrentBlock,Uint64,NumberU64,GetEpochFromBlockNumber],SaveECDSA,Error,GetLogInstance,SetBytes,HexToECDSA,Int64,Encode,Exit]
UpdateStakingList updates the current staking list with the given stake info.
why copy 3 part?
@@ -30,10 +30,15 @@ public class MuleSharedDomainClassLoader extends AbstractArtifactClassLoader imp private File domainDir; private File domainLibraryFolder; - @SuppressWarnings("unchecked") public MuleSharedDomainClassLoader(String domain, ClassLoader parent) { - super(new URL[0], parent); + this(domain, parent, null); + } + + @SuppressWarnings("unchecked") + public MuleSharedDomainClassLoader(String domain, ClassLoader parent, Set<String> loaderOverrides) + { + super(new URL[0], parent, loaderOverrides); try { if (domain == null)
[MuleSharedDomainClassLoader->[addUrls->[toString],findResource->[findResource]]]
Creates a class loader that loads shared classes for a given domain, optionally with a set of loader overrides.
Pass an empty set instead of null
@@ -96,13 +96,12 @@ class LSUN(data.Dataset): 'Options are: ' + str(dset_opts))) else: raise(ValueError('Unknown option for classes')) - self.classes = classes # for each class, create an LSUNClassDataset self.dbs = [] for c in self.classes: self.dbs.append(LSUNClass( - db_path=db_path + '/' + c + '_lmdb', + root=root + '/' + c + '_lmdb', transform=transform)) self.indices = []
[LSUN->[__repr__->[__len__,__repr__],__init__->[LSUNClass]]]
Initialize LSUNDataset from a list of base LSUN classes.
This doesn't look right: there is a few operations that are performed just before that modify `classes`, and you don't take that into account when moving `self.classes` up.
@@ -7,7 +7,7 @@ module TwoFactorAuthenticatable before_action :require_current_password, if: :current_password_required? before_action :check_already_authenticated before_action :reset_attempt_count_if_user_no_longer_locked_out, only: :create - before_action :apply_secure_headers_override, only: :show + before_action :apply_secure_headers_override, only: [:show, :create] end DELIVERY_METHOD_MAP = {
[handle_invalid_otp->[handle_second_factor_locked_user],otp_view_data->[recovery_code_unavailable?,unconfirmed_phone?],authenticator_view_data->[recovery_code_unavailable?],phone_view_data->[recovery_code_unavailable?,unconfirmed_phone?]]
The TwoFactorAuthenticatable concern configures before_action callbacks, including applying the secure headers override.
If this fix is in TwoFactorAuthenticatable, wouldn't it also affect SAML requests? I am not able to reproduce this coming from sp.int.login.gov. I think what we want to do here is to modify the `before_action` in `OpenidConnect::AuthorizationController` which defines its own `apply_secure_headers_override` method.
@@ -201,10 +201,13 @@ var maxTime = runDescriptor.Settings.TestExecutionTimeout ?? TimeSpan.FromSeconds(90); while (!done() && !cts.Token.IsCancellationRequested) { - if (DateTime.UtcNow - startTime > maxTime) + if (!Debugger.IsAttached) { - ThrowOnFailedMessages(runDescriptor, runners); - throw new TimeoutException(GenerateTestTimedOutMessage(maxTime)); + if (DateTime.UtcNow - startTime > maxTime) + { + ThrowOnFailedMessages(runDescriptor, runners); + throw new TimeoutException(GenerateTestTimedOutMessage(maxTime)); + } } await Task.Delay(1).ConfigureAwait(false);
[ScenarioRunner->[InitializeRunners->[CreateRoutingTable]]]
Perform scenarios.
Do you know is this property cached or will we suffer perf hit of this call?
@@ -443,7 +443,7 @@ public class DruidCoordinator () -> { try { if (serverInventoryView.isSegmentLoadedByServer(toServer.getName(), segment) && - curator.checkExists().forPath(toLoadQueueSegPath) == null && + (curator == null || curator.checkExists().forPath(toLoadQueueSegPath) == null) && !dropPeon.getSegmentsToDrop().contains(segment)) { dropPeon.dropSegment(segment, loadPeonCallback); } else {
[DruidCoordinator->[markSegmentAsUnused->[markSegmentAsUnused],becomeLeader->[call->[isLeader],start],UpdateCoordinatorStateAndPrepareCluster->[stopPeonsForDisappearedServers->[stop],startPeonsForNewServers->[start]],start->[stopBeingLeader->[stopBeingLeader],becomeLeader->[call->[],becomeLeader]],isLeader->[isLeader],getCurrentLeader->[getCurrentLeader],getTotalSizeOfSegmentsAwaitingCompaction->[getTotalSizeOfSegmentsAwaitingCompaction],DutiesRunnable->[run->[isLeader,getCurrentLeader,stopBeingLeader,run]],stopBeingLeader->[stop]]]
Moves a segment from one server to another, dropping it from the source once it is loaded on the destination.
This doesn't necessarily need to change in this PR, but it seems kind of leaky that this thing has a `CuratorFramework` at all, it seems like the load peon should provide this check so it can just be a no-op for non-zk. and then `DruidCoordinator` no longer needs a curator or zk paths I think?
@@ -155,8 +155,8 @@ static void destruct_method(void *method, void *data) } void *evp_generic_fetch(OPENSSL_CTX *libctx, int operation_id, - const char *name, const char *properties, - void *(*new_method)(const char *name, + int name_id, const char *properties, + void *(*new_method)(int name_id, const OSSL_DISPATCH *fns, OSSL_PROVIDER *prov, void *method_data),
[evp_generic_do_all->[ossl_algorithm_do_all],evp_generic_fetch->[method_id,ossl_namemap_name2num,ossl_method_store_cache_get,ossl_namemap_stored,up_ref_method,ossl_assert,ossl_method_construct,ossl_method_store_cache_set,get_default_method_store],int->[ossl_namemap_add,method_id,ossl_namemap_stored,ossl_method_store_add,get_default_method_store],uint32_t->[ossl_assert],EVP_set_default_properties->[ossl_method_store_set_global_properties,EVPerr,get_default_method_store],OSSL_METHOD_STORE->[openssl_ctx_get_data],void->[method_id,ossl_namemap_name2num,ossl_namemap_stored,ossl_method_store_fetch,ossl_method_store_new,get_default_method_store,ossl_method_store_free]]
evp_generic_fetch retrieves an EVP method from the method store, constructing and caching it if necessary.
I've going back to expect a name here. The reason is that a caller may mistakenly create a new number for an alias because the alias hasn't been given from any provider yet.
@@ -76,7 +76,7 @@ class ListController < ApplicationController define_method("#{filter}_feed") do discourse_expires_in 1.minute - @title = "#{filter.capitalize} Topics" + @title = "#{SiteSetting.title} - #{filter.capitalize} Topics" @link = "#{Discourse.base_url}/#{filter}" @description = I18n.t("rss_description.#{filter}") @atom_link = "#{Discourse.base_url}/#{filter}.rss"
[ListController->[category_none_top->[top],category_top->[top],construct_prev_url_with->[prev_page_params],parent_category_category_top->[top],construct_next_url_with->[next_page_params]]]
ListController dynamically defines RSS feed methods for each filter, setting the feed title, link, description and atom link.
This should use I18n instead of hardcoding `Topics`
@@ -101,7 +101,7 @@ func DefaultClientConfig(flags *pflag.FlagSet) kclientcmd.ClientConfig { cobra.MarkFlagFilename(flags, config.OpenShiftConfigFlagName) // set our explicit defaults - defaultOverrides := &kclientcmd.ConfigOverrides{ClusterDefaults: kclientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}} + defaultOverrides := &kclientcmd.ConfigOverrides{ClusterDefaults: kclientcmd.ClusterDefaults} loadingRules.DefaultClientConfig = kclientcmd.NewDefaultClientConfig(kclientcmdapi.Config{}, defaultOverrides) overrides := &kclientcmd.ConfigOverrides{ClusterDefaults: defaultOverrides.ClusterDefaults}
[ProtocolsForObject->[ProtocolsForObject],MapBasedSelectorForObject->[MapBasedSelectorForObject],Namespace->[Namespace],BindFlags->[BindFlags],CanBeExposed->[CanBeExposed],Pauser->[Pauser,JSONEncoder],ClientSetForVersion->[ClientSetForVersion],FlagSet->[FlagSet],DefaultNamespace->[DefaultNamespace],SuggestedPodTemplateResources->[SuggestedPodTemplateResources],Bind->[Bound],Generators->[Generators],DefaultResourceFilterOptions->[DefaultResourceFilterOptions],BindExternalFlags->[BindExternalFlags],LabelsForObject->[LabelsForObject],Command->[Command],Decoder->[Decoder],ClientConfig->[ClientConfig],FederationClientSetForVersion->[FederationClientSetForVersion],ClientConfigForVersion->[ClientConfigForVersion],DiscoveryClient->[DiscoveryClient],Resumer->[Resumer,JSONEncoder],UpdatePodSpecForObject->[UpdatePodSpecForObject],RESTClient->[RESTClient],ClientSet->[ClientSet],FederationClientForVersion->[FederationClientForVersion],ConfigAccess->[ConfigAccess],RawConfig->[RawConfig],ResolveImage->[Clients,ResolveImage],PortsForObject->[PortsForObject],JSONEncoder->[JSONEncoder],DefaultResourceFilterFunc->[DefaultResourceFilterFunc],CanBeAutoscaled->[CanBeAutoscaled]]
DefaultClientConfig builds the default client configuration from command-line flags, applying explicit cluster default overrides.
@liggitt this matches what runs upstream. If you prefer your 44570, I have no objection, but please make sure it works here :-).
@@ -53,7 +53,7 @@ gulp.task('test', ['wiredep:test', 'ngconstant:dev'], function (done) { }); <% if (testFrameworks.indexOf('protractor') > -1) { %> gulp.task('protractor', function () { - return gulp.src([config.test + 'e2e/*.js']) + return gulp.src([config.test + 'e2e/**/*.js']) .pipe(plumber({errorHandler: handleErrors})) .pipe(protractor({ configFile: config.test + 'protractor.conf.js'
[No CFG could be retrieved]
Gulp tasks for running the test suites, including the protractor end-to-end tests.
Oh shit i thought i changed this
@@ -50,7 +50,8 @@ import org.jboss.weld.util.reflection.Reflections; public class ResolvableBuilder { - private static final Class<?>[] FACADE_TYPES = new Class<?>[] { Event.class, Instance.class, WeldInstance.class, Provider.class, InterceptionFactory.class }; + private static final Class<?>[] FACADE_TYPES = new Class<?>[] { Event.class, Instance.class, WeldEvent.class, + WeldInstance.class, Provider.class, InterceptionFactory.class }; private static final Class<?>[] METADATA_TYPES = new Class<?>[] { Interceptor.class, Decorator.class, Bean.class }; private static final Set<QualifierInstance> ANY_SINGLETON = Collections.singleton(QualifierInstance.ANY);
[ResolvableBuilder->[addQualifier->[addQualifier],addQualifiers->[addQualifier,addQualifiers],ResolvableImpl->[toString->[getTypes,getQualifiers],equals->[getTypes,equals]]]]
ResolvableBuilder builds a Resolvable for a given type; FACADE_TYPES lists the built-in facade bean types.
Not really sure we need to change this?
@@ -2457,13 +2457,11 @@ class MixedLayer(LayerBase): input_layer = self.get_input_layer(input_index) operator_conf.input_sizes.append(input_layer.size) operator_input_index.append(input_index) - if self.config.size == 0: - size = operator.calc_output_size(operator_conf.input_sizes) - if size != 0: + size = operator.calc_output_size(operator_conf.input_sizes) + if size != 0: + if self.config.size == 0: self.set_layer_size(size) - else: - size = operator.calc_output_size(operator_conf.input_sizes) - if size != 0: + else: config_assert(size == self.config.size, "different inputs have different size: %s vs. %s" % (size, self.config.size))
[ConcatenateLayer2->[__init__->[config_assert,calc_parameter_dims,calc_output_size,set_layer_size,calc_parameter_size,get_input_layer,gen_parameter_name,create_input_parameter]],BlockExpand->[__init__->[add_keys]],MDLstmLayer->[__init__->[config_assert,set_layer_size,get_input_layer,create_bias_parameter,create_input_parameter]],SimpleData->[DataBase],AverageLayer->[__init__->[get_input_layer,set_layer_size,create_bias_parameter,config_assert]],Input->[__init__->[MakeLayerNameInSubmodel,add_keys]],MultiplexLayer->[__init__->[get_input_layer,config_assert]],CosSimLayer->[__init__->[get_input_layer,config_assert]],ParameterReluLayer->[__init__->[create_input_parameter,get_input_layer,set_layer_size,config_assert]],SamplingIdLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],Evaluator->[type_of,MakeLayerNameInSubmodel],GruStepLayer->[__init__->[get_input_layer,create_input_parameter,create_bias_parameter,config_assert]],BatchNormLayer->[__init__->[parse_image,config_assert,Input,set_layer_size,calc_parameter_size,get_input_layer,create_bias_parameter,create_input_parameter]],make_config_environment->[make_setter,make_importer],parse_conv->[config_assert],Data->[DataBase],BlockExpandLayer->[__init__->[get_input_layer,set_layer_size,parse_block_expand]],RecurrentLayerGroupEnd->[GetLayerBaseName,SubModelEnd,config_assert],EosIdLayer->[__init__->[set_layer_size,config_assert]],DataBase->[default,config_assert],NormLayer->[__init__->[parse_norm,get_input_layer,set_layer_size]],parse_config_and_serialize->[parse_config],CosSimVecMatLayer->[__init__->[get_input_layer,config_assert]],MixedLayer->[__init__->[config_assert,calc_parameter_dims,calc_output_size,Input,check_dims,set_layer_size,calc_parameter_size,get_input_layer,gen_parameter_name,create_bias_parameter,create_input_parameter]],TransLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],Pool->[__init__->[add_keys]],PyData->[get_path,DataBase],ResizeLayer->[__init__->[config_assert]],Recurre
ntLayerGroupBegin->[RecurrentLayerGroupSetOutLink,RecurrentLayerGroupSetGenerator,config_assert,RecurrentLayerGroupWithoutOutLinksBegin],SubModelEnd->[config_assert],LambdaCost->[__init__->[config_assert]],TensorLayer->[__init__->[get_input_layer,create_input_parameter,create_bias_parameter,config_assert]],ScalingLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],ExpandLayer->[__init__->[get_input_layer,set_layer_size,create_bias_parameter,config_assert]],Outputs->[config_assert],OuterProdLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],SelectiveFCLayer->[__init__->[get_input_layer,create_input_parameter,create_bias_parameter,config_assert]],TrainData->[config_assert],SumToOneNormLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],Norm->[__init__->[add_keys]],ExpressionLayer->[MixedLayer],Layer->[config_assert],AddToLayer->[__init__->[get_input_layer,set_layer_size,create_bias_parameter,config_assert]],TestData->[config_assert],CTCLayer->[__init__->[config_assert]],InterpolationLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],parse_pool->[default,config_assert],LayerBase->[set_layer_size->[config_assert],create_input_parameter->[config_assert],create_bias_parameter->[Bias,type_of,gen_bias_parameter_name,config_assert],__init__->[MakeLayerNameInSubmodel,config_assert,Input,gen_parameter_name,type_of]],GatedRecurrentLayer->[__init__->[config_assert,set_layer_size,get_input_layer,create_bias_parameter,create_input_parameter]],Image->[__init__->[add_keys]],LstmLayer->[__init__->[config_assert,set_layer_size,get_input_layer,create_bias_parameter,create_input_parameter]],Operator->[__init__->[add_keys]],LstmStepLayer->[__init__->[get_input_layer,create_bias_parameter,config_assert]],Inputs->[config_assert],DataNormLayer->[__init__->[create_input_parameter,get_input_layer,set_layer_size,config_assert]],DotMulOperator->[check_dims->[config_assert],__init__->[config_assert]],FCLayer->[__init__->[get_input_layer,c
reate_bias_parameter,create_input_parameter]],ConvLayerBase->[__init__->[parse_conv,config_assert,set_layer_size,calc_parameter_size,get_input_layer,create_bias_parameter,create_input_parameter]],parse_config->[importlib,make_config_environment,init_config_environment],ConcatenateLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],Parameter->[default,config_assert],RecurrentLayerGroupWithoutOutLinksBegin->[SubModelBegin,MakeLayerNameInSubmodel,MakeLayerNameInParentSubmodel,config_assert],parse_norm->[config_assert],ConvexCombinationLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],SequenceReshapeLayer->[__init__->[set_layer_size,create_bias_parameter,config_assert]],SlopeInterceptLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],MaxLayer->[__init__->[get_input_layer,set_layer_size,create_bias_parameter,config_assert]],RecurrentLayerGroupSetGenerator->[MakeLayerNameInSubmodel],MaxIdLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],GetOutputLayer->[__init__->[config_assert]],RecurrentLayer->[__init__->[config_assert,set_layer_size,get_input_layer,create_bias_parameter,create_input_parameter]],FeatMapExpandLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],CRFDecodingLayer->[__init__->[create_input_parameter,config_assert]],HierarchicalSigmoidLayer->[__init__->[get_input_layer,create_input_parameter,create_bias_parameter,config_assert]],Conv->[__init__->[config_assert,add_keys]],SubModelBegin->[config_assert],SubSequenceLayer->[__init__->[get_input_layer,set_layer_size,create_bias_parameter,config_assert]],parse_image->[config_assert],ProtoData->[DataBase],RecurrentLayerGroupSetOutLink->[MakeLayerNameInParentSubmodel,MakeLayerNameInSubmodel],PoolLayer->[__init__->[parse_pool,get_input_layer,set_layer_size]],ConvShiftLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],CRFLayer->[__init__->[create_input_parameter,config_assert]],SequenceLastInstanceLayer->[__init__->[get_input_laye
r,set_layer_size,create_bias_parameter,config_assert]],Memory->[MakeLayerNameInParentSubmodel,MakeLayerNameInSubmodel,config_assert,AgentLayer,SequenceAgentLayer,create_bias_parameter],ConvOperator->[__init__->[config_assert]],Bias->[__init__->[add_keys]],Projection->[__init__->[MakeLayerNameInSubmodel,add_keys]],PowerLayer->[__init__->[get_input_layer,set_layer_size,config_assert]],NCELayer->[__init__->[get_input_layer,create_input_parameter,create_bias_parameter,config_assert]],SequenceConcatLayer->[__init__->[get_input_layer,set_layer_size,create_bias_parameter,config_assert]],config_layer,parse_config,define_cost]
Initializes a MixedLayer, computing the output size from its operators and validating it against the configured layer size.
This is not right. It can change size to 0 and make line 2485 fail.
@@ -478,7 +478,7 @@ class T5Attention(AttentionModule): attention_head_size=key_value_proj_dim, num_attention_heads=num_heads, output_linear=True, - scoring_func="scaled_dot_product", + scoring_func="dot_product", dropout=dropout, bias=False, normalize_weights=normalize,
[AttentionModule->[_get_attention_probs->[_position_bias],_project->[_transpose_for_scores],forward->[_get_attention_probs,_project,_query_layer,AttentionOutput,_output_layer,_get_lengths],_query_layer->[_transpose_for_scores],compute_bias->[_relative_position_bucket]]]
Constructs the T5Attention module, configuring its underlying AttentionModule (scoring function, heads, dropout).
Why do we change the default? The original transformer uses `scaled_dot_product`, right?
@@ -115,7 +115,7 @@ public class DefaultDnsCache implements DnsCache { @Override public boolean clear(String hostname) { checkNotNull(hostname, "hostname"); - Entries entries = resolveCache.remove(hostname); + Entries entries = resolveCache.remove(appendDot(hostname)); return entries != null && entries.clearAndCancel(); }
[DefaultDnsCache->[cache0->[get],Entries->[cancelExpiration->[cancelExpiration],add->[cancelExpiration,address,get,add,cause],clearAndCancel->[cancelExpiration]],DefaultDnsCacheEntry->[toString->[toString]],get->[get,emptyAdditionals],toString->[toString],cache->[emptyAdditionals]]]
Clear the cache for the given hostname.
What happens if there is a search path? If my search path is `netty.io` then the domain name `foo` would be incorrectly cached as `foo.` right?
@@ -832,7 +832,7 @@ static int serverinfo_srv_add_cb(SSL *s, unsigned int ext_type, return 0; /* No extension found, don't send extension */ return 1; /* Send extension */ } - return -1; /* No serverinfo data found, don't send + return 0; /* No serverinfo data found, don't send * extension */ }
[No CFG could be retrieved]
serverinfo_srv_add_cb decides whether to send the serverinfo extension; it returns 0 when no serverinfo data is found.
As seen a few lines earlier, if no extension is found, don't send any extension _using a return value of 0_. Thus using a return value of -1 here, for a similar case, is what leads to the failed handshake unexpectedly.
@@ -353,7 +353,7 @@ namespace System } [DllImport(RuntimeHelpers.QCall, CharSet = CharSet.Unicode)] - private static extern IntPtr _RegisterFrozenSegment(IntPtr sectionAddress, IntPtr sectionSize); + private static extern IntPtr _RegisterFrozenSegment(IntPtr sectionAddress, nint sectionSize); [DllImport(RuntimeHelpers.QCall, CharSet = CharSet.Unicode)] private static extern void _UnregisterFrozenSegment(IntPtr segmentHandle);
[GC->[WaitForPendingFinalizers->[_WaitForPendingFinalizers],CancelFullGCNotification->[_CancelFullGCNotification],GetMemoryLoad->[GetMemoryInfo],TryStartNoGCRegion->[StartNoGCRegionWorker],EndNoGCRegion->[_EndNoGCRegion],InvokeMemoryLoadChangeNotifications->[GetMemoryLoad],GetTotalMemory->[WaitForPendingFinalizers,Collect,GetTotalMemory],GCNotificationStatus->[_WaitForFullGCComplete,_WaitForFullGCApproach],GCMemoryInfo->[GetMemoryInfo],CollectionCount->[_CollectionCount],StartNoGCRegionWorker->[_StartNoGCRegion],Collect->[_Collect,Collect],GetGeneration->[KeepAlive,GetGenerationWR],RegisterForFullGCNotification->[_RegisterForFullGCNotification],AddMemoryPressure->[_AddMemoryPressure],ReRegisterForFinalize->[_ReRegisterForFinalize],RemoveMemoryPressure->[_RemoveMemoryPressure],SuppressFinalize->[_SuppressFinalize],GetMaxGeneration]]
_RegisterFrozenSegment is a QCall that registers a frozen heap segment and returns its handle.
I was considering removing `CharSet = CharSet.Unicode` here and in `_UnregisterFrozenSegment`, since these two methods don't marshal any strings. Thoughts?
@@ -207,6 +207,11 @@ public class RhelUtils { majorVersion, release, arch)); } + // next check if OracleLinux + if (oracleReleaseFile.filter(StringUtils::isNotBlank).isPresent()) { + return oracleReleaseFile.map(v -> detectPlainRHEL(v, arch, "OracleLinux")); + } + // next check if Centos if (centosReleaseFile.filter(StringUtils::isNotBlank).isPresent()) { return centosReleaseFile.map(v -> detectPlainRHEL(v, arch, "CentOS"));
[RhelUtils->[detectRhelProduct->[RhelProduct],detectPlainRHEL->[RhelProduct,parseReleaseFile],parseReleaseFile->[ReleaseFile]]]
Detects the RHEL-based product (RHEL, OracleLinux, CentOS) from the available release files.
In the DB the name of the product is one of `oraclelinux-6`, `oraclelinux-7` or `oraclelinux-8` but this method will look for the product by the lower cased name i.e. `oraclelinux`
@@ -83,6 +83,17 @@ DBPriv *DBPrivOpenDB(const char *dbpath, dbid id) dbpath, mdb_strerror(rc)); goto err; } + if (id == dbid_lastseen) + { + /* lastseen needs by default 4x more reader locks than other DBs*/ + rc = mdb_env_set_maxreaders(db->env, 126*4); + if (rc) + { + Log(LOG_LEVEL_ERR, "Could not set maxreaders for database %s: %s", + dbpath, mdb_strerror(rc)); + goto err; + } + } if (id != dbid_locks) { rc = mdb_env_open(db->env, dbpath, MDB_NOSUBDIR, 0644);
[DBPrivWrite->[mdb_put,mdb_txn_commit,mdb_strerror,Log,mdb_txn_begin,mdb_txn_abort,mdb_cursor_txn],DBPrivWriteCursorEntry->[Log,mdb_strerror,mdb_cursor_put],DBPrivOpenDB->[mdb_strerror,mdb_env_open,mdb_txn_commit,mdb_env_create,free,mdb_open,mdb_env_set_mapsize,Log,mdb_env_close,mdb_txn_begin,xcalloc],DBPrivCloseDB->[free,mdb_env_close],DBPrivWriteNoCommit->[mdb_strerror,mdb_put,mdb_open,Log,mdb_txn_begin,mdb_txn_abort],DBPrivDelete->[mdb_strerror,mdb_txn_commit,mdb_del,Log,mdb_txn_begin,mdb_txn_abort,mdb_cursor_txn],DBPrivOpenCursor->[mdb_strerror,mdb_cursor_open,Log,mdb_txn_begin,mdb_txn_abort,xcalloc],DBPrivDiagnose->[StringFormat],DBPrivCommit->[mdb_strerror,mdb_txn_commit,Log],DBPrivHasKey->[mdb_strerror,mdb_get,Log,mdb_txn_begin,mdb_txn_abort],DBPrivAdvanceCursor->[mdb_strerror,free,mdb_cursor_del,memcpy,xmalloc,mdb_cursor_get,Log],DBPrivDeleteCursorEntry->[mdb_cursor_get],DBPrivCloseCursor->[mdb_strerror,free,mdb_txn_commit,mdb_cursor_del,Log,mdb_cursor_close,mdb_cursor_txn],DBPrivGetValueSize->[mdb_strerror,mdb_get,Log,mdb_txn_begin,mdb_txn_abort],DBPrivRead->[mdb_strerror,memcpy,mdb_get,Log,mdb_txn_begin,mdb_txn_abort]]
DBPrivOpenDB opens an LMDB database at the given path, configuring the environment and returning NULL on error.
`126*4` should be a constant somewhere.
@@ -6,10 +6,10 @@ namespace OpenDDS { namespace DCPS { template<typename AceClock> -const TimePoint_T<AceClock> TimePoint_T<AceClock>::zero_value = TimePoint_T<AceClock>(ACE_Time_Value::zero); +const TimePoint_T<AceClock> TimePoint_T<AceClock>::zero_value(ACE_Time_Value(0, 0)); template<typename AceClock> -const TimePoint_T<AceClock> TimePoint_T<AceClock>::max_value = TimePoint_T<AceClock>(ACE_Time_Value::max_time); +const TimePoint_T<AceClock> TimePoint_T<AceClock>::max_value(ACE_Time_Value(ACE_Numeric_Limits<time_t>::max(), ACE_ONE_SECOND_IN_USECS)); template<typename AceClock> AceClock TimePoint_T<AceClock>::clock;
[No CFG could be retrieved]
Defines the zero_value and max_value static constants for TimePoint_T.
You didn't add the same minus one here as you did to `TimeDuration`.
@@ -323,10 +323,13 @@ class TestSubsystem(GoalSubsystem): type=list, member_type=str, default=[], - help="Specify a list additional environment variables to include in test processes. Entries are strings " - "in the form `ENV_VAR=value` to use explicitly; or just `ENV_VAR` to copy the value of a variable in Pants's " - "own environment. `value` may be a string with spaces in it such as `ENV_VAR=has some spaces`. `ENV_VAR=` sets " - "a variable to be the empty string.", + help=( + "Specify a list additional environment variables to include in test processes. " + "Entries are strings in the form `ENV_VAR=value` to use explicitly; or just " + "`ENV_VAR` to copy the value of a variable in Pants's own environment. " + "`value` may be a string with spaces in it such as `ENV_VAR=has some spaces`. " + "`ENV_VAR=` sets a variable to be the empty string." + ), ) @property
[CoverageReports->[artifacts->[get_artifact],materialize->[materialize]],enrich_test_result->[EnrichedTestResult],run_tests->[Test,materialize],CoverageReportType->[__new__->[__new__]],get_filtered_environment->[TestExtraEnv]]
Register options for the test runner.
No changes beyond formatting.
@@ -4,6 +4,7 @@ import logging import time import zope.component +from josepy.jwk import JWK # pylint: disable=unused-import from acme import challenges from acme import errors as acme_errors
[_report_failed_authzrs->[getUtility,challb_to_achall,add_message,setdefault,_generate_failed_chall_msg,values],_find_smart_path->[enumerate,_report_no_chall_path,get],_find_dumb_path->[append,isinstance,enumerate,_report_no_chall_path,next],_generate_failed_chall_msg->[append,is_acme_error,join],gen_challenge_path->[_find_smart_path,_find_dumb_path],challb_to_achall->[KeyAuthorizationAnnotatedChallenge,Error,DNS,format,isinstance,info],AuthHandler->[_poll_authorizations->[,_report_failed_authzrs,max,poll,sleep,extend,AuthorizationError,warning,retry_after,items,enumerate,range,values],deactivate_valid_authorizations->[append,debug,deactivate_authorization],_choose_challenges->[gen_challenge_path,extend,len,_challenge_factory,tuple,info,range,_get_chall_pref],handle_authorizations->[perform,_poll_authorizations,_choose_challenges,critical,getUtility,ExitHandler,notify,len,AuthorizationError,zip,answer_challenge,info],_challenge_factory->[append,challb_to_achall],_cleanup_challenges->[info,cleanup],_get_chall_pref->[append,extend,AuthorizationError,set,get_chall_pref]],_report_no_chall_path->[AuthorizationError,critical,isinstance,len],getLogger]
A class to handle an authorization request. Retrieve the that need to be validated.
why do we need this import?
@@ -211,6 +211,14 @@ class StudentT(distribution.Distribution): (math_ops.sqrt(self.df) * math.sqrt(math.pi) * self.sigma) * math_ops.pow(1. + math_ops.square(y) / self.df, -(0.5 + half_df))) + def _log_cdf(self, x): + return math_ops.log(self._cdf(x)) + + def _cdf(self, x): + y = (x - self.mu) / self.sigma + beta_y = (self.df / (math_ops.square(y) + self.df)) + return (1 - 0.5 * math_ops.betainc(self.df/2, 0.5, beta_y)) + def _entropy(self): u = array_ops.expand_dims(self.df * self._ones(), -1) v = array_ops.expand_dims(self._ones(), -1)
[StudentTWithAbsDfSoftplusSigma->[__init__->[softplus,pop,super,locals,name_scope,abs,floor]],StudentT->[_event_shape->[constant],_entropy->[lbeta,expand_dims,get_shape,len,concat,log,_ones,digamma],_get_batch_shape->[get_shape,broadcast_shape],_mode->[identity],_sample_n->[random_gamma,gen_new_seed,batch_shape,sqrt,concat,ones,constant,random_normal],_batch_shape->[shape],_log_prob->[log,square,lgamma],_prob->[square,pow,sqrt,exp,lgamma],_std->[variance,sqrt],_ones->[batch_shape,ones],_param_shapes->[dict,convert_to_tensor,zip],_get_event_shape->[scalar],_variance->[as_numpy_dtype,square,batch_shape,with_dependencies,greater,fill,assert_less,_ones,ones,select,array],__init__->[assert_positive,control_dependencies,pop,super,locals,assert_same_float_dtype,identity,name_scope],_mean->[as_numpy_dtype,batch_shape,with_dependencies,greater,fill,assert_less,_ones,ones,select,array],AppendDocstring]]
Compute the probability of a non - zero sample.
Nit: No need for extra parens (here, and below)
@@ -209,12 +209,11 @@ import org.slf4j.LoggerFactory; * <tt>"group.id"</tt>, <tt>"enable.auto.commit"</tt>, etc. * * <h3>Event Timestamp and Watermark</h3> - * By default record timestamp and watermark are based on processing time in KafkaIO reader. - * This can be overridden by providing {@code WatermarkFn} with - * {@link Read#withWatermarkFn(SerializableFunction)}, and {@code TimestampFn} with - * {@link Read#withTimestampFn(SerializableFunction)}.<br> - * Note that {@link KafkaRecord#getTimestamp()} reflects timestamp provided by Kafka if any, - * otherwise it is set to processing time. + * By default, record timestamp (evnt time) is set to processing time in KafkaIO reader and + * source watermark is current wall time. If a topic has Kafka server-side ingestion timestamp + * enabled ('LogAppendTime'), it can enabled with {@link Read#withLogAppendTime()}. + * A custom timestamp policy can be provided by implementing {@link TimestampPolicyFactory}. See + * {@link Read#withTimestampPolicyFactory(TimestampPolicyFactory)} for more information. */ @Experimental(Experimental.Kind.SOURCE_SINK) public class KafkaIO {
[KafkaIO->[TypedWithoutMetadata->[populateDisplayData->[populateDisplayData]],KafkaValueWrite->[populateDisplayData->[populateDisplayData]],Read->[withProcessingTime->[withProcessingTime],withMaxNumRecords->[build],withTimestampFn->[withTimestampFn2],commitOffsetsInFinalize->[build],withTopicPartitions->[build],expand->[getMaxNumRecords,getKeyDeserializer,getKeyCoder,getValueDeserializer,getValueCoder,withMaxNumRecords,getMaxReadTime,getStartReadTime,isCommitOffsetsInFinalizeEnabled],withKeyDeserializerAndCoder->[build],withStartReadTime->[build],withValueDeserializer->[build],withLogAppendTime->[withLogAppendTime],withWatermarkFn->[withWatermarkFn2],withMaxReadTime->[build],withKeyDeserializer->[build],withTopics->[build],withValueDeserializerAndCoder->[build],withConsumerFactoryFn->[build],withWatermarkFn2->[build],withReadCommitted->[updateConsumerProperties],populateDisplayData->[populateDisplayData,getTopicPartitions,getTopics],withTimestampPolicyFactory->[build],updateConsumerProperties->[getConsumerConfig,build],withTimestampFn2->[build]],Write->[updateProducerProperties->[updateKafkaProperties,getProducerConfig,build],withConsumerFactoryFn->[build],expand->[isEOS,getValueSerializer,getKeySerializer,getTopic],withEOS->[build],withProducerFactoryFn->[build],values->[build],validate->[isEOS],populateDisplayData->[populateDisplayData],withValueSerializer->[build],withKeySerializer->[build],withTopic->[build]]]]
This class is used to write records with default empty key. Creates an uninitialized object that represents a KAFKA partition.
typo `evnt time` here
@@ -115,12 +115,8 @@ public class HttpClientUpgradeHandler extends HttpObjectAggregator implements Ch public HttpClientUpgradeHandler(SourceCodec sourceCodec, UpgradeCodec upgradeCodec, int maxContentLength) { super(maxContentLength); - if (sourceCodec == null) { - throw new NullPointerException("sourceCodec"); - } - if (upgradeCodec == null) { - throw new NullPointerException("upgradeCodec"); - } + ObjectUtil.checkNotNull(sourceCodec, "sourceCodec"); + ObjectUtil.checkNotNull(upgradeCodec, "upgradeCodec"); this.sourceCodec = sourceCodec; this.upgradeCodec = upgradeCodec; }
[HttpClientUpgradeHandler->[read->[read],disconnect->[disconnect],connect->[connect],write->[write],setUpgradeRequestHeaders->[protocol,setUpgradeHeaders],decode->[protocol,upgradeFrom,prepareUpgradeFrom,upgradeTo,decode],close->[close],deregister->[deregister],bind->[bind],flush->[flush]]]
Bind the channel to the given address.
nit: you can merge the lines above as `checkNotNull` will return the given argument
@@ -133,6 +133,8 @@ def recalculate_order_prices(order: Order, **kwargs): voucher_discount = kwargs.get("discount", zero_money(order.currency)) # discount amount can't be greater than order total + if type(voucher_discount) == TaxedMoney: + voucher_discount = voucher_discount.gross voucher_discount = min(voucher_discount, total.gross) total -= voucher_discount
[update_order_discount_for_order->[apply_discount_to_value],remove_discount_from_order_line->[update_taxes_for_order_line],create_order_discount_for_order->[apply_discount_to_value],get_products_voucher_discount_for_order->[get_prices_of_discounted_specific_product],get_prices_of_discounted_specific_product->[get_discounted_lines],change_order_line_quantity->[_update_allocations_for_line],update_order_prices->[recalculate_order,update_taxes_for_order_lines],recalculate_order->[recalculate_order_prices,recalculate_order_discounts],restock_order_lines->[get_order_country],update_discount_for_order_line->[apply_discount_to_value,update_taxes_for_order_line],get_voucher_discount_for_order->[get_products_voucher_discount_for_order],recalculate_order_prices->[get_voucher_discount_assigned_to_order],update_taxes_for_order_lines->[update_taxes_for_order_line],order_needs_automatic_fulfillment->[order_line_needs_automatic_fulfillment],update_order_status->[_calculate_quantity_including_returns]]
Recalculate order prices based on order line total prices and order discount.
Wouldn't be better to call `isinstance`? `type` returns only class so for the first look it means that we use `cls` in the next loc.
@@ -270,6 +270,15 @@ class Exchange(object): f'Pair {pair} is not available on {self.name}. ' f'Please remove {pair} from your whitelist.') + def get_valid_pair_combination(self, paira, pairb) -> str: + """ + Get valid combination of paira and pairb by trying both combinations. + """ + for pair in [f"{paira}/{pairb}", f"{pairb}/{paira}"]: + if pair in self._api.markets and self._api.markets[pair].get('active'): + return pair + raise DependencyException(f"Could not combine {paira} and {pairb} to get a valid pair.") + def validate_timeframes(self, timeframe: List[str]) -> None: """ Checks if ticker interval from config is a supported timeframe on the exchange
[retrier->[wrapper->[wrapper]],Exchange->[cancel_order->[cancel_order],get_trades_for_order->[exchange_has],create_order->[symbol_amount_prec,symbol_price_prec,create_order],buy->[create_order,dry_run_order],sell->[create_order,dry_run_order],_load_markets->[_load_async_markets],stoploss_limit->[symbol_price_prec,create_order,dry_run_order]],retrier_async->[wrapper->[wrapper]]]
Checks if all given pairs are tradable on the current exchange. Raises OperationalException if.
parameters are not the pairs, these are currencies (or symbols), the names of the parameters are confusing
@@ -54,6 +54,11 @@ func (c Config) KeysDir() string { return path.Join(c.RootDir, "keys") } +// ClientNodeURL returns the full URL for communicating with the node on. +func (c Config) ClientNodeURL() string { + return "http://localhost:" + string(c.Port) +} + func parseEnv(cfg interface{}) error { return env.ParseWithFuncs(cfg, env.CustomParsers{ reflect.TypeOf(big.Int{}): bigIntParser,
[KeysDir->[Join],FileMode,ParseWithFuncs,Expand,TypeOf,Fatal,Errorf,Set,SetString,MkdirAll]
KeysDir returns the path to the keys directory.
Do we want to assume it's always on the localhost? Not sure why we got rid of `ClientNodeURL` and added `Port` if `ClientNodeURL` supported configuring the port and the host. It's functionally less capable.
@@ -187,6 +187,10 @@ define([ this.terrainExaggeration = 1.0; } + FrameState.prototype.addCommand = function(command) { + this.commandList.push(command); + }; + /** * A function that will be called at the end of the frame. *
[No CFG could be retrieved]
A function that will be called at the end of the frame.
Should other places besides models use this function too?
@@ -783,15 +783,12 @@ var _ = g.Describe("[Feature:DeploymentConfig] deploymentconfigs", func() { o.Expect(fmt.Errorf("expected no deployment, found %#v", rcs[0])).NotTo(o.HaveOccurred()) } - _, err = updateConfigWithRetries(oc.AppsClient().Apps(), oc.Namespace(), dcName, func(dc *appsv1.DeploymentConfig) { - // TODO: oc rollout pause should patch instead of making a full update - dc.Spec.Paused = false - }) + dc, err = oc.AppsClient().AppsV1().DeploymentConfigs(oc.Namespace()).Patch(dcName, types.StrategicMergePatchType, []byte(`{"spec": {"paused": false}}`)) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(waitForLatestCondition(oc, dcName, deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred()) g.By("making sure it updates observedGeneration after being paused") - dc, err = oc.AppsClient().Apps().DeploymentConfigs(oc.Namespace()).Patch(dcName, types.StrategicMergePatchType, []byte(`{"spec": {"paused": true}}`)) + dc, err = oc.AppsClient().AppsV1().DeploymentConfigs(oc.Namespace()).Patch(dcName, types.StrategicMergePatchType, []byte(`{"spec": {"paused": true}}`)) o.Expect(err).NotTo(o.HaveOccurred()) _, err = waitForDCModification(oc, dc.Namespace, dcName, deploymentChangeTimeout,
[By,AppsV1,BeZero,ImageClient,ConfigSelector,Should,New,SingleObject,AdminKubeClient,Watch,Namespace,ContainSubstring,BeNil,Apps,NewCLI,WaitForUserBeAuthorized,JustBeforeEach,NotTo,Image,WithCancel,ImageStreamTags,Trim,KubeClient,BeEquivalentTo,Now,IsTerminatedDeployment,FixturePath,It,WaitForServiceAccount,ReplicationControllers,Since,PollImmediate,Until,Create,Output,BeEmpty,BeNumerically,Describe,Float32,List,DeploymentVersionFor,AppsClient,GinkgoRandomSeed,Pods,Sleep,CurrentGinkgoTestDescription,Patch,CoreV1,Expect,Duration,HaveKey,DeploymentNameForConfigVersion,DeploymentConfigs,LatestDeploymentNameForConfigAndVersion,Args,Start,To,TrimSpace,NewDeleteOptions,Equal,Execute,ServiceAccounts,Convert,GetResourceVersion,Get,NewSource,DeployerPodNameForDeployment,GetControllerOf,Sprintf,GetDeploymentCondition,GinkgoRecover,String,IsNotFound,Run,KubeConfigPath,DeploymentsForCleanup,HaveOccurred,BeTrue,Delete,cancel,Username,Errorf,DeploymentStatusFor,Logf,Wait,Contains,HaveLen,ImageStreams,Background,AfterEach]
This is a helper function that can be used to cancel a paused deployment or rollback a paused Determines if the deployment config is observed by the given DC.
@tnozicka not sure what the intention here was, but if we want to patch then lets patch ;-)
@@ -117,6 +117,17 @@ switch ( $platform_tabs->current_tab() ) { break; } ?> +<?php + // Add link to Knowledge Base article about crawl issues. + echo '<p>'; + + printf( + /* translators: %1$s expands anchor to knowledge base article, %2$s expands to </a> */ + __( 'Please refer to %1$sour article about how to connect your website to Google Search Console%2$s if you need assistance.', 'wordpress-seo'), '<a href="https://kb.yoast.com/kb/how-to-connect-and-retrieve-crawl-issues/" target="_blank">', '</a>' ); + + echo '</p>'; +?> + <br class="clear" /> <?php
[getAccessToken,current_tab,admin_header,createAuthUrl,set_option,admin_footer,get_sites,select,output_help_center,set_screen_reader_content,display_table]
Displays the hidden hidden input for the object.
Can you put a space before the closing function. `, 'wordpress-seo')` thus: `, 'wordpress-seo' )`
@@ -50,11 +50,11 @@ namespace System.Net.Sockets // Our internal state doesn't automatically get updated after a non-blocking connect // completes. Keep track of whether we're doing a non-blocking connect, and make sure // to poll for the real state until we're done connecting. - private bool _nonBlockingConnectInProgress; + private bool _pollPendingConnect; // Keep track of the kind of endpoint used to do a non-blocking connect, so we can set // it to _rightEndPoint when we discover we're connected. - private EndPoint? _nonBlockingConnectRightEndPoint; + private EndPoint? _pendingConnectRightEndPoint; // These are constants initialized by constructor. private AddressFamily _addressFamily;
[Socket->[SetLingerOption->[SetLingerOption],SetIPProtectionLevel->[SetIPProtectionLevel],AcceptAsync->[AcceptAsync],DoBind->[Bind],Send->[Send],ConnectAsync->[Connect,ConnectAsync,CanTryAddressFamily],SendPacketsAsync->[SendPacketsAsync],Shutdown->[Shutdown],UpdateStatusAfterSocketError->[SetToDisconnected,UpdateStatusAfterSocketError],ReceiveAsync->[ReceiveAsync,Receive],GetSocketOption->[Socket],Receive->[Receive],EndReceiveFrom->[CanTryAddressFamily],ReceiveFromAsync->[ReceiveFrom,ReceiveFromAsync,CanTryAddressFamily],Dispose->[Receive,Send,Socket,Shutdown,Dispose],DoConnect->[Connect],EndReceiveMessageFrom->[CanTryAddressFamily],InternalShutdown->[Shutdown],Disconnect->[Disconnect],SendToAsync->[SendToAsync,SendTo],IOControl->[IOControl],SocketError->[SocketError],InternalSetBlocking->[InternalSetBlocking],SetSocketOption->[SetSocketOption,Socket],IAsyncResult->[AcceptAndReceiveHelperAsync],ReceiveMessageFrom->[CanTryAddressFamily,ReceiveMessageFrom],CancelConnectAsync->[CancelConnectAsync],SetReceivingPacketInformation->[SetSocketOption],SetIPv6MulticastOption->[SetIPv6MulticastOption],ReceiveMessageFromAsync->[CanTryAddressFamily,ReceiveMessageFromAsync,ReceiveMessageFrom],CheckErrorAndUpdateStatus->[UpdateStatusAfterSocketError],ReceiveFrom->[ReceiveFrom],Listen->[Listen],DisconnectAsync->[Disconnect,DisconnectAsync],Select->[Select],SetMulticastOption->[SetMulticastOption],Connect->[Connect,CanTryAddressFamily],Serialize->[Serialize],SendAsync->[Send,SendAsync],ValidateReceiveFromEndpointAndState->[CanTryAddressFamily],Poll->[Poll],SendFile->[SendFile],SendTo->[SendTo],Dispose,Socket]]
The Socket constructor. Socket class.
I kind of like the old field name (`_nonBlockingConnectInProgress`) better -- it conveys *why* we are polling.
@@ -42,13 +42,11 @@ class Bazaar(VersionControl): if os.path.exists(location): rmtree(location) - with TempDirectory(kind="export") as temp_dir: - self.unpack(temp_dir.path) - - self.run_command( - ['export', location], - cwd=temp_dir.path, show_stdout=False, - ) + url, rev_options = self.get_url_rev_options() + self.run_command( + ['export', location, url] + rev_options.to_args(), + show_stdout=False, + ) def fetch_new(self, dest, url, rev_options): rev_display = rev_options.to_display()
[Bazaar->[get_src_requirement->[get_revision,get_url]]]
Exports the Bazaar repository at the given location to the given location.
You should be calling `make_rev_options()` here after this like the beginning of `obtain()` does. `get_base_rev_args()` is meant for use by the RevOptions class rather than by VersionControl directly.
@@ -834,7 +834,7 @@ interface Function } DateTime date; try { - date = DateTime.parse(value.asString(), formatter); + date = DateTimes.wrapFormatter(formatter).parse(value.asString()); } catch (IllegalArgumentException e) { throw new IAE(e, "invalid value %s", value.asString());
[CaseSearchedFunc->[apply->[eval,name]],UpperFunc->[apply->[name]],NvlFunc->[apply->[eval,name]],StrlenFunc->[apply->[name]],ConditionFunc->[apply->[eval,name]],SingleParamMath->[eval->[eval]],DoubleParam->[apply->[eval,name]],CaseSimpleFunc->[apply->[eval,name]],TimestampFromEpochFunc->[apply->[eval,name]],ReplaceFunc->[apply->[name]],LowerFunc->[apply->[name]],SubstringFunc->[apply->[name]],DoubleParamMath->[eval->[eval]],SingleParam->[apply->[name]]]
Evaluate the function.
Above, `DateTimes.wrapFormatter(ISODateTimeFormat.dateOptionalTimeParser())` could be cached more globally.
@@ -210,6 +210,7 @@ class ClientCache(SimplePaths): def load_manifest(self, conan_reference): """conan_id = sha(zip file)""" + assert isinstance(conan_reference, ConanFileReference) filename = self.digestfile_conanfile(conan_reference) return FileTreeManifest.loads(load(filename))
[ClientCache->[package_lock->[_no_locks],conanfile_lock_files->[_no_locks],conanfile_write_lock->[_no_locks],conanfile_read_lock->[_no_locks]],_mix_settings_with_env->[get_env_value,get_setting_name]]
Load manifest from conan or package.
Asserts in this file only added because there are present in other methods, to be consistent, not really related to the revisions feature.
@@ -841,12 +841,14 @@ func (mod *modContext) getProperties(properties []*schema.Property, lang string, } docProperties = append(docProperties, property{ + ID: strings.ToLower(propLangName), DisplayName: wbr(propLangName), Name: propLangName, Comment: prop.Comment, DeprecationMessage: prop.DeprecationMessage, IsRequired: prop.IsRequired, IsInput: input, + Link: strings.ToLower("#" + propLangName), Type: mod.typeString(prop.Type, lang, characteristics, true), }) }
[getTSLookupParams->[getLanguageModuleName],genLookupParams->[getTSLookupParams,getCSLookupParams,getGoLookupParams],genConstructorGo->[getLanguageModuleName],getConstructorResourceInfo->[getLanguageModuleName],genNestedTypes->[getLanguageModuleName,typeString],getGoLookupParams->[getLanguageModuleName],genConstructorCS->[getLanguageModuleName,typeString],genConstructors->[genConstructorTS,genConstructorGo,genConstructorCS],getProperties->[typeString],genConstructorTS->[getLanguageModuleName],cleanTypeString->[cleanTypeString,getLanguageModuleName],getCSLookupParams->[getLanguageModuleName],typeString->[cleanTypeString,getLanguageModuleName,typeString],genResource->[genNestedTypes,genLookupParams,genConstructors,getProperties,getConstructorResourceInfo,genResourceHeader],gen->[getModuleFileName,genResource,add],genIndex->[getModuleFileName,getLanguageLinks],getLanguageLinks->[getLanguageModuleName],getNestedTypes->[contains,getNestedTypes,add],getTypes->[getNestedTypes],gen]
getProperties returns a slice of properties for the given language DevTools. GetProperties.
It's possible the same property name is going to show up on the same page (e.g. on the resource itself and on one of the supporting types). Should we prefix the type name to help ensure the id is unique?
@@ -34,14 +34,6 @@ public class ZooKeeperEmbeddedTest { private static final long CONNECT_TIMEOUT_MS = (int) EmbeddedSingleNodeKafkaCluster .ZK_CONNECT_TIMEOUT.toMillis(); - /** - * Test is only valid if Jetty is on the class path: - */ - @SuppressWarnings("unused") - private final org.eclipse.jetty.server.Connector ensureClassOnClassPath = null; - @SuppressWarnings("unused") - private final org.eclipse.jetty.servlet.ServletContextHandler ensureClassOnClassPath2 = null; - @Test public void shouldSupportMultipleInstancesRunning() throws Exception { // Given:
[ZooKeeperEmbeddedTest->[assertCanConnect->[ZooKeeper,RuntimeException,getMessage,getState,assertThat,println,await,printStackTrace,close,connectString,CountDownLatch,countDown],shouldSupportMultipleInstancesRunning->[ZooKeeperEmbedded,assertCanConnect,stop],toMillis]]
Checks whether there are multiple instances running in the cluster.
Out of curiosity, do you understand what these were originally for?
@@ -304,6 +304,15 @@ class PendingWithdrawState: recipient_metadata: Optional[AddressMetadata] = None +@dataclass +class CoopSettleState: + total_withdraw_participant: WithdrawAmount + total_withdraw_partner: WithdrawAmount + expiration: BlockExpiration + partner_signature_request: Optional[Signature] = None + partner_signature_confirmation: Optional[Signature] = None + + @dataclass class NettingChannelEndState(State): """The state of one of the nodes in a two party netting channel."""
[make_empty_pending_locks_state->[PendingLocksState]]
The state of one of the nodes in a two party netting channel. A list of the pending locks in order of insertion. Used for calculating the locksroot.
The usual naming pair is `our` (instead of `participant`) and `partner`.
@@ -105,9 +105,11 @@ class Site < ApplicationRecord end end + # If the organization_id corresponds to a municipality ID, + # this method will return an instance of INE::Places::Place def place - @place ||= if self.municipality_id && self.location_name - INE::Places::Place.find self.municipality_id + @place ||= if self.organization_id + INE::Places::Place.find(self.organization_id) end end
[Site->[configuration->[new],site_configuration_attributes->[merge!,new_record?,read_attribute,id,tap],to_s->[name],store_configuration->[instance_values,configuration_data],added_modules_after_update->[has_key?,configuration_data,attribute_before_last_save,wrap],gobierto_people_settings->[gobierto_people_enabled?,find_by],slug->[parameterize],initialize_admins->[admins,preset],find_by_allowed_domain->[include?,find_by],alphabetically_sorted->[reverse],gobierto_budgets_settings->[gobierto_budgets_enabled?,find_by],run_seeder->[each,seed,any?,saved_change_to_attribute?],place->[location_name,find,municipality_id],password_protected?->[draft?],reserved_domains->[fetch,map],location_required->[add,t,any?,blank?],include,enum,after_save,before_save,validate,scope,validates,translates,serialize,before_create,has_many,order,private_class_method]]
Returns the first missing object in the hierarchy of GobiertoBudgets.
Redundant self detected.
@@ -147,5 +147,17 @@ namespace Dynamo.Tests } } + [Test] + public void MAGN_7146() + { + Assert.DoesNotThrow(() => + { + // MAGN-7146 was saved after a crash- Test is to check if it reopens as manual after a crash + var ws = Open<HomeWorkspaceModel>(TestDirectory, crashProtDir, "MAGN_7146.dyn"); + var a = ws.RunSettings.RunType; + Assert.AreEqual(ws.RunSettings.RunType, RunType.Manual); + }); + + } } }
[CrashProtectionTests->[RunAutoFileWithFalseFlagOpensInManual->[AssertManual],RunAutoFileWithSuccessfulRunSavesFlag->[AssertAuto],RunAutoFileWithTrueFlagOpensInAuto->[AssertAuto],RunAutoFileWithoutFlagOpensInManual->[AssertManual]]]
Tests if the flag is set after run successfully completes.
@monikaprabhu: Thanks for adding. Just note that this behavior will work for any file with the Manula Flag set, not just VM related files
@@ -157,10 +157,9 @@ func (e *DockerRegistryServiceController) waitForDockerURLs(ready chan<- struct{ return } - // after syncing, determine the current state and assume that we're up to date for it if you don't do this, - // you'll get an initial storm as you mess with all the dockercfg secrets every time you startup + // after syncing, make sure the dockercfgController is up to date before releasing it + // this controller will do a single scan of all existing secrets to be sure that they're up to date urls := e.getDockerRegistryLocations() - e.setRegistryURLs(urls...) e.dockercfgController.SetDockerURLs(urls...) close(e.dockerURLsInitialized) close(ready)
[syncRegistryLocationChange->[getDockerRegistryLocations,getRegistryURLs,setRegistryURLs],syncSecretUpdate->[getRegistryURLs]]
waitForDockerURLs waits for all the Docker urls to be initialized.
@mfojtik this is the line you found before. Removing this causes us to rescan all secrets once and then only on changes thereafter, right?
@@ -269,7 +269,7 @@ class TorchClassifierAgent(TorchAgent): return self.model.eval() - scores = self.score(batch) + scores = self.score(batch).float() probs = F.softmax(scores, dim=1) if self.threshold is None: _, prediction_id = torch.max(probs.cpu(), 1)
[TorchClassifierAgent->[add_cmdline_args->[add_cmdline_args],eval_step->[_get_labels,_format_interactive_output,_update_confusion_matrix],report->[_report_prec_recall_metrics],train_step->[_get_labels,_update_confusion_matrix]]]
Train on a single batch of examples. Train on a single batch of examples.
you generally want .float and then a `.type_as` later on, to switch BACK to fp16 if needed.
@@ -96,11 +96,7 @@ public class NettyClient extends AbstractClient { //.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getTimeout()) .channel(NioSocketChannel.class); - if (getConnectTimeout() < 3000) { - bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000); - } else { - bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getConnectTimeout()); - } + bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.max(3000,getConnectTimeout())); bootstrap.handler(new ChannelInitializer() {
[NettyClient->[doConnect->[getRemoteAddress,getMessage,getLocalHost,cause,info,channel,getVersion,isSuccess,getUrl,awaitUninterruptibly,isConnected,getConnectAddress,RemotingException,currentTimeMillis,close,isInfoEnabled,getConnectTimeout,connect,isClosed,removeChannelIfDisconnected],doOpen->[initChannel->[addLast,NettyCodecAdapter,getProperty,parseInt,getCodec,Socks5ProxyHandler,InetSocketAddress,getUrl,addFirst,getHeartbeat],getConnectTimeout,Bootstrap,handler,NettyClientHandler,ChannelInitializer,getUrl,option,channel],getChannel->[getUrl,isActive,getOrAddChannel],doDisConnect->[removeChannelIfDisconnected,getMessage,warn],DefaultThreadFactory,NioEventLoopGroup,getLogger,wrapChannelHandler]]
Open the NettyClientHandler.
It's better to keep code style same as other part. I suggest add a space before `getConnectTimeout()`
@@ -69,6 +69,12 @@ class Plugin_Locator { public function find_using_request_action( $allowed_actions ) { // phpcs:disable WordPress.Security.NonceVerification.Recommended + $woo_activating_plugins = $this->find_activating_plugins_in_woo_request(); + + if ( $woo_activating_plugins ) { + return $this->convert_plugins_to_paths( $woo_activating_plugins ); + } + /** * Note: we're not actually checking the nonce here because it's too early * in the execution. The pluggable functions are not yet loaded to give
[Plugin_Locator->[find_using_request_action->[convert_plugins_to_paths],find_using_option->[convert_plugins_to_paths],convert_plugins_to_paths->[find_directory_with_autoloader],find_current_plugin->[find_directory_with_autoloader]]]
Finds the actions that are allowed to be performed in the request.
It would be more consistent with the rest of the `Plugin_Locator` classes to have this as a public method called in the `Plugins_Handler` class.
@@ -507,7 +507,12 @@ namespace Dynamo.PackageManager private void ThisPropertyChanged(object sender, PropertyChangedEventArgs e) { - if (e.PropertyName == "PackageContents") CanSubmit(); + if (e.PropertyName == "PackageContents") + { + CanSubmit(); + BeginInvoke( + (Action)(() => (SubmitCommand).RaiseCanExecuteChanged())); + } } public static PublishPackageViewModel FromLocalPackage(DynamoViewModel dynamoViewModel, Package l)
[PublishPackageViewModel->[GetAllDependencies->[AllDependentFuncDefs,AllFuncDefs],AddDllFile->[AddAdditionalFile],UploadHandleOnPropertyChanged->[OnPublishSuccess],BeginInvoke->[BeginInvoke],GetAllNodeNameDescriptionPairs->[AllFuncDefs]]]
This method is called when a property of a package has changed. get the VM object from the parts.
Why is BeginInvoke required here? Why can we not call RaiseCanExecuteChanged directly? We might want to ask @pboyer whether `BeginInvoke` is required. `BeginInvoke` simply calls the action provided on the the `Dispatcher` belonging to the `DynamoViewModel`. This suggests to me that there are times when this view model would be created NOT on the UI thread. When does that happen?
@@ -1157,7 +1157,7 @@ namespace Dynamo.Models // If the path has a .dll or .ds extension it is a locally imported library if (extension == ".dll" || extension == ".ds") { - LibraryServices.ImportLibrary(path); + LibraryServices.ImportLibrary(path, true); continue; }
[DynamoModel->[InitializeNodeLibrary->[InitializeIncludedNodes],ForceRun->[ResetEngine],Paste->[Paste,Copy],RemoveWorkspace->[Dispose],UngroupModel->[DeleteModelInternal],ResetEngine->[ResetEngine],ShutDown->[OnShutdownCompleted,OnShutdownStarted,ShutDown],OpenXmlFileFromPath->[OnWorkspaceOpening],ResetEngineInternal->[RegisterCustomNodeDefinitionWithEngine,Dispose],SetPeriodicEvaluation->[ResetEngine],DumpLibraryToXml->[DumpLibraryToXml],AddWorkspace->[OnWorkspaceSaved],AddZeroTouchNodeToSearch->[AddZeroTouchNodeToSearch],AddHomeWorkspace->[RegisterHomeWorkspace],DeleteModelInternal->[Dispose],Dispose->[Dispose],SaveBackupFiles->[OnRequestWorkspaceBackUpSave]]]
Initialize the node library.
is this ever false?
@@ -135,8 +135,12 @@ function react(comment) { var reactButton = `<button class="crayons-btn crayons-btn--ghost crayons-btn--icon-left crayons-btn--s mr-1 reaction-like inline-flex reaction-button" id="button-for-comment-${ comment.id }" data-comment-id="${ comment.id }"> ${ iconSmallHeart } ${ iconSmallHeartFilled } - <span class="reactions-count" id="reactions-count-${ comment.id }">${ num }</span> - <span class="reactions-label hidden m:inline-block">like</span> + ${ + i18next.t('comments.num_likes', { + num: `<span class="reactions-count" id="reactions-count-${ comment.id }">${ num }</span>`, + likes: `<span class="reactions-label hidden m:inline-block">${i18next.t('comments.likes', { count: num })}</span>` + }) + } </button>`; return reactButton;
[No CFG could be retrieved]
Creates a button that displays the like reaction count for a comment.
This is not an ideal composition because it does not allow a language where the `likes` part wraps around `num`, but since the element gets update programmatically IIRC, we'd need to rewire the logic a bit.
@@ -154,7 +154,7 @@ func TopicFiltersForRunLog(jobID string) [][]common.Hash { hexJobID := common.BytesToHash([]byte(jobID)) jobIDZeroPadded := common.BytesToHash(common.RightPadBytes(hexutil.MustDecode("0x"+jobID), utils.EVMWordByteLen)) // RunLogTopic AND (0xHEXJOBID OR 0xJOBID0padded) - return [][]common.Hash{{RunLogTopic}, nil, {hexJobID, jobIDZeroPadded}} + return [][]common.Hash{{RunLogTopic}, {hexJobID, jobIDZeroPadded}} } // StartRunLogSubscription starts an InitiatorSubscription tailored for use with RunLogs.
[ToDebug->[ForLogger],ValidateRunLog->[ForLogger],Unsubscribe->[Unsubscribe],Unsubscribe]
StartRunLogSubscription starts a new subscription for the given job. receiveRunLog receives the log from the initiator and runs the job specific to the initiator.
`jobIDZeroPadded` is a little bit of a weird name for a `Hash`, because I think of zero padding strings, but not hashes (which are byte arrays).
@@ -294,6 +294,12 @@ func (rm *resmon) RegisterResource(ctx context.Context, custom := req.GetCustom() parent := resource.URN(req.GetParent()) protect := req.GetProtect() + + dependencies := make([]resource.URN, 0) + for _, dependingURN := range req.GetDependencies() { + dependencies = append(dependencies, resource.URN(dependingURN)) + } + props, err := plugin.UnmarshalProperties( req.GetObject(), plugin.MarshalOptions{Label: label, KeepUnknowns: true, ComputeAssetHashes: true}) if err != nil {
[Invoke->[Package,GetArgs,Wrapf,Invoke,Infof,UnmarshalProperties,Sprintf,Provider,GetTok,V,Errorf,ModuleMember,MarshalProperties],RegisterResourceOutputs->[Wrapf,Infof,UnmarshalProperties,Sprintf,GetUrn,GetOutputs,New,V,URN],Next->[Infof,V,Goal,Assert,Outputs,URN],RegisterResource->[GetType,GetParent,Infof,UnmarshalProperties,Sprintf,GetObject,GetProtect,All,V,GetName,NewGoal,QName,MarshalProperties,Type,GetCustom,URN],forkRun->[IgnoreClose,Wrapf,Decrypt,Assertf,Errorf,LanguageRuntime,Address,Run],Close->[Cancel],Iterate->[Wrap,forkRun],Sprintf,RegisterResourceMonitorServer,Serve]
RegisterResource registers a resource with the resource monitor unpack the response into a RegisterResourceResponse object that can be sent back to the language runtime.
nit: `dependencies := []resource.URN{}` Or if you want to be very slightly more efficient by avoiding reallocations in `append`, you could do `dependencies := make([]resource.URN, 0, len(req.GetDependencies()))`
@@ -76,8 +76,9 @@ module Engine # if set ability must be a :train_discount ability def min_price(ability: nil) return 1 unless from_depot? + return @price unless ability - ability&.discounted_price(self, @price) || @price + Array(ability).map { |a| a.discounted_price(self, @price) }.min end def from_depot?
[Train->[init_variants->[transform_values,unshift],min_price->[discounted_price,from_depot?],from_depot?->[is_a?],local?->[is_a?,include?,find],price->[name,dig],initialize->[select,init_variants],names_to_prices->[transform_values],include,remove_instance_variable,attr_writer,attr_accessor,instance_variable_set,attr_reader,each],require_relative]
Returns the minimum price of a train in the depot.
Add support for private companies that have multiple train discount abilities.
@@ -113,3 +113,7 @@ func (r *indexerNotifier) NotifyIssueChangeContent(doer *models.User, issue *mod func (r *indexerNotifier) NotifyIssueChangeTitle(doer *models.User, issue *models.Issue, oldTitle string) { issue_indexer.UpdateIssueIndexer(issue) } + +func (r *indexerNotifier) NotifyIssueChangeRef(doer *models.User, issue *models.Issue, oldRef string) { + issue_indexer.UpdateIssueIndexer(issue) +}
[NotifyNewPullRequest->[UpdateIssueIndexer],NotifyNewIssue->[UpdateIssueIndexer],NotifyIssueChangeTitle->[UpdateIssueIndexer],NotifyIssueChangeContent->[UpdateIssueIndexer],NotifyDeleteRepository->[DeleteRepoFromIndexer,DeleteRepoIssueIndexer],NotifyCreateIssueComment->[LoadDiscussComments,UpdateIssueIndexer,Error],NotifyUpdateComment->[LoadDiscussComments,UpdateIssueIndexer,Error],NotifyDeleteComment->[LoadDiscussComments,UpdateIssueIndexer,Error,LoadIssue]]
NotifyIssueChangeTitle notifies that the title of an issue has changed.
Since the branch name will not be indexed, I don't think we need to do the reindex.
@@ -367,9 +367,15 @@ async def find_binary(request: BinaryPathRequest) -> BinaryPaths: fi """ ) - script_digest = await Get( - Digest, - CreateDigest([FileContent(script_path, script_content.encode(), is_executable=True)]), + + # We get a UUID so that we ignore the cache every time, as this script depends on the state of + # the external environment. + script_digest, uuid = await MultiGet( + Get( + Digest, + CreateDigest([FileContent(script_path, script_content.encode(), is_executable=True)]), + ), + Get(UUID, UUIDRequest()), ) paths = []
[remove_platform_information->[FallibleProcessResult],fallible_to_exec_result_or_raise->[ProcessResult,ProcessExecutionFailure],InteractiveProcess->[from_process->[InteractiveProcess]],upcast_process->[MultiPlatformProcess],MultiPlatformProcess->[product_description->[ProductDescription]],find_binary->[BinaryPaths,Process]]
Find a binary in the system.
IIRC this will cause every rule that depends on this product to rerun every time. Is that really what we want?
@@ -178,6 +178,7 @@ public final class ProMoveUtils { final GameData data = ProData.getData(); + final HashMap<Territory, ArrayList<Move>> movesMap = new HashMap<>(); // Loop through all territories to attack for (final Territory t : attackMap.keySet()) {
[ProMoveUtils->[calculateMoveRoutes->[equals,getUnits,test,addAll,warn,getRouteForUnit,unitIsSea,containsKey,unitIsAir,territoryCanMoveLandUnitsThrough,getSecond,of,unitIsLand,carrierMustMoveWith,isEmpty,toSet,allMatch,getRound,anyMatch,getData,territoryCanMoveSeaUnitsThrough,getName,getFirst,get,contains,collect,territoryCanMoveAirUnitsAndNoAa,add,keySet],calculateBombingRoutes->[equals,getBombers,getData,get,isEmpty,territoryCanMoveAirUnitsAndNoAa,add,getRouteForUnit,unitIsAir,keySet,allMatch],calculateBombardMoveRoutes->[equals,getData,get,territoryCanMoveSeaUnitsThrough,isEmpty,unitCanBeMovedAndIsOwnedSea,add,getRouteForUnit,values,keySet,allMatch],calculateAmphibRoutes->[equals,isTransporting,addAll,test,remove,warn,put,getNeighbors,singletonList,Route,isEmpty,validateCanal,intValue,isWater,getRound,getData,territoryCanMoveSeaUnitsThrough,getDistance,getDistance_IgnoreEndForCondition,getName,get,add,getAmphibAttackMap,keySet],doMove->[pause,size,equals,getName,getEnd,getData,get,move,doMove,addAll,remove,getStart,warn,getRound]]]
Calculate the move routes for a given player. Add a unit to the list of remaining units to move. Find the next node in the network and add it to the routing table.
I'd appreciate it if you could try to use interface types wherever possible, i.e. `Map` instead of `HashMap` or `List` instead of `ArrayList`. This way we could easily swap out the implementation at a later time if it no longer serves our needs.
@@ -109,4 +109,15 @@ public class ConfigurationOverrideTest extends AbstractInfinispanTest { assertEquals(51, clusteringCfg.hash().numSegments()); } + public void testOverrideWithStore() { + final ConfigurationBuilder builder1 = new ConfigurationBuilder(); + builder1.loaders().addStore(DummyInMemoryCacheStoreConfigurationBuilder.class); + cm = new DefaultCacheManager(new GlobalConfigurationBuilder().build(), builder1.build()); + ConfigurationBuilder builder2 = new ConfigurationBuilder(); + builder2.read(cm.getDefaultCacheConfiguration()); + builder2.eviction().maxEntries(1000); + Configuration configuration = cm.defineConfiguration("named", builder2.build()); + assertEquals(1, configuration.loaders().cacheLoaders().size()); + } + } \ No newline at end of file
[ConfigurationOverrideTest->[testConfigurationOverride->[defineConfiguration,ConfigurationBuilder,build,strategy,getCache,maxEntries,read,assertEquals,createCacheManager],testOldConfigurationOverride->[defineConfiguration,getEvictionMaxEntries,getEvictionStrategy,build,getCache,getConfiguration,assertEquals,createCacheManager],testSimpleDistributedClusterModeDefault->[numOwners,ConfigurationBuilder,getCache,createClusteredCacheManager,cacheMode,numSegments,assertEquals,clustering],testSimpleDistributedClusterModeNamedCache->[numOwners,defineConfiguration,build,getCache,createClusteredCacheManager,cacheMode,numSegments,assertEquals,clustering],stopCacheManager->[stop]]]
Test simple distributed clustering mode with named cache.
the test does not assert anything... is this ok?
@@ -973,7 +973,7 @@ namespace System.Net.Test.Common { Assert.Equal(FrameType.RstStream, frame.Type); } - } while (frame.Type != FrameType.RstStream); + } while (frame.Type != FrameType.RstStream || (frame is RstStreamFrame rstStreamFrame && rstStreamFrame.ErrorCode != (int)ProtocolErrors.CANCEL)); Assert.Equal(streamId, frame.StreamId); }
[Http2LoopbackConnection->[DecodeInteger->[DecodeInteger],DecodeHeader->[DecodeInteger,DecodeLiteralHeader],ReadAndParseRequestHeaderAsync->[DecodeHeader,ReadRequestHeadersFrames],ReadPingAsync->[ReadPingAsync],DecodeLiteralHeader->[DecodeString,DecodeInteger],DecodeString->[DecodeInteger],ReadRequestBodyAsync->[ReadBodyAsync],Task->[ReadFrameAsync,IgnoreWindowUpdates,ShutdownSend],CreateAsync->[CreateAsync]]]
Blocks until the next frame is read.
We should not receive any RstStream frame other than one containing the CANCEL. As written, this will just ignore a RstStream that doesn't contain CANCEL.
@@ -4,7 +4,6 @@ # ------------------------------------ from typing import TYPE_CHECKING -from azure.core.configuration import Configuration from azure.core.pipeline import Pipeline from azure.core.pipeline.policies import UserAgentPolicy, DistributedTracingPolicy from azure.core.pipeline.transport import RequestsTransport
[KeyVaultClientBase->[__init__->[_create_config]]]
Creates a configuration object for a Key Vault client. Sets logging_policy, retry_policy, and custom_hook_policy if specified.
This is used in type checking (so it should be in the `if TYPE_CHECKING` block).
@@ -381,13 +381,9 @@ JsValueRef WScriptJsrt::LoadScript(JsValueRef callee, LPCSTR fileName, LPCSTR fi IfJsrtErrorSetGo(ChakraRTInterface::JsSetCurrentContext(calleeContext)); -#if ENABLE_TTD - errorCode = ChakraRTInterface::JsTTDRunScript(-1, fileContent, GetNextSourceContext(), fullPathNarrow, &returnValue); -#else errorCode = ChakraRTInterface::JsRunScriptUtf8(fileContent, GetNextSourceContext(), fullPathNarrow, &returnValue); -#endif - if (errorCode == JsNoError) + if(errorCode == JsNoError) { errorCode = ChakraRTInterface::JsGetGlobalObject(&returnValue); }
[No CFG could be retrieved]
This function is called when a script is loaded from a file. Sets the current context to the new context.
> if( [](start = 8, length = 3) nit: preferred style throughout our codebase is `if (` (with a space).
@@ -1129,10 +1129,10 @@ public class NoteEditor extends AnkiActivity { View editline_view = getLayoutInflater().inflate(R.layout.card_multimedia_editline, null); FieldEditText newTextbox = (FieldEditText) editline_view.findViewById(R.id.id_note_editText); - // Use custom implementation of ActionMode.Callback customize selection and insert menus - ActionModeCallback actionModeCallback = new ActionModeCallback(newTextbox); - newTextbox.setCustomSelectionActionModeCallback(actionModeCallback); if (Build.VERSION.SDK_INT >= 23) { + // Use custom implementation of ActionMode.Callback customize selection and insert menus + ActionModeCallback actionModeCallback = new ActionModeCallback(newTextbox); + newTextbox.setCustomSelectionActionModeCallback(actionModeCallback); newTextbox.setCustomInsertionActionModeCallback(actionModeCallback); }
[NoteEditor->[onStop->[onStop],SetNoteTypeListener->[onItemSelected->[setNote,updateDeckPosition,resetEditFields,duplicateCheck]],closeCardEditorWithCheck->[hasUnsavedChanges],onActivityResult->[closeNoteEditor,onActivityResult],onCollectionLoaded->[onCollectionLoaded],onKeyUp->[onKeyUp],onSaveInstanceState->[onSaveInstanceState],onDestroy->[onDestroy],onCreate->[onCreate],onOptionsItemSelected->[onOptionsItemSelected,saveNote],closeNoteEditor->[closeNoteEditor],setNote->[setNote,populateEditFields],updateFieldsFromMap->[populateEditFields,updateCards,getKeyByValue],onCreateOptionsMenu->[onCreateOptionsMenu],EditNoteTypeListener->[onItemSelected->[updateFieldsFromMap,updateCards,updateTags,populateEditFields]],populateEditFields->[populateEditFields]]]
Populates the edit fields with the given fields. Add the multimedia editor button to the fields layout.
this line is duplicated now?
@@ -591,7 +591,7 @@ public class StreamingModeExecutionContext extends DataflowExecutionContext<Step timerId, "", cleanupTime, - cleanupTime, + window.maxTimestamp(), TimeDomain.EVENT_TIME); }
[StreamingModeExecutionContext->[fetchSideInput->[fetchSideInput],StreamingModeSideInputReader->[isEmpty->[isEmpty],contains->[contains],of->[StreamingModeSideInputReader]],flushState->[getSerializedKey,flushState],StepContext->[start->[getSerializedKey,getWorkToken],addBlockingSideInputs->[addBlockingSideInput],getSideInputNotifications->[getSideInputNotifications],issueSideInputFetch->[fetchSideInput],getStateFamily],invalidateCache->[getSerializedKey],StreamingModeExecutionStateRegistry->[createState->[StreamingModeExecutionState]],getWorkToken->[getWorkToken],start->[start],UserStepContext->[addBlockingSideInput->[addBlockingSideInput],addBlockingSideInputs->[addBlockingSideInputs],getSideInputNotifications->[getSideInputNotifications],getNextFiredTimer->[getNextFiredUserTimer],stateInternals->[stateInternals],issueSideInputFetch->[issueSideInputFetch],timerInternals->[userTimerInternals]],getCachedReader->[getSerializedKey]]]
Sets a state cleanup timer.
We also seem to set GC timers in ReduceFnRunner.java. @kennknowles do you know why we have both?
@@ -83,7 +83,6 @@ list_set_item_op = method_op( error_kind=ERR_FALSE, emit=call_emit('CPyList_SetItem')) - # list.append(obj) list_append_op = method_op( name='append',
[emit_new->[enumerate,emit_line,len],emit_len->[emit_declaration,temp_name,emit_line],func_op,binary_op,custom_op,name_emit,call_emit,method_op,call_negative_bool_emit,name_ref_op]
Defines primitive list operations: setting an item at a given index and appending an object to a list.
I deleted a blank line here and found that one/two blank line(s) are used interchangeably. I am surprised that flake8 is OK with this.
@@ -486,8 +486,15 @@ func (pt *programTester) testLifeCycleInitAndDestroy() error { } else { contract.IgnoreError(os.RemoveAll(tmpdir)) } - }() - } + } else { + // When tmpdir is empty, we ran "in tree", which means we wrote output + // to the "command-output" folder in the projdir, and we should clean + // it up if the test passed + if testFinished && !pt.t.Failed() { + contract.IgnoreError(os.RemoveAll(filepath.Join(projdir, commandOutputFolderName))) + } + } + }() err = pt.testLifeCycleInitialize(projdir) if err != nil {
[yarnCmd->[getYarnBin],testLifeCycleDestroy->[GetDebugUpdates,runPulumiCommand],testEdit->[previewAndUpdate],pulumiCmd->[getBin,GetDebugLogLevel],performExtraRuntimeValidation->[GetStackName,runPulumiCommand],copyTestToTemporaryDirectory->[GetStackName,getBin],previewAndUpdate->[GetDebugUpdates,runPulumiCommand],prepareGoProject->[getGoBin,runCommand],prepareProjectDir->[getProjinfo,prepareProject],runPulumiCommand->[pulumiCmd,runCommand],prepareNodeJSProject->[runYarnCommand],testLifeCycleInitialize->[GetStackName,runPulumiCommand],runYarnCommand->[yarnCmd,runCommand]]
testLifeCycleInitAndDestroy initializes and destroys the test project.
This is a bit scary, but perhaps better than leaving a dirty worktree?
@@ -362,7 +362,16 @@ class Concretizer(object): # compiler_for_spec Should think whether this can be more # efficient def _proper_compiler_style(cspec, aspec): - return spack.compilers.compilers_for_spec(cspec, arch_spec=aspec) + compilers = spack.compilers.compilers_for_spec( + cspec, arch_spec=aspec + ) + # If the spec passed as argument is concrete we want to check + # the versions match exactly + if (cspec.concrete and compilers and + cspec.version != compilers[0].version): + return [] + + return compilers if spec.compiler and spec.compiler.concrete: if (self.check_for_compiler_existence and not
[Concretizer->[_adjust_target->[target_from_package_preferences],adjust_target->[_make_only_one_call],concretize_compiler->[_proper_compiler_style,concretize_version],choose_virtual_or_external->[_valid_virtuals_and_externals]],concretize_specs_together->[make_concretization_repository]]
Checks if a compiler is already used for the given spec and if so uses it. Checks if a node has abstract compiler information.
@alalazo if there are compilers with mixed `x.y` and `x.y.z` versions, should this filter the list instead of just checking one of the elements?