patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -142,7 +142,9 @@ function declareExtension(name, version, hasCss, opt_noTypeCheck, function buildExtensions(options) { for (var key in extensions) { var e = extensions[key]; - buildExtension(e.name, e.version, e.hasCss, options, e.extraGlobs); + var o = Object.assign({}, options); + o = Object.assign(o, e); + buildExtension(e.name, e.version, e.hasCss, o, e.extraGlobs); } }
[No CFG could be retrieved]
Declare and build the AMP extension. Compiles a single object into a single file.
Why the double assign? `var o = Object.assign({}, options, e)`
@@ -161,7 +161,7 @@ def backtest(args) -> DataFrame: if record and record.find('trades') >= 0: logger.info('Dumping backtest results') misc.file_dump_json('backtest-result.json', records) - labels = ['currency', 'profit_percent', 'profit_BTC', 'duration', 'profit', 'loss'] + labels = ['currency', 'profit_percent', 'profit_BTC', 'duration'] return DataFrame.from_records(trades, columns=labels)
[start->[get_timeframe,generate_text_table,backtest],backtest->[get_trade_entry]]
Returns a DataFrame of the n - th order order records. Create a DataFrame of records from the backtest. This function is called from the command line. It will generate the text table and print the.
Why the drop?
@@ -5534,7 +5534,13 @@ void Parser::ParseFncDeclHelper(ParseNodeFnc * pnodeFnc, LPCOLESTR pNameHint, us } } - if (isTopLevelDeferredFunc || (m_InAsmMode && m_deferAsmJs)) + if (isTopLevelDeferredFunc || + (m_InAsmMode && m_deferAsmJs) || + (buildAST && + pnodeFnc->IsNested() && + pnodeFncSave != nullptr && + m_currDeferredStub != nullptr && + (m_currDeferredStub + (pnodeFncSave->nestedCount - 1))->ichMin == pnodeFnc->ichMin)) { fDeferred = true;
[No CFG could be retrieved]
Checks if a variable is found in the list of arguments. Check if the current function is a nested function declaration or expression.
Does the fact that we have a stub for a function necessarily mean that we want to defer it? I think we need to leave the deferral decision alone here and instead change the way parsing makes use of the stubs *if* we choose to defer.
@@ -53,7 +53,8 @@ class CustomFieldsController < ApplicationController format.json do render json: { html: render_to_string( - partial: 'samples/delete_custom_field_modal_body.html.erb' + partial: 'samples/delete_custom_field_modal_body.html.erb', + locals: { column_index: params[:column_index] } ) } end
[CustomFieldsController->[check_create_permissions->[can_create_custom_field_in_organization],create->[organization_custom_field_path,new,respond_to,render,organization_custom_field_destroy_html_path,user,to_json,json,name,save,id,organization],destroy_html->[render_to_string,render,respond_to,json],load_vars->[find_by_id],load_vars_nested->[find_by_id],check_destroy_permissions->[can_delete_custom_field],check_update_permissions->[can_edit_custom_field],update->[render,respond_to,update_attributes,to_json,json,save],custom_field_params->[permit],destroy->[destroy,render,respond_to,json],before_action]]
Renders the html of a single custom field item with a status of 200.
Line is too long. [106/80]
@@ -104,9 +104,16 @@ public class AccessResource extends ApplicationResource { private static final String OIDC_REQUEST_IDENTIFIER = "oidc-request-identifier"; private static final String OIDC_ERROR_TITLE = "Unable to continue login sequence"; + private static final String OPEN_ID_CONNECT_SUPPORT_IS_NOT_CONFIGURED_MSG = "OpenId Connect support is not configured"; + private static final String REVOKE_ACCESS_TOKEN_LOGOUT = "oidc_access_token_logout"; + private static final String ID_TOKEN_LOGOUT = "oidc_id_token_logout"; + private static final String STANDARD_LOGOUT = "oidc_standard_logout"; + private static final Pattern REVOKE_ACCESS_TOKEN_LOGOUT_FORMAT = Pattern.compile("(.google.com)"); + private static final Pattern ID_TOKEN_LOGOUT_FORMAT = Pattern.compile("(.okta)"); private static final String AUTHENTICATION_NOT_ENABLED_MSG = "User authentication/authorization is only supported when running over HTTPS."; + private X509CertificateExtractor certificateExtractor; private X509AuthenticationProvider x509AuthenticationProvider; private X509PrincipalExtractor principalExtractor;
[AccessResource->[getCookieValue->[getValue,getName,equals],removeOidcRequestCookie->[Cookie,setHttpOnly,setSecure,setPath,setMaxAge,addCookie],knoxCallback->[isSecure,isKnoxEnabled,getNiFiUri,forwardToMessagePage,sendRedirect],getAccessStatus->[getMessage,authenticate,setIdentity,getIdentity,JwtAuthenticationRequestToken,X509AuthenticationRequestToken,name,getNiFiUser,getRemoteAddr,AuthenticationNotSupportedException,AccessDeniedException,isSecure,AccessStatusDTO,InvalidAuthenticationException,substringAfterLast,AdministrationException,AccessStatusEntity,build,extractClientCertificate,setMessage,setAccessStatus,getHeader,setStatus],createDownloadToken->[isSecure,generateDownloadToken,create,generateResourceUri,build,IllegalStateException,getIdentity,AccessDeniedException,OtpAuthenticationToken,getNiFiUser],logOut->[isSecure,getMessage,error,build,IllegalStateException,getNiFiUserIdentity,isEmpty,info,getHeader,logOutUsingAuthHeader],oidcLogout->[isSecure,getEndSessionEndpoint,generateResourceUri,build,IllegalStateException,sendRedirect,toString,isOidcEnabled],getOidcCallback->[generateResourceUri],validateTokenExpiration->[convert,format,warn],getNiFiUri->[generateResourceUri,substringBeforeLast],forwardToMessagePage->[setAttribute,getContext,forward],knoxLogout->[generateResourceUri,sendRedirect],oidcExchange->[isSecure,AuthenticationNotSupportedException,getCookieValue,IllegalArgumentException,getJwt,removeOidcRequestCookie,build,IllegalStateException,getCookies,isOidcEnabled],createAccessToken->[getMessage,create,authenticate,generateResourceUri,getIdentity,getIssuer,mapIdentity,isBlank,IllegalArgumentException,LoginCredentials,getIdentityMappings,LoginAuthenticationToken,AuthenticationNotSupportedException,validateTokenExpiration,IllegalStateException,isSecure,build,AdministrationException,generateSignedToken,getExpiration],createAccessTokenFromTicket->[getTimeDuration,validateKerberosTicket,create,getMessage,generateResourceUri,mapIdentity,IllegalArgumentExcep
tion,isKerberosSpnegoSupportEnabled,getIdentityMappings,LoginAuthenticationToken,getKerberosAuthenticationExpiration,AuthenticationNotSupportedException,validateTokenExpiration,IllegalStateException,AccessDeniedException,isValidKerberosHeader,isSecure,getName,build,generateSignedToken,getHeader],oidcCallback->[getCookieValue,create,getMessage,removeOidcRequestCookie,AuthorizationCodeGrant,indicatesSuccess,isOidcEnabled,getRequestUri,exchangeAuthorizationCode,getOidcCallback,isStateValid,error,getState,getNiFiUri,parse,forwardToMessagePage,isSecure,getDescription,sendRedirect,getCookies,getAuthorizationCode],createUiExtensionToken->[isSecure,AuthenticationNotSupportedException,create,generateResourceUri,build,generateUiExtensionToken,getIdentity,AccessDeniedException,OtpAuthenticationToken,getNiFiUser],oidcRequest->[isSecure,Cookie,createState,setHttpOnly,setSecure,build,forwardToMessagePage,sendRedirect,setPath,toString,setMaxAge,isOidcEnabled,addCookie],getLoginConfig->[isSecure,build,setConfig,AccessConfigurationEntity,AccessConfigurationDTO,setSupportsLogin],knoxRequest->[isSecure,isKnoxEnabled,build,generateResourceUri,forwardToMessagePage,sendRedirect,toString],getLogger]]
This class is used to provide a RESTful interface to obtain an access token or checking access Get NiFi Access Configuration.
These two Patterns as written appear to be more permissive than intended since the period character class will match any single character. Is that the intent?
@@ -18,12 +18,14 @@ class Geopm(AutotoolsPackage): msr-safe kernel module by your administrator.""" homepage = "https://geopm.github.io" - url = "https://github.com/geopm/geopm/releases/download/v0.4.0/geopm-0.4.0.tar.gz" + url = "https://github.com/geopm/geopm/releases/download/v1.0.0/geopm-1.0.0.tar.gz" git = "https://github.com/geopm/geopm.git" # Add additional proper versions and checksums here. "spack checksum geopm" version('develop', branch='dev') version('master', branch='master') + version('1.1.0', sha256='5f9a4df37ef0d64c53d64829d46736803c9fe614afd8d2c70fe7a5ebea09f88e') + version('1.0.0', sha256='24fe72265a7e44d62bdfe49467c49f0b7a649131ddda402d763c00a49765e1cb') version('1.0.0-rc2', sha256='c6637df54728ded31fd682f39a07dffee45883f350e6dbd13e1496dd50243ffd', url='https://github.com/geopm/geopm/releases/download/v1.0.0%2Brc2/geopm-1.0.0+rc2.tar.gz') version('1.0.0-rc1', sha256='f8a2e5c384a15e9663f409de478b6372cd63e63a28d4701a33ac043fc27905e0',
[Geopm->[configure_args->[Version,enable_or_disable,with_or_without,extend],variant,depends_on,version]]
GEOPM is an extensible power management framework targeting HPC and a tool to Get a list of variants of all the possible version of a sequence.
You can probably remove these release candidates if you want. They shouldn't be needed now that 1.0 is out.
@@ -186,8 +186,9 @@ def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0, logger.info("Number of ECG events detected : %d (average pulse %d / " "min.)" % (n_events, average_pulse)) - ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events), - event_id * np.ones(n_events)] + ecg_events = np.array([ecg_events + raw.first_samp, + np.zeros(n_events, int), + event_id * np.ones(n_events, int)]).T return ecg_events, idx_ecg, average_pulse
[find_ecg_events->[qrs_detector],create_ecg_epochs->[find_ecg_events]]
Find ECG peaks and events. Get the index of the ECG channel and the average pulse of the last non - zero.
np.c_ and np.r_ is not recommended anymore?
@@ -524,6 +524,16 @@ angular.module('ngResource', ['ng']). // jshint +W018 if (action.isArray) { value.length = 0; + // Decorate the array with the properties on the response data + if (action.arrayDecorate) { + for (var i in data) { + if (data.hasOwnProperty(i) + && !/^[0-9]+$/.test(i) + ) { + value[i] = data[i]; + } + } + } forEach(data, function(item) { value.push(new Resource(item)); });
[No CFG could be retrieved]
The main entry point for the resource. resolves the promise if it s not already resolved.
I don't think this needs to be split up so much --- should probably declare the regexp higher up as a variable though
@@ -793,9 +793,11 @@ enum RedRidingHood SPELL_WIDE_SWIPE = 30761, CREATURE_BIG_BAD_WOLF = 17521, -}; -#define GOSSIP_GRANDMA "What phat lewtz you have grandmother?" + // Texts + GOSSIP_MENU_GRANDMOTHER = 7441, + GOSSIP_OPTION_GRANDMOTHER = 0 +}; class npc_grandmother : public CreatureScript {
[boss_tinhead->[boss_tinheadAI->[JustDied->[SummonCroneIfReady]]],boss_strawman->[boss_strawmanAI->[JustDied->[SummonCroneIfReady]]],UpdateAI->[Resurrect,AttackStart,PretendToDie],DamageTaken->[PretendToDie],boss_romulo->[boss_romuloAI->[DamageTaken->[PretendToDie],EnterCombat->[AttackStart],UpdateAI->[Resurrect]]],boss_roar->[boss_roarAI->[JustDied->[SummonCroneIfReady]]],boss_julianne->[boss_julianneAI->[Reset->[Resurrect]]],boss_dorothee->[boss_dorotheeAI->[JustDied->[SummonCroneIfReady],Reset->[Initialize]]]]
The NPC_GRANDMOTHER script.
just use `GOSSIP_ICON_CHAT` for this
@@ -177,6 +177,12 @@ export class AmpAutocomplete extends AMP.BaseElement { buildCallback() { this.action_ = Services.actionServiceForDoc(this.element); this.viewport_ = Services.viewportForDoc(this.element); + const viewer = Services.viewerForDoc(this.getAmpDoc()); + this.ssrTemplateHelper_ = new SsrTemplateHelper( + TAG, + viewer, + this.templates_ + ); const jsonScript = this.element.querySelector( 'script[type="application/json"]'
[No CFG could be retrieved]
The developer specified value of the autocomplete attribute on the form ancestor that contains the input field. inputType attribute is text or search.
Use `this.element` for "forDoc" services where possible.
@@ -991,9 +991,15 @@ func IgnoreRequest(ctx context.Context, g *libkb.GlobalContext, teamname, userna return err } arg := apiArg(ctx, "team/deny_access") - arg.Args.Add("team", libkb.S{Val: teamname}) + arg.Args.Add("team", libkb.S{Val: teamName}) arg.Args.Add("uid", libkb.S{Val: uv.Uid.String()}) - _, err = g.API.Post(arg) + if _, err := g.API.Post(arg); err != nil { + t, err := GetForTeamManagementByStringName(ctx, g, teamName, true) + if err != nil { + return err + } + t.notify(ctx, keybase1.TeamChangeSet{Misc: true}) + } return err }
[MapUIDsToUsernamePackages,GetProofSet,SeitanIKeyV2,CTimeTracer,myRole,IsOpen,ChangeMembership,Post,New,InviteMember,deleteSubteam,AsTeam,NewLoadUserArgWithContext,Time,ImplicitAdmins,OpenTeamJoinAs,IsSubteam,TeamInviteTypeFromString,Stage,Generation,WithPublicKeyOptional,FindActiveInvite,Finish,UserVersionByUID,GenerateSignature,IsMember,Exists,Leave,GetBool,Now,PostTeamSettings,GetError,Add,GenerateAcceptanceKey,MemberRole,GetNormalizedName,ImplicitTeamDisplayName,CTrace,GetUPAKLoader,KBFSTLFID,InviteSeitan,IsWriterOrAbove,AddMemberByUsername,ToLower,InviteSeitanV2,NewLoadUserArg,IsTeamName,HasActiveInvite,WithTimeout,chain,ToTeamName,Rotate,ToTime,Members,WithForcePoll,ResolveFullExpressionNeedUsername,FieldsFunc,UsersWithRole,Eq,GenerateTeamInviteID,GetUsername,CDebugf,CTraceTimed,TeamID,WithNetContext,currentUserUV,TrimSpace,IsPublic,TeamInviteName,NewNormalizedUsername,ParseAddress,LoadMe,TeamInviteIDFromString,ToTeamID,InviteEmailMember,Name,postTeamInvites,AtKey,GetDecode,Sprintf,LoadV2,GenerateSIKey,String,AssociateWithTLFID,GetRunMode,GetUID,RootAncestorName,WithName,CWarningf,Delete,NormalizedUsername,NewHTTPArgs,ExportToTeamPlusApplicationKeys,CompleteSocialInvitesFor,IsRootTeam,deleteRoot,IsNil,SeitanIKey,Errorf,NewResolveThenIdentify2,ResolveWithBody,IsOrAbove,GetTeamLoader,Unix,RunEngine,IsTeamID,ChangeMembershipPermanent,AsUserOrTeam,NewAPIArgWithNetContext,TeamNameFromString,F,parseSocial,GetNormalizedUsername,Load]
ListMyAccessRequests returns a list of all the names of the teams who have access to ChangeTeamSettings changes the user s ID to the given team name.
Is there a better way to do this?
@@ -608,8 +608,13 @@ const OSSL_ALGORITHM *ossl_provider_query_operation(const OSSL_PROVIDER *prov, * never knows. */ static const OSSL_ITEM param_types[] = { - { OSSL_PARAM_UTF8_PTR, "openssl-version" }, - { OSSL_PARAM_UTF8_PTR, "provider-name" }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_OPENSSL_VERSION }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_PROV_NAME }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_MODULE_FILENAME }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_MODULE_MAC }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_INSTALL_VERSION }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_INSTALL_MAC }, + { OSSL_PARAM_UTF8_PTR, OSSL_PROV_PARAM_INSTALL_STATUS }, { 0, NULL } };
[No CFG could be retrieved]
Private functions for the provider region ParamList OSSLParameter OSSLParameter OSSLParameterOSSLParameterProvider.
I'm unsure about the added names here, for the simple reason that they aren't anything that the provider functionality offers *internally*, but are rather stuff it receives from external sources. Quite frankly, the get_types function needs a review, perhaps even its format.
@@ -121,4 +121,11 @@ LocalStream.prototype.getId = function () { return this.stream.getTracks()[0].id; }; +LocalStream.prototype.stopTracks = function() { + var originalStream = this.getOriginalStream(); + originalStream && originalStream.getTracks().forEach(function(track) { + track.stop(); + }) +}; + module.exports = LocalStream;
[No CFG could be retrieved]
Get the id of the local track.
Could you please indent with four space to keep code consistent.
@@ -106,7 +106,7 @@ class TopicView end def show_read_indicator? - return false unless @user || topic.private_message? + return false if !@user || !topic.private_message? topic.allowed_groups.any? do |group| group.publish_read_state? && group.users.include?(@user)
[TopicView->[relative_url->[relative_url],participant_count->[participant_count],title->[title],setup_filtered_posts->[summary,has_deleted?],initialize->[chunk_size,print_chunk_size],filter_posts_by_ids->[filter_post_types],image_url->[image_url],unfiltered_posts->[filter_post_types],filter_posts_by_post_number->[get_sort_order],calculate_page->[is_mega_topic?],published_page->[published_page],like_count->[like_count]]]
Checks if a node is a read indicator for the current user.
That's why I _hate_ `unless` when there's more than one condition
@@ -163,9 +163,14 @@ func (b *broadcaster) awaitInitialSubscribers() { } func (b *broadcaster) Register(listener Listener, opts ListenerOpts) (unsubscribe func()) { - if len(opts.Logs) < 1 { + if len(opts.Logs) == 0 && len(opts.LogsWithTopics) == 0 { logger.Fatal("Must supply at least 1 Log to Register") } + + if len(opts.Logs) > 0 && len(opts.LogsWithTopics) > 0 { + logger.Fatal("Must use either Logs or LogsWithTopics but not both") + } + b.addSubscriber.Deliver(registration{listener, opts}) return func() { b.rmSubscriber.Deliver(registration{listener, opts})
[awaitInitialSubscribers->[onAddSubscribers,startResubscribeLoop,Notify,onRmSubscribers,Done,AwaitDependents],Stop->[StopOnce,Wait],onNewHeads->[sendLogs,Tracew,EthFinalityDepth,Errorf,RetrieveLatestAndClear,getLogsToSend],onAddSubscribers->[addSubscriber,Errorf,Retrieve,Debugw],IsConnected->[IsSet],TrackedAddressesCount->[LoadUint32],startResubscribeLoop->[appendLogChannel,Warn,createSubscription,Unsubscribe,backfillLogs,eventLoop,addressesAndTopics,StoreUint32,UnSet,Err,Set,Logs,Done,Debug],Start->[StartOnce,awaitInitialSubscribers,Warn,Debugw,Add],eventLoop->[Stop,NewTicker,onNewHeads,onAddSubscribers,Notify,onNewLog,onRmSubscribers,Debug],OnNewLongestChain->[Deliver],onNewLog->[addLog,isAddressRegistered],onRmSubscribers->[removeSubscriber,Errorf,Retrieve,Debugw],Register->[Deliver,Fatal],NewDependentAwaiter,NewMailbox,New]
Register registers a listener with the broadcaster. It returns a function that unsubscribes.
Will this crash the app on a bad job spec? Or must it necessarily be a programming error?
@@ -590,7 +590,13 @@ class GraphConverter: if original_type_name in MODULE_EXCEPT_LIST: pass # do nothing elif original_type_name == OpTypeName.LayerChoice: - m_attrs = self._handle_layerchoice(module) + graph = Graph(ir_model, -100, module_name, _internal=True) # graph_id is not used now + candidate_name_list = [f'layerchoice_{module.label}_{cand_name}' for cand_name in module.names] + for cand_name, cand in zip(candidate_name_list, module): + cand_type = '__torch__.' + get_importable_name(cand.__class__) + graph.add_node(cand_name, cand_type, get_init_parameters_or_fail(cand)) + graph._register() + return graph, {'mutation': 'layerchoice', 'label': module.label, 'candidates': candidate_name_list} elif original_type_name == OpTypeName.InputChoice: m_attrs = self._handle_inputchoice(module) elif original_type_name == OpTypeName.ValueChoice:
[convert_to_graph->[GraphConverter],GraphConverter->[_add_edge->[_add_edge_handle_source_node],refine_graph->[merge_aten_slices,remove_unconnected_nodes],convert_module->[refine_graph,_handle_layerchoice,_handle_valuechoice,handle_graph_nodes,_handle_inputchoice],handle_graph_nodes->[handle_if_node->[_add_edge,handle_if_condition],handle_if_condition->[_generate_expr->[_generate_expr],_generate_expr],handle_function_callmethod->[_add_edge,_remove_mangle,_add_edge_handle_source_node,handle_graph_nodes],handle_single_node->[handle_prim_attr_node,handle_if_node,_remove_mangle,_add_edge,create_prim_constant_node,handle_function_callmethod],handle_single_node]]]
Convert a module to its graph ir along with its input arguments Initialize the IrGraph and return it.
this subgraph is a little strange because it has no connection, which would be not friendly to graph visualization
@@ -55,7 +55,7 @@ class MaskedLanguageModelingReader(DatasetReader): import sys # You can call pytest with either `pytest` or `py.test`. if 'test' not in sys.argv[0]: - raise RuntimeError('_read is only implemented for unit tests at the moment') + logger.error('_read is only implemented for unit tests at the moment') with open(file_path, "r") as text_file: for sentence in text_file: tokens = self._tokenizer.tokenize(sentence)
[MaskedLanguageModelingReader->[text_to_instance->[Token,append,tokenize,len,IndexField,ValueError,enumerate,Instance,TextField,ListField],__init__->[SingleIdTokenIndexer,super,JustSpacesWordSplitter,WordTokenizer],_read->[text_to_instance,Token,tokenize,RuntimeError,open]],register,getLogger]
Reads a file and yields a sequence of objects.
How come these can't be `RuntimeErrors`?
@@ -52,6 +52,7 @@ public class DefaultMuleEventTestCase extends AbstractMuleContextTestCase { @Before public void before() throws Exception { + registerServices(muleContext); flow = getTestFlow(muleContext); messageContext = DefaultEventContext.create(flow, TEST_CONNECTOR); muleEvent = Event.builder(messageContext).message(muleMessage).exchangePattern(REQUEST_RESPONSE).flow(flow).build();
[DefaultMuleEventTestCase->[defaultProcessingStrategyOneWay->[thenReturn,build,equalTo,isTransacted,isSynchronous,spy,assertThat,DefaultFlowProcessingStrategy],before->[getTestFlow,create,build],inboundPropertyForceSyncOneWay->[thenReturn,build,equalTo,isTransacted,isSynchronous,assertThat,spy],setSessionVariableCustomDataType->[build,setProperty,getPropertyDataType,like,assertThat],nonBlockingProcessingStrategyRequestResponse->[thenReturn,build,equalTo,isTransacted,isSynchronous,assertThat,spy,NonBlockingProcessingStrategy],nonBlockingProcessingStrategyOneWay->[thenReturn,build,equalTo,isTransacted,isSynchronous,assertThat,spy,NonBlockingProcessingStrategy],syncProcessingStrategyRequestResponse->[thenReturn,build,equalTo,isTransacted,isSynchronous,assertThat,spy],syncProcessingStrategyOneWay->[thenReturn,build,equalTo,isTransacted,isSynchronous,assertThat,spy],inboundPropertyForceSyncRequestResponse->[thenReturn,build,equalTo,isTransacted,isSynchronous,assertThat,spy],defaultProcessingStrategyRequestResponse->[thenReturn,build,equalTo,isTransacted,isSynchronous,spy,assertThat,DefaultFlowProcessingStrategy],setFlowVariableDefaultDataType->[build,assertThat,like,getDataType],setSessionVariableDefaultDataType->[like,assertThat,setProperty,getPropertyDataType],setFlowVariableCustomDataType->[build,assertThat,like,getDataType],build,mock]]
This method is called before the test.
This test extends from AbstractMuleContextTestCase, should not be the services configured there for every sub class?
@@ -280,9 +280,9 @@ public final class GameRunner { ProcessRunnerUtil.populateBasicJavaArgs(commands); final String prefix = "-D"; commands.add(prefix + TRIPLEA_CLIENT + "=true"); - commands.add(prefix + TRIPLEA_PORT + "=" + description.getPort()); + commands.add(prefix + TRIPLEA_PORT + "=" + description.getHostedBy().getPort()); commands.add(prefix + TRIPLEA_HOST + "=" + description.getHostedBy().getAddress().getHostAddress()); - commands.add(prefix + TRIPLEA_NAME + "=" + messengers.getMessenger().getLocalNode().getName()); + commands.add(prefix + TRIPLEA_NAME + "=" + messengers.getLocalNode().getName()); commands.add(Services.loadAny(ApplicationContext.class).getMainClass().getName()); ProcessRunnerUtil.exec(commands); }
[GameRunner->[joinGame->[showMessageDialog],start->[start],Title->[of->[Title]],showConfirmDialog->[showConfirmDialog],clientLeftGame->[showMainFrame],showMessageDialog->[showMessageDialog]]]
Join a game.
So you're sure that network compatibility isn't important for this release? We might want to ask @ron-murhammer and @ssoloff if they're ok with that. (The 2 values are only equivalent in 1.10, they differ in 1.9 depending on the network situation, probably no difference for bots though). I any case you probably want to remove the remaining usage of this method in the lobby controller as well for consistency. I kept it there for compatibility reasons.
@@ -72,7 +72,8 @@ DEFAULT_EXECUTION_OPTIONS = ExecutionOptions( remote_store_chunk_bytes=1024*1024, remote_store_chunk_upload_timeout_seconds=60, remote_store_rpc_retries=2, - process_execution_parallelism=multiprocessing.cpu_count()*2, + local_execution_parallelism=multiprocessing.cpu_count(), + remote_execution_parallelism=128, process_execution_cleanup_local_dirs=True, remote_execution_process_cache_namespace=None, remote_instance_name=None,
[GlobalOptionsRegistrar->[register_options->[register_bootstrap_options]],ExecutionOptions]
This function is called by the bootstrap framework to configure the execution options. Register bootstrap options.
Nitpick: since we have so many of these their names should be namespaced probably. So `process_execution_local_parallelism`, here and elsewhere. Sorry!
@@ -979,13 +979,14 @@ public class SourceColumn implements BeforeShowEvent.Handler, // apply (dynamic) doc property defaults SourceColumn.applyDocPropertyDefaults(newDoc, true, userPrefs_); - EditingTarget target = - addTab(newDoc, Source.OPEN_INTERACTIVE); + EditingTarget target = addTab(newDoc, Source.OPEN_INTERACTIVE); - if (contents != null) + // toggle save commands after the editor has been opened, + // in case this action has created and focused a new editor + if (activeEditor_ != null) { target.forceSaveCommandActive(); - manageSaveCommands(active); + manageSaveCommands(true); } if (resultCallback != null)
[SourceColumn->[setActiveEditor->[onActivate],closeDoc->[closeTab],manageVcsCommands->[getNextActiveEditor],setPhysicalTabIndex->[selectTab],manageChevronVisibility->[manageChevronVisibility],createWidget->[asWidget],manageSaveCommands->[isSaveCommandActive,getNextActiveEditor],cancelTabDrag->[cancelTabDrag],manageCommands->[manageCommands,hasDoc,getNextActiveEditor],onTabClose->[selectTab],showUnsavedChangesDialog->[showUnsavedChangesDialog],newDoc->[onError->[newDoc],onResponseReceived->[newDoc,addTab,manageSaveCommands],ensureVisible,newDoc],closeTabIndex->[manageCommands,getTabCount,fireDocTabsChanged],onSelection->[manageCommands,onActivate,isDebugSelectionPending,clearPendingDebugSelection],initialSelect->[getTabCount,selectTab],onActivate->[onActivate],fireDocTabsChanged->[manageChevronVisibility,syncTabOrder],manageRSConnectCommands->[getNextActiveEditor],onBeforeShow->[newDoc,getTabCount,onBeforeShow],insertCode->[insertCode],addTab->[closeTab,addTab,createWidget,getPhysicalTabIndex,selectTab,fireDocTabsChanged],closeTabs->[closeTab,asWidget],ensureVisible->[ensureVisible],selectTab->[selectTab],getUntitledNum->[getUntitledNum],closeTab->[closeTab],getNextDefaultName->[getUntitledNum],manageMultiTabCommands->[hasDoc,getNextActiveEditor],onTabReorder->[setPhysicalTabIndex,syncTabOrder,fireDocTabsChanged],moveTab->[moveTab],isSaveCommandActive->[isSaveCommandActive],asWidget->[asWidget],getSourceCommand->[getSourceCommand],getTabCount->[getTabCount]]]
Creates a new document in the database.
The active parameter actually represents whether or not this is this column is the current active column. (`activeEditor_` is only populated for the active column) You'll see in `manageSaveCommands` we use `getNextActiveEditor()` instead of directly accessing the active editor, this is a function that will return the editor that would become active if the column became active. When you open a new doc it gets set to the active editor so this code works fine except in the very unlikely case where this function gets called after the user has moved on and is working with a different file. Really it doesn't make a difference, but I think we should pass `this == manager.getActive()` here.
@@ -93,9 +93,7 @@ public class TopicDeleteInjector implements Injector { if (source != null) { try { - ExecutorUtil.executeWithRetries( - () -> topicClient.deleteTopics(ImmutableList.of(source.getKafkaTopicName())), - RetryBehaviour.ALWAYS); + topicClient.deleteTopics(ImmutableList.of(source.getKafkaTopicName())); } catch (Exception e) { throw new KsqlException("Could not delete the corresponding kafka topic: " + source.getKafkaTopicName(), e);
[TopicDeleteInjector->[inject->[getIfExists,formatSql,withStatement,getSuffix,deleteTopics,of,deleteSubjectWithRetries,getFormat,withoutDeleteClause,getStatement,getSource,getKafkaTopicName,isDeleteTopic,KsqlException,executeWithRetries],getSchemaRegistryClient,getTopicClient,getMetaStore,requireNonNull]]
Injects a statement into the schema registry.
why have we removed the retries here? Are there not still cases where we can get retryable exceptions here, e.g. on network issues, that are retryable?
@@ -166,6 +166,7 @@ class MockedCloudClient(MagicMock): raise ValueError("Invalid task run update") +@pytest.mark.filterwarnings("ignore::UserWarning") @pytest.mark.parametrize("executor", ["local", "sync"], indirect=True) def test_simple_two_task_flow(monkeypatch, executor): flow_run_id = str(uuid.uuid4())
[test_scheduled_start_time_is_in_context->[MockedCloudClient,TaskRun,FlowRun],test_simple_two_task_flow->[MockedCloudClient,TaskRun,FlowRun],test_deep_map_with_a_failure->[MockedCloudClient,TaskRun,FlowRun],test_simple_three_task_flow_with_one_failing_task->[MockedCloudClient,error,FlowRun,TaskRun],MockedCloudClient->[get_task_run_info->[TaskRun]],test_simple_two_task_flow_with_final_task_set_to_fail->[MockedCloudClient,TaskRun,FlowRun],test_deep_map->[MockedCloudClient,TaskRun,FlowRun],test_deep_map_with_a_retry->[MockedCloudClient,TaskRun,FlowRun],test_simple_map->[MockedCloudClient,TaskRun,FlowRun],test_simple_three_task_flow_with_first_task_retrying->[MockedCloudClient,error,FlowRun,TaskRun],test_simple_two_task_flow_with_final_task_already_running->[MockedCloudClient,TaskRun,FlowRun]]
Test simple two - task flow.
I wonder if there's a way to do this at the module level?
@@ -29,7 +29,7 @@ class TestGetTensorFromSelectedRows(unittest.TestCase): def check_with_place(self, place): scope = core.Scope() - x_rows = [0, 5, 5, 4, 20] + x_rows = [0, 5, 5, 4, 19] height = 20 row_numel = 2
[TestGetTensorFromSelectedRows->[test_check_output->[get_places,check_with_place]]]
Checks that the selected rows in the network have a tensor with the specified place.
why change this?
@@ -367,7 +367,7 @@ std::unique_ptr<tools::wallet2> make_basic(const boost::program_options::variabl // user specified CA file or fingeprints implies enabled SSL by default epee::net_utils::ssl_options_t ssl_options = epee::net_utils::ssl_support_t::e_ssl_support_enabled; - if (command_line::get_arg(vm, opts.daemon_ssl_allow_any_cert)) + if (daemon_ssl_allow_any_cert) ssl_options.verification = epee::net_utils::ssl_verification_t::none; else if (!daemon_ssl_ca_file.empty() || !daemon_ssl_allowed_fingerprints.empty()) {
[No CFG could be retrieved]
Reads the command line options and sets them on the VM. Reads the list of SSL fingerprints from the command line and creates a list of SSL options.
Why not just drop unused `daemon_ssl_allow_any_cert` variable declaration instead?
@@ -121,8 +121,8 @@ public class AggregatorUtil // TDigest sketch aggregators public static final byte TDIGEST_BUILD_SKETCH_CACHE_TYPE_ID = 0x38; - public static final byte TDIGEST_MERGE_SKETCH_CACHE_TYPE_ID = 0x39; public static final byte TDIGEST_SKETCH_TO_QUANTILES_CACHE_TYPE_ID = 0x40; + public static final byte TDIGEST_SKETCH_TO_QUANTILE_CACHE_TYPE_ID = 0x41; /** * returns the list of dependent postAggregators that should be calculated in order to calculate given postAgg
[AggregatorUtil->[makeColumnValueSelectorWithDoubleDefault->[ExpressionDoubleColumnSelector],makeColumnValueSelectorWithLongDefault->[ExpressionLongColumnSelector],makeColumnValueSelectorWithFloatDefault->[ExpressionFloatColumnSelector],condensedAggregators->[pruneDependentPostAgg]]]
This method returns the list of dependent postAggregators that should be calculated in order to calculate This method is used to prune dependent post - aggregators that are dependent on the last calculated.
Since SKETCH_TO_QUANTILES and SKETCH_TO_QUANTILE are postaggs, their IDs should go in `PostAggregatorIds` instead
@@ -864,6 +864,10 @@ class GitFetchStrategy(VCSFetchStrategy): def source_id(self): return self.commit or self.tag + @property + def source_digest(self): + return self.commit or self.tag or self.branch + def mirror_id(self): repo_ref = self.commit or self.tag or self.branch if repo_ref:
[_from_merged_attrs->[fetcher],CvsFetchStrategy->[_remove_untracked_files->[cvs],fetch->[cvs],reset->[cvs,_remove_untracked_files]],SvnFetchStrategy->[_remove_untracked_files->[svn],fetch->[svn],reset->[svn,_remove_untracked_files]],for_package_version->[_from_merged_attrs,check_pkg_attributes,_extrapolate,_check_version_attributes,fetcher,BundleFetchStrategy],from_list_url->[URLFetchStrategy],_extrapolate->[URLFetchStrategy],S3FetchStrategy->[fetch->[warn_content_type_mismatch]],HgFetchStrategy->[fetch->[hg],reset->[hg]],from_url_scheme->[fetcher],GitFetchStrategy->[fetch->[_repo_info,git],reset->[git],__str__->[_repo_info]],FetchStrategyComposite->[source_id->[source_id]],URLFetchStrategy->[check->[check],reset->[expand],_existing_url->[curl],_check_headers->[warn_content_type_mismatch],_fetch_curl->[_check_headers,curl],_fetch_urllib->[_check_headers]],from_url->[URLFetchStrategy],from_kwargs->[fetcher,matches],CacheURLFetchStrategy->[fetch->[check]],FsCache->[fetcher->[CacheURLFetchStrategy],store->[archive]],GoFetchStrategy->[fetch->[go],expand->[_ensure_one_stage_entry],reset->[go]]]
Get the source ID of the node.
This method isn't used anywhere -- delete
@@ -1513,7 +1513,7 @@ public class NotebookServer extends WebSocketServlet implements LOG.info("Job {} is finished", job.getId()); try { //TODO(khalid): may change interface for JobListener and pass subject from interpreter - note.persist(null); + note.persist(job instanceof Paragraph ? ((Paragraph) job).getAuthenticationInfo() : null); } catch (IOException e) { LOG.error(e.toString(), e); }
[NotebookServer->[multicastToUser->[serializeMessage],pushAngularObjectToRemoteRegistry->[broadcastExcept],broadcastInterpreterBindings->[broadcast],checkpointNotebook->[serializeMessage],onRemove->[broadcast,notebook],onLoad->[broadcast],updateParagraph->[permissionError,getOpenNoteId,broadcast],unicastNoteList->[generateNotebooksInfo,unicast],onOutputAppend->[broadcast],broadcast->[serializeMessage],sendNote->[permissionError,serializeMessage,addConnectionToNote],unsubscribeNotebookJobInfo->[getKey,removeConnectionFromNote],broadcastExcept->[serializeMessage],insertParagraph->[permissionError,getOpenNoteId,broadcastNote,insertParagraph],saveInterpreterBindings->[notebook],generateNotebooksInfo->[notebook],clearParagraphOutput->[permissionError,getOpenNoteId,clearParagraphOutput,broadcastNote],cancelParagraph->[permissionError,getOpenNoteId],onStatusChange->[broadcast],ParagraphListenerImpl->[onOutputAppend->[broadcast],onProgressUpdate->[broadcast],onOutputUpdate->[broadcast],afterStatusChange->[broadcastUpdateNotebookJobInfo,broadcastNote]],removeNote->[permissionError,removeNote,broadcastNoteList],broadcastNote->[broadcast],removeAngularFromRemoteRegistry->[broadcastExcept],onMessage->[notebook],onUpdate->[broadcast,notebook],createNote->[broadcastNoteList,createNote,serializeMessage,addConnectionToNote],sendAllConfigurations->[serializeMessage],unicastNotebookJobInfo->[getKey,serializeMessage,addConnectionToNote],broadcastNoteList->[multicastToUser,generateNotebooksInfo],updateNote->[permissionError,broadcastNote,broadcastNoteList],getEditorSetting->[getOpenNoteId,serializeMessage,getEditorSetting],getNoteByRevision->[getNoteByRevision,serializeMessage],broadcastToNoteBindedInterpreter->[notebook],onOutputUpdated->[broadcast],removeConnectionFromAllNote->[removeConnectionFromNote],getParagraphJobListener->[ParagraphListenerImpl],sendHomeNote->[permissionError,serializeMessage,addConnectionToNote,removeConnectionFromAllNote],angularObjectUpdated->[broadcastExcep
t],removeParagraph->[permissionError,getOpenNoteId,broadcastNote,removeParagraph],sendAllAngularObjects->[serializeMessage],broadcastReloadedNoteList->[multicastToUser,generateNotebooksInfo],getInterpreterBindings->[getInterpreterBindings,serializeMessage,notebook],moveParagraph->[permissionError,getOpenNoteId,broadcastNote,moveParagraph],broadcastUpdateNotebookJobInfo->[getKey,broadcast,notebook],permissionError->[serializeMessage],runParagraph->[permissionError,getOpenNoteId,serializeMessage,broadcast],unicast->[serializeMessage],completion->[getOpenNoteId,serializeMessage,completion],importNote->[broadcastNoteList,importNote,broadcastNote],pushAngularObjectToLocalRepo->[broadcastExcept],removeAngularObjectFromLocalRepo->[broadcastExcept],NotebookInformationListener->[onParagraphCreate->[getKey,broadcast,notebook],onParagraphRemove->[broadcastUpdateNotebookJobInfo],onParagraphStatusChange->[getKey,broadcast,notebook],onNoteCreate->[getKey,broadcast,notebook],onUnbindInterpreter->[getKey,broadcast,notebook],onNoteRemove->[broadcastUpdateNotebookJobInfo,getKey,broadcast]],getNotebookInformationListener->[NotebookInformationListener],cloneNote->[broadcastNoteList,getOpenNoteId,serializeMessage,addConnectionToNote,cloneNote],listRevisionHistory->[listRevisionHistory,serializeMessage]]]
Broadcasts a message to the notebook server after status change.
@jongyoul can you give a little more detail on when `Job` would be an instance of `Paragraph` and when it would not?
@@ -2262,7 +2262,6 @@ public final class TripleAFrame extends JFrame implements QuitHandler { historySyncher.deactivate(); historySyncher = null; } - historyPanel.goToEnd(); historyPanel = null; mapPanel.getData().removeDataChangeListener(dataChangeListener); statsPanel.setGameData(data);
[TripleAFrame->[waitForEndTurn->[waitForEndTurn],showHistory->[addTab],getOptionPane->[getOptionPane],getInGameLobbyWatcher->[getInGameLobbyWatcher],shutdown->[stopGame],actionPerformed->[gameDataChanged],leaveGame->[stopGame],getPoliticalActionChoice->[requestWindowFocus],showGame->[requestWindowFocus,addTab],showEditMode->[addTab],updateStepFromEdt->[gameDataChanged],getUserActionChoice->[requestWindowFocus],addTab->[addTab],getTechRolls->[requestWindowFocus],setStatusWarningMessage->[setStatus],waitForPlace->[waitForPlace],getBattlePanel->[getBattlePanel],setEditDelegate->[setWidgetActivation,gameDataChanged],getArrowKeyListener->[keyPressed->[getUnitInfo]],gameDataChanged]]
showGame: this method is called when the user clicks on the game panel.
I'm concerned this call might have some required unintended side-effect on the underlying GameData which might break things, but ok
@@ -142,6 +142,18 @@ export function registerElement(win, name, implementationClass) { win['customElements'].define(name, klass); } +/** + * Registers a new Bento-based custom element with its implementation class. + * @param {!Window} win The window in which to register the elements. + * @param {string} name Name of the custom element + * @param {typeof ../base-element.BaseElement} implementationClass + * @throws {UserError} if the required Bento experiment is not enabled + */ +export function registerBentoElement(win, name, implementationClass) { + assertBentoExperiment(name.replace(/^amp-/, '')); + registerElement(win, name, implementationClass); +} + /** * In order to provide better error messages we only allow to retrieve * services from other elements if those elements are loaded in the page.
[No CFG could be retrieved]
Registers a new custom element with the specified name and implementation class. Finds the currently registered extended custom element class.
This causes the error to be thrown at registration rather than during layout. Is this problematic?
@@ -336,6 +336,7 @@ define('ARIA_PAGINATION_',''); define('SUCCESS_ADDED_TO_CART_PRODUCT', 'Successfully added Product to the cart ...'); // only for where multiple add to cart is used: define('SUCCESS_ADDED_TO_CART_PRODUCTS', 'Successfully added selected Product(s) to the cart ...'); + define('FAILURE_ADDED_TO_CART_PRODUCTS', 'The selected Product(s) are not currently available for purchase...'); define('TEXT_PRODUCT_WEIGHT_UNIT','lbs');
[No CFG could be retrieved]
description of the function PRIVATE METHODS ADALE - Sale.
I'm puzzled by the name of this new constant: `FAILURE_ADDED_TO_CART_PRODUCTS` Perhaps `FAILED_TO_ADD_UNAVAILABLE_PRODUCTS` might be more intuitive?
@@ -3,13 +3,14 @@ // See the LICENSE file in the project root for more information. using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Reflection; namespace System.Linq { internal static class TypeHelper { - internal static Type FindGenericType(Type definition, Type type) + internal static Type? FindGenericType(Type definition, [NotNull]Type? type) { bool? definitionIsInterface = null; while (type != null && type != typeof(object))
[TypeHelper->[Type->[IsGenericType,FindGenericType,BaseType,GetInterfaces,GetValueOrDefault,HasValue,IsInterface,GetGenericTypeDefinition],GetStaticMethods->[Where,IsStatic]]]
Find the generic type of a given type.
I don't really understand why we need the `[NotNull]` attribute
@@ -1031,9 +1031,12 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): output_vars = body(*loop_vars) if not isinstance(output_vars, (list, tuple)): output_vars = [output_vars] - if len(output_vars) != len(loop_vars): - raise ValueError("body in while_loop should return the same arity " - "(length and structure) and types as loop_vars") + try: + assert_same_structure(output_vars, loop_vars, check_types=False) + except ValueError as e: + raise ValueError( + "body in while_loop should return the same arity " + "(length and structure) and types as loop_vars: {0}".format(e)) now_cond = cond(*output_vars) map_structure(assign, output_vars, loop_vars) assign(now_cond, pre_cond)
[Switch->[default->[ConditionalBlock,ConditionalBlockGuard],case->[_case_check_args->[],ConditionalBlock,ConditionalBlockGuard]],IfElseBlockGuard->[__exit__->[__exit__],__enter__->[__enter__],__init__->[block]],DynamicRNN->[_parent_block_->[block],block->[array_write,block,increment,less_than,array_to_lod_tensor],static_input->[_assert_in_rnn_block_,shrink_memory],update_memory->[_assert_in_rnn_block_],__init__->[While],output->[_assert_in_rnn_block_,array_write],step_input->[_assert_in_rnn_block_,array_read],memory->[_assert_in_rnn_block_,memory,shrink_memory,array_read]],ConditionalBlock->[complete->[output,block],append_conditional_block_grad->[output,block],block->[ConditionalBlockGuard]],StaticRNN->[_complete_op->[output,_parent_block],step->[BlockGuardWithCompletion],output->[step_output],step_input->[_assert_in_rnn_block_],step_output->[_assert_in_rnn_block_],memory->[_assert_in_rnn_block_,StaticRNNMemoryLink,memory]],while_loop->[block,While],copy_var_to_parent_block->[block],While->[_complete->[output,block],block->[WhileGuard]],cond->[ConditionalBlock,block,select_input,copy_var_to_parent_block],case->[_case_check_args->[_error_message],_case_check_args],switch_case->[_check_args->[equal,_error_message],_check_args],IfElse->[_parent_block->[block],true_block->[IfElseBlockGuard],__init__->[ConditionalBlock],output->[_parent_block],__call__->[merge_lod_tensor],false_block->[IfElseBlockGuard],input->[_parent_block]]]
This function is used to perform a while loop in a control flow. This function runs the main loop of the network. Loop through the loop_vars and return the loop_vars of the loop_vars.
Remove "and types" in the ValueError message because you set `check_types=False` when calling `assert_same_structure`. However, it's not your fault. I found the old code was wrong here, too. :-)
@@ -308,11 +308,6 @@ func getUserMachineAddrAndOpts(context *config.Context) (*grpcutil.PachdAddress, if err != nil { return nil, nil, fmt.Errorf("could not parse 'PACHD_ADDRESS': %v", err) } - - if envAddr.Secured { - options = append(options, WithSystemCAs) - } - options, err := getCertOptionsFromEnv() if err != nil { return nil, nil, err
[Ctx->[AddMetadata],Close->[Close],DeleteAll->[DeleteAll]]
getUserMachineAddrAndOpts returns the address and options that the user should connect to. can be called to check if the pachd address is secured and if server_.
Is this something we should just be removing? This seems like a good thing to have, and if it isn't hooked up at the moment, should we fix it?
@@ -447,6 +447,15 @@ class MNEBrowseFigure(MNEFigure): # hide some ticks ax_main.tick_params(axis='x', which='major', bottom=False) ax_hscroll.tick_params(axis='x', which='both', bottom=False) + else: + # Add Timestamp X-Ticks if activated + if self.mne.show_real_time: + if not isinstance(self.mne.show_real_time, str): + self.mne.show_real_time = "%H:%M:%S" + for _ax in (ax_main, ax_hscroll): + _ax.xaxis.set_major_formatter( + FuncFormatter(self._xtick_timestamp_formatter)) + _ax.set_xlabel(f'Time ({self.mne.show_real_time})') # VERTICAL SCROLLBAR PATCHES (COLORED BY CHANNEL TYPE) ch_order = self.mne.ch_order
[_browse_figure->[_figure,_resize,_update_zen_mode_offsets,_toggle_scrollbars],MNEBrowseFigure->[_update_zen_mode_offsets->[_get_size_px],_toggle_annotation_fig->[_create_annotation_fig],_resize->[_get_size_px],_draw_annotations->[_clear_annotations],_draw_traces->[_hide_scalebars,_show_scalebars],_add_annotation_label->[_update_annotation_fig,_set_active_button,_radiopress],_show_vline->[_recompute_epochs_vlines],_check_update_hscroll_clicked->[_update_hscroll],_toggle_scalebars->[_hide_scalebars,_show_scalebars],_update_trace_offsets->[_make_butterfly_selections_dict,_update_yaxis_labels,_update_picks],_update_annotation_fig->[_set_active_button,_get_annotation_labels],_create_epoch_histogram->[_new_child_figure,_inch_to_rel],_modify_annotation->[_remove_annotation_hover_line],_change_selection_vscroll->[_radiopress],_toggle_proj_fig->[_create_proj_fig],__init__->[_inch_to_rel],_toggle_help_fig->[_create_help_fig],_setup_annotation_colors->[_get_annotation_labels],_create_annotation_fig->[_new_child_figure,_inch_to_rel],_toggle_bad_channel->[_update_bad_sensors,_update_projector],_create_help_fig->[_new_child_figure],_create_proj_fig->[_new_child_figure,_keypress],_check_update_vscroll_clicked->[_update_vscroll],_new_child_figure->[_add_default_callbacks],_toggle_scrollbars->[_update_zen_mode_offsets],_create_ch_location_fig->[_new_child_figure],_toggle_butterfly->[_style_radio_buttons_butterfly],_update_picks->[_make_butterfly_selections_dict],_create_ica_properties_fig->[_new_child_figure],_update_data->[_load_data],_redraw->[_draw_traces,_draw_annotations,_update_data],_create_selection_fig->[_new_child_figure,_style_radio_buttons_butterfly],_create_epoch_image_fig->[_new_child_figure]],MNESelectionFigure->[_keypress->[_keypress]],_line_figure->[_figure,_get_size_px],_figure->[_add_default_callbacks],_psd_figure->[_line_figure],MNEFigure->[_get_size_px->[_get_dpi_ratio],__init__->[MNEFigParams]],MNELineFigure->[_resize->[_get_size_px],__init__->[_inch_to_rel]]]
Initialize the object with a base object and a base ICA object. Zorder is a list of dicts. Each key is a sequence of values. Add a plot of the missing key - value region on the main axes. Plot the missing key - value pair on both axes.
Why not combine this into an `elif` branch?
@@ -217,5 +217,10 @@ function _maybeSendLobbyNotification(origin, message, { dispatch, getState }) { break; } - dispatch(showNotification(notificationProps, isTestModeEnabled(getState()) ? undefined : 5000)); + dispatch( + showNotification( + notificationProps, + isTestModeEnabled(getState()) ? undefined : NOTIFICATION_TIMEOUT_TYPE.MEDIUM + ) + ); }
[No CFG could be retrieved]
END FUNCTIONS.
We should never pass undefined as a timeout.
@@ -43,7 +43,7 @@ class Tag < ActiveRecord::Base SELECT COUNT(topics.id) AS topic_count, tags.id AS tag_id FROM tags LEFT JOIN topic_tags ON tags.id = topic_tags.tag_id - LEFT JOIN topics ON topics.id = topic_tags.topic_id AND topics.deleted_at IS NULL + LEFT JOIN topics ON topics.id = topic_tags.topic_id AND topics.deleted_at IS NULL AND topics.archetype != "private_message" GROUP BY tags.id ) x WHERE x.tag_id = t.id
[Tag->[top_tags->[empty?,max_tags_in_filter_list,allowed_category_ids,pluck,join,exec_sql,id,map],index_search->[index],tags_by_count_query->[order,limit],include_tags?->[show_filter_by_tag,tagging_enabled],update_topic_counts->[exec_sql],full_url->[base_url,name],after_save,has_many,include,validates]]
This method updates the topic count of all tags in the category but only if the topic count.
We need a test case for this too :smile:
@@ -383,14 +383,14 @@ class TestTaskOnSuccessHandler(ResourceReservationTests): kwargs = {'1': 'for the money', 'tags': ['test_tags'], 'routing_key': WORKER_2} mock_request.called_directly = False - task_status = TaskStatusManager.create_task_status(task_id) + task_status = TaskStatus(task_id).save() self.assertEqual(task_status['state'], 'waiting') self.assertEqual(task_status['finish_time'], None) task = tasks.Task() task.on_success(retval, task_id, args, kwargs) - new_task_status = TaskStatusManager.find_by_task_id(task_id) + new_task_status = TaskStatus.objects(task_id=task_id).first() self.assertEqual(new_task_status['state'], 'finished') self.assertEqual(new_task_status['result'], retval) self.assertFalse(new_task_status['finish_time'] is None)
[TestCancel->[tearDown->[tearDown],setUp->[setUp]],TestRegisterSigtermHandler->[test_error_case->[f->[FakeException]]],TestTaskOnFailureHandler->[test_updates_task_status_correctly->[EInfo]],TestQueueReservedTask->[test_loops_and_sleeps_waiting_for_available_worker->[side_effect->[second_call->[BreakOutException]]]]]
This test tests task status updates correctly.
There are lots of first() calls in this module
@@ -155,6 +155,13 @@ func (p *Prospector) createHarvester(state file.State) (*harvester.Harvester, er } func (p *Prospector) startHarvester(state file.State, offset int64) error { + + if p.config.HarvesterLimit > 0 && atomic.LoadUint64(&p.harvesterCounter) >= p.config.HarvesterLimit { + return fmt.Errorf("Harvester limit reached.") + } + + atomic.AddUint64(&p.harvesterCounter, 1) + state.Offset = offset // Create harvester with state h, err := p.createHarvester(state)
[Run->[Run],Init->[Init],startHarvester->[createHarvester]]
startHarvester starts a harvester with the given offset.
if createHarvester fails counter is off. Either move counter right before p.wg.Add(1) or decrement counter on fail (+ explain why it must be decremented and can not be moved past this place).
@@ -26,10 +26,11 @@ public class ConfigDrive { public static final String openStackConfigDriveName = "/openstack/latest/"; /** - * This is the path to iso file relative to mount point - * @return config drive iso file path + * Created the path to ISO file relative to mount point. + * The config driver path will have the following formatt: {@link #CONFIGDRIVEDIR} + / + instanceName + / + {@link #CONFIGDRIVEFILENAME} + * @return config drive ISO file path */ - public static String createConfigDrivePath(final String instanceName) { + public static String createConfigDrivePath(String instanceName) { return ConfigDrive.CONFIGDRIVEDIR + "/" + instanceName + "/" + ConfigDrive.CONFIGDRIVEFILENAME; }
[No CFG could be retrieved]
create config drive path.
What do you think of `Creates the path to ...` instead of `Created`? Change `formatt` to `format`
@@ -177,7 +177,7 @@ public class TestSqlTaskExecution // // test body - assertEquals(taskStateMachine.getState(), TaskState.RUNNING); + assertEquals(taskStateMachine.getState(), RUNNING); switch (executionStrategy) { case UNGROUPED_EXECUTION:
[TestSqlTaskExecution->[TestingBuildOperatorFactory->[Pauser],OutputBufferConsumer->[abort->[abort]],BuildStates->[setNoNewLookups->[setNoNewLookups]],TestingScanOperatorFactory->[TestingScanOperator->[getOutput->[await,finish]],Pauser],TestingCrossJoinOperatorFactory->[TestingCrossJoinOperator->[getOutput->[isFinished]],Pauser]]]
Simple test for all cases where there is no need to create a task. create a new sql task execution with no sources. Add source for pipeline driver group. assert that partial result is produced check if there is a driver group and abort the task.
static import should be separate commit
@@ -45,7 +45,7 @@ def gdrive(make_tmp_dir): # NOTE: temporary workaround tmp_dir = make_tmp_dir("gdrive", dvc=True) - ret = GDrive(GDrive.get_url()) - tree = GDriveTree(tmp_dir.dvc, ret.config) + ret = GDrive(GDrive.get_url(), tmp_dir.dvc) + tree = ret.tree tree._gdrive_create_dir("root", tree.path_info.path) return ret
[gdrive->[get_url,should_test,GDrive],GDrive->[get_url->[_get_storagepath]]]
Get a node in the gdrive system.
Shame gdrive is so complex that we have to reuse `GDriveTree` in our test fixtures :slightly_frowning_face: But oh well...
@@ -52,6 +52,7 @@ public class ProOtherMoveOptions { private static Map<Territory, ProTerritory> createMaxMoveMap(final List<Map<Territory, ProTerritory>> moveMaps, final PlayerID player, final boolean isAttacker) { + final GameData data = ProData.getData(); final Map<Territory, ProTerritory> result = new HashMap<Territory, ProTerritory>(); final List<PlayerID> players = ProUtils.getOtherPlayersInTurnOrder(player);
[ProOtherMoveOptions->[createMaxMoveMap->[getMaxAmphibUnits,newArrayList,getOtherPlayersInTurnOrder,getMaxUnits,getOwner,isPlayersTurnFirst,get,estimateStrength,isWater,addAll,isEmpty,someMatch,containsKey,put,keySet],getAll->[get],toString->[toString],createMoveMaps->[get,add,containsKey,put,keySet],getMax->[get],createMoveMaps,createMaxMoveMap]]
Create a max move map. if t is a water unit or if t is a move or if t is a land.
Why is this set on line 57 but then only used on line 75?
@@ -42,11 +42,13 @@ namespace Pulumi public readonly struct ResourceTransformationResult { + public string Name { get; } public ResourceArgs Args { get; } public ResourceOptions Options { get; } - public ResourceTransformationResult(ResourceArgs args, ResourceOptions options) + public ResourceTransformationResult(string name, ResourceArgs args, ResourceOptions options) { + Name = name; Args = args; Options = options; }
[No CFG could be retrieved]
Replies the transformation result of a .
This is technically a breaking change. I'm fine with it here since .NET is in preview and this is not a heavily used API in .NET yet. But I'm curious if @mikhailshilkov or @CyrusNajmabadi have thoughts on how we will manage changes like this in the future? Should we add an overload of the constructor?
@@ -21,9 +21,11 @@ from .. import core, framework from ..framework import Program, default_main_program, \ default_startup_program, \ Variable, Parameter, grad_var_name +from details import * LOOKUP_TABLE_TYPE = "lookup_table" LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad" +OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName() RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName( ) RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
[DistributeTranspiler->[_create_table_optimize_block->[_clone_var],_is_op_connected->[_append_inname_remove_beta],_append_pserver_ops->[_get_optimizer_input_shape,same_or_split_var,_orig_varname],get_startup_program->[_get_splited_name_and_shape->[same_or_split_var],_get_splited_name_and_shape],_create_ufind->[_is_op_connected,UnionFind,union],_orig_varname->[find],_get_optimize_pass->[_is_opt_op],transpile->[split_dense_variable,find_op_by_output_arg],_replace_lookup_table_op_with_prefetch->[delete_ops],get_pserver_program->[find,__append_optimize_op__,is_connected],get_trainer_program->[delete_ops,__str__],_get_lr_ops->[_is_op_connected,UnionFind,_is_opt_op,union,is_connected],_is_opt_op_on_pserver->[same_or_split_var]],UnionFind->[is_connected->[find],union->[find]],split_dense_variable->[VarBlock]]
Creates a data structure that keeps track of a set of elements partitioned into a number of This is a convenience method for creating a DisjointSetDataStructure from a list of element.
Could you help to delete the redundant codes? > RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
@@ -23,6 +23,7 @@ namespace Content.Shared.Physics GhostImpassable = 1 << 5, // 32 Things impassible by ghosts/observers, ie blessed tiles or forcefields Underplating = 1 << 6, // 64 Things that are under plating Passable = 1 << 7, // 128 Things that are passable + SingularityImpassable = 1 << 8, // 256 Objects which are specifically impassable by the singularity MapGrid = MapGridHelpers.CollisionGroup, // Map grids, like shuttles. This is the actual grid itself, not the walls or other entities connected to the grid. MobMask = Impassable | MobImpassable | VaultImpassable | SmallImpassable,
[CollisionGroup]
The opaque values are the mask of possible objects.
This is silly because it just clutters collision layers and doesn't account for the existing PreventCollideEvent that actually checks for the singulo level.
@@ -16,6 +16,9 @@ package cluster import ( "bytes" + "github.com/pkg/errors" + "go.uber.org/zap" + "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb"
[HandleReportSplit->[checkSplitRegion],HandleBatchReportSplit->[checkSplitRegions],HandleAskBatchSplit->[ValidRequestRegion]]
HandleRegionHeartbeat processes a region heartbeat from the client. HandleAskSplit handles the ask - split request.
rm empty line
@@ -174,6 +174,13 @@ public class BloomDimFilter implements DimFilter return extractionFn; } + @JsonInclude(JsonInclude.Include.NON_NULL) + @JsonProperty + public FilterTuning getFilterTuning() + { + return filterTuning; + } + @Override public String toString() {
[BloomDimFilter->[hashCode->[hashCode],toString->[toString],equals->[equals]]]
Returns the extraction function or the string representation of this .
This method should be `@Nullable`
@@ -39,7 +39,7 @@ namespace Content.Client.Singularity { SingularityQuery(args.Viewport.Eye); - var viewportWB = args.WorldBounds; + var viewportWB = _eyeManager.GetWorldViewport(); // Has to be correctly handled because of the way intensity/falloff transform works so just do it. _shader?.SetParameter("renderScale", args.Viewport.RenderScale); foreach (SingularityShaderInstance instance in _singularities.Values)
[SingularityOverlay->[SingularityQuery->[Clear,CurrentMapCoords,TryGetEntity,Contains,Position,Remove,Keys,Intensity,singuloEntity,_entityManager,Uid,Owner,SinguloQualifies,Add,Falloff],Draw->[White,CurrentMapCoords,WorldToLocal,Eye,Values,UseShader,SingularityQuery,RenderScale,Y,Intensity,DrawRect,WorldBounds,WorldHandle,Falloff],OverwriteTargetFrameBuffer->[Count],SinguloQualifies->[FromMap,MapId,MapID,InRange,ParentUid],WorldSpace,InjectDependencies,Duplicate]]
Draw the overlay.
Seems suss with multi-viewport but not like singulo works with it anyway.
@@ -156,7 +156,6 @@ func NewConfig() *Config { fs.StringVar(&cfg.Log.Level, "L", "", "log level: debug, info, warn, error, fatal (default 'info')") fs.StringVar(&cfg.Log.File.Filename, "log-file", "", "log file path") - fs.BoolVar(&cfg.Log.File.LogRotate, "log-rotate", true, "rotate log") fs.StringVar(&cfg.Security.CAPath, "cacert", "", "Path of file that contains list of trusted TLS CAs") fs.StringVar(&cfg.Security.CertPath, "cert", "", "Path of file that contains X509 certificate in PEM format")
[MigrateDeprecatedFlags->[migrateConfigurationMap],Parse->[Parse],IsDefined->[IsDefined],Adjust->[CheckUndecoded,Parse,IsDefined,Validate,Child],parseDeprecatedFlag->[IsDefined],adjustLog->[IsDefined],adjust->[Validate,IsDefined],Parse]
Config file variables Configuration for one - member cluster.
not supported now?
@@ -120,8 +120,8 @@ public class MetadataNegativeTestCase extends MetadataExtensionFunctionalTestCas @Test public void failToGetMetadataFromNonExistingSource() throws IOException { - final SourceId notExistingSource = new SourceId(FLOW_WITHOUT_SOURCE); - final MetadataResult<MetadataKeysContainer> result = metadataService.getMetadataKeys(notExistingSource); + location = builder().globalName(FLOW_WITHOUT_SOURCE).addPart("source").build(); + final MetadataResult<MetadataKeysContainer> result = metadataService.getMetadataKeys(location); assertFailureResult(result, 1); assertMetadataFailure(result.getFailures().get(0), SOURCE_DOES_NOT_EXIST,
[MetadataNegativeTestCase->[processorIsNotMetadataProvider->[ProcessorId,getName,getComponentDynamicMetadata,get,assertMetadataFailure],processorDoesNotExist->[assertFailureResult,ProcessorId,getName,getComponentDynamicMetadata,get,format,assertMetadataFailure],failWithDynamicConfigurationWhenRetrievingMetadata->[assertFailureResult,ProcessorId,getName,build,getComponentDynamicMetadata,get,assertMetadataFailure],failToGetMetadataFromDynamicConfig->[getName,get,getMetadataKeys,ConfigurationId,format,assertMetadataFailure],getKeysWithRuntimeException->[assertFailureResult,ProcessorId,getName,get,getMetadataKeys,assertMetadataFailure],failToGetMetadataFromNonExistingConfig->[assertFailureResult,getName,get,getMetadataKeys,ConfigurationId,format,assertMetadataFailure],failToGetMetadataWithMissingMetadataKeyLevels->[ProcessorId,build,getComponentDynamicMetadata,get,assumeThat,is,assertMetadataFailure],getOperationMetadataWithRuntimeException->[assertFailureResult,ProcessorId,getComponentDynamicMetadata,get,assertMetadataFailure],flowDoesNotExist->[assertFailureResult,ProcessorId,getName,getComponentDynamicMetadata,get,format,assertMetadataFailure],processorIsNotEntityMetadataProvider->[assertFailureResult,ProcessorId,getName,get,getEntityMetadata,assertMetadataFailure],failToGetMetadataFromNonExistingSource->[assertFailureResult,getName,get,getMetadataKeys,SourceId,assertMetadataFailure],fetchMissingElementFromCache->[assertMetadataFailure,getComponentDynamicMetadata,get,ProcessorId],getOperationMetadataWithResolvingException->[assertFailureResult,ProcessorId,getFailures,getComponentDynamicMetadata,get,is,isSuccess,assertMetadataFailure,assertThat]]]
Fail to get metadata from non existing source.
Maybe the builder should have an `addSource()` method
@@ -1214,6 +1214,12 @@ ATTACHMENT_SITE_URL = PROTOCOL + ATTACHMENT_HOST _PROD_ATTACHMENT_ORIGIN = 'demos-origin.mdn.mozit.cloud' ATTACHMENT_ORIGIN = config('ATTACHMENT_ORIGIN', default=_PROD_ATTACHMENT_ORIGIN) +BETA_HOST = config('BETA_HOST', default='beta.' + DOMAIN) +BETA_ORIGIN = config('BETA_ORIGIN', default='beta.mdn.mozit.cloud') +BETA_SITE_URL = PROTOCOL + BETA_HOST +WIKI_HOST = config('WIKI_HOST', default='wiki.' + DOMAIN) +WIKI_SITE_URL = PROTOCOL + WIKI_HOST + # This should never be false for the production and stage deployments. ENABLE_RESTRICTIONS_BY_HOST = config( 'ENABLE_RESTRICTIONS_BY_HOST',
[pipeline_one_scss->[pipeline_scss],_get_locales->[path],parse_iframe_url,path,_get_locales,pipeline_scss]
Creates a list of all the possible values for a single n - node object. Allow robots no path restrictions.
I'm surprised there isn't a WIKI_ORIGIN here (just because I expected it to parallel what is needed for the beta case, not because I understand)
@@ -165,6 +165,14 @@ class CryptographyClient(KeyVaultClientBase): :dedent: 8 """ self._initialize(**kwargs) + iv = kwargs.pop("iv", None) + tag = kwargs.pop("authentication_tag", None) + aad = kwargs.pop("additional_authenticated_data", None) + try: + _validate_arguments(operation="decrypt", algorithm=algorithm, iv=iv, tag=tag, aad=aad) + except ValueError: + raise + if self._local_provider.supports(KeyOperation.decrypt, algorithm): try: return self._local_provider.decrypt(algorithm, ciphertext)
[CryptographyClient->[unwrap_key->[_initialize,unwrap_key],decrypt->[_initialize,decrypt],verify->[_initialize,verify],sign->[_initialize,sign],encrypt->[_initialize,encrypt],wrap_key->[_initialize,wrap_key]]]
Decrypts a single block of encrypted data using the client s key.
If you want to propagate errors from the method, you can just call it. This try/except doesn't do anything.
@@ -267,6 +267,13 @@ public class JobContext { } } + @Subscribe + public void handleNewOutputTaskStateEvent(NewOutputTaskStateEvent newOutputTaskStateEvent) { + this.jobState.addTaskState(newOutputTaskStateEvent.getTaskState()); + // Update the job execution history store upon every task completion + storeJobExecutionInfo(); + } + /** * Finalize the {@link JobState} before committing the job. */
[JobContext->[setTaskFailureException->[setTaskFailureException],persistDatasetState->[persistDatasetState]]]
This method stores the job execution information in the job history store.
Calling this for every `newOutputTaskStateEvent`, what's the overhead? Is it possible to call this method after a certain number of `newOutputTaskStateEvent` have been received?
@@ -204,7 +204,8 @@ LLDSPEC bool_t gdisp_lld_init(GDisplay *g) { uint8_t* src = PRIV(g)->frame_buffer; for (int y=0;y<GDISP_SCREEN_HEIGHT;y++) { for (int x=0;x<GDISP_SCREEN_WIDTH;x++) { - PRIV(g)->write_buffer[get_led_address(g, x, y)]=CIE1931_CURVE[*src]; + uint8_t val = (uint16_t)*src * g->g.Backlight / 100; + PRIV(g)->write_buffer[get_led_address(g, x, y)]=CIE1931_CURVE[val]; ++src; } }
[No CFG could be retrieved]
region 0 - length LEDs on all 8 - length pages and enable the mask - 8 region LED buffer functions.
What you could try here is to disable the brightness control, by using the `*src` value directly, like the old code.
@@ -69,13 +69,14 @@ class RangeResolver(object): def _resolve_local(self, search_ref, version_range): local_found = search_recipes(self._client_cache, search_ref) if local_found: - resolved_version = self._resolve_version(version_range, local_found) - if resolved_version: - return resolved_version + return self._resolve_version(version_range, local_found) def _resolve_remote(self, search_ref, version_range, remote_name): # We should use ignorecase=False, we want the exact case! - remote_found = self._remote_search.search_remotes(search_ref, remote_name) + remote_found = self._cached_remote_found.get(search_ref) + if remote_found is None: + remote_found = self._remote_search.search_remotes(search_ref, remote_name) + self._cached_remote_found[search_ref] = remote_found if remote_found: return self._resolve_version(version_range, remote_found)
[RangeResolver->[_resolve_local->[_resolve_version,search_recipes],resolve->[ConanException,_resolve_local,str,ConanFileReference,success,_resolve_version,_resolve_remote],_resolve_remote->[search_remotes,_resolve_version],_resolve_version->[get,satisfying]],satisfying->[max_satisfying,get,str,replace,warn,SemVer]]
Resolve a version in the local cache.
Two questions: - Is it not needed to store the remote in the cache? - Is it not recommended to store the "None" or something when it is not found to not search again?
@@ -648,6 +648,9 @@ func sendPayment(mctx libkb.MetaContext, walletState *WalletState, sendArg SendP // submit the transaction rres, err := walletState.SubmitPayment(mctx.Ctx(), post) if err != nil { + if rerr := walletState.RemovePendingTx(mctx.Ctx(), senderAccountID, stellar1.TransactionID(txID)); rerr != nil { + mctx.CDebugf("error calling RemovePendingTx: %s", rerr) + } return res, err } mctx.CDebugf("sent payment (direct) kbTxID:%v txID:%v pending:%v", rres.KeybaseID, rres.StellarID, rres.Pending)
[StatusCode,GetMeUV,IsNativeXLM,NewPaymentID,BackgroundWithLogTags,RunEngine2,LocalSigchainGuard,StartStandaloneChat,WithUID,Post,New,SendMsgByName,AccountID,PaymentXLMTransaction,Details,Time,IsAppStatusErrorCode,NewMetaContext,IsResolutionNotFoundError,RuneCountInString,Lookup,RelocateTransaction,RecipientInput,Relay,informAcceptedDisclaimer,Split,WithCtx,WithPublicKeyOptional,Network,DecryptB64,SendMsgByNameNonblock,DeviceID,Result,Sync,IsValid,GetStellar,SubmitPayment,WithDesc,NewSeedStr,Verbose,GetCurrencyLocal,PaymentDetails,FetchAccountBundle,CreateAccountXLMTransaction,LookupByAddress,SeedStr,ShouldCreate,SubmitRelayPayment,SubmitRelayClaim,Stellar,Now,WriteByte,AddPendingTx,InformHasWallet,RecentPayments,Add,CreateNewAccount,ToSocialAssertion,Direct,OutsideCurrencyCode,GetNormalizedName,AssertionParse,ExchangeRate,AdvanceAccounts,GetUPAKLoader,AddAccount,Since,AddressStr,Refresh,ParseStellarSecretKey,AssetNative,Create,NewLoadUserByNameArg,NewLoadUserByUIDArg,ParseAmount,Typ,NewPerUserKeyUpgrade,GetUsernameAndUserVersionIfValid,UpdateUnreadCount,NewInitial,SecretKey,KickAutoClaimRunner,RemovePendingTx,Load,GetPerUserKeyring,NewAddressStr,AdvanceBundle,FetchSecretlessBundle,LookupUsername,GetServerDefinitions,WithForcePoll,CurrentGeneration,Eq,PrimaryAccount,ConvertOutsideToXLM,NewLoadUserArgWithMetaContext,CDebugf,SecureNoLogString,Balances,GetAccountDisplayCurrency,CTraceTimed,GetKey,WithNetContext,ActiveDevice,NewNormalizedUsername,Equal,ParseStellarAccountID,PostWithChainlink,Sprintf,LoadV2,BuildPaymentID,CachedHasWallet,String,StellarSimplifyAmount,EncodeToString,ToUserVersion,NewChatConversationID,NewMessageBodyWithSendpayment,MarkAsRead,SetDefaultDisplayCurrency,SubmitRequest,StringFromStellarAmount,CWarningf,Slice,Username,StellarAccountID,FloatString,Error,UID,Seconds,TransactionID,Ctx,FetchBundleWithGens,LoadUser,IsNil,Errorf,GetName,NewResolveThenIdentify2,Clock,Debug,SliceStable,NewMessageBodyWithRequestpayment,Unix,ConvertXLMToOutside,Join,hasAc
ceptedDisclaimer,Contains,IsAvailable,CurrentUsername,G,Background,RandBytes,ParseStellarAmount,LookupUnverified,ServerTimeboundsRecommendation]
sequence number is the sequence number of the last transaction that was sent. Send a single message from the user to the chat channel.
this is the fix 1 of 2.
@@ -13,6 +13,7 @@ #include "DdsDcpsCoreC.h" #include "Marked_Default_Qos.h" #include "dds/DCPS/SafetyProfileStreams.h" +#include "DCPS_Utils.h" #ifndef DDS_HAS_MINIMUM_BIT #include "DdsDcpsCoreTypeSupportImpl.h"
[No CFG could be retrieved]
Creates a list of bit topics from a given participant. Creates bit topic and bit topic for given participant.
This is the only line changed in this file. Is it needed?
@@ -67,7 +67,7 @@ def build_wheels(builder, pep517_requirements, legacy_requirements, session): # install for those. builder.build( legacy_requirements, - session=session, autobuilding=True + autobuilding=True, ) return build_failures
[InstallCommand->[run->[build_wheels]],build_wheels->[is_wheel_installed]]
Build wheels for requirements depending on whether wheel is installed.
nit: Add trailing commas while we're changing this?
@@ -463,6 +463,14 @@ class Asset < ApplicationRecord save end + def editable?(user) + objects = %w(step result) + my_module = send(objects.find { |object| send(object) }).my_module + Canaid::PermissionsHolder.instance.eval(:manage_experiment, user, my_module.experiment) && + !locked? && + %r{^image/#{Regexp.union(Constants::WHITELISTED_IMAGE_TYPES_EDITABLE)}} === file.content_type + end + protected # Checks if attachments is an image (in post processing imagemagick will
[Asset->[extract_asset_text->[is_stored_on_s3?],url->[is_stored_on_s3?,url],post_process_file->[text?],new_empty->[file_empty],presigned_url->[is_stored_on_s3?,presigned_url],open->[is_stored_on_s3?,open]]]
Updates the contents of the file with a new file object.
Style/CaseEquality: Avoid the use of the case equality operator ===.<br>Metrics/LineLength: Line is too long. [99/80]
@@ -511,7 +511,7 @@ void Config_Postprocess() { // Mesh (Manual) Bed Leveling // - bool leveling_is_on; + uint8_t leveling_is_on; uint8_t mesh_num_x, mesh_num_y; EEPROM_READ(leveling_is_on); EEPROM_READ(dummy);
[No CFG could be retrieved]
These are the default values for the parameters. region Mesh Leveling.
This instance will conceal the previous instance on line 475.
@@ -137,7 +137,7 @@ class Calibrator(object): break if ops_type[ - search_end_index] not in Calibrator.const_sign_op_type and ops_type[ + search_end_index] not in Calibrator.supported_int8_memory_op_type and ops_type[ search_end_index] != 'conv2d': return Calibrator.s8_max
[Calibrator->[__update_program->[__update_int8_output_var_op_index_dict,__get_max_range_by_var_name],__get_optimal_scaling_factor->[__expand_quantized_bins,__safe_entropy],__get_quantize_dequantize_combination->[__check_force_fp32_attr_by_output_var,__get_op_index_by_output_var,__get_op_index_by_input_var,__check_var_source_dt,__check_op_type_with_specified_var_as_input],__sampling->[_is_close,__get_max_range_by_var_name]]]
This method returns the maximum value of the specified variable in the current block. Returns the maximum sequence number found in the sequence table.
This check would work the same if it was written as `if ops_type[search_end_index] not in Calibrator.supported_int8_op_type:`. Is there a reason why it's not written like that?
@@ -39,6 +39,7 @@ class Hipsparse(CMakePackage): patch('e79985dccde22d826aceb3badfc643a3227979d2.patch', when='@3.5.0') patch('530047af4a0f437dafc02f76b3a17e3b1536c7ec.patch', when='@3.5.0') + patch('hipsparse-remove-opt-rocm-reference.patch', when='@4.2.0:') def cmake_args(self): return [
[Hipsparse->[setup_build_environment->[set],cmake_args->[define],depends_on,version,patch]]
Return a list of cmake - cXX - standard build - clients - sampled build -.
Can you share what system version got picked up? Also here, it should pick up spack's hip.
@@ -45,6 +45,7 @@ func dataSourceAwsRedshiftServiceAccountRead(d *schema.ResourceData, meta interf if accid, ok := redshiftServiceAccountPerRegionMap[region]; ok { d.SetId(accid) + d.Set("arn", fmt.Sprintf("arn:%s:iam::%s:user/logs", meta.(*AWSClient).partition, accid)) return nil }
[SetId,GetOk,Errorf]
region - > .
I don't think we should use fmt.Sprintf here - I think we should use the ARN package as part of AWS
@@ -172,7 +172,7 @@ def perform_registration(acme, config): class Client(object): - """ACME protocol client. + """Certbot's client. :ivar .IConfig config: Client configuration. :ivar .Account account: Account registered with `register`.
[sample_user_agent->[DummyConfig,determine_user_agent],perform_registration->[perform_registration,register],Client->[obtain_and_enroll_certificate->[obtain_certificate],obtain_certificate->[obtain_certificate_from_csr],__init__->[acme_from_config_key]],view_config_changes->[view_config_changes],register->[acme_from_config_key]]
A class to manage a new registration resource. Obtain the certificate from the config.
This makes more sense right?
@@ -94,7 +94,6 @@ func checkVersion() { {"gopkg.in/ini.v1", ini.Version, "1.8.4"}, {"gopkg.in/macaron.v1", macaron.Version, "1.1.7"}, {"github.com/go-gitea/git", git.Version, "0.4.1"}, - {"github.com/gogits/go-gogs-client", gogs.Version, "0.12.1"}, } for _, c := range checkers { if !version.Compare(c.Version(), c.Expected, ">=") {
[Handle,FileMode,GitHookService,Gziper,MustAsset,NewFuncMap,ServeData,Close,RepoRef,SetURLPrefix,MultipartForm,Static,Redirect,IsSet,Set,GetBranchCommit,Info,ListenAndServeTLS,Error,CanEnableEditor,ReadFile,Compare,Post,New,NotFound,Captchaer,Route,ListenUnix,Logger,I18n,CommitsCount,Any,Join,Toggle,RequireRepoAdmin,ServeFileContent,Sessioner,RequireRepoWriter,Head,Recovery,Remove,AssetDir,RegisterRoutes,ListenAndServe,Get,RepoAssignment,Serve,GetAttachmentByUUID,InitMailRender,Csrfer,Header,Renderer,Use,Cacher,GlobalInit,IsErrAttachmentNotExist,LocalPath,Version,Contexter,Sprintf,Toolboxer,OrgAssignment,Chmod,String,Fatal,Combo,Open,Replace,Group,Params]
checkVersion checks the binary version of the package. newMacaron initializes a new Macaron instance.
Why drop the check?
@@ -205,7 +205,12 @@ async def compute_java_third_party_artifact_mapping( # Default to exposing the `group` name as a package. packages = (f"{coord.group}.**",) for package in packages: - insert(mapping, package, addresses) + insert(mapping, package, addresses, False) + + # Mark types that have strong first-party declarations as first-party + for tgt in all_jvm_type_providing_tgts: + for provides_types in tgt[JvmProvidesTypesField].value or []: + insert(mapping, provides_types, [], True) return ThirdPartyPackageToArtifactMapping(FrozenTrieNode(mapping))
[find_artifact_mapping->[find_child],find_all_jvm_artifact_targets->[AllJvmArtifactTargets],MutableTrieNode->[ensure_child->[MutableTrieNode]],UnversionedCoordinate->[from_coord_str->[UnversionedCoordinate]],find_available_third_party_artifacts->[UnversionedCoordinate,AvailableThirdPartyArtifacts],compute_java_third_party_artifact_mapping->[insert->[ensure_child],MutableTrieNode,ThirdPartyPackageToArtifactMapping,from_coord_str,insert,FrozenTrieNode],FrozenTrieNode->[__init__->[FrozenTrieNode]]]
Implements the mapping logic from the jvm_artifact and java - infer help. This function checks if there is a node in the mapping tree that matches the given .
Naming nit: Since the loop traverses a list, `provides_type` is more accurate of a name.
@@ -1156,7 +1156,9 @@ describes.realWin('CustomElement', {amp: true}, (env) => { expect(element2.sizerElement.style.paddingTop).to.equal('1%'); }); - it('should rediscover sizer to apply heights in SSR', () => { + // TODO BEFORE MERGE: is this test still valid? Why should this ever be "rediscovered". + // This is broken because we always call applySizesAndMediaQuery within connectedCallback now. + it.skip('should rediscover sizer to apply heights in SSR', () => { element1.setAttribute('i-amphtml-layout', 'responsive'); element1.setAttribute('layout', 'responsive'); element1.setAttribute('width', '200px');
[No CFG could be retrieved]
Tests that the element is built and that it is ready to be built. should NOT enqueue actions when in template.
@dvoytenko: do you know in which cases we are meant to rediscover a sizer / if this is still a valid need? This test fails with the new InOb path
@@ -65,9 +65,9 @@ SharedVideoThumb.prototype.createContainer = function(spanId) { }; /** - * Sets the display name for the thumb. + * Updates the display name component for the given video span id. */ -SharedVideoThumb.prototype.setDisplayName = function(displayName) { +SharedVideoThumb.prototype.setDisplayName = function() { if (!this.container) { logger.warn(`Unable to set displayName - ${this.videoSpanId } does not exist`);
[No CFG could be retrieved]
Adds a display name to the thumb.
maybe rename it to `updateDisplayName` then?
@@ -1435,6 +1435,16 @@ public class TestHoodieDeltaStreamer extends TestHoodieDeltaStreamerBase { testParquetDFSSource(true, Collections.singletonList(TripsWithDistanceTransformer.class.getName())); } + @Test + public void testORCDFSSourceWithoutSchemaProviderAndNoTransformer() throws Exception { + testORCDFSSource(false, null); + } + + @Test + public void testORCDFSSourceWithSchemaFilesAndTransformer() throws Exception { + testORCDFSSource(true, Collections.singletonList(TripsWithDistanceTransformer.class.getName())); + } + private void prepareCsvDFSSource( boolean hasHeader, char sep, boolean useSchemaProvider, boolean hasTransformer) throws IOException { String sourceRoot = dfsBasePath + "/csvFiles";
[TestHoodieDeltaStreamer->[teardown->[teardown],testJdbcSourceIncrementalFetchInContinuousMode->[assertAtleastNCompactionCommits,deltaStreamerTestRunner,makeConfig,assertRecordCount],testKafkaTimestampType->[prepareJsonKafkaDFSSource,prepareJsonKafkaDFSFiles,makeConfig,assertRecordCount],testParquetDFSSourceWithoutSchemaProviderAndTransformer->[testParquetDFSSource],testCsvDFSSourceWithHeaderWithoutSchemaProviderAndNoTransformer->[testCsvDFSSource],testInlineClustering->[assertAtLeastNReplaceCommits,deltaStreamerTestRunner,assertAtLeastNCommits,makeConfig],testCsvDFSSourceNoHeaderWithoutSchemaProviderAndNoTransformer->[testCsvDFSSource],testDeltaStreamerWithSpecifiedOperation->[assertDistanceCount,makeConfig,assertCommitMetadata,assertRecordCount],testCsvDFSSourceNoHeaderWithSchemaProviderAndNoTransformer->[testCsvDFSSource],testFilterDupes->[makeConfig,countsPerCommit,assertCommitMetadata,makeDropAllConfig,assertRecordCount],testCsvDFSSource->[makeConfig,prepareCsvDFSSource,assertRecordCount],testCsvDFSSourceNoHeaderWithSchemaProviderAndTransformer->[testCsvDFSSource],testParquetSourceToKafkaSourceLatestAutoResetValue->[testDeltaStreamerTransitionFromParquetToKafkaSource],testCsvDFSSourceWithHeaderAndSepWithoutSchemaProviderAndWithTransformer->[testCsvDFSSource],testAsyncClusteringServiceWithCompaction->[deltaStreamerTestRunner,assertAtLeastNCommits,makeConfig,getAsyncServicesConfigs,assertAtLeastNReplaceCommits,assertAtleastNCompactionCommits],testDeltaStreamerTransitionFromParquetToKafkaSource->[prepareJsonKafkaDFSSource,makeConfig,prepareJsonKafkaDFSFiles,prepareParquetDFSSource,assertRecordCount],testTableCreation->[makeConfig],testParquetSourceToKafkaSourceEarliestAutoResetValue->[testDeltaStreamerTransitionFromParquetToKafkaSource],testParquetDFSSource->[prepareParquetDFSSource,makeConfig,assertRecordCount],testParquetDFSSourceWithSourceSchemaFileAndNoTransformer->[testParquetDFSSource],testBulkInsertsAndUpsertsWithBootstrap->[makeConfig,countsPerCommit,asser
tCommitMetadata,assertDistanceCount,assertRecordCount],TripsWithDistanceTransformer->[apply->[DistanceUDF]],testPropsWithInvalidKeyGenerator->[makeConfig],testLatestCheckpointCarryOverWithMultipleWriters->[makeConfig,assertAtleastNDeltaCommits,testLatestCheckpointCarryOverWithMultipleWriters,prepareMultiWriterProps,assertAtleastNCompactionCommits,assertDistanceCount,assertRecordCount],TestHelpers->[makeConfig->[makeConfig]],runJobsInParallel->[assertAtleastNDeltaCommitsAfterCommit,assertAtleastNCompactionCommitsAfterCommit,assertDistanceCount,assertRecordCount],prepareParquetDFSSource->[prepareParquetDFSSource],testParquetDFSSourceWithSchemaFilesAndTransformer->[testParquetDFSSource],testUpsertsContinuousModeWithMultipleWriters->[makeConfig,assertAtleastNDeltaCommits,prepareMultiWriterProps,assertAtleastNCompactionCommits,assertDistanceCount,assertRecordCount],testHoodieAsyncClusteringJobWithScheduleAndExecute->[deltaStreamerTestRunner,initialHoodieClusteringJob,assertAtLeastNCommits,initialHoodieDeltaStreamer,assertAtLeastNReplaceRequests,assertAtLeastNReplaceCommits,assertNoReplaceCommits],testCsvDFSSourceWithHeaderAndSepWithoutSchemaProviderAndNoTransformer->[testCsvDFSSource],testCsvDFSSourceWithHeaderAndSepWithSchemaProviderAndTransformer->[testCsvDFSSource],testParquetDFSSourceWithoutSchemaProviderAndNoTransformer->[testParquetDFSSource],testAsyncClusteringService->[deltaStreamerTestRunner,assertAtLeastNCommits,makeConfig,getAsyncServicesConfigs,assertAtLeastNReplaceCommits],provideValidCliArgs->[getBaseConfig],cleanupClass->[cleanupClass],testNullSchemaProvider->[makeConfig],testHoodieAsyncClusteringJob->[deltaStreamerTestRunner,initialHoodieClusteringJob,buildHoodieClusteringUtilConfig,assertAtLeastNCommits,initialHoodieDeltaStreamer,assertAtLeastNReplaceCommits],deltaStreamerTestRunner->[deltaStreamerTestRunner,waitTillCondition],testBulkInsertsAndUpsertsWithSQLBasedTransformerFor2StepPipeline->[makeConfig,countsPerCommit,assertCommitMetadata,assertDistance
CountWithExactValue,assertDistanceCount,makeConfigForHudiIncrSrc,assertRecordCount],buildHoodieClusteringUtilConfig->[buildHoodieClusteringUtilConfig],testPayloadClassUpdateWithCOWTable->[makeConfig,assertRecordCount],testPayloadClassUpdate->[makeConfig,assertRecordCount],testJsonKafkaDFSSource->[prepareJsonKafkaDFSSource,prepareJsonKafkaDFSFiles,makeConfig,assertRecordCount],testCsvDFSSourceNoHeaderWithoutSchemaProviderAndWithTransformer->[testCsvDFSSource],setup->[setup],testKafkaConnectCheckpointProvider->[makeDropAllConfig],testUpsertsContinuousMode->[makeConfig,assertAtleastNDeltaCommits,assertAtleastNCompactionCommits,assertDistanceCount,assertRecordCount],testCsvDFSSourceWithHeaderAndSepWithSchemaProviderAndNoTransformer->[testCsvDFSSource]]]
This method is used to prepare a csv file with schema files and transformer. Generate records for missing records.
can we use `@ParameterizedTest` here? with `@MethodSource` returning `Stream<Arguments>` to make it cleaner
@@ -45,7 +45,12 @@ export default class Video extends Component<*> { * values: 0 for the remote video(s) which appear in the background, and * 1 for the local video(s) which appear above the remote video(s). */ - zOrder: PropTypes.number + zOrder: PropTypes.number, + + /** + * Indicates if zooming (pinch & zoom and/or drag) is enabled or not. + */ + zoomEnabled: PropTypes.bool }; /**
[No CFG could be retrieved]
A component that is invoked when a video is mounted. The element to render.
maybe instead of making this feature specific we could just expose the 'objectFit' property and pass it over to the RTCView ? make the decision on the upper level
@@ -671,6 +671,9 @@ namespace Microsoft.Xna.Framework.Graphics // Get Direct3D 11.1 context _d3dContext = _d3dDevice.ImmediateContext.QueryInterface<SharpDX.Direct3D11.DeviceContext>(); + + // Create a new instance of GraphicsDebug because we support it on Windows platforms. + _graphicsDebug = new GraphicsDebug(this); } internal void SetHardwareFullscreen()
[No CFG could be retrieved]
The base implementation of the functions that are used to create the device. This function clears the render targets and depth - stencil views.
This should be moved to the common non-platform specific code if we really plan to support it across all platforms. It is better than the developer having to check for `null` on `GraphicsDebug`.
@@ -154,9 +154,11 @@ public class MockitoAnnotatedObjectsShouldBeInitializedCheck extends IssuableSub for (VariableTree field : collected) { if (field.type().symbolType().is("org.mockito.junit.MockitoRule")) { ExpressionTree initializer = field.initializer(); + Symbol symbol = field.symbol(); if (initializer != null && initializer.is(Tree.Kind.METHOD_INVOCATION) && - MOCKITO_JUNIT_RULE.matches((MethodInvocationTree) initializer) && - field.symbol().metadata().isAnnotatedWith(RULE_ANNOTATION)) { + symbol.metadata().isAnnotatedWith(RULE_ANNOTATION) && + symbol.isPublic() && + isInitializedWithRule((MethodInvocationTree) initializer)) { return true; } }
[MockitoAnnotatedObjectsShouldBeInitializedCheck->[isMetaAnnotated->[isMetaAnnotated],leaveFile->[leaveFile],mocksAreProperlyInitialized->[hasAnnotation]]]
Checks if a mockito. junit. MockitoRule is invoked.
if it's not `public`, what happens? The mock is not initialized, or the test crashes? Doesn't it depend on the version of JUnit? JUnit5 relaxed visibility constraints. Now: * If the unit test crashes, I would simply drop the "isPublic" test. It will crash anyway. * if the unit test stays silent and it seems it works while the mock is indeed not initialized, I would keep the case, but we need to be more explicit about the issue message. We basically **know** that there is an issue if it's not public, so we should report it correctly (what about a secondary?)
@@ -47,13 +47,16 @@ public class TimestampParser public DateTime apply(String input) { Preconditions.checkArgument(input != null && !input.isEmpty(), "null timestamp"); + String inputWithoutTimeZone = ParserUtils.removeTimeZone(input); + DateTimeZone timeZone = ParserUtils.extractTimeZone(input); + for (int i = 0; i < input.length(); i++) { if (input.charAt(i) < '0' || input.charAt(i) > '9') { - return parser.parseDateTime(ParserUtils.stripQuotes(input)); + return new DateTime(parser.parseDateTime(ParserUtils.stripQuotes(inputWithoutTimeZone)), timeZone); } } - return DateTimes.utc(Long.parseLong(input)); + return new DateTime(Long.parseLong(inputWithoutTimeZone), timeZone); } }; } else if (format.equalsIgnoreCase("iso")) {
[TimestampParser->[createObjectTimestampParser->[apply->[apply],createNumericTimestampParser,createTimestampParser],createTimestampParser->[apply->[apply]]]]
Creates a parser which parses a timestamp according to the specified format. returns a function that converts a string into a date - time.
I think the above two methods are reasonable to call only inside this loop, inside the condition `(input.charAt(i) < '0' || input.charAt(i) > '9')`. Because I don't think we expect input like `1234567890 UTC`. Also those methods could be joined together and optimized by checking counts of `' '` (space) occurrences and using `indexOf()` and `substring()`, rather than unconditionally splitting using regex string, that incurs creation of `Pattern` and `Matcher` objects, and then string concatenation again.
@@ -241,6 +241,7 @@ class SpringMuleContextServiceConfigurator { .put(OBJECT_SCHEDULER_BASE_CONFIG, getBeanDefinition(SchedulerBaseConfigFactory.class)) .put(OBJECT_CLUSTER_SERVICE, getBeanDefinition(DefaultClusterService.class)) .put(LAZY_COMPONENT_INITIALIZER_SERVICE_KEY, getBeanDefinition(NoOpLazyComponentInitializer.class)) + .put(METADATA_CACHE_MANAGER_KEY, getBeanDefinition(DefaultPersistentMetadataCacheManager.class)) .build(); private final SpringConfigurationComponentLocator componentLocator;
[SpringMuleContextServiceConfigurator->[absorbOriginalRegistry->[registerBeanDefinition],getConstantObjectBeanDefinition->[getBeanDefinition],createLocalObjectStoreBeanDefinitions->[getCustomServiceBeanDefinition,registerBeanDefinition],createLocalLockFactoryBeanDefinitions->[getCustomServiceBeanDefinition,registerBeanDefinition],createQueueManagerBeanDefinitions->[getCustomServiceBeanDefinition,registerBeanDefinition],registerBeanDefinition->[registerBeanDefinition],getFixedTypeConstantObjectBeanDefinition->[getBeanDefinition],getPrimaryBeanDefinition->[getBeanDefinition],getBeanDefinition->[getBeanDefinition]]]
This method is invoked by the MuleContextServiceConfigurator. It is called by the M.
Is this only to be able to inject things into instances? Can't you use the muleContext's injector instead?
@@ -198,6 +198,14 @@ EVENTS = { Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.), }, + EventName.startupWhitePanda: { + ET.PERMANENT: Alert( + "WARNING: White panda is deprecated", + "Upgrade to comma two or black panda", + AlertStatus.userPrompt, AlertSize.mid, + Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.), + }, + EventName.startupMaster: { ET.PERMANENT: Alert( "WARNING: This branch is not tested",
[calibration_incomplete_alert->[Alert],below_steer_speed_alert->[Alert],NoEntryAlert,EngagementAlert,ImmediateDisableAlert,SoftDisableAlert,Alert]
Create a Alert object that is not in the list of alerts. A list of events that fire when a specific unsupported car is detected.
The other alert capitalizes panda
@@ -76,10 +76,9 @@ public final class ParameterGroupArgumentResolver<T> implements ArgumentResolver T group = type.newInstance(); for (Field parameterField : parameterFields) { - Object value = operationContext.getParameter(getAlias(parameterField)); - if (value != null) + if (operationContext.hasParameter(getAlias(parameterField))) { - parameterField.set(group, value); + parameterField.set(group, operationContext.getParameter(getAlias(parameterField))); } }
[ParameterGroupArgumentResolver->[resolve->[resolve]]]
Resolves a parameter group.
this doesn't seem like the exact same behaviour. If the parameter was not applied, then nothing is set. That means that a field with a default value will no longer be assigned to `null`
@@ -556,7 +556,7 @@ class TFLiteConverterBaseV2(TFLiteConverterBase): # We only support integer types for post training integer quantization # as we have statistical information to quantize the input and output. if quant_mode.is_post_training_integer_quantize(): - all_types = default_types + [constants.INT8, constants.QUANTIZED_UINT8] + all_types = default_types + [constants.INT8, constants.INT16, constants.QUANTIZED_UINT8] if self.inference_input_type not in all_types or \ self.inference_output_type not in all_types: all_types_names = ["tf." + t.name for t in all_types]
[TFLiteConverterV2->[from_saved_model->[from_saved_model,TFLiteSavedModelConverterV2],from_keras_model->[TFLiteKerasModelConverterV2]],TFLiteConverter->[from_saved_model->[TFLiteSavedModelConverter],from_keras_model_file->[TFLiteKerasModelConverter]],TFLiteConverterBaseV2->[_validate_inference_input_output_types->[is_post_training_integer_quantize],convert->[QuantizationMode,_get_base_converter_args,_calibrate_quantize_model,_validate_inference_input_output_types,quantizer_flags,_is_unknown_shapes_allowed,converter_flags]],TFLiteConverterBase->[_calibrate_quantize_model->[RepresentativeDataset],_parse_saved_model_args->[_contains_function_with_implements_attr],__init__->[TargetSpec]],TFLiteSavedModelConverterV2->[__init__->[_parse_saved_model_args]],TFLiteKerasModelConverterV2->[convert->[_grappler_config,_convert_as_saved_model],_convert_as_saved_model->[_parse_saved_model_args]],TFLiteConverterBaseV1->[__setattr__->[__setattr__],_set_batch_size->[_has_valid_tensors],__getattribute__->[__getattribute__],convert->[contains_training_quant_op,_grappler_config,_get_base_converter_args,converter_flags,_calibrate_quantize_model,quantizer_flags,_is_unknown_shapes_allowed,_validate_quantized_input_stats,QuantizationMode]],TFLiteFrozenGraphConverterV2->[convert->[_grappler_config]],TFLiteSavedModelConverter->[__init__->[_parse_saved_model_args]],TFLiteFrozenGraphConverter->[__init__->[_has_valid_tensors]],TFLiteKerasModelConverter->[convert->[_convert_as_saved_model],_convert_as_saved_model->[_parse_saved_model_args]],TocoConverter->[from_saved_model->[from_saved_model],from_keras_model_file->[from_keras_model_file],from_session->[from_session],from_frozen_graph->[from_frozen_graph]],QuantizationMode->[is_post_training_integer_quantize->[post_training_int8_allow_float,post_training_int8_no_float],fp32_execution->[post_training_fp16,training_time_int8_allow_float,post_training_dynamic_range_int8,post_training_int8_allow_float,post_training_int16x8_no_float,post_training_int1
6x8_allow_float,post_training_int8_no_float],quantizer_flags->[post_training_int8_allow_float,post_training_int16x8_no_float,activations_type,post_training_int16x8_allow_float,post_training_int8_no_float],_validate_int8_required->[RepresentativeDataset],converter_flags->[post_training_fp16,training_time_int8_allow_float,is_post_training_integer_quantize,post_training_dynamic_range_int8,activations_type]]]
Validate inference_input_type and inference_output_type flags.
change this line to (for consistency): `all_types = default_types + [constants.INT8, constants.QUANTIZED_UINT8, constants.INT16]`
@@ -56,6 +56,13 @@ type RaftCluster struct { wg sync.WaitGroup quit chan struct{} + + status *ClusterStatus +} + +// ClusterStatus saves some state information +type ClusterStatus struct { + RaftBootstrapTime *time.Time `json:"raft_bootstrap_time,omitempty"` } func newRaftCluster(s *Server, clusterID uint64) *RaftCluster {
[putStore->[putStore],RemoveStore->[putStore],bootstrapCluster->[start,getClusterRootPath],stop->[stop],checkStores->[BuryStore],stopRaftCluster->[stop],runBackgroundJobs->[checkStores,collectMetrics],createRaftCluster->[start,isRunning],GetRaftCluster->[isRunning],BuryStore->[putStore]]
newRaftCluster returns a RaftCluster object that can be used to manage a single met returns a managed object that can be used to manage a specific node in the cluster.
don't use the pointer here.
@@ -240,6 +240,12 @@ class InvoiceUpdate(ModelMutation): ) instance.status = JobStatus.SUCCESS instance.save(update_fields=["external_url", "number", "updated_at", "status"]) + instance.order.events.create( + type=OrderEvents.INVOICE_GENERATED, + order=instance.order, + user=info.context.user, + parameters={"invoice_number": cleaned_input["number"]}, + ) return InvoiceUpdate(invoice=instance)
[InvoiceRequestDelete->[perform_mutation->[InvoiceRequestDelete]],InvoiceRequest->[perform_mutation->[clean_order,InvoiceRequest]],InvoiceSendEmail->[perform_mutation->[InvoiceSendEmail,clean_instance]],InvoiceCreate->[Arguments->[InvoiceCreateInput],perform_mutation->[clean_order,InvoiceCreate,clean_input]],InvoiceUpdate->[Arguments->[UpdateInvoiceInput],perform_mutation->[clean_input,InvoiceUpdate]]]
Perform a mutation on an orphan invoice.
Maybe `InvoiceUpdate` should use a different event type?
@@ -51,9 +51,6 @@ class MapDescriptionYamlGenerator { final String mapName = mapFolder.getFileName().toString(); - final Path propsFile = mapFolder.resolveSibling(mapName + ".properties"); - final int downloadVersion = Files.exists(propsFile) ? readDownloadVersion(propsFile) : 0; - final List<MapDescriptionYaml.MapGame> games = readGameInformationFromXmls(mapFolder); if (games.isEmpty()) { return Optional.empty();
[MapDescriptionYamlGenerator->[readDownloadVersion->[getMessage,error,toAbsolutePath,newInputStream,orElse,Properties,load],readGameInformationFromXmls->[toList,collect],generateYamlDataForMap->[resolveSibling,resolve,isValid,checkArgument,readDownloadVersion,build,isPresent,toAbsolutePath,isDirectory,readGameInformationFromXmls,empty,writeYmlPojoToFile,toString,isEmpty,warn,exists,delete],parseXmlTags->[mapXmlToObject,of,toAbsolutePath,newInputStream,empty,info],readGameNameFromXml->[filter,not]]]
Generates a yaml file for a given map folder.
Before when we were generating 'map.yaml' files we read the properties file to get the map version. With map version now removed, we no longer need to check for a properties file and can simply remove it a little further on.
@@ -86,6 +86,7 @@ static bool sta_config_equal(const station_config& lhs, const station_config& rh // ----------------------------------------------------------------------------------------------------------------------- bool ESP8266WiFiSTAClass::_useStaticIp = false; +bool ESP8266WiFiSTAClass::_useInsecureWEP = false; /** * Start Wifi connection
[begin->[begin],_smartConfigCallback->[stopSmartConfig],hostname->[hostname],beginWPSConfig->[disconnect]]
Checks if two STA configuration objects are equal. Returns a new instance of the class that will be used to create the class.
Is this here on purpose, or did it slip in? I'm ok with what I see of the code, but given the nature of this functionality I suggest splitting it into its own PR.
@@ -1140,7 +1140,7 @@ def download_git_stored_file(request, version_id, filename): raise http.Http404 if version.channel == amo.RELEASE_CHANNEL_LISTED: - is_owner = acl.check_addon_ownership(request, addon, dev=True) + is_owner = acl.check_addon_ownership(request, addon, allow_developer=True) if not (acl.is_reviewer(request, addon) or is_owner): raise PermissionDenied else:
[leaderboard->[context],queue_extension->[_queue],queue_recommended->[_queue],ReviewAddonVersionDraftCommentViewSet->[get_object->[_verify_object_permissions,get_queryset],get_extra_comment_data->[get_version_object],get_serializer_context->[get_extra_comment_data,get_version_object],get_version_object->[_verify_object_permissions,get_addon_object]],review->[determine_channel,context],queue_auto_approved->[_queue],queue_pending_rejection->[_queue],queue_scanners->[_queue],ReviewAddonVersionViewSet->[list->[get_queryset]],abuse_reports->[context],_queue->[is_admin_reviewer,filter_admin_review_for_legacy_queue,context],unlisted_pending_manual_approval->[_queue],ReviewAddonVersionMixin->[check_permissions->[get_addon_object]],eula->[policy_viewer],whiteboard->[determine_channel],fetch_queue_counts->[count_from_registered_table->[construct_count_queryset_from_queryset],construct_count_queryset_from_queryset,count_from_registered_table],queue_content_review->[_queue],policy_viewer->[determine_channel],queue_theme_nominated->[_queue],privacy->[policy_viewer],AddonReviewerViewSet->[deny_resubmission->[deny_resubmission],allow_resubmission->[allow_resubmission]],save_motd->[context],dashboard->[context],queue_theme_pending->[_queue],ratings_moderation_log->[context],queue_moderated->[context],unlisted_list->[_queue],motd->[context],ReviewAddonVersionCompareViewSet->[retrieve->[get_objects,get_serializer],get_objects->[filter_queryset,get_queryset,check_object_permissions],get_serializer->[get_serializer_context]],ratings_moderation_log_detail->[context],queue_mad->[_queue],reviewlog->[context]]
Download a git blob or tree item from a version.
do we want to allow_site_permission here too?
@@ -4498,7 +4498,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac String.format("VM %s is at %s and we received a %s report while there is no pending jobs on it" , vm.getInstanceName(), vm.getState(), vm.getPowerState())); } - if(vm.isHaEnabled() && vm.getState() == State.Running + if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) {
[VirtualMachineManagerImpl->[handleVmWorkJob->[handleVmWorkJob],plugNic->[findById],handlePowerOffReportWithNoPendingJobsOnVM->[sendStop,stateTransitTo,getVmGuru],migrateWithStorage->[expunge],orchestrateMigrateAway->[migrate,orchestrateMigrateAway,findById,advanceStop],getCandidateStoragePoolsToMigrateLocalVolume->[isStorageCrossClusterMigration],CleanupTask->[runInContext->[cleanup]],orchestrateStart->[areAffinityGroupsAssociated,orchestrateStart,changeState,setupAgentSecurity,getVmGuru,changeToStartState,findById],cleanup->[sendStop,getControlNicIpForVM,getExecuteInSequence],storageMigration->[expunge],sendStop->[getExecuteInSequence,getVolumesToDisconnect],replugNic->[findById],migrateVmForScaleThroughJobQueue->[VmJobVirtualMachineOutcome],orchestrateMigrate->[migrate,orchestrateMigrate,findById],upgradeVmDb->[findById],removeNicFromVmThroughJobQueue->[VmJobVirtualMachineOutcome],orchestrateRemoveNicFromVm->[findById,toNicTO,orchestrateRemoveNicFromVm],migrate->[getExecuteInSequence,expunge,changeState,cleanup,checkVmOnHost,getVmGuru,stateTransitTo],orchestrateStop->[orchestrateStop,findById,advanceStop],orchestrateMigrateForScale->[getExecuteInSequence,toVmTO,stateTransitTo,changeState,cleanup,getVmGuru,orchestrateMigrateForScale,findById,checkVmOnHost],migrateVmWithStorageThroughJobQueue->[VmStateSyncOutcome],removeVmFromNetworkThroughJobQueue->[VmJobVirtualMachineOutcome],scanStalledVMInTransitionStateOnUpHost->[handlePowerOnReportWithNoPendingJobsOnVM,handlePowerOffReportWithNoPendingJobsOnVM,findById],destroy->[doInTransactionWithoutResult->[stateTransitTo],advanceStop],moveVmToMigratingState->[changeState],orchestrateReconfigure->[reConfigureVm,findById],removeNicFromVm->[expunge],advanceStop->[getExecuteInSequence,expunge,changeState,cleanup,getVmGuru],VmJobVirtualMachineOutcome->[retrieve->[findById],checkCondition->[findById]],processAnswers->[syncVMMetaData],addExtraConfig->[addExtraConfig],migrateThroughHypervisorOrStorage->[afterHypervisorMigrationCl
eanup,storageMigration,attemptHypervisorMigration],preStorageMigrationStateCheck->[stateTransitTo],start->[start],advanceStart->[advanceStart,expunge],unplugNic->[findById],allocate->[doInTransactionWithoutResult->[allocate],allocate],orchestrateReConfigureVm->[findById,upgradeVmDb],getVmTO->[toVmTO],migrateForScale->[expunge],handlePowerOnReportWithNoPendingJobsOnVM->[stateTransitTo],stopVmThroughJobQueue->[VmStateSyncOutcome],addVmToNetworkThroughJobQueue->[VmJobVirtualMachineOutcome],restoreVirtualMachineThroughJobQueue->[VmJobVirtualMachineOutcome],orchestrateStorageMigration->[orchestrateStorageMigration,findById,stateTransitTo],VmStateSyncOutcome->[retrieve->[findById],checkCondition->[findById]],startVmThroughJobQueue->[VmStateSyncOutcome],toNicTO->[toNicTO],scanStalledVMInTransitionStateOnDisconnectedHosts->[findById],rebootVmThroughJobQueue->[VmJobVirtualMachineOutcome],reconfigureVmThroughJobQueue->[VmJobVirtualMachineOutcome],reConfigureVm->[expunge],orchestrateRemoveVmFromNetwork->[findById,toNicTO,orchestrateRemoveVmFromNetwork],migrateVmStorageThroughJobQueue->[VmJobVirtualMachineOutcome],changeToStartState->[checkWorkItems],orchestrateAddVmToNetwork->[orchestrateAddVmToNetwork,findById],HandlePowerStateReport->[findById],orchestrateReboot->[getExecuteInSequence,orchestrateReboot,findById],migrateAway->[expunge],findById->[findById],findHostAndMigrate->[findById],migrateVmThroughJobQueue->[VmStateSyncOutcome],migrateVmAwayThroughJobQueue->[VmStateSyncOutcome],orchestrateRestoreVirtualMachine->[restoreVirtualMachine,findById,orchestrateRestoreVirtualMachine],advanceExpunge->[advanceExpunge,getVmGuru],addVmToNetwork->[expunge],restoreVirtualMachine->[createPlaceHolderWork,expunge,findById],advanceReboot->[expunge],checkIfCanUpgrade->[isVirtualMachineUpgradable],moveVmOutofMigratingStateOnSuccess->[changeState],orchestrateMigrateWithStorage->[moveVmToMigratingState,stateTransitTo,cleanup,getVmGuru,moveVmOutofMigratingStateOnSuccess,createMappingVolumeAndS
toragePool,orchestrateMigrateWithStorage,findById,checkVmOnHost],isVirtualMachineUpgradable->[isVirtualMachineUpgradable]]]
Handles a power-off report with no pending jobs on the given VM. DeployIn - deploy in state.
ForceHA setting is scoped to cluster, I think we need to use ForceHA.valueIn(clusterid) method
@@ -15,6 +15,7 @@ public class ConfigurationBuilderHolder { private ConfigurationBuilder currentConfigurationBuilder; private final Map<Class<? extends ConfigurationParser>, ParserContext> parserContexts; private final WeakReference<ClassLoader> classLoader; + private String defaultCacheName; public ConfigurationBuilderHolder() { this(Thread.currentThread().getContextClassLoader());
[ConfigurationBuilderHolder->[getClassLoader->[get],getParserContext->[get],setParserContext->[put],newConfigurationBuilder->[ConfigurationBuilder,read,build,put],getContextClassLoader,GlobalConfigurationBuilder,ConfigurationBuilder]]
package for testing.
why do we need the `defaultCacheName`? I think that it is defined somewhere as a public constant...
@@ -96,6 +96,12 @@ namespace Content.Server.GameObjects } if (entity.HasComponent(typeof(SpeciesComponent))) { + if (!entity.TryGetComponent<IMoverComponent>(out var mover)) return; + if (!entity.TryGetComponent<ITransformComponent>(out var entityTransform)) return; + if (!Owner.TryGetComponent<ITransformComponent>(out var ownerTransform)) return; + + var dotProduct = Vector2.Dot(mover.VelocityDir.Normalized, (entityTransform.WorldPosition - ownerTransform.WorldPosition).Normalized); + if (dotProduct <= -0.9f) TryOpen(entity); } }
[ServerDoorComponent->[Open->[Open,SetAppearance],TryClose->[CanClose],OnUpdate->[Open,Close,CanClose],OnRemove->[OnRemove],Initialize->[Initialize],CanOpen->[CanOpen],ExposeData->[ExposeData],Activate->[ActivateImpl],Deny->[SetAppearance,Deny],CanClose->[CanClose],TryOpen->[CanOpen],Close->[SetAppearance]]]
CollideWith method.
This shouldn't be necessary. As an entity, `Owner` is guaranteed to have a transform component, accessible through `Owner.Transform`. Same for `entity.Transform`.
@@ -87,6 +87,8 @@ public class UnclosedResourcesCheck extends SECheck { public String excludedTypes = ""; private final List<String> excludedTypesList = new ArrayList<>(); + private final Set<TryStatementTree> tryWithResourcesTrees = new HashSet<>(); + private final Set<Tree> knownResources = new HashSet<>(); private Type visitedMethodOwnerType; private static final String JAVA_IO_AUTO_CLOSEABLE = "java.lang.AutoCloseable";
[UnclosedResourcesCheck->[reportIssue->[reportIssue],PreStatementVisitor->[visitIdentifier->[closeResource,isTryStatementResource],visitMethodInvocation->[WrappedValueFactory,name],closeResource->[closeResource,wrappedValue],visitNewClass->[WrappedValueFactory,isOpeningResource],shouldWrapArgument->[isCloseable]],isOpeningResource->[excludedByRuleOption,needsClosing],WrappedValueFactory->[createSymbolicValue->[ResourceWrapperSymbolicValue]],isCloseable->[isCloseable],name->[name],PostStatementVisitor->[methodOpeningResource->[excludedByRuleOption,isWithinTryHeader,needsClosing],visitNewClass->[isOpeningResource]]]]
Creates a constraint which can be used to check if a resource is open or closed. This method is used to build a unique identifier for the stream.
It was not clear to me that the content of this set was used only to know if you already collected the resources from the try. Maybe renaming it as `visitedTryWithResourcesTrees ` or something like this would help?
@@ -28,7 +28,7 @@ namespace Microsoft.Extensions.Logging /// <summary> /// Creates a new <see cref="LoggerFactory"/> instance. /// </summary> - public LoggerFactory() : this(Enumerable.Empty<ILoggerProvider>()) + public LoggerFactory() : this(Array.Empty<ILoggerProvider>()) { }
[LoggerFactory->[Dispose->[Dispose],DisposingLoggerFactory->[Dispose->[Dispose],AddProvider->[AddProvider]]]]
Creates a new LoggerFactory with the given providers and options. unexpectedTag - The tag to use in producing.
do we have an analyser|compiler warning for places where `Array.Empty` could be used instead of `Enumerable.Empty`?
@@ -30,6 +30,14 @@ import ( type ShutdownHook func() error +type LoginHook interface { + OnLogin() +} + +type LogoutHook interface { + OnLogout() +} + type GlobalContext struct { Log logger.Logger // Handles all logging VDL *VDebugLog // verbose debug log
[ResetLoginState->[createLoginStateLocked],GetStoredSecretServiceName->[GetStoredSecretServiceName],GetStoredSecretAccessGroup->[GetStoredSecretAccessGroup],GetCacheDir->[GetCacheDir],ConfigureUsage->[ConfigureKeyring,ConfigureAPI,ConfigureCaches,UseKeyring,ConfigureMerkleClient,ConfigureTimers,ConfigureExportedStreams,ConfigureConfig,Configure],GetRuntimeDir->[GetRuntimeDir],SetCommandLine->[SetCommandLine],GetUsersWithStoredSecrets->[GetUsersWithStoredSecrets],Logout->[createLoginStateLocked,Logout],Configure->[SetCommandLine,ConfigureLogging],Shutdown->[LoginState,Shutdown],GetMyUID->[LoginState],GetRunMode->[GetRunMode],ConfigReload->[ConfigureConfig],Init]
The global context for a single keybase object. Cache of identify2 results for fast-pathing identify2 RPCs.
should we change the login notifications we send to KBFS and Electron to use this? Does "Login" mean "authenticate to session server"?
@@ -167,12 +167,17 @@ def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor: boxes_xyxy = _box_xywh_to_xyxy(boxes) if out_fmt == "cxcywh": boxes_converted = _box_xyxy_to_cxcywh(boxes_xyxy) + else: + raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") elif in_fmt == "cxcywh": boxes_xyxy = _box_cxcywh_to_xyxy(boxes) if out_fmt == "xywh": boxes_converted = _box_xyxy_to_xywh(boxes_xyxy) - + else: + raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") + else: + raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") # convert one to xyxy and change either in_fmt or out_fmt to xyxy else: if in_fmt == "xyxy":
[box_iou->[box_area],batched_nms->[nms],nms->[nms],generalized_box_iou->[box_area]]
Converts boxes from given input format to given output format. Convert boxes in the box_format to xyxy or cxcywh.
You don't actually need those to make torchscript happy. The problem is that `boxes_converted` is not defined in the other branch. You can define it outside of the `if` so that torchscript knows its type.
@@ -37,6 +37,7 @@ func (s *APIServer) APIHandler(h http.HandlerFunc) http.HandlerFunc { c := context.WithValue(r.Context(), "decoder", s.Decoder) //nolint c = context.WithValue(c, "runtime", s.Runtime) //nolint c = context.WithValue(c, "shutdownFunc", s.Shutdown) //nolint + c = context.WithValue(c, "idletracker", s.idleTracker) //nolint r = r.WithContext(c) h(w, r)
[APIHandler->[Context,ParseForm,InternalServerError,Infof,Warnf,WithContext,WithValue,String,Errorf,Stack,Debugf]]
APIHandler returns an http.HandlerFunc that wraps the given http.HandlerFunc.
Added idletracker to context so that it can be accessed during hijacking
@@ -49,6 +49,14 @@ module Admin redirect_to "/admin/users/#{@user.id}/edit" end + def export_data + user = User.find(params[:id]) + send_to_admin = JSON.parse(params[:send_to_admin]) + ExportContentWorker.perform_async(user.id, send_to_admin: send_to_admin) + flash[:success] = "Data exported to the #{send_to_admin ? 'admin' : 'user'}. The job will complete momentarily." + redirect_to admin_users_edit_path(user.id) + end + def banish Moderator::BanishUserWorker.perform_async(current_user.id, params[:id].to_i) flash[:success] = "This user is being banished in the background. The job will complete soon."
[UsersController->[remove_credits->[remove_from,to_i],add_note->[create,id],banish->[redirect_to,perform_async,id,to_i],unlock_access->[redirect_to,unlock_access!,find,admin_user_path],user_params->[permit],add_credits->[to_i,add_to],show->[limit,find,verified_at,order,includes],full_delete->[call,redirect_to,find,message,username,id,presence],send_email->[redirect_back,deliver_now],remove_identity->[redirect_to,id,find,user,update,message,backup!,capitalize,provider,delete],verify_email_ownership->[redirect_back,deliver_now],edit->[find,load],add_org_credits->[to_i,find,add_to],update->[redirect_to,find],index->[per],user_status->[redirect_to,find,message,handle_user_roles,id],merge->[call,redirect_to,find,message,id],set_feedback_messages->[limit],recover_identity->[instance_user,redirect_to,find,message,recover!,id,provider],remove_org_credits->[to_i,remove_from,find],log,layout,after_action,dup]]
Checks if user has not been updated and if so updates it.
Why do you use `JSON.parse` here? I would think the param would be a simple true/false.
@@ -25,6 +25,7 @@ namespace DotNetNuke.Build.Tasks public override void Run(Context context) { this.RenameResourcesFor98xUpgrades(context); + this.RenameResourcesFor910xUpgrades(context); context.CreateDirectory(context.ArtifactsFolder); var excludes = new string[context.PackagingPatterns.InstallExclude.Length + context.PackagingPatterns.UpgradeExclude.Length]; context.PackagingPatterns.InstallExclude.CopyTo(excludes, 0);
[CreateUpgrade->[Run->[Zip,Add,GetBuildNumber,CopyTo,Information,Count,WebsiteFolder,GetFiles,ArtifactsFolder,CreateDirectory,RenameResourcesFor98xUpgrades,GetFilesByPatterns,Length],RenameResourcesFor98xUpgrades->[GetFilesByPatterns,ToString,Move,WebsiteFolder]]]
Creates the necessary folders and zips the upgrade.
Considering that this code is only running for the current version releases, and is internal to the build process. Could we combine the existing method and this method to something such as `RenameResourcesForUpgrades` which is more descriptive of the action necessary and leave the `[obsolete]` tag?
@@ -48,14 +48,14 @@ class CheckoutQueries(graphene.ObjectType): def resolve_checkout(self, *_args, token): return resolve_checkout(token) - @permission_required(OrderPermissions.MANAGE_ORDERS) + @permission_required(CheckoutPermissions.MANAGE_CHECKOUTS) def resolve_checkouts(self, *_args, **_kwargs): resolve_checkouts() def resolve_checkout_line(self, info, id): return graphene.Node.get_node_from_global_id(info, id, CheckoutLine) - @permission_required(OrderPermissions.MANAGE_ORDERS) + @permission_required(CheckoutPermissions.MANAGE_CHECKOUTS) def resolve_checkout_lines(self, *_args, **_kwargs): return resolve_checkout_lines()
[CheckoutQueries->[resolve_checkout->[resolve_checkout],resolve_checkout_lines->[resolve_checkout_lines],resolve_checkouts->[resolve_checkouts]]]
Resolve a node for a checkout.
It won't work. This method doesn't return anything
@@ -0,0 +1,18 @@ +const OriginToken = artifacts.require('./token/OriginToken.sol') +const VA_Marketplace = artifacts.require('./V00_Marketplace.sol') + +module.exports = function(deployer, network) { + return createTokenWhitelist(network) +} + +async function createTokenWhitelist(network) { + const token = await OriginToken.deployed() + const tokenOwner = await token.owner() + + + // Marketplace must be able to send OGN to any address (for refunds) and + // receive OGN from any address (for offers with commissions). + const marketplace = await VA_Marketplace.deployed() + await token.addAllowedTransactor(marketplace.address, { from: tokenOwner }) + console.log(`Added marketplace ${marketplace.address} to whitelist`) +}
[No CFG could be retrieved]
No Summary Found.
This should refer to VA not V00 :)
@@ -34,7 +34,6 @@ with this program; if not, write to the Free Software Foundation, Inc., #include "util/string.h" #include "nodedef.h" -extern MainGameCallback *g_gamecallback; int ModApiClient::l_get_current_modname(lua_State *L) {
[No CFG could be retrieved]
2016-11-26: check for the name of the module that is being run.
note, this is a dup
@@ -406,6 +406,15 @@ class RaidenAPI(object): return all_channels + def get_node_network_state(self, node_address): + """ Returns the currently network status of `node_address`. """ + return self.raiden.protocol.nodeaddresses_networkstatuses[node_address] + + def start_health_check_for(self, node_address): + """ Returns the currently network status of `node_address`. """ + self.raiden.start_health_check_for(node_address) + return self.raiden.protocol.nodeaddresses_networkstatuses[node_address] + def get_tokens_list(self): """Returns a list of tokens the node knows about""" tokens_list = list(self.raiden.channelgraphs.iterkeys())
[RaidenAPI->[create_default_identifier->[create_default_identifier],transfer_async->[transfer_async],settle->[settle],deposit->[deposit],close->[close]]]
Returns a list of all channels associated with the optionally given token_address and partner_address.
Q: how does accessing the dictionary "start" the health check?
@@ -184,7 +184,7 @@ func (s *podStorage) Merge(source string, change interface{}) error { if len(deletes.Pods) > 0 { s.updates <- *deletes } - if len(adds.Pods) > 0 { + if len(adds.Pods) > 0 || firstSet { s.updates <- *adds } if len(updates.Pods) > 0 {
[Channel->[Insert,Lock,Unlock,Channel],SeenAllSources->[seenSources,Infof,List,V,HasAll],markSourceSet->[Insert,Lock,Unlock],seenSources->[Lock,Unlock,HasAll],MergedState->[Copy,RUnlock,Errorf,RLock],Sync->[MergedState,Sync,Lock,Unlock],merge->[markSourceSet,Unlock,Infof,Warningf,GetPodFullName,V,Lock],Merge->[MergedState,Unlock,Sprintf,merge,Lock],Wait->[Forever],Has,NewFieldDuplicate,FormatPodName,Infof,NewTimestamp,NewAggregate,NewMux,Sprintf,Warningf,GetPodFullName,DeepEqual,GetString,V,Eventf,Insert,ValidatePod]
Merge merges the given change into the storage. If the change is nil the storage is updated.
Where is `firstSet` from? I don't see it.
@@ -41,6 +41,16 @@ public class QuarkusGenerateCode extends QuarkusTask { super("Performs Quarkus pre-build preparations, such as sources generation"); } + /** + * Create a dependency on classpath resolution. This makes sure included build are build this task runs. + * + * @return resolved compile classpath + */ + @CompileClasspath + public FileCollection getClasspath() { + return QuarkusGradleUtils.getSourceSet(getProject(), SourceSet.MAIN_SOURCE_SET_NAME).getCompileClasspath(); + } + @TaskAction public void prepareQuarkus() { getLogger().lifecycle("preparing quarkus application");
[QuarkusGenerateCode->[prepareQuarkus->[bootstrap,findAny,getAbsolutePath,setPaths,createDeploymentClassLoader,GradleException,forEach,lifecycle,getOutputPaths,getBuildSystemProperties,isEmpty,getAppModel,debug,loadClass,isPresent,invoke,next,getProject,getName,getAppArtifact,getAppModelResolver,findByName,getConvention,add,toPath,findPlugin]]]
Prepares the quarkus application. This method is called when the QuarkusPrepare task is executed.
Just curious, do you know what it was before this change?
@@ -860,11 +860,17 @@ static int hda_dma_remove(struct dma *dma) static int hda_dma_avail_data_size(struct hda_chan_data *chan) { + uint32_t status; int32_t read_ptr; int32_t write_ptr; int size; - if (!(host_dma_reg_read(chan->dma, chan->index, DGCS) & DGCS_BNE)) + status = host_dma_reg_read(chan->dma, chan->index, DGCS); + + if (status & DGCS_BF) + return chan->buffer_bytes; + + if (!(status & DGCS_BNE)) return 0; read_ptr = host_dma_reg_read(chan->dma, chan->index, DGBRP);
[inline->[dma_chan_base,io_reg_write,io_reg_update_bits,io_reg_read,host_dma_reg_write],void->[hda_dma_inc_link_fp,hda_dma_stop,spin_unlock_irq,atomic_sub,trace_hddma,host_dma_reg_read,cb,hda_dma_channel_put_unlocked,hda_dma_get_dbg_vals,spin_lock_irq,pm_runtime_put,hda_dma_ptr_trace,dma_get_drvdata,hda_update_bits,trace_hddma_error],int->[hda_dma_link_copy_ch,hda_dma_inc_link_fp,spin_unlock_irq,atomic_init,trace_hddma,hda_dma_wait_for_buffer_empty,hda_dma_avail_data_size,tracev_hddma,dma_set_drvdata,rfree,hda_dma_wait_for_buffer_full,host_dma_reg_read,hda_dma_get_dbg_vals,hda_dma_post_copy,dma_get_drvdata,trace_hddma_error,clock_ms_to_ticks,host_dma_reg_write,spin_lock_irq,hda_dma_ptr_trace,ALIGN_UP,atomic_add,hda_dma_inc_fp,rzalloc,platform_timer_get,hda_dma_enable_unlock,timer_get_system,hda_dma_free_data_size,hda_dma_dbg_count_reset,hda_update_bits]]
This method returns the number of bytes available on the channel.
No need to have a new line here.