patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -135,7 +135,7 @@ public class GlobalInboundInvocationHandler implements InboundInvocationHandler //we must/can run in this thread runnable.run(); } else { - remoteCommandsExecutor.execute(runnable); + blockingExecutor.execute(runnable); } }
[GlobalInboundInvocationHandler->[handleFromCluster->[exceptionHandlingCommand],ResponseConsumer->[convertToResponse->[exceptionHandlingCommand,shuttingDownResponse]]]]
Handles a reply to a remote command.
So we assume any `ReplicableCommand` is blocking?
@@ -508,6 +508,12 @@ export class AmpStoryPlayer { ); }); + const customUIConfig = this.getCustomUI_(); + + if (customUIConfig) { + messaging.sendRequest('setStoryCustomUI', customUIConfig, false); + } + resolve(messaging); }, (err) => {
[No CFG could be retrieved]
Provides a method to manage the current story. Creates an iframe for each story in the list of stories.
Do we really need to pass all the configuration? Can you link to your design doc / tickets for this new viewer messaging? If the goal is to inject new controls we should have a more accurate message name and API that better reflect our goal. `customUI` could be anything
@@ -629,7 +629,7 @@ class ProCombatMoveAI { } // Determine counter attack results for each transport territory - double totalEnemyTUVSwing = 0.0; + double totalEnemyTuvSwing = 0.0; for (final Territory unloadTerritory : territoryTransportAndBombardMap.keySet()) { if (enemyAttackOptions.getMax(unloadTerritory) != null) { final List<Unit> enemyAttackers = enemyAttackOptions.getMax(unloadTerritory).getMaxUnits();
[ProCombatMoveAI->[doCombatMove->[getLandOptions,determineTerritoriesThatCanBeHeld,equals,populateAttackOptions,territoryHasLocalLandSuperiority,addAll,populateEnemyAttackOptions,info,determineUnitsToAttackWith,getNeighbors,findTerritoryValues,getTerritory,logAttackMoves,prioritizeAttackOptions,checkContestedSeaTerritories,isEmpty,populateEnemyDefenseOptions,removeTerritoriesThatCantBeConquered,removeTerritoriesWhereTransportsAreExposed,debug,moveOneDefenderToLandTerritoriesBorderingEnemy,getData,getTerritoryMap,removeAttacksUntilCapitalCanBeHeld,ProTerritoryManager,removeTerritoriesThatArentWorthAttacking,determineTerritoriesToAttack,doMove,getPlayer,calculateAmphibRoutes,setStoredStrafingTerritories,getStrafingTerritories,add],determineTerritoriesThatCanBeHeld->[size,getMaxUnits,getMaxBombardUnits,getMaxEnemyDefenders,addAll,info,getMaxEnemyUnits,getWinPercentage,getTerritory,getTUVSwing,isWater,setMaxEnemyUnits,getEnemyAttackOptions,getMax,getMaxAmphibUnits,debug,isHasLandUnitRemaining,getTerritoryMap,invert,getAverageAttackersRemaining,getMatches,match,estimateAttackBattleResults,calculateBattleResults,setCanHold,get,isStrafing,setMaxEnemyBombardUnits],removeTerritoriesWhereTransportsAreExposed->[unitIsAlliedNotOwned,size,removeAll,getMaxUnits,getUnits,getMaxEnemyDefenders,addAll,info,containsKey,put,getTerritory,getProduction,min,getTUVSwing,trace,isEmpty,isWater,getEnemyAttackOptions,getMax,debug,getTerritoryMap,isCapital,getMatches,populateDefenseOptions,calculateBattleResults,getName,get,isStrafing,add,clear,keySet],moveOneDefenderToLandTerritoriesBorderingEnemy->[unitIsOwnedBy,remove,getMatches,debug,getTerritory,removeAll,add,getUnits,getType,getInt,someMatch,isEmpty,info,territoryIsEnemyNonNeutralAndHasEnemyUnitMatching,getUnitMoveMap,isWater,getNeighbors,unitCantBeMovedAndIsAlliedDefenderAndNotInfra],tryToAttackTerritories->[getValue,getTransportCasualtiesRestricted,checkForOverwhelmingWin,territoryCanMoveSeaUnits,size,equals,setBattleResult,getMaxUnits,
getUnits,getType,getMaxEnemyDefenders,getInt,isTransporting,addAll,remove,containsKey,put,getNeighbors,estimateStrengthDifference,isUnitAllied,addUnit,getWinPercentage,getTerritory,isCanHold,sortUnitNeededOptions,putAmphibAttackMap,trace,getTransportList,someMatch,isEmpty,getUnitMoveMap,isWater,getTransport,getEnemyAttackOptions,values,getMax,hasNext,getIsAir,addUnits,noneMatch,territoryCanMoveAirUnitsAndNoAA,isHasLandUnitRemaining,getTerritoryMap,getBattleResult,getMovementLeft,getBombardMap,iterator,invert,sortUnitMoveOptions,getMatches,match,next,getUnitsToTransportFromTerritories,estimateAttackBattleResults,firstKey,getDistance_IgnoreEndForCondition,getName,get,isCurrentlyWins,isStrafing,containsAll,getIsDestroyer,sortUnitNeededOptionsThenAttack,getTransportMoveMap,contains,add,clear,unitIsEnemyAndNotInfa,keySet],removeAttacksUntilCapitalCanBeHeld->[getValue,size,removeAll,setBattleResult,getMaxUnits,getUnits,getMaxBombardUnits,addAll,populateEnemyAttackOptions,info,remove,getNeighbors,isUnitAllied,getTerritory,getTUVSwing,trace,findMaxPurchaseDefenders,isEmpty,getEnemyAttackOptions,getMax,getMaxAmphibUnits,debug,isHasLandUnitRemaining,getTerritoryMap,getMatches,match,estimateDefendBattleResults,unitCanBeMovedAndIsOwnedLand,getName,get,contains,add,clear,keySet],removeTerritoriesThatArentWorthAttacking->[getValue,getMaxUnits,getMaxEnemyDefenders,remove,info,getNeighbors,estimateStrengthDifference,getTerritory,isCanHold,someMatch,isWater,getEnemyAttackOptions,retainAll,getMax,hasNext,isNull,getMaxAmphibUnits,debug,iterator,next,getName,get,containsAll,territoryIsEnemyNotNeutralLand,isNeedAmphibUnits,add,enemyUnit],logAttackMoves->[getValue,toStringNoOwner,getMaxUnits,getUnits,getMaxBombardUnits,getMaxEnemyDefenders,addAll,containsKey,put,getMaxEnemyUnits,getTUVSwing,trace,getMaxAmphibUnits,debug,getTerritoryMap,getName,get,getMaxEnemyBombardUnits,keySet],prioritizeAttackOptions->[getValue,getMaxUnits,getMaxEnemyDefenders,info,remove,isFFA,getNeighbors,estimateStr
engthDifference,getTerritory,getProduction,isCanHold,getTUVSwing,trace,setValue,isTerritoryEnemy,isEmpty,isWater,hasNext,isNull,territoryCanMoveLandUnits,debug,sort,iterator,isCapital,getDistance,territoryHasUnitsOwnedBy,getMatches,match,next,findTerritoryAttackValue,territoryIsEnemyNotNeutralOrAllied,getDistance_IgnoreEndForCondition,getName,compare,get,isNeedAmphibUnits,add,unitIsEnemyAndNotInfa],canAirSafelyLandAfterAttack->[getDistance_IgnoreEndForCondition,territoryCanMoveAirUnitsAndNoAA,get,getMovementLeft,match],determineTerritoriesToAttack->[size,removeAll,setBattleResult,getUnits,tryToAttackTerritories,getMaxEnemyDefenders,info,remove,haveUsedAllAttackTransports,estimateStrengthDifference,getWinPercentage,getTerritory,min,add,trace,getStrengthEstimate,subList,getResultString,debug,isHasLandUnitRemaining,getBattleResult,setStrengthEstimate,estimateAttackBattleResults,getName,get,isStrafing,isNeedAmphibUnits,setCanAttack,keySet],doMove->[calculateMoveRoutes,calculateBombingRoutes,calculateBombardMoveRoutes,calculateAmphibRoutes,doMove,clear],checkContestedSeaTerritories->[ProTerritory,addUnits,info,getTerritoryMap,get,containsKey,isWater,isEmpty,territoryCanMoveSeaUnitsThrough,getMatches,unitCanBeMovedAndIsOwnedSea,put,match,next,getNeighbors],determineUnitsToAttackWith->[checkForOverwhelmingWin,size,equals,removeAll,setBattleResult,getMaxUnits,getUnits,tryToAttackTerritories,getType,getMaxEnemyDefenders,getInt,getMaxBombardUnits,addAll,info,remove,isFFA,getMaxEnemyUnits,getBomberMoveMap,getWinPercentage,addUnit,of,getTerritory,getProduction,isCanHold,getTUVSwing,trace,empty,someMatch,isEmpty,getUnitMoveMap,isWater,getEnemyAttackOptions,getMax,hasNext,getIsAir,getMaxAmphibUnits,isNull,getResultString,debug,isPresent,isHasLandUnitRemaining,getTerritoryMap,getOwner,getBattleResult,iterator,getAverageAttackersRemaining,invert,isCapital,getMatches,match,next,estimateAttackBattleResults,calculateBattleResults,getIsSea,getBattleRounds,getName,getPlayerProduction,ge
t,isStrafing,sortUnitNeededOptionsThenAttack,canAirSafelyLandAfterAttack,contains,add,clear,keySet],getCalc]]
Remove territories where transports are exposed. Find all transport territory and bombard units for each unload territory. This method calculates the best battle results for the given territory. This method is called when a territory is not in amphib.
`enemyTuvSwing` is perhaps better here instead of `totalEnemyTuvSwing` Tuv = 'total unit value' So the variable 'totalEnemyTuvSwing' spelled out would be: total enemy total unit value swing
@@ -37,6 +37,9 @@ </tr> <?php endforeach; ?> </table> +<p><?php printf( _n( 'You have %d post that is already well optimized, well done!', 'You have %d posts that are already well optimized, well done!', $good_posts, 'wordpress-seo' ), $good_posts ); ?></p> +<p><?php printf( __( 'Don\'t know how to optimize your content? Read our %1$sContent SEO eBook%2$s!', 'wordpress-seo' ), '<a href="https://yoast.com/ebooks/content-seo/">', '</a>' ); ?></p> +<br/> <?php if ( ! empty( $onpage ) && WPSEO_Utils::grant_access() ) : ?> <div class="onpage"> <h4 class="hide-if-no-js"><?php
[No CFG could be retrieved]
Print out the header of the n - node indexable post. Print the content of a specific KnowledgeBase article.
Shouldn't we add some `utm` tags?
@@ -264,6 +264,7 @@ func (i *UIAdapter) rowPartial(mctx libkb.MetaContext, proof keybase1.RemoteProo } } row.ProofURL = i.makeSigchainViewURL(mctx, proof.SigID) + iconKey = serviceType.GetLogoKey() case keybase1.ProofType_GENERIC_WEB_SITE: protocol := "https" if proof.Key != "https" {
[plumbUncheckedProof->[rowPartial],displayKey->[priority],FinishWebProofCheck->[finishRemoteCheck],Finish->[sendResult],Cancel->[sendResult],FinishSocialProofCheck->[finishRemoteCheck],plumbStellarAccount->[updateRow,makeSigchainViewURL,priority],plumbCryptocurrency->[updateRow,makeSigchainViewURL,priority],finishRemoteCheck->[rowPartial,setRowStatus],plumbRevokeds->[plumbRevoked],plumbRevoked->[updateRow,rowPartial],LaunchNetworkChecks->[plumbUncheckedProofs,plumbRevokeds],rowPartial->[priority],sendResult->[shouldSkipSendResult]]
rowPartial returns a row with the given remote proof as a partial of the proof. row. SiteURL = profileURL InjectionURL InjectionURL InjectionURL totally.
this line is crashing my service locally when `serviceType` is nil. it's a test user using a service type that no longer exists in the configs because my local setup is a little janky. but it seems like this could also reappear in prod if we ever remove a config (which we definitely might could do).
@@ -256,6 +256,7 @@ $(document).ready(function () { // Give article headings direct links to anchors $('article h1, article h2, article h3, article h4, article h5, article h6') + .not('.card') .filter('[id]') .each(function () { var isMainTitle = $(this).prop('nodeName') === 'H1';
[No CFG could be retrieved]
Adds a tab to the tab content and tab group. Create a popper that shows the link of the current button popover.
need to change to '.card article h1, article h2, article h3, article h4, article h5, article h6'
@@ -82,6 +82,8 @@ func Details(ctx context.Context, g *libkb.GlobalContext, name string) (res keyb } res.AnnotatedActiveInvites = annotatedInvites + membersHideInactiveDuplicates(ctx, g, &res.Members) + res.Settings.Open = t.IsOpen() res.Settings.JoinAs = t.chain().inner.OpenTeamJoinAs return res, nil
[Nanoseconds,MapUIDsToUsernamePackages,GetProofSet,GetMeUV,UploadImage,SeitanIKeyV2,RunEngine2,CTimeTracer,myRole,WithUID,IsOpen,ChangeMembership,Post,New,InviteMember,deleteSubteam,AsTeam,Time,NewMetaContext,ImplicitAdmins,OpenTeamJoinAs,IsSubteam,TeamInviteTypeFromString,Stage,Generation,Split,WithPublicKeyOptional,FindActiveInvite,Finish,UserVersionByUID,CanSkipKeyRotation,AddMemberByAssertionOrEmail,KeybaseUserVersion,FindKID,GenerateSignature,ResolveAndCheck,IsMember,Exists,Leave,GetBool,GetIdentifyOutcome,Now,PostTeamSettings,Add,FindNextMerkleRootAfterTeamRemoval,GenerateAcceptanceKey,MemberRole,GetNormalizedName,ImplicitTeamDisplayName,CTrace,GetUPAKLoader,InviteSeitan,IsWriterOrAbove,AddMemberByUsername,ToLower,InviteSeitanV2,NewLoadUserArg,IsTeamName,HasActiveInvite,WithTimeout,ResolveNameToIDUntrusted,chain,ToTeamName,Rotate,ChangeMembershipWithOptions,ToTime,ReAddMemberToImplicitTeam,Members,WithForcePoll,FieldsFunc,UsersWithRole,Eq,GenerateTeamInviteID,GetUsername,NewLoadUserArgWithMetaContext,CDebugf,CTraceTimed,notifyNoChainChange,TeamID,WithNetContext,currentUserUV,FindActiveKeybaseInvite,TrimSpace,IsPublic,TeamInviteName,NewNormalizedUsername,Equal,ParseAddress,NormalizedUsernameFromUPK2,TeamInviteIDFromString,Name,ToTeamID,InviteEmailMember,postTeamInvites,AtKey,LookupUID,GetDecode,Sprintf,LoadV2,GenerateSIKey,LatestKBFSTLFID,String,AssociateWithTLFID,ToUserVersion,GetRunMode,DeepCopy,RootAncestorName,WithName,CWarningf,Delete,NewHTTPArgs,ExportToTeamPlusApplicationKeys,CompleteSocialInvitesFor,IsRootTeam,deleteRoot,IsNil,SeitanIKey,Ctx,Errorf,NewResolveThenIdentify2,IsOrAbove,Clock,Sub,GetTeamLoader,Unix,IsTeamID,NewAPIArgWithNetContext,AsUserOrTeam,TeamNameFromString,F,IsImplicit,G,GetActiveAndObsoleteInvites,IsReaderOrAbove,Load]
Details returns a list of team members for the specified team. userVersionsToDetails returns a list of team member details for the given users.
I'm not a huge fan, but I'm guessing this is needed because you can't get rid of that owner (at least at this stage) and they will haunt the member list for ever, right? My other concern is that we have this `strict mode` going on at the server that has never been enabled, but we are planning to eventually. Maybe Keybase Teams Release Anniversary is a good time. Anyway it disallows having multiple versions of the same user in the same team on database assertion level.
@@ -54,4 +54,13 @@ public interface Log extends BasicLogger { @Message(value = "The '%s' cache does not support commands of type %s", id = 28014) CacheException cacheDoesNotSupportCommand(String cacheName, String commandType); + + @Message(value = "Cache '%s' with storage type '%s' cannot be queried. Please configure the cache encoding as " + + "'application/x-protostream' or 'application/x-java-object'", id = 28015) + CacheException cacheNotQueryable(String cacheName, String storage); + + @LogMessage(level = WARN) + @Message(id = 28016, value = "Query done in a cache ('%s') that has an unknown format configuration. " + + "Please configure the cache encoding as 'application/x-protostream' or 'application/x-java-object'") + void warnNoMediaType(String cacheName); }
[No CFG could be retrieved]
Cache does not support commands of the given type.
I took the liberty to change "Query done" to "Query peformed".
@@ -78,8 +78,9 @@ class {package_name}TestConan(ConanFile): def build(self): cmake = CMake(self.settings) - self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line)) - self.run("cmake --build . %s" % cmake.build_config) + # Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package" + cmake.configure(self, source_dir="../../", build_dir="./") + cmake.build(self) def imports(self): self.copy("*.dll", "bin", "bin")
[No CFG could be retrieved]
A function to generate the source code of a single . \ n \ n.
why not using conanfile_directory?
@@ -76,10 +76,11 @@ namespace Dynamo.Utilities msg.SessionId = sessionId; msg.SendAnswer += SendAnswerToWebSocket; - Application.Current.Dispatcher.Invoke(new Action(() => msg.Execute(Controller.DynamoViewModel))); + (Application.Current != null ? Application.Current.Dispatcher : Dispatcher.CurrentDispatcher) + .Invoke(new Action(() => msg.Execute(Controller.DynamoViewModel))); } - private static void SendAnswerToWebSocket(string answer, string sessionId) + public static void SendAnswerToWebSocket(string answer, string sessionId) { WebSocketServer.SendResponse(new ComputationResponse() {
[dynSettings->[EnableServer->[Start,Info,ReceivedMessage,Error],FormatFileName->[RemoveChars],RemoveChars->[Replace,Aggregate],LogInfo->[Log],ExecuteMessageFromSocket->[SendAnswer,Invoke,SessionId,Execute,Deserialize,DynamoViewModel],SendAnswerToWebSocket->[SendResponse,Success],ReturnFocusToSearch->[OnRequestReturnFocusToSearch,Empty],LogError->[Log],CustomNodeManager,GetAllNodeNames]]
Execute message from websocket.
The purpose of `SendAnswerToWebSocket` is ambiguous, I think it ends up sending message to the client. If I see it right, could we rename this to `SendResponseToClient` or `RespondToClient` or something similar?
@@ -3874,7 +3874,14 @@ dfs_removexattr(dfs_t *dfs, dfs_obj_t *obj, const char *name) if (obj == NULL) return EINVAL; - rc = check_access(dfs, geteuid(), getegid(), obj->mode, W_OK); + mode = obj->mode; + + /* Patch in user read permissions here for trusted namespaces */ + if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || + !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ) + mode |= S_IRUSR; + + rc = check_access(dfs, geteuid(), getegid(), mode, W_OK); if (rc) return rc;
[dfs_mount_root_cont->[dfs_cont_create,dfs_mount],dfs_umount_root_cont->[dfs_umount],dfs_lookup->[dfs_lookup],dfs_obj_local2global->[dfs_get_chunk_size],dfs_chmod->[dfs_lookup,dfs_release],dfs_access->[dfs_lookup,dfs_release]]
Remove an extended attribute from an object. - - - - - - - - - - - - - - - - - -.
(style) space prohibited before that close parenthesis ')'
@@ -7,5 +7,6 @@ package org.ray.api.function; */ @FunctionalInterface public interface RayFunc0<R> extends RayFunc { - R apply(); + + R apply() throws Exception; }
[No CFG could be retrieved]
R Apply the function to the current context.
I guess I missed something else, just wondering how does this relate to supporting void return?
@@ -987,6 +987,7 @@ func (mod *modContext) sdkImports(nested, utilities bool) []string { if nested { imports = append(imports, fmt.Sprintf("import * as inputs from \"%s/types/input\";", relRoot)) imports = append(imports, fmt.Sprintf("import * as outputs from \"%s/types/output\";", relRoot)) + imports = append(imports, fmt.Sprintf("import * as enums from \"%s/types/enums\";", relRoot)) } if utilities { imports = append(imports, fmt.Sprintf("import * as utilities from \"%s/utilities\";", relRoot))
[genResource->[getConstValue,genAlias,getDefaultValue,genPlainType,typeString],genTypes->[genHeader,sdkImports,getImports],genType->[details,genPlainType],gen->[genResource,genTypes,sdkImports,genFunction,isReservedSourceFileName,getImports,genHeader,add,genConfig],genFunction->[genPlainType],getTypeImports->[getTypeImports],genConfig->[getImports,genHeader,getDefaultValue,configGetter,typeString],genIndex->[genHeader],getImports->[getTypeImports],genNamespace->[genType,details,genNamespace],tokenToResource->[tokenToModName],configGetter->[typeString],genPlainType->[typeString],tokenToType->[tokenToModName],typeString->[tokenToType,tokenToResource,typeString],gen,details,typeString,add]
sdkImports returns the imports for the SDK.
Should this import be conditional based on whether or not any enums are present? Probably not a big deal.
@@ -436,8 +436,8 @@ namespace Dynamo.UI.Controls // Count width. double x = 0; - x = WpfUtilities.FindUpVisualTree<SearchView>(this.PlacementTarget).ActualWidth - + gap * 2 + targetLocation.X * (-1); + x = (WpfUtilities.FindUpVisualTree<SearchView>(this.PlacementTarget).ActualWidth + + gap * 2 + targetLocation.X * (-1)) * xfactor; // Count height. var availableHeight = dynamoWindow.ActualHeight - popup.Height
[DynamoTextBox->[OnPreviewKeyDown->[OnRequestReturnFocusToSearch],NodeModel->[NodeModel]],LibraryToolTipPopup->[PlacementCallback->[SetDataContext],LibraryToolTipTimer->[Start->[Start],Stop->[Stop],OnTimerElapsed->[Stop]]]]
Callback for placement.
Do we still need gap \* 2 if we're scaling the whole expression?
@@ -174,11 +174,16 @@ namespace Dynamo.Logging //reporting approved status. preferences = dynamoModel.PreferenceSettings; + adpAnalyticsUI = new ADPAnalyticsUI(); + if (Session == null) Session = new DynamoAnalyticsSession(); //Setup Analytics service, StabilityCookie, Heartbeat and UsageLog. Session.Start(dynamoModel); + Service.Instance.AddTrackerFactoryFilter(GATrackerFactory.Name, () => true == ReportingAnalytics); + Service.Instance.AddTrackerFactoryFilter(ADPTrackerFactory.Name, () => true == ReportingADPAnalytics); + //Dynamo app version. var appversion = dynamoModel.AppVersion;
[DynamoAnalyticsClient->[Dispose->[Dispose],TrackException->[TrackException],ShutDown->[Dispose],Start],DynamoAnalyticsSession->[Dispose->[Dispose]]]
Creates a new instance of DynamoAnalyticsClient with the given DynamoModel. Reports are approved by default.
These filters will ensure that if ADP is **off** then no events will be passed to ADP. Same for google, if GoogleAnalytics are turned off...then no events will be processed by the GoogleTracker.
@@ -241,7 +241,6 @@ namespace Dynamo.Controls //capture the mouse input even if the mouse is dragged outside the canvas this.CaptureMouse(); base.OnMouseLeftButtonDown(e); - e.Handled = true; } }
[DragCanvas->[OnMouseLeftButtonDown->[OnMouseLeftButtonDown],OnIsKeyboardFocusWithinChanged->[OnIsKeyboardFocusWithinChanged]]]
Override OnMouseLeftButtonDown in order to capture the mouse input even if the mouse is.
I'm afraid at this place. I think this can cause regressions or bugs. We don't handle event in DragCanvas, because if we click on WorkSpace and InCanvasSearch is open, then WorkSpace should handle click event and close search bar. I've tried manually and I didn't find any bugs... But it doesn't mean that there is no regressions. Just wanted to pay your attention here!
@@ -180,6 +180,7 @@ func resourceAwsIAMServerCertificateDelete(d *schema.ResourceData, meta interfac if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "DeleteConflict" && strings.Contains(awsErr.Message(), "currently in use by arn") { + currentlyInUseBy(awsErr.Message(), meta) log.Printf("[WARN] Conflict deleting server certificate: %s, retrying", awsErr.Message()) return resource.RetryableError(err) }
[DeleteServerCertificate,UniqueId,Message,GetServerCertificate,NonRetryableError,Set,Code,GetOk,Sum,Errorf,SetId,RetryableError,TrimSpace,Contains,UploadServerCertificate,PrefixedUniqueId,Id,Get,Printf,String,EncodeToString,Retry]
The following functions are functions related to AWS IAM. normalizeCert returns a list of resource data if the private key is found in the given meta.
Nitpick: Do you mind changing the second argument to `*elb.ELB` instead of all `meta`?
@@ -9,7 +9,10 @@ module Verify end def create - if step.complete + result = step.submit + analytics.track_event(Analytics::IDV_FINANCE_CONFIRMATION, result) + + if result[:success] redirect_to verify_phone_url else render_form
[FinanceController->[idv_finance_form->[new],step->[new]]]
create a new nag node.
I understand that you're trying to get consistency in the controller between how a `Step` works and how a `Form` works. Still, calling a method `submit` on a `Step` feels a little awkward, grammatically. I submit a form. I complete a step. It smells a little like false equivalence. OTOH, naming things is the Hardest Problem, and this isn't a blocker for me.
@@ -385,7 +385,14 @@ class CollectionAddProducts(BaseMutation): collection = cls.get_node_or_error( info, collection_id, field="collection_id", only_type=Collection ) - products = cls.get_nodes_or_error(products, "products", Product) + products = cls.get_nodes_or_error( + products, + "products", + Product, + qs=models.Product.objects.prefetch_related( + "attributes", "collections", "variants", "category" + ), + ) cls.clean_products(products) collection.products.add(*products) if collection.sale_set.exists():
[ProductMediaUpdate->[Arguments->[ProductMediaUpdateInput],perform_mutation->[save,ProductMediaUpdate]],CollectionCreate->[save->[save],Arguments->[CollectionCreateInput],perform_mutation->[CollectionCreate]],CollectionReorderProducts->[perform_mutation->[CollectionReorderProducts]],ProductTypeUpdate->[Arguments->[ProductTypeInput]],CollectionDelete->[perform_mutation->[CollectionDelete]],ProductVariantReorder->[perform_mutation->[save,ProductVariantReorder]],ProductUpdate->[save->[save],Arguments->[ProductInput]],ProductMediaDelete->[perform_mutation->[ProductMediaDelete]],CollectionUpdate->[save->[save],Arguments->[CollectionInput]],VariantMediaUnassign->[perform_mutation->[VariantMediaUnassign]],ProductVariantDelete->[success_response->[save]],CategoryCreate->[save->[save],Arguments->[CategoryInput]],ProductMediaReorder->[perform_mutation->[save,ProductMediaReorder]],ProductMediaCreate->[Arguments->[ProductMediaCreateInput],perform_mutation->[ProductMediaCreate,validate_input]],ProductVariantSetDefault->[perform_mutation->[save,ProductVariantSetDefault]],ProductVariantUpdate->[Arguments->[ProductVariantInput]],CollectionAddProducts->[perform_mutation->[CollectionAddProducts]],CategoryUpdate->[Arguments->[CategoryInput]],ProductCreate->[save->[save],Arguments->[ProductCreateInput],clean_attributes->[clean_input],clean_input->[clean_attributes]],ProductTypeCreate->[Arguments->[ProductTypeInput]],ProductVariantCreate->[save->[save],Arguments->[ProductVariantCreateInput],clean_attributes->[clean_input],clean_input->[validate_duplicated_attribute_values,clean_attributes]],VariantMediaAssign->[perform_mutation->[VariantMediaAssign]],CollectionRemoveProducts->[perform_mutation->[CollectionRemoveProducts]]]
Perform a mutation on a collection.
Please double check the `attributes` prefetch. I think that it misses some relations.
@@ -2,7 +2,7 @@ module TwoFactorAuthentication class PhoneSelectionPresenter < SelectionPresenter def type if MfaContext.new(configuration&.user).phone_configurations.many? - "#{super}_#{configuration.id}" + "#{super}:#{configuration.id}" else super end
[PhoneSelectionPresenter->[info->[masked_number,present?,t,phone],masked_number->[blank?],type->[many?,id]]]
Returns a sequence number that can be used to generate a unique identifier for this user.
This needs to be `_` rather than `:` due to an issue Moncef found. The controller that splits on `:` needs to separate on the last `_`.
@@ -94,6 +94,12 @@ namespace Content.Server.Construction private async Task<IEntity?> Construct(IEntity user, string materialContainer, ConstructionGraphPrototype graph, ConstructionGraphEdge edge, ConstructionGraphNode targetNode) { + if (user.IsInContainer()) + { + user.PopupMessageCursor(Loc.GetString("construction-system-inside-container")); + return null; + } + // We need a place to hold our construction items! var container = ContainerHelpers.EnsureContainer<Container>(user, materialContainer, out var existed);
[ConstructionSystem->[HandleStartStructureConstruction->[Construct],Construct->[EnumerateNearby],Initialize->[Initialize],HandleStartItemConstruction->[Construct]]]
Construct a new material container. Checks if an entity is valid in the system and if so adds it to the stack. This method is called when a new entity is not found in the system. It will be.
You should put this in `HandleStartStructureConstruction` instead. Crafting items inside a container should be fine.
@@ -1,7 +1,7 @@ module ProtocolsImporter include RenamingUtil - def import_new_protocol(protocol_json, organization, type, user) + def import_new_protocol(protocol_json, team, type, user) remove_empty_inputs(protocol_json) protocol = Protocol.new( name: protocol_json["name"],
[remove_empty_inputs->[remove_empty_inputs,each,kind_of?],import_new_protocol->[invalid?,populate_protocol,new,remove_empty_inputs,save!,now,rename_record],populate_protocol->[post_process_file,new,save!,file_content_type,file,decode64,create!,file_file_name,each,reload,id,organization],import_into_existing->[unlink,reload,destroy_contents,populate_protocol],include]
Imports a new protocol from a JSON file.
Unused method argument - team. If it's necessary, use _ or _team as an argument name to indicate that it won't be used.
@@ -115,6 +115,17 @@ public class RestClientRequestContext extends AbstractResteasyReactiveContext<Re restart(abortHandlerChain); } + @Override + protected Throwable unwrapException(Throwable t) { + var res = super.unwrapException(t); + if (res instanceof WebApplicationException) { + WebApplicationException webApplicationException = (WebApplicationException) res; + return new ClientWebApplicationException(webApplicationException.getMessage(), webApplicationException, + webApplicationException.getResponse()); + } + return res; + } + public <T> T readEntity(InputStream in, GenericType<T> responseType, MediaType mediaType, MultivaluedMap<String, Object> metadata)
[RestClientRequestContext->[close->[close],isMultipart->[getEntity]]]
This method aborts the current request and returns the entity with the given identifier.
The `WebApplicationException` cause should be propagated here?
@@ -21,6 +21,12 @@ type kubernetesFeatureSetting struct { func kubernetesContainerAddonSettingsInit(profile *api.Properties) map[string]kubernetesFeatureSetting { return map[string]kubernetesFeatureSetting{ + DefaultHeapsterAddonName: { + "kubernetesmasteraddons-heapster-deployment.yaml", + "kube-heapster-deployment.yaml", + !common.IsKubernetesVersionGe(profile.OrchestratorProfile.OrchestratorVersion, "1.12.0"), + profile.OrchestratorProfile.KubernetesConfig.GetAddonScript(DefaultKubeHeapsterDeploymentAddonName), + }, DefaultMetricsServerAddonName: { "kubernetesmasteraddons-metrics-server-deployment.yaml", "kube-metrics-server-deployment.yaml",
[IsAADPodIdentityEnabled,IsMetricsServerEnabled,IsClusterAutoscalerEnabled,IsDashboardEnabled,IsTrueBoolPointer,IsACIConnectorEnabled,IsKubernetesVersionGe,IsSMBFlexVolumeEnabled,IsBlobfuseFlexVolumeEnabled,IsKeyVaultFlexVolumeEnabled,GetAddonScript,IsContainerMonitoringEnabled,IsReschedulerEnabled,Join,IsNVIDIADevicePluginEnabled,Split,IsAzureCNI,Sprintf,Replace,IsTillerEnabled]
This function returns a map of feature settings for the given container. Missing name for missing cluster - autoscaler addon.
let's change this to 1.13.0, so that we don't change the presence of heapster in pre-existing 1.12 clusters
@@ -1333,7 +1333,7 @@ func (orm *ORM) IncrFluxMonitorRoundSubmissions(aggregator common.Address, round ?, ?, 0, 1 ) ON CONFLICT (aggregator, round_id) DO UPDATE - SET num_submissions = excluded.num_submissions + 1 + SET num_submissions = flux_monitor_round_stats.num_submissions + 1 `, aggregator, roundID).Error }
[FindTxsBySenderAndRecipient->[MustEnsureAdvisoryLock],Close->[Close],JobRunsFor->[MustEnsureAdvisoryLock,preloadJobRuns],SetConfigValue->[MustEnsureAdvisoryLock],IdempotentInsertEthTaskRunTx->[Transaction],SaveUser->[MustEnsureAdvisoryLock],FindInitiator->[MustEnsureAdvisoryLock],FindTxByAttempt->[FindTx,MustEnsureAdvisoryLock],ClobberDiskKeyStoreWithDBKeys->[Keys],CreateSession->[MustEnsureAdvisoryLock,FindUser],TxAttempts->[MustEnsureAdvisoryLock],CreateTx->[MustEnsureAdvisoryLock,convenientTransaction],FindUser->[MustEnsureAdvisoryLock],FindExternalInitiatorByName->[MustEnsureAdvisoryLock],CreateLogConsumption->[MustEnsureAdvisoryLock],DeleteUser->[MustEnsureAdvisoryLock,FindUser,convenientTransaction],DeleteEncryptedSecretVRFKey->[MustEnsureAdvisoryLock],RawDB->[MustEnsureAdvisoryLock],CreateJob->[MustEnsureAdvisoryLock,convenientTransaction],preloadJobRuns->[Unscoped],ArchiveJob->[MustEnsureAdvisoryLock,FindJob,convenientTransaction],JobRunsSortedFor->[MustEnsureAdvisoryLock,JobRunsCountFor,preloadJobRuns],CreateBridgeType->[MustEnsureAdvisoryLock],UpdateBridgeType->[MustEnsureAdvisoryLock],AddTxAttempt->[MustEnsureAdvisoryLock],UnconfirmedTxAttempts->[MustEnsureAdvisoryLock],AuthorizedUserWithSession->[MustEnsureAdvisoryLock,FindUser],AnyJobWithType->[MustEnsureAdvisoryLock],DeleteUserSession->[MustEnsureAdvisoryLock],FindBridgesByNames->[MustEnsureAdvisoryLock],SaveJobRun->[MustEnsureAdvisoryLock,Unscoped,convenientTransaction],CreateServiceAgreement->[MustEnsureAdvisoryLock,createJob,convenientTransaction],FirstOrCreateKey->[MustEnsureAdvisoryLock],JobRunsCountFor->[MustEnsureAdvisoryLock],FindExternalInitiator->[MustEnsureAdvisoryLock],SaveSession->[MustEnsureAdvisoryLock],CreateJobRun->[MustEnsureAdvisoryLock],FindOrCreateFluxMonitorRoundStats->[MustEnsureAdvisoryLock],FindBridge->[MustEnsureAdvisoryLock],FindEncryptedSecretVRFKeys->[MustEnsureAdvisoryLock],UnscopedJobRunsWithStatus->[MustEnsureAdvisoryLock,Unscoped,preloadJobRuns],FindJobRun->[MustEnsure
AdvisoryLock,preloadJobRuns],GetConfigValue->[MustEnsureAdvisoryLock],CountOf->[MustEnsureAdvisoryLock],Jobs->[MustEnsureAdvisoryLock,Unscoped,preloadJobs],PendingBridgeType->[MustEnsureAdvisoryLock,FindBridge],DeleteStaleSessions->[MustEnsureAdvisoryLock],BulkDeleteRuns->[MustEnsureAdvisoryLock,convenientTransaction],TxFrom->[MustEnsureAdvisoryLock],DeleteBridgeType->[MustEnsureAdvisoryLock],JobsSorted->[MustEnsureAdvisoryLock],GetLastNonce->[MustEnsureAdvisoryLock],BridgeTypes->[MustEnsureAdvisoryLock],preloadJobs->[Unscoped],FirstOrCreateEncryptedSecretVRFKey->[MustEnsureAdvisoryLock],Transactions->[MustEnsureAdvisoryLock],getRecords->[MustEnsureAdvisoryLock],FindTx->[MustEnsureAdvisoryLock],IncrFluxMonitorRoundSubmissions->[MustEnsureAdvisoryLock],FindServiceAgreement->[MustEnsureAdvisoryLock],MarkTxSafe->[MustEnsureAdvisoryLock],AllSyncEvents->[MustEnsureAdvisoryLock],DeleteTransaction->[MustEnsureAdvisoryLock,convenientTransaction],FindAllTxsInNonceRange->[MustEnsureAdvisoryLock],createJob->[MustEnsureAdvisoryLock],ClearNonCurrentSessions->[MustEnsureAdvisoryLock],CreateInitiator->[MustEnsureAdvisoryLock],FindLogConsumer->[MustEnsureAdvisoryLock,FindJob],MarkRan->[MustEnsureAdvisoryLock,convenientTransaction],SaveTx->[MustEnsureAdvisoryLock],DeleteFluxMonitorRoundsBackThrough->[MustEnsureAdvisoryLock],LinkEarnedFor->[MustEnsureAdvisoryLock],convenientTransaction->[MustEnsureAdvisoryLock,Transaction],FindTxAttempt->[MustEnsureAdvisoryLock],ClearSessions->[MustEnsureAdvisoryLock],DeleteExternalInitiator->[MustEnsureAdvisoryLock],Unscoped->[Unscoped],FindJob->[MustEnsureAdvisoryLock],Sessions->[MustEnsureAdvisoryLock],JobRunsSorted->[MustEnsureAdvisoryLock],MostRecentFluxMonitorRoundID->[MustEnsureAdvisoryLock],getDefaultKey->[Keys],CreateExternalInitiator->[MustEnsureAdvisoryLock],Unscoped]
IncrFluxMonitorRoundSubmissions increments the number of submissions recorded for a flux monitor round.
I'm surprised about two things: 1. That the original version didn't work 2. That it failed in a way that wasn't caught by tests
@@ -155,7 +155,8 @@ namespace { // (anonymous) * the number of replaced diagonal entries to match the * local number of rows. */ - TEST_EQUALITY(numReplacedDiagEntries, matrix->getNodeNumRows()); + const LO lclNumRows = static_cast<LO> (matrix->getNodeNumRows ()); + TEST_EQUALITY(numReplacedDiagEntries, lclNumRows); /* Test for successful replacement *
[No CFG could be retrieved]
This function reduces all the elements of a matrix into a single non-zero value. Check if all processes got this far.
Does this indentation not quite line up here? Looks like TEST_EQUALITY() is indented one space too many.
@@ -107,7 +107,7 @@ tests_requirements = [ "mock>=3.0.0", "xmltodict>=0.11.0", "awscli>=1.16.125", - "google-compute-engine", + "google-compute-engine==2.8.13", "pywin32; sys_platform == 'win32'", "Pygments", # required by collective.checkdocs, "collective.checkdocs",
[build_py->[pin_version->[write,mkpath,format,join,open],run->[run,execute]],find_packages,setup,append,dirname,read,exec,join,open]
MacOS specific version of . Creates a new version of dvc that is compatible with the current platform.
Btw, I think I remember adding this one because of binary built by pyinstaller, when using it with gs remote. Can we try rather fixing the version of this package instead of removing it? Just to be safe.
@@ -1001,7 +1001,7 @@ export class AmpStoryPage extends AMP.BaseElement { * @private */ toggleLoadingSpinner_(isActive) { - this.getVsync().mutate(() => { + this.resources_.mutateElement(this.element, () => { if (!this.loadingSpinner_) { this.buildAndAppendLoadingSpinner_(); }
[AmpStoryPage->[setState->[PLAYING,dev,NOT_ACTIVE,PAUSED],pauseCallback->[DESKTOP_PANELS,UI_STATE],constructor->[resolve,timerFor,NOT_ACTIVE,getAmpdoc,forElement,getStoreService,resourcesForDoc,platformFor,promise,debounce,reject],emitProgress_->[dict,PAGE_PROGRESS,dispatch],unmuteAllMedia->[removeAttribute,unmute,muted],getPreviousPageId->[id,tagName],renderOpenAttachmentUI_->[buildOpenAttachmentElement,AMP_STORY_PAGE_ATTACHMENT_OPEN_LABEL,localizationService,textContent],firstAttachedCallback->[matches],rewindAllMedia_->[rewindToBeginning,currentTime],findAndPrepareEmbeddedComponents_->[debounceEmbedResize,hasAttribute,listen,SIZE_CHANGED,scopedQuerySelectorAll,debouncePrepareForAnimation],preloadAllMedia_->[preload],registerAllMedia_->[register],playAllMedia_->[play],buildAndAppendPlayMessage_->[AMP_STORY_PAGE_PLAY_VIDEO,blessAll,getLocalizedString,localizationService,textContent,buildPlayMessageElement],maybeApplyFirstAnimationFrame->[resolve],setDistance->[min],hasVideoWithAudio_->[prototype,hasAttribute],pauseAllMedia_->[pause],waitForMediaLayout_->[addEventListener,resolve,all,tagName,prototype,LOAD_END,ALL_AMP_MEDIA,signals,readyState],maybeCreateAnimationManager_->[create,hasAnimations],layoutCallback->[upgradeBackgroundAudio,debounce,all],switchTo_->[dict,SWITCH_PAGE,dispatch],getMediaBySelector_->[scopedQuerySelectorAll,iterateCursor,getFriendlyIframeEmbedOptional,push,win],getAdjacentPageIds->[push,isExperimentOn],muteAllMedia->[setAttribute,mute,muted],isLayoutSupported->[CONTAINER],markPageAsLoaded_->[PAGE_LOADED,dispatch],openAttachment->[open,getImpl],delegateVideoAutoplay->[iterateCursor],checkPageHasAudio_->[TOGGLE_PAGE_HAS_AUDIO],getDistance->[parseInt],getAllMedia_->[ALL_MEDIA],next->[NEXT],whenAllMediaElements_->[prototype,all,callbackFn],stopListeningToVideoEvents_->[unlisten],togglePlayMessage_->[dev,toggle],previous->[PREVIOUS,dispatch,SHOW_NO_PREVIOUS_PAGE_HELP],getAllVideos_->[ALL_VIDEO],markMediaElementsWithPreload_->[setAttribute,prototyp
e],initializeMediaPool_->[dev,closestAncestorElementBySelector,getImpl,for],actions_->[getAttribute,split,search,indexOf,reduce,prototype,forEach,slice,push,map],getNextPageId->[tagName,id,isExperimentOn],reportDevModeErrors_->[getMode,getLogEntries,dispatch,DEV_LOG_ENTRIES_AVAILABLE],startListeningToVideoEvents_->[prototype,length,listen],BaseElement],keys,dev,htmlFor,prepareForAnimation,debounce,unlisten]
Toggle loading spinner.
`this.mutateElement(() => {...})`
@@ -108,7 +108,7 @@ log message: component. Check pod logs and recreate it with the correct CA cert. Routers and registries won't work properly with the wrong CA. * If it is from a node IP, the client is likely a node. Check the - openshift-node logs and reconfigure with the correct CA cert. + atomic-openshift-node logs and reconfigure with the correct CA cert. Nodes will be unable to create pods until this is corrected. * If it is from an external IP, it is likely from a user (CLI, browser, etc.). Command line clients should be configured with the correct
[MustCompile,Sprintf,Warn]
This function checks the master s server certificate and the server certificate for a node. show once for every client failing to connect.
I'm torn as to whether or not we should make this messaging more general and say 'the node logs' or perhaps 'origin-node or atomic-openshift-node logs' ? Not sure it's worth changing from what you've proposed.
@@ -365,8 +365,14 @@ inline void Battleground::_ProcessResurrect(uint32 diff) Player* player = ObjectAccessor::FindPlayer(guid); if (!player) continue; + Pet* pet = player->GetPet(); + float x, y, z; + player->GetPosition(x, y, z); player->ResurrectPlayer(1.0f); - player->CastSpell(player, 6962, true); + if (!pet) + { + player->SummonPet(0, x, y, z, player->GetOrientation(), HUNTER_PET, 0, 0, player->GetGUID(), PET_LOAD_BG_RESURRECT); + } player->CastSpell(player, SPELL_SPIRIT_HEAL_MANA, true); player->SpawnCorpseBones(false); }
[AddPlayer->[SendPacketToTeam],HandleKillPlayer->[UpdatePlayerScore],AddSpiritGuide->[AddCreature],EndBattleground->[PlaySoundToAll],CheckArenaAfterTimerConditions->[EndBattleground],SendMessageToAll->[BroadcastWorker],PlayerAddedToBGCheckIfBGIsRunning->[BlockMovement],SpectatorsSendPacket->[],SendMessage2ToAll->[BroadcastWorker],RemovePlayerAtLeave->[SendPacketToTeam],void->[GetPrematureWinner],UpdateWorldState->[SendPacketToAll],CheckArenaWinConditions->[EndBattleground,GetAlivePlayersCountByTeam],StartTimedAchievement->[StartTimedAchievement],HasFreeSlots->[GetFreeSlotsForTeam],HandleTriggerBuff->[SpawnBGObject],UpdateArenaWorldState->[UpdateWorldState,GetAlivePlayersCountByTeam],PSendMessageToAll->[BroadcastWorker],GetMaxFreeSlots->[GetFreeSlotsForTeam],PlaySoundToAll->[SendPacketToAll]]
This method processes the resurrection queue: each queued player is resurrected, then the queue is cleared.
remove double space and add a newline between this and the if statement above.
@@ -8,7 +8,6 @@ from textwrap import dedent from typing import Iterable, List, Sequence, Tuple, Type import pytest - from pants.base.specs import ( FilesystemGlobSpec, FilesystemLiteralSpec,
[TestDependencies->[test_dependency_injection->[assert_injected],test_normal_resolution->[assert_dependencies_resolved],test_dependency_inference->[assert_dependencies_resolved],test_explicit_file_dependencies->[assert_dependencies_resolved]],GraphTest->[test_resolve_generated_subtarget->[MockTarget],test_cycle_direct->[assert_failed_cycle],test_transitive_targets->[get_target],test_cycle_self->[assert_failed_cycle],test_cycle_indirect->[assert_failed_cycle]],TestCodegen->[test_generate_sources->[AvroSources,GenerateSmalltalkFromAvroRequest],test_works_with_subclass_fields->[CustomAvroSources],test_cannot_generate_language->[AvroSources]],generate_smalltalk_from_avro->[generate_fortran],test_validate_explicit_file_dep->[assert_raises],infer_smalltalk_dependencies->[infer],TestFindValidFieldSets->[test_find_valid_field_sets->[InvalidTarget,find_valid_field_sets,FortranTarget]],TestSources->[test_expected_file_extensions->[ExpectedExtensionsSources],test_default_globs->[DefaultSources],test_output_type->[SourcesSubclass],test_expected_num_files->[hydrate]]]
Imports a single object from the system. Create a test object for metal object.
Ditto on bad import order.
@@ -239,11 +239,11 @@ def add_single_spec(spec, mirror_root, categories, **kwargs): tty.msg("{name} : already added".format(name=name)) else: spec_exists_in_mirror = False - fetcher.fetch() - if not kwargs.get('no_checksum', False): - fetcher.check() - tty.msg("{name} : checksum passed".format(name=name)) - + validate = not kwargs.get('no_checksum', False) + fetcher.expected_archive_files = stage.expected_archive_files # noqa: ignore=E501 + fetcher.fetch( + validate=validate, expanded_source_tree=False + ) # Fetchers have to know how to archive their files. Use # that to move/copy/create an archive in the mirror. fetcher.archive(archive_path)
[add_single_spec->[mirror_archive_path],mirror_archive_path->[mirror_archive_filename],create->[get_matching_versions]]
Add a single spec to the mirror under the given categories.
We should ban the use of `# noqa: ignore=E501`. It just makes the line longer -- if you have to, use a `\`, but generally when I have to do this it just means I wrote the code with too much nesting and should refactor.
@@ -1048,7 +1048,7 @@ public class TripleAFrame extends MainGameFrame { for (final Entry<Territory, Collection<Unit>> entry : possibleUnitsToAttack.entrySet()) { final List<Unit> units = new ArrayList<>(entry.getValue()); Collections.sort(units, - new UnitBattleComparator(false, TuvUtils.getCostsForTuvForAllPlayersMergedAndAveraged(data), + new UnitBattleComparator(false, TuvUtils.getCostsForTuv(units.get(0).getOwner(), data), TerritoryEffectHelper.getEffects(entry.getKey()), data, true, false)); Collections.reverse(units); possibleUnitsToAttackStringForm.put(entry.getKey().getName(), units);
[TripleAFrame->[waitForPlace->[waitForPlace],setShowChatTime->[setShowChatTime],getInGameLobbyWatcher->[getInGameLobbyWatcher,getGame],shutdown->[stopGame],waitForEndTurn->[waitForEndTurn],updateStep->[updateStep],getTechRolls->[requestWindowFocus],getBattlePanel->[getBattlePanel],setStatusWarningMessage->[setStatus],actionPerformed->[showMapOnly,showGame,showHistory,gameDataChanged],setWidgetActivation->[setWidgetActivation],setEditDelegate->[setWidgetActivation,gameDataChanged],leaveGame->[stopGame],setScale->[setScale],getPoliticalActionChoice->[requestWindowFocus],getScale->[getScale],getEditMode->[getEditMode],getUserActionChoice->[requestWindowFocus]]]
Selects all possible units to attack using the given attack resource token. Creates a dialog that selects units to Suicide Attack.
Is there a possibility that `units` can be empty here?
@@ -431,6 +431,15 @@ export class AmpForm { checkUserValidityAfterInteraction_(dev().assertElement(e.target)); this.validator_.onInput(e); }); + + // Ctrl/Cmd + Enter on textarea or contenteditable triggers form submission. + this.form_.querySelectorAll('textarea,[contenteditable]').forEach(el => + el.addEventListener('keydown', e => { + if (e.key == Keys.ENTER && (e.ctrlKey || e.metaKey)) { + this.handleSubmitEvent_(e); + } + }) + ); } /** @private */
[No CFG could be retrieved]
Adds event listeners for the given and triggers the form submit event in the AMP Adds the fields of the form to the data for analytics.
This will trigger on Windows Key + Enter too btw but maybe that's expected?
@@ -55,6 +55,12 @@ module Engine @variant.each { |k, v| instance_variable_set("@#{k}", v) } end + def update_end_of_life(rusts_on, obsolete_on) + @rusts_on = rusts_on + @obsolete_on = obsolete_on + @variants.each { |_, v| v.merge!(rusts_on: rusts_on, obsolete_on: obsolete_on) } + end + def names_to_prices @variants.transform_values { |v| v[:price] } end
[Train->[init_variants->[transform_values],min_price->[from_depot?],names_to_prices->[transform_values],from_depot?->[is_a?],price->[name,dig],rust!->[remove_train],initialize->[select,init_variants],include,attr_writer,attr_accessor,instance_variable_set,attr_reader,each],require_relative]
Initialize a new Train object.
why is this only called in 18mex? should this just be done inline in 18mex? will this be used for other games?
@@ -196,9 +196,14 @@ class Jetpack_Twitter_Cards { static function site_tag() { $site_tag = get_option( 'jetpack-twitter-cards-site-tag' ); if ( empty( $site_tag ) ) { - $site_tag = ( defined( 'IS_WPCOM' ) && IS_WPCOM ) ? 'wordpressdotcom' : 'jetpack'; + if ( defined( 'IS_WPCOM' ) && IS_WPCOM ) { + return 'wordpressdotcom'; + } else { + return; + } + } else { + return $site_tag; } - return $site_tag; } static function settings_field() {
[No CFG could be retrieved]
Displays a hidden field that displays the Twitter tag of the site.
The logic would probably read as cleaner if this wans't an else, and just fell through to the prior `return $site_tag`
@@ -339,7 +339,7 @@ public class DefaultComponentLifecycleAdapter implements LifecycleAdapter { if (componentObject == null) { - throw new ComponentException(createStaticMessage("componentObject is null"), RequestContext.getEvent(), component); + throw new ComponentException(createStaticMessage("componentObject is null"), event, component); } // Use the overriding entrypoint resolver if one is set if (component.getEntryPointResolverSet() != null)
[DefaultComponentLifecycleAdapter->[start->[start],invoke->[invoke],stop->[stop]]]
Invoke the method that can be overridden by the component.
I think that is not correct to use the event. That could fix this particular issue, but suppose that in the exception strategy there is another component that uses `RequestContext.getEvent`... that event will be incorrect.
@@ -77,7 +77,7 @@ describe "Api::V1::Casts" do "description_en" => cast.character.description_en, "description_source" => cast.character.description_source, "description_source_en" => cast.character.description_source_en, - "favorite_characters_count" => cast.character.favorite_characters_count, + "favorite_characters_count" => cast.character.favorite_users_count, }, "person" => { "id" => cast.person.id,
[create,favorite_characters_count,twitter_username,let,describe,api,nickname,favorite_people_count,twitter_username_en,blood_type,title_kana,released_at_about,it,twitter_image_url,name,media,birthday,work_records_with_body_count,to,blood_type_en,weight_en,facebook_og_image_url,sort_number,before,description_source_en,description_source,watchers_count,let!,gender_text,recommended_image_url,twitter_hashtag,nationality_en,name_kana,age,staffs_count,occupation_en,official_site_url,to_s,height,age_en,include,name_en,url_en,strftime,casts_count,media_text,nationality,twitter_avatar_url,episodes_count,url,wikipedia_url_en,title,nickname_en,id,context,token,wikipedia_url,get,no_episodes?,birthday_en,weight,eq,occupation,description_en,description,height_en]
This method returns a list of all the properties of a character. missing nanoseconds for missing values.
Style/TrailingCommaInHashLiteral: Avoid comma after the last item of a hash.
@@ -42,7 +42,7 @@ def find_version(*file_paths): long_description = read('README.rst') tests_require = ['pytest', 'virtualenv>=1.10', 'scripttest>=1.3', 'mock', - 'pretend'] + 'pretend', 'pytest-catchlog', 'freezegun'] setup(
[find_version->[read],PyTest->[finalize_options->[finalize_options]],find_version,read]
Initialize the options. Displays a list of available packages and their dependencies.
This should not be in this PR
@@ -675,7 +675,10 @@ func applyTransformations(t, name string, props Input, resource Resource, opts [ res := transformation(args) if res != nil { - resOptions := merge(res.Opts...) + resOptions, err := tryMergeWithoutAwaiting(res.Opts...) + if err != nil { + return nil, nil, nil, err + } if resOptions.Parent != nil && resOptions.Parent.URN() != options.Parent.URN() { return nil, nil, nil, errors.New("transformations cannot currently be used to change the `parent` of a resource")
[ReadResource->[ReadResource,DryRun],RegisterComponentResource->[RegisterResource],collapseAliases->[Project,Stack],registerResource->[RegisterResource,DryRun,getResource],Invoke->[Invoke,DryRun],resolve->[resolve],RegisterRemoteComponentResource->[registerResource],prepareResourceInputs->[DryRun],RegisterResourceOutputs->[DryRun,endRPC,RegisterResourceOutputs,beginRPC],Close->[Close],getResource->[Invoke,DryRun]]
registerResource registers a component resource with the given name and inputs, returning the resolved input options.
Currently will fail if asked to await, however we can switch this to await here. This runs within contexts that are resolving resources already.
@@ -103,11 +103,17 @@ public class TestInformationSchemaConnector public void testLargeData() { long metadataCallsCountBeforeTests = METADATA_CALLS_COUNTER.get(); + assertQuery("SELECT count(*) from test_catalog.information_schema.schemata WHERE schema_name LIKE 'test_sch_ma1'", "VALUES 1"); + assertQuery("SELECT count(*) from test_catalog.information_schema.schemata WHERE schema_name LIKE 'test_sch_ma1' AND schema_name IN ('test_schema1', 'test_schema2')", "VALUES 1"); assertQuery("SELECT count(*) from test_catalog.information_schema.tables", "VALUES 300008"); assertQuery("SELECT count(*) from test_catalog.information_schema.tables WHERE table_schema = 'test_schema1'", "VALUES 100000"); + assertQuery("SELECT count(*) from test_catalog.information_schema.tables WHERE table_schema LIKE 'test_sch_ma1'", "VALUES 100000"); + assertQuery("SELECT count(*) from test_catalog.information_schema.tables WHERE table_schema LIKE 'test_sch_ma1' AND table_schema IN ('test_schema1', 'test_schema2')", "VALUES 100000"); assertQuery("SELECT count(*) from test_catalog.information_schema.tables WHERE table_name = 'test_table1'", "VALUES 2"); + assertQuery("SELECT count(*) from test_catalog.information_schema.tables WHERE table_name LIKE 'test_t_ble1'", "VALUES 2"); + assertQuery("SELECT count(*) from test_catalog.information_schema.tables WHERE table_name LIKE 'test_t_ble1' AND table_name IN ('test_table1', 'test_table2')", "VALUES 2"); assertQuery("SELECT count(*) from test_catalog.information_schema.columns WHERE table_schema = 'test_schema1' AND table_name = 'test_table1'", "VALUES 100"); - assertEquals(METADATA_CALLS_COUNTER.get() - metadataCallsCountBeforeTests, 12); + assertEquals(METADATA_CALLS_COUNTER.get() - metadataCallsCountBeforeTests, 39); } private static DistributedQueryRunner createQueryRunner()
[TestInformationSchemaConnector->[testSchemaNamePredicate->[assertQuery],createQueryRunner->[getConnectorFactories->[build,of,collect,toImmutableList],installPlugin,of,build,Plugin,createCatalog,close,TpchPlugin],testBasic->[assertQuery],testTableNamePredicate->[assertQuery],testProject->[assertQuery],testLargeData->[assertQuery,get,assertEquals],testMixedPredicate->[assertQuery],AtomicLong]]
This test exercises information_schema queries against a large data set. This method is used to create a mock catalog.
can you move queries with `LIKE` to separate test method so when something change it is easier to track what actually changed.
@@ -1057,7 +1057,7 @@ class ClientNetwork(object): # pylint: disable=too-many-instance-attributes if self.REPLAY_NONCE_HEADER in response.headers: nonce = response.headers[self.REPLAY_NONCE_HEADER] try: - decoded_nonce = jws.Header._fields['nonce'].decode(nonce) + decoded_nonce = jws.Header._fields['nonce'].decode(nonce) # pylint: disable=no-member except jose.DeserializationError as error: raise errors.BadNonce(nonce, error) logger.debug('Storing nonce: %s', nonce)
[ClientV2->[new_order->[_authzr_from_response,_post],revoke->[_revoke],finalize_order->[_post],poll_authorizations->[_authzr_from_response],new_account->[_regr_from_response,_post]],Client->[revoke->[_revoke],agree_to_tos->[update_registration],request_challenges->[_authzr_from_response,_post],check_cert->[_get_cert],fetch_chain->[_get_cert],refresh->[check_cert],poll_and_request_issuance->[retry_after,poll,request_issuance],request_domain_challenges->[request_challenges],request_issuance->[_post],register->[_regr_from_response,_post]],ClientNetwork->[_get_nonce->[_add_nonce,head],_post_once->[_check_response,_wrap_in_jws,_send_request,_add_nonce,_get_nonce],get->[_check_response,_send_request],head->[_send_request]],BackwardsCompatibleClientV2->[new_order->[new_order,request_domain_challenges],revoke->[revoke],finalize_order->[fetch_chain,finalize_order,request_issuance],new_account_and_tos->[agree_to_tos,_assess_tos,new_account,register],__init__->[ClientV2,Client]],ClientBase->[_revoke->[_post],poll->[_authzr_from_response],update_registration->[_send_recv_regr],deactivate_registration->[update_registration],_send_recv_regr->[_regr_from_response],query_registration->[_send_recv_regr],answer_challenge->[_post]]]
Adds a nonce to the list of nonces.
I think this `pylint: disable` can be deleted.
@@ -103,10 +103,11 @@ class EthDistributor { } }) if (existingTxn) { - return this.error(res, `Address ${ethAddress} already used this code.`) + return this._error(res, `Address ${ethAddress} already used this code.`) } // Create a FaucetTxn row in Pending status. + let txnHash = null const faucetTxn = await db.FaucetTxn.create({ campaignId: campaign.id, status: enums.FaucetTxnStatuses.Pending,
[No CFG could be retrieved]
This function checks existing transactions in the pending status and creates a FaucetTxn row. Sends a transaction to a specific Ethereum address.
probably one `await` too many
@@ -233,14 +233,16 @@ public class LogAppender implements Consumer<LogAppender.WriteOperation> { if (currentOffset != 0 && currentOffset + actualLength > maxFileSize) { // switch to next file logFile.close(); - compactor.completeFile(logFile.fileId, currentOffset); + compactor.completeFile(logFile.fileId, currentOffset, nextExpirationTime); completePendingLogRequests(); logFile = fileProvider.getFileForLog(); + nextExpirationTime = -1; currentOffset = 0; log.tracef("Appending records to %s", logFile.fileId); } long seqId = nextSeqId(); - log.tracef("Apppending record to %s:%s", logFile.fileId, currentOffset); + log.tracef("Appending record to %s:%s", logFile.fileId, currentOffset); + nextExpirationTime = ExpiryHelper.mostRecentExpirationTime(nextExpirationTime, actualRequest.getExpiration()); EntryRecord.writeEntry(logFile.fileChannel, REUSED_BUFFER, writeOperation.serializedKey, writeOperation.serializedMetadata, writeOperation.serializedInternalMetadata, writeOperation.serializedValue, seqId, actualRequest.getExpiration(), actualRequest.getCreated(),
[LogAppender->[sendToWriteProcessor->[fromLogRequest],WriteOperation->[fromLogRequest->[WriteOperation]],storeRequest->[storeRequest],deleteRequest->[handleRequestCompletion,deleteRequest]]]
Append a new record to the log file.
Don't we also need to set `nextExpirationTime = -1;` when `actualRequest.isClear()`?
@@ -508,8 +508,7 @@ func printPropertyValueDiff( if shouldPrintOld && shouldPrintNew { if diff.Old.IsArchive() && - diff.New.IsArchive() && - !causedReplace { + diff.New.IsArchive() { printArchiveDiff( b, titleFunc, diff.Old.ArchiveValue(), diff.New.ArchiveValue(),
[IsObject,IsAssets,ArrayValue,NewArchiveProperty,DiffLinesToChars,Assertf,BoolValue,DiffMain,IsBool,PropertyKey,IsNull,Diff,IsOutput,Strings,Color,Itoa,NewAssetProperty,IgnoreError,New,GetAssets,ParseReference,StableKeys,Assert,Len,NumberValue,Prefix,IsComputed,TypeString,TrimSpace,ID,Failf,TypeOf,IsAsset,IsText,AssetValue,GetPath,DiffCharsToLines,Split,IsString,StringValue,IsURI,IsArchive,GetURI,Sprintf,ObjectValue,IsNumber,ArchiveValue,IsArray,RawPrefix,Keys,String,WriteString,URN,MassageIfUserProgramCodeAsset]
elemTitleFunc prints the i-th element of a branch. Check if the object has a primitive value.
note: this !causedReplace line was something that was here since some of the earliest days of Pulumi. I never touched it because I wasn't certain what impact it might have. Now that I've run into this issue first hand, I definitely think this is something we do *not* want, and we should essentially always print two archives as a diff, regardless of what the cause was.
@@ -75,7 +75,7 @@ import org.kohsuke.stapler.interceptor.RequirePOST; public class OldDataMonitor extends AdministrativeMonitor { private static final Logger LOGGER = Logger.getLogger(OldDataMonitor.class.getName()); - private HashMap<SaveableReference,VersionRange> data = new HashMap<SaveableReference,VersionRange>(); + private ConcurrentMap<SaveableReference,VersionRange> data = new ConcurrentHashMap<SaveableReference,VersionRange>(); static OldDataMonitor get(Jenkins j) { return (OldDataMonitor) j.getAdministrativeMonitor("OldData");
[OldDataMonitor->[getVersionList->[add],report->[report,get],saveAndRemoveEntries->[get,apply,add],RunSaveableReference->[hashCode->[hashCode],equals->[equals]],getData->[get],SimpleSaveableReference->[hashCode->[hashCode],equals->[equals]],VersionRange->[toString->[toString]],remove->[get,remove],onDeleted->[remove],onChange->[remove]]]
Get the old data monitor.
default `concurrencyLevel` is 16. I think we should be ok with that.
@@ -255,7 +255,15 @@ class History(object): "base_prefix": context.root_prefix, "minimum_version": minimum_major_minor, } - raise CondaUpgradeError(message) + message += dedent(""" + To work around this restriction, one can also set the config parameter + 'allow_conda_downgrades' to False at their own risk. + """) + + # we need to rethink this. It's fine as a warning to try to get users + # to avoid breaking their system. However, right now it is preventing + # normal conda operation after downgrading conda. + # raise CondaUpgradeError(message) return res
[pretty_content->[pretty_diff,is_diff],History->[get_requested_specs_map->[get_user_requests,update],write_changes->[write_head],get_state->[construct_states],object_log->[parse,is_diff],_parse_comment_line->[_parse_old_format_specs_string],construct_states->[parse,is_diff],get_user_requests->[_parse_comment_line,parse,update],print_log->[pretty_content,parse]],get_requested_specs_map,get_user_requests,History]
Get a list of user requested items. Returns a dict mapping package names to specs.
Maybe add a `TODO` here so this is easier to find later on?
@@ -560,7 +560,12 @@ public class IncrementalIndex implements Iterable<Row> int valsIndex = 0; while (retVal == 0 && valsIndex < lhsVals.length) { - retVal = lhsVals[valsIndex].compareTo(rhsVals[valsIndex]); + final String lhsVal = lhsVals[valsIndex]; + final String rhsVal = rhsVals[valsIndex]; + if(lhsVal == null && rhsVal == null) return 0; + else if(lhsVal == null) return -1; + else if(rhsVal == null) return 1; + else retVal = lhsVal.compareTo(rhsVal); ++valsIndex; } ++index;
[IncrementalIndex->[getDimensionIndex->[get],getMaxTimeMillis->[getTimestamp],size->[get],getMetricType->[get],getDimVals->[size,get,add],getDimension->[get,isEmpty],getMetricIndex->[get],iterableWithPostAggregations->[iterator->[apply->[get,getTimestamp],iterator]],TimeAndDims->[compareTo->[compareTo]],getMaxTime->[getMaxTimeMillis,isEmpty],getInterval->[getMaxTimeMillis,isEmpty],DimDim->[size->[size],keySet->[keySet],getId->[get],get->[get],getValue->[get],add->[size],sort->[size,sort,keySet]],getMinTimeMillis->[getTimestamp],add->[get,add],getMinTime->[isEmpty,getMinTimeMillis],isEmpty->[get],DimensionHolder->[get->[get],add->[get]],iterator->[apply->[],iterator]]]
Compares two TimeAndDims.
can we fix formatting here?
@@ -297,6 +297,16 @@ def main(): signalTime ) + sys.stderr.flush() + + # Run the internal tests (does the same as run_cpp_tests.py) + print('Running internal tests', file=sys.stderr) + try: + Tester.SetVerbosity(Tester.Verbosity.TESTS_OUTPUTS) + Tester.RunAllTestCases() + except: + print('[Warning]: Internal tests were unable to run', file=sys.stderr) + sys.exit(commander.exitCode)
[GetAvailableApplication->[GetModulePath],main->[GetModulePath,Usage,GetAvailableApplication,Commander,RunTestSuitInTime],main]
Parse command line options and set default values. Run the tests for a specific nightly or small unit.
I would use less verbosity, like test lists or even less
@@ -113,6 +113,13 @@ public class DefaultLocalSchedulerClient implements LocalSchedulerLink { public void reconstructObjects(List<UniqueID> objectIds, boolean fetchOnly) { RayLog.core.info("reconstruct objects {}", objectIds); _reconstruct_objects(client, getIdBytes(objectIds), fetchOnly); + RayLog.core.info("task id is {}", UniqueIdHelper.computeTaskId(objectIds.get(0))); + } + + @Override + public UniqueID generateTaskId(UniqueID driverId, UniqueID parentTaskId, int taskIndex) { + byte[] bytes = _generateTaskId(driverId.getBytes(), parentTaskId.getBytes(), taskIndex); + return new UniqueID(bytes); } @Override
[DefaultLocalSchedulerClient->[wait->[_waitObject]]]
Reconstructs the objects with the specified IDs.
1) this log can be merged with the above one. 2) the message is ambiguous. Imagine when you read `task id is xxx` in tons of log messages. Do you have any idea what this message is about? Thus, as a good practice, we should always be specific about what a log message is about. In this case, it can be something like `Reconstructing objects for task xxx, objects ids are xxx.`. 3) Note that `UniqueIdHelper.computeTaskId` will always be called no matter logging level is below INFO or not. In this case, the computation may not be very heavy. But it's still better to protect the logging call with `if (logger.isInfoEnable())`, to avoid unnecessary computation.
@@ -1,10 +1,10 @@ require 'fastercsv' class AssignmentsController < ApplicationController - before_filter :authorize_only_for_admin, :except => [:deletegroup, :delete_rejected, :disinvite_member, :invite_member, + before_filter :authorize_only_for_admin, except: [:deletegroup, :delete_rejected, :disinvite_member, :invite_member, :creategroup, :join_group, :decline_invitation, :index, :student_interface] - before_filter :authorize_for_student, :only => [:student_interface, :deletegroup, :delete_rejected, :disinvite_member, + before_filter :authorize_for_student, only: [:student_interface, :deletegroup, :delete_rejected, :disinvite_member, :invite_member, :creategroup, :join_group, :decline_invitation] - before_filter :authorize_for_user, :only => [:index] + before_filter :authorize_for_user, only: [:index] auto_complete_for :assignment, :name # Publicly accessible actions ---------------------------------------
[AssignmentsController->[new->[new],decline_invitation->[decline_invitation]]]
This action shows the n - node node in the student s group. It shows the n.
Line is too long. [123/80]<br>Put one space between the method name and the first argument.
@@ -27,7 +27,7 @@ import java.util.List; import java.util.function.BiPredicate; import java.util.function.Predicate; -final class ComparisonUtil { +public final class ComparisonUtil { private static final List<Handler> HANDLERS = ImmutableList.<Handler>builder() .add(handler(SqlBaseType::isNumber, ComparisonUtil::handleNumber))
[ComparisonUtil->[isValidComparison->[KsqlException,nullSchemaException,baseType,orElse],Handler->[requireNonNull],handleString->[baseType],handleBoolean->[baseType],nullSchemaException->[getValue,lineSeparator,KsqlException,name],handleNumber->[isNumber],handler->[handler,Handler],build]]
Creates a new instance of ComparisonUtil. This method is used to compare two KsqlOps.
I think this can be package private again now.
@@ -1830,7 +1830,7 @@ class UpdatePFS(SignedMessage): 'type': self.__class__.__name__, 'chain_id': self.chain_id, 'nonce': self.nonce, - 'token_network_address': to_normalized_address(self.token_network_address), + 'token_network_address': to_checksum_address(self.token_network_address), 'channel_identifier': self.channel_identifier, 'transferred_amount': self.transferred_amount, 'locked_amount': self.locked_amount,
[from_dict->[from_dict],LockedTransferBase->[unpack->[Lock],__init__->[assert_transfer_values]],RefundTransfer->[from_event->[Lock],from_dict->[from_dict],to_dict->[to_dict],unpack->[Lock]],EnvelopeMessage->[message_hash->[packed],__init__->[assert_envelope_values]],Message->[__ne__->[__eq__]],Lock->[from_bytes->[Lock],as_bytes->[Lock],__ne__->[__eq__]],Pong->[unpack->[Pong]],RequestMonitoring->[from_dict->[from_dict],from_balance_proof_signed_state->[from_balance_proof_signed_state],packed->[pack],sign->[sign,_data_to_sign,_sign],unpack->[SignedBlindedBalanceProof],to_dict->[to_dict]],UpdatePFS->[packed->[pack]],SignedMessage->[sender->[_data_to_sign],sign->[sign,_data_to_sign],_data_to_sign->[packed],decode->[unpack]],RevealSecret->[unpack->[RevealSecret]],LockedTransfer->[from_event->[Lock],from_dict->[from_dict],to_dict->[to_dict],unpack->[Lock]],SignedBlindedBalanceProof->[_sign->[sign,_data_to_sign]],decode->[decode]]
Returns a dict representation of the block.
hmm this is not looking good. Every other message we send around exports to the `normalized_address`. Why should this message be different? I would instead adjust the PFS and not the client here.
@@ -93,6 +93,14 @@ public class ZetaSQLQueryPlanner implements QueryPlanner { // requires the JoinCommuteRule, which doesn't work without struct flattening. if (rule instanceof JoinCommuteRule) { continue; + } else if (rule instanceof FilterCalcMergeRule || rule instanceof ProjectCalcMergeRule) { + // In order to support Java UDF, we need both BeamZetaSqlCalcRel and BeamCalcRel. It is + // because BeamZetaSqlCalcRel can execute ZetaSQL built-in functions while BeamCalcRel + // can execute UDFs. So during planning, we expect both Filter and Project are converted + // to Calc nodes before merging with other Project/Filter/Calc nodes. Thus we should not + // add FilterCalcMergeRule and ProjectCalcMergeRule. CalcMergeRule will achieve equivalent + // planning result eventually. + continue; } else if (rule instanceof BeamCalcRule) { bd.add(BeamZetaSqlCalcRule.INSTANCE); } else {
[ZetaSQLQueryPlanner->[setDefaultTimezone->[setDefaultTimezone],getDefaultTimezone->[getDefaultTimezone],getLanguageOptions->[getLanguageOptions],convertToBeamRel->[convertToBeamRel]]]
Modify the given rule sets for ZetaSQL.
Shouldn't we just disable these rules all together (in `BeamRuleSets.java`) if they are redundant instead of special casing them for ZetaSQL?
@@ -0,0 +1,5 @@ +#include "7skb.h" + +void matrix_init_kb(void) { + matrix_init_user(); +};
[No CFG could be retrieved]
No Summary Found.
This can be deleted.
@@ -1169,6 +1169,15 @@ func TestIngressClassAnnotation(t *testing.T) { iPaths(onePath(iPath("/derp"), iBackend("service1", intstr.FromInt(80))))), ), ), + buildIngress( + iNamespace("testing"), + iAnnotation(annotationKubernetesIngressClass, "custom"), + iRules( + iRule( + iHost("herp"), + iPaths(onePath(iPath("/derp"), iBackend("service1", intstr.FromInt(80))))), + ), + ), } services := []*corev1.Service{
[DeepEqual,Setenv,newK8sClient,FromInt,loadIngresses,Error,New,NotNil,Errorf,Run,Nil,Clearenv,EqualValues,Equal,loadConfig,Parallel,NoError,FileOrContent,updateIngressStatus,Fatalf,Sprintf,EqualError,FromString]
TestIngressClassAnnotation tests that the specifed header is unique for all extensions. One - line mock of the n - tuple rule that can be found in the cluster.
Could you replace the `herp/derp` with `foo/bar` or `foo/path` to match the format of other tests?
@@ -56,7 +56,7 @@ class MemoryProfiler end file.puts "Top 20" - delta.sort_by { |k,v| -v.abs }[0..19].sort_by { |k,v| -v}.each do |k,v| + delta.sort_by { |k,v| -v.abs }[0..19].sort_by { |k,v| -v }.each do |k,v| file.printf "%+5d: %s (%d)\n", v, k.name, curr[k] unless v == 0 end file.flush
[MemoryProfiler->[start->[new,to_i,flush,start,merge,each_object,sleep,loop,update,class,each,name,printf,open,clear,puts,push]]]
Initialize a new object.
Space missing after comma.<br>Line is too long. [82/80]
@@ -39,6 +39,7 @@ async function main () { const opts = parseCommandLine(); const currentVersion = await versionUtils.getElectronVersion(); const version = await nextVersion(opts.bump, currentVersion); + const shouldUpdateSupported = (opts.bump, currentVersion, version); const parsed = semver.parse(version); const components = {
[No CFG could be retrieved]
The main entry point for the update script. get next version based on bumpType.
Something weird has happened here
@@ -145,6 +145,11 @@ export class AmpList extends AMP.BaseElement { } } + /** @override */ + doesReuseLoadingIndicator() { + return true; + } + /** * Wraps `toggleFallback()` in a mutate context. * @param {boolean} state
[No CFG could be retrieved]
Provides a callback to handle the case where a node is not available in the DOM. Fetches the list data from src and returns a promise that resolves when the list has been populated.
I like to suggest renaming to `isLoadingReused`. `does`prefix is not commonly used in AMP and other related loading-related methods just say `loading` instead of `loadingIndicator`
@@ -27,13 +27,14 @@ var maskList = MakeStringSet( "urls", "host", "hosts", + "authorization", ) func applyLoggingMask(c interface{}) { switch cfg := c.(type) { case map[string]interface{}: for k, v := range cfg { - if maskList.Has(k) { + if maskList.Has(strings.ToLower(k)) { if arr, ok := v.([]interface{}); ok { for i := range arr { arr[i] = "xxxxx"
[Has]
applyLoggingMask applies a logging mask to the given configuration object.
Let's add `proxy-authorization` as well. We have a few inputs (including heartbeat) that allow users to configure proxies.
@@ -68,12 +68,16 @@ namespace System.Net.NetworkInformation private Socket GetRawSocket(SocketConfig socketConfig) { IPEndPoint ep = (IPEndPoint)socketConfig.EndPoint; - - // Setting Socket.DontFragment and .Ttl is not supported on Unix, so socketConfig.Options is ignored. AddressFamily addrFamily = ep.Address.AddressFamily; + Socket socket = new Socket(addrFamily, SocketType.Raw, socketConfig.ProtocolType); socket.ReceiveTimeout = socketConfig.Timeout; socket.SendTimeout = socketConfig.Timeout; + if (addrFamily == AddressFamily.InterNetworkV6) + { + socket.DualMode = false; + } + if (socketConfig.Options != null && socketConfig.Options.Ttl > 0) { socket.Ttl = (short)socketConfig.Options.Ttl;
[Ping->[SendIcmpEchoRequestOverRawSocketAsync->[TryGetPingReply],PingReply->[TryGetPingReply]]]
Get raw socket.
Why is this necessary? Isn't it only set to true by the two-arg Socket ctor that's not being used here?
@@ -398,7 +398,7 @@ class FnApiRunnerTest(unittest.TestCase): # The actual stage name ends up being something like 'm_out/lamdbda...' m_out, = [ - metrics for name, metrics in postgbk_metrics.ptransforms.items() + metrics for name, metrics in list(postgbk_metrics.ptransforms.items()) if name.startswith('m_out')] self.assertEqual( 5,
[FnApiRunnerTest->[test_multimap_side_input->[create_pipeline],test_pardo_metrics->[MyOtherDoFn,create_pipeline,MyDoFn],test_group_by_key->[create_pipeline],test_pardo->[create_pipeline],test_gbk_side_input->[create_pipeline],test_read->[create_pipeline],test_progress_metrics->[create_pipeline],test_pardo_side_outputs->[create_pipeline],test_no_subtransform_composite->[create_pipeline,First],test_pardo_unfusable_side_inputs->[create_pipeline],test_pardo_windowed_side_inputs->[create_pipeline],test_flattened_side_input->[create_pipeline],test_error_message_includes_stage->[create_pipeline],test_combine_per_key->[create_pipeline],test_error_traceback_includes_user_code->[create_pipeline],test_assert_that->[create_pipeline],test_pardo_side_inputs->[create_pipeline],test_create->[create_pipeline],test_metrics->[create_pipeline],test_windowing->[create_pipeline],test_pardo_side_and_main_outputs->[create_pipeline],test_flatten->[create_pipeline]]]
Test the progress metrics. Check if all the elements in the system have a count of one or two.
Do we need the extra `list()` here?
@@ -36,6 +36,11 @@ public class JavaExternalSerializerProtocol extends AbstractSerializationProtoco */ @Override protected byte[] doSerialize(Object object) throws Exception { + if (object instanceof CursorStreamProvider) { + try (CursorStream cursor = ((CursorStreamProvider) object).openCursor()) { + return SerializationUtils.serialize(toByteArray(cursor)); + } + } validateForSerialization(object); return SerializationUtils.serialize((Serializable) object); }
[JavaExternalSerializerProtocol->[serialize->[serialize],doSerialize->[serialize]]]
Serialize an object.
SAme here with deserialization
@@ -65,11 +65,9 @@ class Order(models.Model): net_field='shipping_price_net', gross_field='shipping_price_gross') token = models.CharField(max_length=36, unique=True) total_net = MoneyField( - currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2, - blank=True, null=True) + currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2) total_gross = MoneyField( - currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2, - blank=True, null=True) + currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2) total = TaxedMoneyField(net_field='total_net', gross_field='total_gross') voucher = models.ForeignKey( Voucher, null=True, related_name='+', on_delete=models.SET_NULL)
[DeliveryGroup->[can_ship->[is_shipping_required],get_total->[get_total]],Payment->[get_purchased_items->[get_lines]],Order->[get_subtotal->[get_lines],is_shipping_required->[is_shipping_required]]]
Adds fields to the auth_user_model. 2. 2. 2 - The last order token.
Wouldn't `default=0` do the trick here? We're setting zero price anyway in all places where an order is created.
@@ -32,7 +32,9 @@ namespace Content.Server.GameObjects.Components.Atmos private BoundUserInterface _userInterface = default!; private GasAnalyzerDanger _pressureDanger; private float _timeSinceSync; - private const float TimeBetweenSyncs = 10f; + private const float TimeBetweenSyncs = 2f; + private bool _checkPlayer = false; // Check at the player pos or at some other tile? + private Vector2 _offset; // The direction in which you're holding the analyzer public override void Initialize() {
[GasAnalyzerComponent->[UserInterfaceOnReceiveMessage->[UpdateUserInterface,Resync],Dropped->[CloseInterface],Initialize->[Initialize],AfterInteract->[OpenInterface]]]
Initialize method.
Why do you store an offset? instead of, y'know, the other position?
@@ -24,7 +24,7 @@ import org.apache.beam.sdk.testing.TestPipelineOptions; /** * Properties needed when using HadoopInputFormatIO with the Beam SDK. */ -public interface HIFITestOptions extends TestPipelineOptions { +interface HIFITestOptions extends TestPipelineOptions { //Cassandra test options @Description("Cassandra Server IP")
[No CFG could be retrieved]
Gets the cassandra server IP.
Aren't PipelineOptions supposed to be public?
@@ -78,5 +78,7 @@ class NextTokenLmReader(DatasetReader): input_field = TextField(tokens, self._token_indexers) fields: Dict[str, Field] = {"tokens": input_field} if target: - fields["target_ids"] = TextField([Token(target)], self._token_indexers) + wordpiece = self._targets_tokenizer.tokenize(target)[0] + target_token = Token(text=target, text_id=wordpiece.text_id, type_id=wordpiece.type_id) + fields["target_ids"] = TextField([target_token], self._token_indexers) return Instance(fields)
[NextTokenLmReader->[text_to_instance->[tokenize,Instance,Token,TextField],__init__->[WhitespaceTokenizer,SingleIdTokenIndexer,super],_read->[text_to_instance,open,tokenize,error]],register,getLogger]
Converts a text string into a sequence of tokens.
Can you add a similar note here to what you have in the masked language model?
@@ -413,13 +413,10 @@ def yolov3_loss(x, gtbox, gtlabel, anchors, + anchor_mask, class_num, ignore_thresh, - loss_weight_xy=None, - loss_weight_wh=None, - loss_weight_conf_target=None, - loss_weight_conf_notarget=None, - loss_weight_class=None, + downsample, name=None): """ ${comment}
[detection_map->[__create_var],prior_box->[_is_list_or_tuple_],anchor_generator->[_is_list_or_tuple_],density_prior_box->[_is_list_or_tuple_],ssd_loss->[target_assign,bipartite_match,box_coder,iou_similarity,__reshape_to_2d],multi_box_head->[_is_list_or_tuple_and_equal->[_is_list_or_tuple_],_is_list_or_tuple_and_equal,_is_list_or_tuple_,_reshape_with_axis_,prior_box]]
Outputs a loss of a single node in the yolov3 model. Computes a single missing node in the network. Adds a yolov3 loss to the object.
downsample -> downsample_ratio
@@ -38,6 +38,13 @@ namespace Dynamo.PackageDependency } } + /// <summary> + /// Re-directs to a web link to get the feedback from the user. + /// </summary> + private void ProvideFeedback(object sender, EventArgs e) + { + System.Diagnostics.Process.Start(FeedbackLink); + } /// <summary> /// Event handler for workspaceAdded event
[PackageDependencyView->[OnWorkspaceCleared->[Clear],OnWorkspaceChanged->[PropertyChanged,DependencyRegen],DependencyRegen->[Clear,LoadedPackageDependencies,Name,PackageDependencies,WarningColor,ToString,Add,Equals],OnWorkspacePropertyChanged->[DependencyRegen,PropertyName,PackageDependencies,nameof],InitializeComponent,CurrentWorkspaceCleared,CurrentWorkspaceModel,PropertyChanged,CurrentWorkspaceChanged,AddToExtensionsSideBar],DependencyTable->[Add],ColumnData->[Red,ConvertFrom]]
Creates a view of the package - dependent objects that can be used to view the package - if name == currentWorkspace. PackageDependencies. Name then regenerates the .
this should be wrapped in a try
@@ -36,6 +36,7 @@ public interface ExpressionLanguageContext boolean contains(String name); + @Deprecated <T> void addPrivateVariable(String name, T value); }
[No CFG could be retrieved]
Add a private variable.
please indicate what to use instead
@@ -115,7 +115,15 @@ class Jetpack_Instant_Search extends Jetpack_Search { ); } - $prefix = Jetpack_Search_Options::OPTION_PREFIX; + $prefix = Jetpack_Search_Options::OPTION_PREFIX; + $posts_per_page = (int) get_option( 'posts_per_page' ); + if ( $posts_per_page > 20 ) { + $posts_per_page = 20; + } + if ( $posts_per_page <= 0 ) { + // -1 is inf posts in Core WP ;) + $posts_per_page = 20; + } $options = array( 'overlayOptions' => array( 'colorTheme' => get_option( $prefix . 'color_theme', 'light' ),
[Jetpack_Instant_Search->[filter__posts_pre_query->[should_handle_query],action__parse_query->[add_aggregations_to_es_query_builder,build_aggregation,instant_api],auto_config_search->[get_preconfig_widget_options],load_assets->[load_and_initialize_tracks,inject_javascript_options],load_php->[base_load_php]]]
Inject JS options into the options array This function is used to create a post type search object. Filters out the JetpackInstantSearchOptions object.
Hey @gibrown any reason we don't want to right this with an OR statement as we are defaulting to the same 20 posts max? it would give us 1 line to change vs 2 if the api limit changes. if( $posts_per_page > 20 || $posts_per_page <= 0 ) {
@@ -12,7 +12,11 @@ namespace NServiceBus.Timeout.Core /// Retrieves the next range of timeouts that are due. /// </summary> /// <param name="startSlice">The time where to start retrieving the next slice, the slice should exclude this date.</param> - /// <returns>Returns the next range of timeouts that are due.</returns> - Task<TimeoutsChunk> GetNextChunk(DateTime startSlice); + /// <param name="maxChunkSize">The maximum chunk size that the caller specifies to limit the number of results.</param> + /// <param name="cancellationToken">The cancellation token used by the caller to notify that the pending work should be cancelled.</param> + /// <returns> + /// Returns the next range of timeouts that are due. + /// </returns> + Task<TimeoutsChunk> GetNextChunk(DateTime startSlice, int maxChunkSize = Int32.MaxValue, CancellationToken cancellationToken = default(CancellationToken)); } } \ No newline at end of file
[No CFG could be retrieved]
Get next chunk.
Clarify that the storage is allowed to return less?
@@ -307,7 +307,10 @@ public class UnboundedSourceWrapperTest { if (seenWatermark.get()) { break; } - Thread.sleep(50); + + // Consider that UnboundedSourceWrapper needs to acquire the checkpoint lock below. + // So wait for enough time for that to happen. + Thread.sleep(200); // Need to advance this so that the watermark timers in the source wrapper fire // Synchronize is necessary because this can interfere with updating the PriorityQueue
[UnboundedSourceWrapperTest->[ParameterizedUnboundedSourceWrapperTest->[testWatermarkEmission->[run],testRestore->[collect->[collect],run],testValueEmission->[collect->[collect],run]],BasicTest->[testSourceWithNoReaderDoesNotShutdown->[run]]]]
Emit a watermark when the source receives a non - zero number of elements. Collect all the elements in the source stream until a non - zero value is found.
Can we use a `CountdownLatch` or something like that instead of a sleep?
@@ -64,11 +64,12 @@ pack_daos_response(Mgmt__DaosResp *daos_resp, Drpc__Response *drpc_resp) void ds_mgmt_drpc_kill_rank(Drpc__Call *drpc_req, Drpc__Response *drpc_resp) { - Mgmt__DaosRank *req = NULL; - Mgmt__DaosResp *resp = NULL; + Mgmt__KillRankReq *req = NULL; + Mgmt__DaosResp *resp = NULL; + int sig; /* Unpack the inner request from the drpc call body */ - req = mgmt__daos_rank__unpack( + req = mgmt__kill_rank_req__unpack( NULL, drpc_req->body.len, drpc_req->body.data); if (req == NULL) { drpc_resp->status = DRPC__STATUS__FAILURE;
[No CFG could be retrieved]
Fill the packed daos response with the inner request. This function handles the set rank request and response.
This never gets sent, right? Does that confuse anything on the control plane side?
@@ -1832,9 +1832,7 @@ func generateModulesFromSchemaPackage(tool string, pkg *schema.Package) map[stri return modules } -// GeneratePackage generates the docs package with docs for each resource given the Pulumi -// schema. -func GeneratePackage(tool string, pkg *schema.Package) (map[string][]byte, error) { +func Initialize(tool string, pkg *schema.Package) { templates = template.New("").Funcs(template.FuncMap{ "htmlSafe": func(html string) template.HTML { // Markdown fragments in the templates need to be rendered as-is,
[genResource->[genNestedTypes,genLookupParams,genConstructors,getProperties,getConstructorResourceInfo,genResourceHeader],gen->[getModuleFileName,genResource,add],getPropertiesWithIDPrefixAndExclude->[typeString],genIndex->[getModuleFileName],genConstructors->[genConstructorTS,genConstructorPython,genConstructorGo,genConstructorCS],genLookupParams->[getTSLookupParams,getCSLookupParams,getPythonLookupParams,getGoLookupParams],getNestedTypes->[contains,getNestedTypes,add],getConstructorResourceInfo->[getLanguageModuleName],cleanTypeString->[cleanTypeString,getLanguageModuleName],getTypes->[getNestedTypes,getTypes],typeString->[cleanTypeString,getLanguageModuleName,typeString],gen,details]
GeneratePackage generates the docs package with the docs for each object given the Pulumi generatePackage generates the package files for the given object types.
Adding this initialize allows to use the generated modules to do other things with it as separate operations rather than in a single function. It seemed like a good idea to decompose them into separate things.
@@ -404,9 +404,14 @@ found: block_map->first_free = block; #if DEBUG_BLOCK_FREE - /* memset the whole block incase some not aligned ptr*/ - alloc_memset_region((void *)(block_map->base + block_map->block_size * block), - block_map->block_size * (i - block), DEBUG_BLOCK_FREE_VALUE); + /* memset the whole block incase some not aligned ptr */ + validate_memory( + (void *)(block_map->base + block_map->block_size * block), + block_map->block_size * (i - block)); + memset( + (void *)(block_map->base + block_map->block_size * block), + DEBUG_BLOCK_FREE_VALUE, block_map->block_size * + (i - block)); #endif }
[_zalloc->[_malloc,bzero],free_heap->[dcache_writeback_region,panic,cpu_get_id,trace_mem_error],_balloc->[alloc_block,spin_unlock_irq,cache_to_uncache,spin_lock_irq,get_heap_from_caps,alloc_cont_blocks],inline->[dcache_writeback_invalidate_region,block_get_size],heap_trace->[trace_mem_init],init_heap->[spinlock_init,panic,init_heap_map],heap_trace_all->[heap_trace,trace_mem_init],rfree->[trace_error,spin_unlock_irq,cpu_get_id,is_uncached,panic,spin_lock_irq,free_block,uncache_to_cache],_malloc->[spin_unlock_irq,rmalloc_sys,trace_mem_error,cpu_get_id,rmalloc_sys_runtime,spin_lock_irq,rmalloc_runtime],alloc_trace_buffer_heap->[trace_heap_blocks,get_buffer_heap_from_caps,trace_mem_error],mm_heap->[cpu_get_id],rzalloc_core_sys->[spin_lock_irq,spin_unlock_irq,rmalloc_sys,bzero],void->[trace_error,alloc_block,dcache_writeback_invalidate_region,get_ptr_from_heap,trace_mem_error,cache_to_uncache,get_heap_from_ptr,flush_block_map,cpu_get_id,panic,alloc_memset_region,get_heap_from_caps],alloc_trace_runtime_heap->[trace_heap_blocks,get_runtime_heap_from_caps,trace_mem_error]]
free_block - free a block set first free block.
Why do we need validate_memory() in free_blocks()? Suppose this memory is used and set other value than DEBUG_BLOCK_FREE_VALUE. So different value from DEBUG_BLOCK_FREE_VALUE should be a right result.
@@ -93,6 +93,7 @@ public class RealtimePlumber implements Plumber { private static final EmittingLogger log = new EmittingLogger(RealtimePlumber.class); private static final int WARN_DELAY = 1000; + private static final int CHECK_INTERNAL_COUNT = 100000; private final DataSchema schema; private final RealtimeTuningConfig config;
[RealtimePlumber->[persistHydrant->[persist,computePersistDir],persistAndMerge->[doRun->[add]],finishJob->[persistAndMerge],addSink->[add],add->[add],computePersistDir->[computeBaseDir],persist->[add],mergeAndPush->[persistAndMerge,add],bootstrapSinksFromDisk->[compare->[compare],add]]]
Imports the java. util. System properties and creates a realtime plumber. Pusher ; SegmentPublisher ; SegmentHandoffNotifier ; SegmentHandoffNotifier ; SegmentHand.
suggest rename to more descriptive name, may be MAX_ROW_EXCEED_CHECK_COUNT
@@ -111,9 +111,15 @@ $userDetails = $userList[0]; <div id="pageWrapper"> <h1><?php echo HEADING_TITLE ?></h1> + <?php + echo zen_draw_form('users', FILENAME_ADMIN_ACCOUNT); + if (isset($formAction)) { + echo zen_draw_hidden_field('action', $formAction); + } -<form action="<?php echo zen_href_link(FILENAME_ADMIN_ACCOUNT) ?>" method="post"> -<?php if (isset($formAction)) echo zen_draw_hidden_field('action',$formAction) . zen_draw_hidden_field('securityToken', $_SESSION['securityToken']); ?> + if ($action == 'edit' || $action == 'password') { + echo zen_draw_hidden_field('user', $user); + } ?> <table cellspacing="0"> <tr class="headingRow"> <th class="name"><?php echo TEXT_ADMIN_NAME ?></th>
[add,add_session]
Displays the admin admin admin admin menu. Display a list of user s neccessary words.
Why was this unused field added?
@@ -158,6 +158,11 @@ class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase): self.assertEqual(layer.kernel.constraint, k_constraint) self.assertEqual(layer.bias.constraint, b_constraint) + def test_locallyconnected1d_invalid_output_shapes(self): + kwargs = {'filters': 2 , 'kernel_size': 10} + with self.assertRaises(ValueError): + keras.layers.LocallyConnected1D(**kwargs) + @combinations.generate(combinations.combine(mode=['graph', 'eager'])) class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
[copy_model_weights->[copy_lc_weights_2_to_1,copy_lc_weights_2_to_3]]
Test locally connected 1D regularization.
Nit: delete the space after 'filters': 2. 'filters': 2 , => 'filters': 2,
@@ -19,6 +19,16 @@ #include <openssl/x509.h> #include <openssl/x509v3.h> +#ifdef WIN32 +#include <windows.h> +#include <winsock2.h> +#include <ws2tcpip.h> +#else +#include <netdb.h> +#include <sys/socket.h> +#include <sys/types.h> +#endif + #include <osquery/core/tables.h> #include <osquery/logger/logger.h>
[void->[has_cert_expired],genTLSCertificate->[getTLSCertificate]]
Creates an object of type int representing the type of the object that is returned by the Get the signature of a certificate.
This should be moved inside cmake/flags.cmake under `windows_common_link_options`
@@ -99,7 +99,7 @@ import java.nio.charset.UnsupportedCharsetException; * {@link #writerIndex() writerIndex} and increase it by the number of written * bytes. If the argument of the write operation is also a {@link ByteBuf}, * and no source index is specified, the specified buffer's - * {@link #readerIndex() readerIndex} is increased together. + * {@link #writerIndex() writerIndex} is increased together. * <p> * If there's not enough writable bytes left, {@link IndexOutOfBoundsException} * is raised. The default value of newly allocated buffer's
[No CFG could be retrieved]
Reads the specified number of bytes from the specified buffer and fills it with random integers. This code is called when a block of data is read from the memory and the reader index.
This actually should be 'readerIndex'. The previous section about readable bytes should indeed use 'writerIndex'. Thanks for a hint.
@@ -37,6 +37,7 @@ type Delegator struct { sig string sigID keybase1.SigID merkleTriple MerkleTriple + postArg APIArg } func (d Delegator) getSigningKID() keybase1.KID { return d.signingKey.GetKID() }
[Run->[CheckArgs],updateLocalState->[getExistingKID,IsSibkey],post->[getSigningKID]]
getSigningKID returns the KID of the signing key or the existing key.
This is where we're stashing some temporary information in the non-aggregated flow
@@ -64,7 +64,11 @@ public class LiquibaseFactory { database.setDefaultSchemaName(config.defaultSchemaName.get()); } } - return new Liquibase(config.changeLog, resourceAccessor, database); + Liquibase liquibase = new Liquibase(config.changeLog, resourceAccessor, database); + if (config.params != null) { + config.params.forEach((k, v) -> liquibase.getChangeLogParameters().set(k, v)); + } + return liquibase; } catch (Exception ex) { throw new IllegalStateException(ex);
[LiquibaseFactory->[createContexts->[Contexts],createLiquibase->[findCorrectDatabaseImplementation,Liquibase,isPresent,ClassLoaderResourceAccessor,IllegalStateException,get,JdbcConnection,ifPresent,setDatabaseChangeLogLockTableName,getConnection,setDefaultCatalogName,getContextClassLoader,setDefaultSchemaName,setDatabaseChangeLogTableName],createLabels->[LabelExpression]]]
Creates a new Liquibase object.
We avoid lambdas in runtime code as they have a cost. The map should never be null.
@@ -115,7 +115,7 @@ public class SessionMonitorTests { TEST_EXP_POLICY); registry.addTicket(ticket); } - + if (ticket != null) { for (int i = 0; i < stCount; i++) { registry.addTicket(ticket.grantServiceTicket(
[SessionMonitorTests->[testObserveWarnServiceTicketsExceeded->[assertTrue,observe,setServiceTicketCountWarnThreshold,contains,addTicketsToRegistry,assertEquals,getCode],testObserveOkJpaTicketRegistry->[getSessionCount,size,observe,getServiceTicketCount,setTicketRegistry,addTicketsToRegistry,assertEquals,getCode],testObserveWarnSessionsExceeded->[setSessionCountWarnThreshold,assertTrue,observe,contains,addTicketsToRegistry,assertEquals,getCode],setUp->[setTicketRegistry,SessionMonitor,DefaultTicketRegistry],addTicketsToRegistry->[MockService,getAuthentication,TicketGrantingTicketImpl,getNewTicketId,grantServiceTicket,addTicket],testObserveOk->[getSessionCount,observe,getServiceTicketCount,addTicketsToRegistry,assertEquals,getCode],HardTimeoutExpirationPolicy,DefaultUniqueTicketIdGenerator]]
Add tickets to the ticket registry.
Not related to this change, just a cleanup...
@@ -358,7 +358,16 @@ namespace System.Net.Http.Functional.Tests // Repeat call. content = new StringContent(data, Encoding.UTF8); - content.Headers.ContentMD5 = TestHelper.ComputeMD5Hash(data); + if (PlatformDetection.IsBrowser) + { + // [ActiveIssue("https://github.com/dotnet/runtime/issues/37669", TestPlatforms.Browser)] + content.Headers.Add("Content-MD5-Skip", "browser"); + } + else + { + content.Headers.ContentMD5 = TestHelper.ComputeMD5Hash(data); + } + using (response = await client.PostAsync(remoteServer.VerifyUploadUri, content)) { Assert.Equal(HttpStatusCode.OK, response.StatusCode);
[No CFG could be retrieved]
Asynchronous method to call the remote server with a specific response. \ u4e82 \ u67ab4 \ u67ab4 \ u67.
Should you have this extracted to a method to reduce copy&paste? :-)
@@ -2472,6 +2472,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M if (projectId.longValue() == -1) { if (caller.getType() == Account.ACCOUNT_TYPE_NORMAL) { permittedAccounts.addAll(_projectMgr.listPermittedProjectAccounts(caller.getId())); + + //permittedAccounts can be empty when the caller is not a part of any project (a domain account) + if (permittedAccounts.isEmpty()) { + permittedAccounts.add(caller.getId()); + } } else { domainIdRecursiveListProject.third(Project.ListProjectResourcesCriteria.ListProjectResourcesOnly); }
[AccountManagerImpl->[enableUser->[doInTransaction->[enableAccount,doSetUserStatus],updateLoginAttempts,checkAccess],getUserByApiKey->[getUserByApiKey],listAclGroupsByAccount->[listAclGroupsByAccount],createUser->[createUser,checkAccess],deleteUserAccount->[deleteAccount,checkAccess],enableAccount->[enableAccount,checkAccess],createApiKeyAndSecretKey->[getUserIncludingRemoved,checkAccess],buildACLSearchParameters->[checkAccess],checkAccessAndSpecifyAuthority->[isResourceDomainAdmin],finalyzeAccountId->[getActiveAccountByName],isInternalAccount->[isRootAdmin],createUserAccount->[checkAccess,createUserAccount],disableUser->[doSetUserStatus,checkAccess],disableAccount->[disableAccount,checkAccess],getKeys->[getActiveUser,checkAccess,getAccount],deleteUser->[checkAccess],AccountCleanupTask->[runInContext->[getSystemAccount,cleanupAccount,disableAccount]],updateAccount->[checkAccess],finalizeOwner->[checkAccess,isAdmin],getUserAccount->[getUserAccount,updateLoginAttempts,isInternalAccount],checkAccess->[isRootAdmin,checkAccess],lockUser->[doSetUserStatus,checkAccess,lockAccount],lockAccount->[checkAccess,lockAccount],updateUser->[updateUser,checkAccess]]]
This method builds the ACL search parameters. This method checks if the caller can access project resources.
Is it possible for this `permittedAccounts` be null? If not, the code is ok, otherwise it might be a good idea to use `CollectionUtils.isEmpty`
@@ -29,7 +29,13 @@ std::map<std::string, pt::ptree> getQueryPacksContent() { Status status = osquery::parseJSON(pack_path, pack_tree); pt::ptree pack_file_element = pack_tree.get_child("test_pack_test"); - result = QueryPackParsePacks(pack_file_element, false, true); + ConfigDataInstance config; + const auto& pack_parser = config.getParser("packs"); + if (pack_parser == nullptr) { + return result; + } + const auto& queryPackParser = std::static_pointer_cast<QueryPackConfigParserPlugin>(pack_parser); + result = queryPackParser->QueryPackParsePacks(pack_file_element, false, true); return result; }
[getQueryPacksContent->[getQueryPacksContent,getQueryPacksExpectedResults]]
getQueryPacksContent - Returns the list of expected query packs for the current query.
Check and make sure queryPackParser != nullptr
@@ -9,13 +9,14 @@ export default Controller.extend({ application: controller(), queryParams: ["period", "order", "asc", "name", "group", "exclude_usernames"], period: "weekly", - order: "likes_received", + order: "", asc: null, name: "", group: null, nameInput: null, exclude_usernames: null, isLoading: false, + columns: null, showTimeRead: equal("period", "all"),
[No CFG could be retrieved]
A default controller for the list of items in a sequence.
We can't have a default value for the query param, because the columns are dynamic. Have to wait until the columns are loaded, then use the first column as the default order
@@ -44,6 +44,12 @@ void make_tree(MMVManip &vmanip, v3s16 p0, bool is_apple_tree, MapNode treenode(ndef->getId("mapgen_tree")); MapNode leavesnode(ndef->getId("mapgen_leaves")); MapNode applenode(ndef->getId("mapgen_apple")); + if (treenode == CONTENT_IGNORE) + errorstream << "Treegen: Mapgen alias 'mapgen_tree' is invalid!" << std::endl; + if (leavesnode == CONTENT_IGNORE) + errorstream << "Treegen: Mapgen alias 'mapgen_leaves' is invalid!" << std::endl; + if (applenode == CONTENT_IGNORE) + errorstream << "Treegen: Mapgen alias 'mapgen_apple' is invalid!" << std::endl; PseudoRandom pr(seed); s16 trunk_h = pr.range(4, 5);
[No CFG could be retrieved]
This is a helper method that is used to create a tree from a list of nodes. - - - - - - - - - - - - - - - - - -.
does this comparison even work? treenode is a `MapNode` (struct) and CONTENT_IGNORE is a number
@@ -58,9 +58,13 @@ const routes = [ path: '/theming/theming-values', }, { - text: 'Overrides', + text: 'Overriding components', path: '/theming/understanding-overrides', }, + { + text: 'Overriding icons globally', + path: '/theming/overriding-icons', + }, ], }, {
[No CFG could be retrieved]
Create a list of routes and routes for a given node. is a base class for all components.
This makes the whole sidebar wider. Also, overrides can override `styles` / `props` only?
@@ -52,7 +52,7 @@ def img_conv_group(input, conv_act=None, param_attr=None, conv_with_batchnorm=False, - conv_batchnorm_drop_rate=None, + conv_batchnorm_drop_rate=0, pool_stride=1, pool_type=None): """
[sequence_conv_pool->[sequence_pool,sequence_conv],dot_product_attention->[reshape,matmul],simple_img_conv_pool->[pool2d,conv2d],img_conv_group->[__extend_list__->[hasattr,len],__extend_list__,len,dropout,isinstance,xrange,pool2d,batch_norm,conv2d,abs],glu->[split,elementwise_mul,sigmoid]]
Image Convolution Group used for vgg net. Get the sequence number of the last node in the pool.
0 => 0.0?
@@ -726,11 +726,12 @@ OSSL_CMP_MSG *ossl_cmp_genp_new(OSSL_CMP_CTX *ctx, OSSL_CMP_PKIBODY_GENP, CMP_R_ERROR_CREATING_GENP); } -OSSL_CMP_MSG *ossl_cmp_error_new(OSSL_CMP_CTX *ctx, OSSL_CMP_PKISI *si, - int errorCode, - const char *details, int unprotected) +OSSL_CMP_MSG *ossl_cmp_error_new(OSSL_CMP_CTX *ctx, const OSSL_CMP_PKISI *si, + int64_t errorCode, const char *details, + int unprotected) { OSSL_CMP_MSG *msg = NULL; + const char *lib = NULL, *reason = NULL; OSSL_CMP_PKIFREETEXT *ft; if (!ossl_assert(ctx != NULL && si != NULL))
[ossl_cmp_pkiconf_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],ossl_cmp_pollReq_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],OSSL_CMP_MSG_read->[OSSL_CMP_MSG_new,OSSL_CMP_MSG_free],ossl_cmp_rr_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],ossl_cmp_pollRep_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],ossl_cmp_msg_gen_push1_ITAVs->[ossl_cmp_msg_gen_push0_ITAV],ossl_cmp_rp_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],ossl_cmp_certrep_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],OSSL_CMP_MSG->[ossl_cmp_msg_create,ossl_cmp_msg_gen_push1_ITAVs,OSSL_CMP_MSG_free],ossl_cmp_certConf_new->[ossl_cmp_msg_create,ossl_cmp_certstatus_set0_certHash,OSSL_CMP_MSG_free],ossl_cmp_error_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free],ossl_cmp_msg_create->[OSSL_CMP_MSG_new,ossl_cmp_msg_set_bodytype,OSSL_CMP_MSG_free],ossl_cmp_msg_gen_push0_ITAV->[ossl_cmp_msg_get_bodytype],ossl_cmp_certreq_new->[ossl_cmp_msg_create,OSSL_CMP_MSG_free,OSSL_CMP_CTX_setup_CRM]]
Ossl_CMP_MSG ossl_cmp_msg_new ossl_ free a .
don't you have the openssl error code? are these variables really needed? if the answer to one is no, you should change that.
@@ -97,10 +97,6 @@ func setupServer(t *testing.T, serverCreator func(http.Handler) *httptest.Server return server, port } -func tcpMonitorChecks(host string, ip string, port uint16, status string) mapval.Validator { - return hbtest.BaseChecks(ip, status, "tcp") -} - func TestUpEndpointJob(t *testing.T) { server, port := setupServer(t, httptest.NewServer) defer server.Close()
[CertToTempFile,Compose,HelloWorldHandler,AvailableTCP4Port,NewConfigFrom,Test,RespondingTCPChecks,Close,Hostname,SimpleURLChecks,ServerPort,LookupHost,SplitHostPort,WrapCommon,SummaryChecks,ParseCertificate,Len,MustCompile,TLSChecks,Equal,BaseChecks,Name,Strict,Remove,Accept,ErrorChecks,NoError,Read,ParseUint,Write,Listen,Sprintf,Addr,String,Parse]
job creates a job that checks the and checks the tcp port. TestTLSConnection tests the connection of a network network connection with a TLS server.
This helper function had a number of unused arguments and was more confusing than helpful.
@@ -294,6 +294,9 @@ public class MenuTree { .addChild(new MenuItem("Action Chains").withPrimaryUrl("/rhn/schedule/ActionChains.do") .withAltUrl("/rhn/schedule/ActionChain.do")) .addChild(new MenuItem("Recurring States").withPrimaryUrl("/rhn/manager/schedule/recurring-states")) + .addChild(new MenuItem("Maintenance Windows").withDir("/rhn/manager/schedule/maintenance") + .addChild(new MenuItem("Schedules").withPrimaryUrl("/rhn/manager/schedule/maintenance/schedules")) + .addChild(new MenuItem("Calendars").withPrimaryUrl("/rhn/manager/schedule/maintenance/calendars"))) ); // Users
[MenuTree->[getActiveNode->[getActiveNode],getJsonMenu->[getMenuTree],MenuItemList->[add->[add]]]]
This method returns the menu tree of the menu items. Menu items with visibility Menu items for the Unentitled menu Menu items for the administration menu. Menu items for the provisioning menu Menu items for the COBBler administration.
Just questioning to make sure: all users are allowed to reach these pages right? If so, then this is fine.
@@ -98,4 +98,9 @@ class Rose(Package): '--enable-tutorial-directory={0}'.format('no'), ] + @property + def build_directory(self): + return 'rose-build' + install_targets = ["install-core"] +
[Rose->[languages->[list,filter],autoreconf->[which,bash],configure_args->[format,join],variant,depends_on,version,patch]]
Configure the command line arguments for the .
This blank line at the end of the file will cause the flake8 tests to crash.
@@ -80,9 +80,9 @@ public class UnorderedDistributionInterceptor extends NonTxDistributionIntercept List<Address> owners = cacheTopology.getDistribution(command.getKey()).writeOwners(); if (owners.contains(rpcManager.getAddress())) { return invokeNextAndHandle( ctx, command, (rCtx, rCommand, rv, throwable) -> { - Object remoteInvocation = invokeRemotelyAsync(owners, rCtx, (WriteCommand) rCommand); + CompletionStage<?> remoteInvocation = invokeRemotelyAsync(owners, rCtx, (WriteCommand) rCommand); if (remoteInvocation != null) { - return ((CompletionStage<?>) remoteInvocation).handle((responses, t) -> rv); + return remoteInvocation.thenApply(responses -> rv); } return rv; });
[UnorderedDistributionInterceptor->[visitReadWriteKeyCommand->[handleDataWriteCommand],start->[isReplicated],invokeRemotelyAsync->[isSuccessful,isOriginLocal,size,invokeCommand,ignoreLeavers,isSynchronous,sendToMany,invokeCommandOnAll,getSyncRpcOptions],handleDataWriteCommand->[size,invokeCommand,invokeNextAndHandle,handle,tracef,getTopologyId,isSynchronous,sendToMany,getAddress,getSyncRpcOptions,isSuccessful,isOriginLocal,hasFlag,invokeRemotelyAsync,ignoreLeavers,getCacheTopology,invokeNext,contains,writeOwners],visitPutKeyValueCommand->[handleDataWriteCommand],getLog]]
Handles a data write command. if command is not found in the list of owned commands send it to all owned commands.
doesn't this imply that futures chained to the returned stage will not be executed (if remoteInvocation completes exceptionally) whereas before with handle they would always run?
@@ -71,9 +71,16 @@ class MessengerManager(ChatServiceManager): """ if 'models' in self.opt and self.should_load_model: model_params = {} + model_info = {} for model in self.opt['models']: model_opt = self.opt['models'][model] + overrides = model_opt.get('overrides', {}) + if type(overrides) is list: + print("Got list overrides!") + model_opt['overrides'] = overrides[0] model_params[model] = create_agent(model_opt).share() + model_info[model] = {'overrides': overrides} + self.runner_opt['model_info'] = model_info self.runner_opt['shared_bot_params'] = model_params def _init_logs(self):
[MessengerManager->[setup_socket->[get_app_token],setup_server->[setup_server]]]
Load the model if necessary.
nit: you left a print in
@@ -121,7 +121,7 @@ REQUIRED_TEST_PACKAGES = [ ] GCP_REQUIREMENTS = [ - 'google-apitools>=0.5.10,<=0.5.11', + 'google-apitools>=0.5.10,<=0.5.20', 'proto-google-cloud-datastore-v1>=0.90.0,<=0.90.4', 'googledatastore==7.0.1', 'google-cloud-pubsub==0.26.0',
[get_version->[open,exec],generate_protos_first->[cmd->[run->[super,generate_proto_files]],warn],find_packages,get_distribution,system,generate_protos_first,get_version,format,warn,cythonize,setup,StrictVersion]
A function to provide a function that can be used to provide a number of int64_ Command that generates proto files for a given package.
Have you tested this by running a job on Dataflow? This is a little scary because the latest version of apitools on github is 0.5.14. @craigcitro @charlesccychen @chamikaramj (FYI, in case anybody is aware of a known incompatibility.)
@@ -2522,10 +2522,10 @@ dc_epoch_op(daos_handle_t coh, crt_opcode_t opc, daos_epoch_t *epoch, if (rc != 0) goto out; - D_DEBUG(DF_DSMC, DF_CONT": op=%u; hdl="DF_UUID"; epoch="DF_U64"\n", + D_DEBUG(DF_DSMC, DF_CONT": op=%u; hdl="DF_UUID";\n", DP_CONT(arg.eoa_req.cra_pool->dp_pool_hdl, arg.eoa_req.cra_cont->dc_uuid), opc, - DP_UUID(arg.eoa_req.cra_cont->dc_cont_hdl), *epoch); + DP_UUID(arg.eoa_req.cra_cont->dc_cont_hdl)); in = crt_req_get(arg.eoa_req.cra_rpc); if (opc != CONT_SNAP_CREATE)
[No CFG could be retrieved]
This function is called to check if the request is complete and if so determine if the request region tse_task_t.
This change is needed to avoid printing epoch which is an input variable, and is unassigned on the heap of a caller.
@@ -3,8 +3,9 @@ require 'rails_helper' describe 'two_factor_authentication/shared/max_login_attempts_reached.html.slim' do context 'locked out account' do it 'includes localized error message with time remaining' do - @user_decorator = instance_double(UserDecorator) - allow(@user_decorator).to receive(:lockout_time_remaining_in_words).and_return('1000 years') + user_decorator = instance_double(UserDecorator) + allow(view).to receive(:decorated_user).and_return(user_decorator) + allow(user_decorator).to receive(:lockout_time_remaining_in_words).and_return('1000 years') render
[context,to,include,describe,instance_double,t,it,require,and_return]
It displays a description of the neccesary conditions for the lockout time remaining in.
ha what!! where does 1000 years come from?