patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -43,6 +43,8 @@ module.exports = { { test: /\.eot(\?\S*)?$/, loader: 'url-loader', }, { test: /\.ttf(\?\S*)?$/, loader: 'url-loader', }, { test: /\.svg(\?\S*)?$/, loader: 'url-loader', }, { + test: /\.png(\?\S*)?$/, loader: 'url-loader', }, { + test: /\.jpg(\?\S*)?$/, loader: 'url-loader', }, { test: /\.json$/, loader: 'json-loader' }, ], } }
[No CFG could be retrieved]
The default loader for the images.
perhaps this is better in the future if configurable by admin?
@@ -367,7 +367,8 @@ namespace System.Windows.Forms { if (columnIndex < 0 || columnIndex >= MAX_DAYS || - columnIndex >= ColumnCount) + columnIndex >= ColumnCount || + parentAccessibleObject == null) { return null; }
[MonthCalendar->[MonthCalendarAccessibleObject->[GetPropertyValue->[GetPropertyValue],GetCalendarGridInfoText->[GetCalendarGridInfoText],FragmentNavigate->[FragmentNavigate],IsPatternSupported->[IsPatternSupported],GetCalendarGridInfo->[GetCalendarGridInfo],AccessibleObject->[GetCalendarGridInfo],ElementProviderFromPoint->[ElementProviderFromPoint],GetCalendarPartRectangle->[GetCalendarGridInfo]]]]
This method returns a calendar cell accessible object.
This is the cause of #3034 and #2475 issues.
@@ -128,7 +128,8 @@ def display_path(path): if possible.""" path = os.path.normcase(os.path.abspath(path)) if sys.version_info[0] == 2: - path = path.decode(sys.getfilesystemencoding(), 'replace') + fs_enc = sys.getfilesystemencoding() or 'utf-8' + path = path.decode(fs_enc, 'replace') path = path.encode(sys.getdefaultencoding(), 'replace') if path.startswith(os.getcwd() + os.path.sep): path = '.' + path[len(os.getcwd()):]
[dist_in_site_packages->[normalize_path],has_leading_dir->[split_leading_dir],get_installed_distributions->[editables_only_test->[dist_is_editable],editable_test->[dist_is_editable],user_test,editables_only_test,local_test,editable_test],captured_output->[from_stream],dist_in_usersite->[normalize_path],unzip_file->[ensure_dir,has_leading_dir,split_leading_dir,current_umask],captured_stdout->[captured_output],splitext->[splitext],is_local->[normalize_path],dist_is_local->[is_local],get_terminal_size->[ioctl_GWINSZ],untar_file->[ensure_dir,has_leading_dir,split_leading_dir,current_umask],unpack_file->[untar_file,unzip_file,file_contents,is_svn_page],rmtree->[rmtree],dist_location->[egg_link_path]]
Gives the display value for a given path making it relative to cwd if possible.
Why fall back to UTF-8 here but not above?
@@ -450,6 +450,10 @@ class User < ApplicationRecord search_score end + def rate_limiter + RateLimitChecker.new(self) + end + private def estimate_default_language
[User->[check_for_username_change->[path],send_welcome_notification->[send_welcome_notification],blocked_by?->[blocking?],auditable?->[any_admin?],blocking?->[blocking?],resave_articles->[path]]]
This method returns an estimate of the hotness score of the user.
Made this public bc I think we could refactor other parts of the code like in the application controller `current_user.rate_limiter` rather than `RateLimitChecker.new(current_user)`
@@ -298,12 +298,13 @@ export class AmpList extends AMP.BaseElement { /** * @private + * @return {!Promise} */ maybeResizeListToFitItems_() { if (this.loadMoreEnabled_) { - this.attemptToFitLoadMore_(dev().assertElement(this.container_)); + return this.attemptToFitLoadMore_(dev().assertElement(this.container_)); } else { - this.attemptToFit_(dev().assertElement(this.container_)); + return this.attemptToFit_(dev().assertElement(this.container_)); } }
[AmpList->[undoLayout_->[FLEX_ITEM,RESPONSIVE,parseLayout,FLUID,FIXED_HEIGHT,getLayoutClass,FIXED,devAssert,setStyles,INTRINSIC],ssrTemplate_->[setupJsonFetchInit,fetchOpt,dict,userAssert,user,requestForBatchFetch,setupAMPCors,xhrUrl,setupInput],getPolicy_->[getSourceOrigin,ALL,OPT_IN],fetchListAndAppend_->[resolve],constructor->[templatesFor],setLoadMore_->[setStyles],changeToLayoutContainer_->[CONTAINER,resolve,toggle],updateBindings_->[some,resolve,isArray,hasAttribute,user,querySelector,rescan,updateWith,bindForDocOrNull],diff_->[setDOM,dev,length],doRenderPass_->[then,resolver,append,data,rejecter,devAssert,scheduleNextPass,payload],addElementsToContainer_->[setAttribute,hasAttribute,dev,appendChild,forEach],mutatedAttributesCallback->[isArray,dev,userAssert,getMode,renderLocalData],attemptToFit_->[CONTAINER,/*OK*/],attemptToFitLoadMoreElement_->[CONTAINER,dev,/*OK*/,setStyles],buildCallback->[viewerForDoc,user,bindForDocOrNull,scopedQuerySelector],prepareAndSendFetch_->[getViewerAuthTokenIfAvailable],isAmpStateSrc_->[startsWith,isExperimentOn],computeListItems_->[userAssert,user,getValueForExpr,isArray],truncateToMaxLen_->[slice,length,parseInt],layoutCallback->[dev],render_->[removeChildren,dev,hasChildNodes,resetPendingChangeSize,DOM_UPDATE,createCustomEvent],maybeResizeListToFitItems_->[dev],manuallyDiffElement_->[nodeName,getAttribute,some,hasAttribute,setAttribute,classList,devAssert,parentElement,startsWith],maybeLoadMoreItems_->[dev,bottom],fetchList_->[catch,then,resolve],isTabbable_->[matches],maybeRenderLoadMoreTemplates_->[resolve,all,push],isLayoutSupported->[isLayoutSizeDefined],maybeRenderLoadMoreElement_->[dev,removeChildren,resolve,appendChild],resetIfNecessary_->[dev,removeChildren,toArray],markContainerForDiffing_->[forEach,querySelectorAll,String,markElementForDiffing],loadMoreCallback_->[dev,resolve,tryFocus],firstTabbableChild_->[scopedQuerySelector],getAmpStateJson_->[getState,userAssert,slice,length,bindForDocOrNull],triggerFetchErrorEve
nt_->[trigger,dict,actionServiceForDoc,response,LOW,createCustomEvent],showFallback_->[childElementByAttr],adjustContainerForLoadMoreButton_->[dev,setStyles,px],initializeLoadMoreElements_->[toggle,listen],fetch_->[batchFetchJsonFor],createContainer_->[setAttribute],updateLoadMoreSrc_->[getValueForExpr],handleLoadMoreFailed_->[dev],lastTabbableChild_->[scopedQuerySelectorAll,length],maybeSetLoadMore_->[resolve],BaseElement],registerElement,extension]
Resizes the list to fit items if necessary.
Optional: Do we ever use this return value? If not, it might be simpler (if less consistent) to leave `attemptToFitLoadMore_` and its siblings as void return. E.g. readers won't need to trace the return value when reading `attemptToFitLoadMore_` and siblings.
@@ -752,11 +752,11 @@ class Assignment < Assessment def get_num_marked(ta_id = nil) if ta_id.nil? - results_join = groupings.left_outer_joins(:current_result) - num_incomplete = results_join.where('results.id': nil) - .or(results_join.where('results.marking_state': 'incomplete')) - .count - get_num_assigned - num_incomplete + results_join = groupings.includes(:current_result).where('groupings.is_collected': true) + num_complete = results_join.where('results.id': nil) + .or(results_join.where('results.marking_state': 'complete')) + .count + num_complete else if is_criteria_mark?(ta_id) n = 0
[Assignment->[to_json->[to_json],summary_json->[group_by],get_marks_list->[max_mark],group_by->[group_assignment?],current_submission_data->[group_by],average_annotations->[get_num_marked,get_num_annotations],percentage_grades_array->[calculate_total_percent],get_num_marked->[is_criteria_mark?,get_num_assigned],summary_csv->[max_mark],past_all_collection_dates?->[past_collection_date?],zip_automated_test_files->[autotest_files_dir],create_autotest_dirs->[autotest_files_dir,autotest_path],reset_collection_time->[reset_collection_time],to_xml->[to_xml],grouping_past_due_date?->[past_all_due_dates?]]]
get number of marked items.
You don't need a separate variable here; this entire block of code can be a single query (which you just return the value of)
@@ -166,10 +166,16 @@ export function installServiceInEmbedScope(embedWin, id, service) { 'Service override can only be installed in embed window: %s', id); dev().assert(!getLocalExistingServiceForEmbedWinOrNull(embedWin, id), 'Service override has already been installed: %s', id); - getServiceInternal(embedWin, embedWin, id, undefined, () => service); + registerServiceInternal( + embedWin, + embedWin, + id, + /* opt_ctor */ undefined, + () => service); + // Force service to build + getServiceInternal(embedWin, embedWin, id); } - /** * @param {!Window} embedWin * @param {string} id
[No CFG could be retrieved]
Returns a service object for the given id in the given window. This is the main entry point for the function. It is called by the window.
This can be reverted. Let's address the future of the convenience builder separately.
@@ -77,9 +77,10 @@ export class AmpForm { * @private */ handleSubmit_(e) { - if (e.defaultPrevented) { + if (e.defaultPrevented || this.state_ == FormState_.SUBMITTING) { return; } + if (this.xhrAction_) { e.preventDefault(); this.setState_(FormState_.SUBMITTING);
[No CFG could be retrieved]
Construct a class that represents the form element. Adds proper classes for the form that are in the submission queue.
Don't you need to call `preventDefault`?
@@ -0,0 +1,17 @@ +class TechSupportPolicy < Struct.new(:user, :tech_support) + def index? + user.admin? || user.tech? + end + + def search? + user.admin? || user.tech? + end + + def show? + user.admin? || user.tech? + end + + def reset? + user.admin? || user.tech? + end +end
[No CFG could be retrieved]
No Summary Found.
This is not related to authentication. Do we know for sure we'll be needing a tech support role?
@@ -30,8 +30,8 @@ namespace Microsoft.Xna.Framework public static float PerpendicularDistance(ref Vector3 point, ref Plane plane) { // dist = (ax + by + cz + d) / sqrt(a*a + b*b + c*c) - return (float)Math.Abs((plane.Normal.X * point.X + plane.Normal.Y * point.Y + plane.Normal.Z * point.Z) - / Math.Sqrt(plane.Normal.X * plane.Normal.X + plane.Normal.Y * plane.Normal.Y + plane.Normal.Z * plane.Normal.Z)); + return Math.Abs((plane.Normal.X * point.X + plane.Normal.Y * point.Y + plane.Normal.Z * point.Z) + / MathF.Sqrt(plane.Normal.X * plane.Normal.X + plane.Normal.Y * plane.Normal.Y + plane.Normal.Z * plane.Normal.Z)); } }
[Plane->[GetHashCode->[GetHashCode],Transform->[Transform],Intersects->[Intersects],PlaneIntersectionType->[DotCoordinate,Intersects],Equals->[Equals]]]
Returns the perpendicular distance between the given point and the given plane.
Precision lost! The division was done with double precision because there was no cast to float in front of Math.Sqrt().
@@ -70,8 +70,7 @@ class Gcc(AutotoolsPackage): depends_on("gmp") depends_on("mpc", when='@4.5:') depends_on("isl", when='@5.0:') - depends_on("binutils~libiberty", when='+binutils ~gold') - depends_on("binutils~libiberty+gold", when='+binutils +gold') + depends_on("binutils~libiberty", when='+binutils') # TODO: integrate these libraries. # depends_on("ppl")
[Gcc->[write_rpath_specs->[join_path,gcc,write,set_install_permissions,Executable,startswith,format,warn,closing,open],spec_dir->[glob],configure_args->[join_path,satisfies,which,isfile,extend,filter_file,set,cp,join,add,mkdirp],depends_on,patch,version,provides,variant,run_after]]
Get version of all packages. Configure the header file.
Remind me again why we even depend on binutils? Maybe it should be a build deptype??
@@ -45,7 +45,7 @@ using System.Runtime.InteropServices; // to distinguish one build from another. AssemblyFileVersion is specified // in AssemblyVersionInfo.cs so that it can be easily incremented by the // automated build process. -[assembly: AssemblyVersion("2.0.0.2821")] +[assembly: AssemblyVersion("2.0.0.2906")] // By default, the "Product version" shown in the file properties window is
[Satellite]
Assigns the given type to the assembly.
would you mind removing this file from your PR?
@@ -522,7 +522,9 @@ void Planner::check_axes_activity() { } #if PLANNER_LEVELING - + /** + * lx, ly, lz - cartesian positions in mm + */ void Planner::apply_leveling(float &lx, float &ly, float &lz) { #if HAS_ABL
[No CFG could be retrieved]
Private methods - Calculates the number of non - terminal ethernet registers and writes the appropriate values region Private functions Inverse of the 3x3 matrix.
Not just cartesian, but "logical" (hence the "L" in `lx`, `ly`, `lz`).
@@ -1297,7 +1297,7 @@ public class Upgrade410to420 implements DbUpgrade { s_logger.debug("Index already exists on host_details - not adding new one"); } else { // add the index - try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER IGNORE TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)");) { + try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)");) { pstmtUpdate.executeUpdate(); s_logger.debug("Index did not exist on host_details - added new one"); }catch (SQLException e) {
[Upgrade410to420->[upgradeVmwareLabels->[getNewLabel]]]
Adds the host_details index if it does not exist.
@wido the index is not added, if you look at the code above ^^ the if statement to check if index already exists with given key name and it only adds the index if it does not exist otherwise skips.
@@ -881,8 +881,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (state != State.Stopped) { - s_logger.debug("VM " + vm + " is not in a state to be started: " + state); - return null; + s_logger.warn("VM " + vm + " is not in a state to be started: " + state); + throw new CloudRuntimeException(String.format("Cannot start VM: %s in %s state", vm, state)); } }
[VirtualMachineManagerImpl->[prepareVmStorageMigration->[stateTransitTo],migrateWithStorage->[expunge],orchestrateStart->[areAffinityGroupsAssociated,orchestrateStart,changeState,setupAgentSecurity,getVmGuru,changeToStartState,findById],replugNic->[findById],migrateVmForScaleThroughJobQueue->[VmJobVirtualMachineOutcome],findClusterAndHostIdForVmFromVolumes->[findById],upgradeVmDb->[findById],scanStalledVMInTransitionStateOnUpHost->[handlePowerOnReportWithNoPendingJobsOnVM,handlePowerOffReportWithNoPendingJobsOnVM,findById],destroy->[doInTransactionWithoutResult->[stateTransitTo],advanceStop],removeNicFromVm->[expunge],updateDefaultNicForVM->[createPlaceHolderWork,expunge],start->[start],restoreVirtualMachineThroughJobQueue->[VmJobVirtualMachineOutcome],findClusterAndHostIdForVm->[findClusterAndHostIdForVmFromVolumes,findById,findClusterAndHostIdForVm],migrateVmStorageThroughJobQueue->[checkConcurrentJobsPerDatastoreThreshhold,VmJobVirtualMachineOutcome,findById],changeToStartState->[checkWorkItems],orchestrateAddVmToNetwork->[orchestrateAddVmToNetwork,findById],createMappingVolumeAndStoragePool->[createMappingVolumeAndStoragePool],findById->[findById],migrateVmAwayThroughJobQueue->[VmStateSyncOutcome],restoreVirtualMachine->[createPlaceHolderWork,expunge,findById],handleVmWorkJob->[handleVmWorkJob],getCandidateStoragePoolsToMigrateLocalVolume->[isStorageCrossClusterMigration],migrate->[getExecuteInSequence,expunge,changeState,cleanup,checkVmOnHost,getVmGuru,getVlanToPersistenceMapForVM,stateTransitTo],orchestrateStop->[orchestrateStop,findById,advanceStop],orchestrateMigrateForScale->[getExecuteInSequence,toVmTO,stateTransitTo,changeState,cleanup,getVmGuru,getVlanToPersistenceMapForVM,orchestrateMigrateForScale,findById,checkVmOnHost],orchestrateUpdateDefaultNic->[orchestrateUpdateDefaultNicForVM,findById],addExtraConfig->[addExtraConfig],unplugNic->[findById,getVlanToPersistenceMapForVM],orchestrateReConfigureVm->[findById,upgradeVmDb],stopVmThroughJobQueue->[VmSta
teSyncOutcome],migrateForScale->[expunge],addVmToNetworkThroughJobQueue->[VmJobVirtualMachineOutcome],orchestrateStorageMigration->[orchestrateStorageMigration,findById,stateTransitTo],startVmThroughJobQueue->[VmStateSyncOutcome],orchestrateRemoveVmFromNetwork->[findById,toNicTO,orchestrateRemoveVmFromNetwork],orchestrateRestoreVirtualMachine->[restoreVirtualMachine,findById,orchestrateRestoreVirtualMachine],advanceReboot->[expunge],orchestrateMigrateAway->[migrate,orchestrateMigrateAway,findById,advanceStop],CleanupTask->[runInContext->[cleanup]],cleanup->[sendStop,getControlNicIpForVM,getExecuteInSequence,getVlanToPersistenceMapForVM],storageMigration->[expunge],orchestrateRemoveNicFromVm->[findById,toNicTO,orchestrateRemoveNicFromVm],migrateVmWithStorageThroughJobQueue->[VmStateSyncOutcome],allocate->[doInTransactionWithoutResult->[allocate],allocate],handlePowerOnReportWithNoPendingJobsOnVM->[stateTransitTo],getVmTO->[toVmTO],toNicTO->[toNicTO],scanStalledVMInTransitionStateOnDisconnectedHosts->[findById],reconfigureVmThroughJobQueue->[VmJobVirtualMachineOutcome],reConfigureVm->[expunge],getVlanToPersistenceMapForVM->[updatePersistenceMap],migrateVmThroughJobQueue->[VmStateSyncOutcome],advanceExpunge->[advanceExpunge,expungeCommandCanBypassHostMaintenance,getVmGuru],updateDefaultNicForVMThroughJobQueue->[VmJobVirtualMachineOutcome],moveVmOutofMigratingStateOnSuccess->[changeState],orchestrateMigrateWithStorage->[moveVmToMigratingState,stateTransitTo,cleanup,getVmGuru,moveVmOutofMigratingStateOnSuccess,createMappingVolumeAndStoragePool,orchestrateMigrateWithStorage,findById,checkVmOnHost],plugNic->[findById],handlePowerOffReportWithNoPendingJobsOnVM->[sendStop,releaseVmResources,stateTransitTo,getVmGuru],unmanageVMVolumes->[removeDynamicTargets,getTargets],unmanage->[doInTransaction->[getVmGuru]],sendStop->[getExecuteInSequence,getVolumesToDisconnect],orchestrateMigrate->[migrate,orchestrateMigrate,findById],removeNicFromVmThroughJobQueue->[VmJobVirtualMachineOut
come],removeVmFromNetworkThroughJobQueue->[VmJobVirtualMachineOutcome],moveVmToMigratingState->[changeState],orchestrateReconfigure->[reConfigureVm,findById],advanceStop->[getExecuteInSequence,releaseVmResources,expunge,changeState,cleanup,getVmGuru,getVlanToPersistenceMapForVM],VmJobVirtualMachineOutcome->[retrieve->[findById],checkCondition->[findById]],processAnswers->[syncVMMetaData],migrateThroughHypervisorOrStorage->[afterHypervisorMigrationCleanup,storageMigration,attemptHypervisorMigration],advanceStart->[advanceStart,expunge],isRootVolumeOnLocalStorage->[findById],VmStateSyncOutcome->[retrieve->[findById],checkCondition->[findById]],rebootVmThroughJobQueue->[VmJobVirtualMachineOutcome],HandlePowerStateReport->[findById],orchestrateUpdateDefaultNicForVM->[findById],orchestrateReboot->[getExecuteInSequence,orchestrateReboot,findById],migrateAway->[expunge],findHostAndMigrate->[findById],addVmToNetwork->[expunge],checkIfCanUpgrade->[isVirtualMachineUpgradable],isVirtualMachineUpgradable->[isVirtualMachineUpgradable]]]
Method to transition a VM into the starting state. Checks if there are any pending work items in the VM. If so it throws an exception.
We could unify these messages in a String var (with String.format).
@@ -474,9 +474,10 @@ func processRequest(r *http.Request, p processor.Processor, config transform.Con return cannotDecodeResponse(err) } - tctx := transform.Context{ - Config: config, - Metadata: *metadata, + tctx := &transform.Context{ + RequestTime: requestTime, + Config: config, + Metadata: *metadata, } if err = report(r.Context(), pendingReq{transformables: transformables, tcontext: tctx}); err != nil {
[Handle,Nanoseconds,Context,Value,Commit,WriteHeader,RemoteAddr,HandlerFunc,DecodeLimitJSONData,ConstantTimeCompare,Now,Wrap,Inc,With,NewLimiter,Add,Allow,Set,GetDefaultVersion,Handler,Error,Format,Errorw,Marshal,Limit,ServeHTTP,NotFound,New,Infow,BuildTime,NewV4,Validate,WithValue,NewServeMux,Errorf,memoizedSmapMapper,DecodeUserData,MustCompile,After,Sub,Infof,NewLogger,Contains,isEnabled,WithContext,Get,Split,Header,Write,Sprintf,Decode,NewInt,NewRecordingResponseWriter,Glob,NewRegistry,ProcessorHandler,DecodeSystemData]
processRequestHandler returns a handler that serves the given request with the given header. response returns a response object.
You could move fetching the requestTime here.
@@ -352,9 +352,6 @@ class Addon(OnChangeMixin, ModelBase): whiteboard = models.TextField(blank=True) - # Whether the add-on is listed on AMO or not. - is_listed = models.BooleanField(default=True, db_index=True) - is_experimental = models.BooleanField(default=False, db_column='experimental')
[watch_disabled->[Addon],AddonManager->[enabled->[get_queryset],valid->[get_queryset],__init__->[__init__],valid_and_disabled_and_pending->[get_queryset],listed->[get_queryset],id_or_slug->[get_queryset],featured->[get_queryset],public->[get_queryset]],Addon->[can_request_review->[find_latest_version],get_required_metadata->[find_latest_version_including_rejected],transformer->[attach_related_versions,attach_static_categories,attach_previews,attach_listed_authors],type_url->[get_type_url],authors_other_addons->[listed,valid],initialize_addon_from_upload->[save,Addon],clean_slug->[clean_slug],update_version->[find_latest_public_listed_version,find_latest_version],increment_theme_version_number->[save,find_latest_version],should_redirect_to_submit_flow->[has_complete_metadata,find_latest_version],__new__->[__new__],update_status->[update_version,find_latest_version],create_addon_from_upload_data->[initialize_addon_from_upload],can_review->[has_author],save->[clean_slug],icon_url->[get_icon_url],delete->[is_soft_deleteable],from_upload->[initialize_addon_from_upload,from_upload],latest_unlisted_version->[find_latest_version],all_dependencies->[valid],AddonManager],AddonQuerySet->[valid_q->[q]],Persona->[theme_data->[get_url_path,hexcolor],thumb_path->[_image_path,is_new],footer_path->[_image_path],preview_url->[_image_url,is_new],header_path->[_image_path],icon_path->[_image_path,is_new],header_url->[_image_url],icon_url->[_image_url,is_new],preview_path->[_image_path,is_new],authors_other_addons->[valid],thumb_url->[_image_url,is_new],footer_url->[_image_url]],Preview->[image_url->[_image_url],thumbnail_url->[_image_url],image_path->[_image_path],thumbnail_path->[_image_path]],watch_status->[find_latest_version],delete_preview_files->[delete]]
Creates a Field in the model that represents the object that holds the add - on. This function is used to filter the objects in the database by the object.
We should file an issue to get rid of the column asynchronously. I've checked and dev/stage/prod ~~(presumably prod too, but that needs to be verified before merging)~~ do have a in-database default for this field, so it's fine to remove from the models and do the migration later.
@@ -846,6 +846,16 @@ daos_prop_dup(daos_prop_t *prop, bool pool) return NULL; } break; + case DAOS_PROP_PO_ACL: + case DAOS_PROP_CO_ACL: + entry_dup->dpe_val_ptr = (void *)daos_acl_copy( + (struct daos_acl *)entry->dpe_val_ptr); + if (entry_dup->dpe_val_ptr == NULL) { + D_ERROR("failed to dup ACL\n"); + daos_prop_free(prop_dup); + return NULL; + } + break; default: entry_dup->dpe_val = entry->dpe_val; break;
[daos_sgls_buf_size->[daos_sgl_buf_size],int->[daos_sgl_init],daos_prop_dup->[daos_prop_free,daos_prop_alloc,daos_prop_valid],daos_sgls_packed_size->[daos_sgls_buf_size],daos_prop_copy->[daos_prop_entry_get]]
duplicate a property.
minor, daos_acl_copy will allocated new acl struct and copy, so daos_acl_dup() probably is a better name. (like strdup and strcpy's difference).
@@ -112,6 +112,10 @@ public class BucketAssignFunction<K, I, O extends HoodieRecord<?>> final HoodieKey hoodieKey = record.getKey(); final BucketInfo bucketInfo; final HoodieRecordLocation location; + if (!allPartitionsLoaded && !partitionLoadState.contains(hoodieKey.getPartitionPath())) { + // If the partition records are never loaded, load the records first. + loadRecords(hoodieKey.getPartitionPath()); + } // Only changing records need looking up the index for the location, // append only records are always recognized as INSERT. if (isChangingRecords && this.indexState.contains(hoodieKey)) {
[BucketAssignFunction->[notifyCheckpointComplete->[reset,refreshTable],open->[BucketAssigner,HoodieFlinkEngineContext,getHadoopConf,getRuntimeContext,getHoodieClientConfig,SerializableConfiguration,FlinkTaskContextSupplier,open],initializeState->[getMapState,of],processElement->[addInsert,addUpdate,HoodieRecordLocation,AssertionError,getKey,setCurrentLocation,getFileId,getFileIdPrefix,contains,seal,collect,getBucketType,getPartitionPath,put,unseal],fromValue,isChangingRecords,getString]]
Process an element in the Hoodie index.
The `allPartitionsLoaded` member variable seems redundant, can we only use `partitionLoadState`?
@@ -4251,13 +4251,14 @@ const ( CtxAllowNameKey CtxAllowNameKeyType = iota ) -func checkDisallowedPrefixes(ctx context.Context, name string) error { +func checkDisallowedPrefixes( + ctx context.Context, name data.PathPartString) error { for _, prefix := range disallowedPrefixes { - if strings.HasPrefix(name, prefix) { + if strings.HasPrefix(name.Plaintext(), prefix) { if allowedName := ctx.Value(CtxAllowNameKey); allowedName != nil { // Allow specialized KBFS programs (like the kbgit remote // helper) to bypass the disallowed prefix check. - if name == allowedName.(string) { + if name.Plaintext() == allowedName.(string) { return nil } }
[waitForRootBlockFetch->[logIfErr],kickOffPartialMarkAndSweep->[Lock,Unlock,goTracked,doPartialMarkAndSweep],handleTLFBranchChange->[setHeadSuccessorLocked,isUnmergedLocked,id,Lock,handleUnflushedEditNotifications,Unlock,setBranchIDLocked],SetInitialHeadFromServer->[kickOffPartialSyncIfNeeded,kickOffRootBlockFetch,waitForRootBlockFetch,id,startOp,endOp,Lock,validateHeadLocked,identifyOnce,getHead,Unlock,setInitialHeadTrustedLocked],runUnlessShutdown->[newCtxWithFBOID,goTracked],setMtimeLocked->[notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowUnixNano],processMissedLookup->[makeFakeFileEntry,makeFakeDirEntry],setHeadSuccessorLocked->[id,Lock,setHeadLocked,Unlock,setInitialHeadTrustedLocked],kickOffPartialMarkAndSweepIfNeeded->[getProtocolSyncConfigUnlocked,kickOffPartialMarkAndSweep],makeFakeFileEntry->[makeFakeEntryID],NewNotificationChannel->[getEditMonitoringChannel],getUnmergedMDUpdatesLocked->[id,getCurrMDRevision],pathFromNodeForRead->[pathFromNodeHelper],updateLastGetHeadTimestamp->[Lock,Unlock],makeFakeDirEntry->[makeFakeEntryID,nowUnixNano],applyMDUpdates->[Lock,applyMDUpdatesLocked,Unlock],getCachedDirOpsCount->[Lock,Unlock],canonicalPath->[String,pathFromNodeForRead,PathType],onMDFlush->[newCtxWithFBOID,goTracked,handleMDFlush],getHead->[commitHeadLocked,updateLastGetHeadTimestamp],finalizeResolution->[Lock,finalizeResolutionLocked,Unlock],setObfuscatorSecretLocked->[Lock,Unlock],getProtocolSyncConfigUnlocked->[getProtocolSyncConfig],reResolveAndIdentify->[id],commitHeadLocked->[id,goTracked],startMonitorChat->[Lock,Unlock,goTracked],getDirChildren->[pathFromNodeForRead,getMDForReadNeedIdentify],cacheHashBehavior->[id],finalizeMDRekeyWriteLocked->[setHeadSuccessorLocked,Lock,waitForJournalLocked,Unlock,loadCachedMDChanges,setBranchIDLocked],makeRecentFilesSyncConfig->[id],createEntryLocked->[syncDirUpdateOrSignal,checkForUnlinkedDir,id,branch,canonicalPath,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowU
nixNano],handleEditNotifications->[sendEditNotifications,makeEditNotifications],SetInitialHeadToNew->[initMDLocked,startOp,endOp,Lock,identifyOnce,Unlock],makeEncryptedPartialPathsLocked->[id,cacheHashBehavior,getProtocolSyncConfig],maybeUnembedAndPutBlocks->[id],getMDForWriteLockedForFilename->[getMDForWriteOrRekeyLocked],setInitialHeadTrustedLocked->[setHeadLocked],doFastForwardLocked->[setHeadSuccessorLocked,kickOffRootBlockFetch,waitForRootBlockFetch],doPartialMarkAndSweep->[id,startOp,endOp,markRecursive],locallyFinalizeTLF->[setHeadSuccessorLocked,id,Lock,oa,Unlock],doFavoritesOp->[deleteFromFavorites,addToFavoritesByHandle,addToFavorites],blockUnmergedWrites->[Lock],getAndApplyNewestUnmergedHead->[setHeadSuccessorLocked,id,Lock,notifyBatchLocked,Unlock],finalizeResolutionLocked->[newCtxWithFBOID,id,Lock,goTracked,notifyOneOpLocked,finalizeBlocks,handleEditNotifications,handleUnflushedEditNotifications,setHeadConflictResolvedLocked,Unlock,loadCachedMDChanges,setBranchIDLocked],checkNodeForRead->[branch,GetTLFHandle,checkNode],ClearPrivateFolderMD->[id,Lock,Unlock],SetSyncConfig->[newCtxWithFBOID,ctxWithFBOID,kickOffRootBlockFetch,id,Lock,startOp,endOp,getLatestMergedMD,kickOffPartialSync,reResolveAndIdentify,getProtocolSyncConfig,branch,Unlock,goTracked,makeEncryptedPartialPathsLocked,triggerMarkAndSweepLocked],waitForJournalLocked->[id],rekeyLocked->[getAndApplyMDUpdates,finalizeMDRekeyWriteLocked,isUnmergedLocked,startOp,endOp,getMDForRekeyWriteLocked,getHead],removeEntryLocked->[checkForUnlinkedDir,notifyAndSyncOrSignal,unrefEntryLocked],SetEx->[startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,setExLocked],GetEditHistory->[id,getHead],kickOffPartialSync->[Lock,Unlock,goTracked,doPartialSync],notifyOneOpLocked->[id,pathFromNodeForRead,getUnlinkPathBeforeUpdatingPointers,searchForNode],getProtocolSyncConfig->[id],getMDForReadHelper->[getMDForRead],unstageLocked->[getAndApplyMDUpdates,isUnmergedLocked,id,finalizeMDWriteLocked,notifyBatchLocked,
undoUnmergedMDUpdatesLocked,getSuccessorMDForWriteLocked,maybeUnembedAndPutBlocks],checkNodeForWrite->[String,checkNode],receiveNewEditChat->[getEditMonitoringChannel],notifyOneOp->[Lock,Unlock,notifyOneOpLocked],setExLocked->[notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowUnixNano],getMDForReadNeedIdentify->[getMDForReadHelper],getCurrMDRevision->[getCurrMDRevisionLocked],getMDForMigrationLocked->[getMDForWriteOrRekeyLocked],finalizeGCOp->[Lock,finalizeGCOpLocked,Unlock],commitFlushedMD->[id,kickOffPartialSyncIfNeeded,goTracked,kickOffRootBlockFetch],getJournalRevisions->[id,String],Lock->[Lock],setHeadPredecessorLocked->[setHeadLocked],CreateDir->[startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,createEntryLocked],Rename->[renameLocked,startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite],RemoveEntry->[removeEntryLocked,startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename],clearConflictView->[id,Lock,Unlock],getMDForReadNoIdentify->[getMDForReadHelper],makeEditNotifications->[id],kickOffPartialSyncIfNeeded->[getProtocolSyncConfigUnlocked,makeRecentFilesSyncConfig,kickOffPartialSync],getMDForRekeyWriteLocked->[getMDForWriteOrRekeyLocked],Lookup->[transformReadError,startOp,endOp,lookup,checkNodeForRead],notifyAndSyncOrSignal->[syncDirUpdateOrSignal],finalizeBlocks->[id,cacheHashBehavior],pathFromNodeForMDWriteLocked->[pathFromNodeHelper],statEntry->[transformReadError,pathFromNodeForRead,getMDForReadNoIdentify,checkNodeForRead,statUsingFS,getMDForReadNeedIdentify],FolderConflictStatus->[isUnmerged],MigrateToImplicitTeam->[Lock,Unlock,finalizeMDRekeyWriteLocked,getMDForMigrationLocked],getSuccessorMDForWriteLocked->[getSuccessorMDForWriteLockedForFilename],SetMtime->[startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,setMtimeLocked],createLinkLocked->[checkForUnlinkedDir,notifyAndSyncOrSignal,pathFromNodeForMDWriteLo
cked,getMDForWriteLockedForFilename,nowUnixNano],GetTLFHandle->[getHead],applyMDUpdatesLocked->[kickOffPartialSyncIfNeeded,setHeadSuccessorLocked,kickOffRootBlockFetch,isUnmergedLocked,getJournalRevisions,Lock,waitForRootBlockFetch,notifyOneOpLocked,getCurrMDRevisionLocked,Unlock],recomputeEditHistory->[getEditMessages,id],getAndApplyMDUpdates->[id,getLatestMergedRevision],removeDirLocked->[removeEntryLocked,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename],SyncAll->[syncAllLocked,startOp,doMDWriteWithRetryUnlessCanceled,endOp],getUnlinkPathBeforeUpdatingPointers->[pathFromNodeForRead],waitForAndProcessUpdates->[maybeFastForward,getAndApplyMDUpdates,deferLogIfErr],sendEditNotifications->[id,getConvID],Shutdown->[Shutdown],kickOffEditActivityPartialSync->[getProtocolSyncConfigUnlocked,makeRecentFilesSyncConfig,kickOffPartialSync],markRecursive->[markRecursive],Reset->[Lock,Unlock,GetTLFHandle,invalidateAllNodesLocked],registerForUpdatesShouldFireNow->[Lock,Unlock],Write->[startOp,endOp,Write,checkNodeForWrite,signalWrite,getMDForRead],Read->[transformReadError,startOp,endOp,pathFromNodeForRead,checkNodeForRead,Read,getMDForReadNeedIdentify],setHeadLocked->[commitFlushedMD,id,setObfuscatorSecretLocked,getJournalPredecessorRevision,validateHeadLocked,startMonitorChat,goTracked,setBranchIDLocked],oa->[id],GetNodeMetadata->[id,startOp,endOp,oa,statEntry],undoUnmergedMDUpdatesLocked->[getUnmergedMDUpdatesLocked,Lock,id,setHeadPredecessorLocked,setLatestMergedRevisionLocked,undoMDUpdatesLocked,Unlock,setBranchIDLocked],syncAllLocked->[id,startOp,endOp,finalizeMDWriteLocked,branch,startSyncLocked,getSuccessorMDForWriteLocked,getHead],CreateFile->[id,startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,createEntryLocked],PushStatusChange->[PushStatusChange],getLatestMergedMD->[id,getLatestMergedRevision],doPartialSync->[startOp,endOp,syncOneNode],registerAndWaitForUpdates->[ctxWithFBOID,locallyFinalizeTLF,Lock,runUnlessShutdown,Unlock],GetUpdateHistory
->[id,startOp,endOp,oa,String],getTrustedHead->[commitHeadLocked,updateLastGetHeadTimestamp],getUnmergedMDUpdates->[id,Lock,Unlock,getCurrMDRevision],doMDWriteWithRetry->[Lock,Unlock,goTracked,maybeWaitForSquash],handleEditActivity->[id,getEditMessages,initEditChatChannels,getLatestMergedRevision,recomputeEditHistory,kickOffEditActivityPartialSync,String],syncAllUnlocked->[syncAllLocked,Lock,Unlock],getEditMonitoringChannel->[Lock,Unlock],checkForUnlinkedDir->[String],getConvID->[id,Lock,Unlock],lookup->[statUsingFS,getMDForReadNeedIdentify,processMissedLookup],ForceFastForward->[newCtxWithFBOID,kickOffPartialSyncIfNeeded,doFastForwardLocked,id,Lock,Unlock,goTracked],RemoveDir->[removeDirLocked,startOp,endOp,doMDWriteWithRetryUnlessCanceled,RemoveDir,checkNodeForWrite],SyncFromServer->[syncAllUnlocked,getAndApplyMDUpdates,id,startOp,endOp,isUnmerged],monitorEditsChat->[newCtxWithFBOID,Lock,recomputeEditHistory,Unlock,handleEditActivity],maybeFastForward->[kickOffPartialSyncIfNeeded,doFastForwardLocked,isUnmergedLocked,id,Lock,getJournalPredecessorRevision,isUnmerged,Unlock],getMDForWriteOrRekeyLocked->[id,Lock,identifyOnce,setHeadLocked,getTrustedHead,Unlock],registerForUpdates->[id,registerForUpdatesShouldFireNow,startOp,endOp,getLatestMergedRevision],backgroundFlusher->[id,getCachedDirOpsCount,SyncAll,runUnlessShutdown],isUnmerged->[Lock,Unlock],GetDirChildren->[getDirChildren,transformReadError,startOp,endOp,checkNodeForRead],partialMarkAndSweepLoop->[kickOffPartialMarkAndSweepIfNeeded],getSuccessorMDForWriteLockedForFilename->[getMDForWriteLockedForFilename],getRootNode->[startOp,endOp,Lock,getMDForWriteOrRekeyLocked,Unlock,getMDForRead],TeamAbandoned->[newCtxWithFBOID,locallyFinalizeTLF],statUsingFS->[makeFakeFileEntry,makeFakeDirEntry],GetSyncConfig->[id,branch,getHead,getProtocolSyncConfigUnlocked],CreateLink->[startOp,endOp,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,createLinkLocked],newCtxWithFBOID->[ctxWithFBOID],unblockUnmergedWrites->[Unlock],ini
tMDLocked->[cacheHashBehavior,id,Lock,setNewInitialHeadLocked,oa,getHead,Unlock,maybeUnembedAndPutBlocks],doMDWriteWithRetryUnlessCanceled->[doMDWriteWithRetry],getJournalPredecessorRevision->[getJournalRevisions],finalizeMDWriteLocked->[setHeadSuccessorLocked,isUnmergedLocked,id,Lock,goTracked,finalizeBlocks,handleEditNotifications,handleUnflushedEditNotifications,Unlock,loadCachedMDChanges,setBranchIDLocked],makeFakeEntryID->[String],TeamNameChanged->[newCtxWithFBOID,id,Lock,oa,Unlock],initEditChatChannels->[id,String],InvalidateNodeAndChildren->[startOp,endOp],transformReadError->[id,GetTLFHandle],onTLFBranchChange->[newCtxWithFBOID,handleTLFBranchChange,goTracked],finalizeGCOpLocked->[setHeadSuccessorLocked,Lock,finalizeBlocks,getSuccessorMDForWriteLocked,maybeUnembedAndPutBlocks,Unlock,loadCachedMDChanges,setBranchIDLocked],markForReIdentifyIfNeeded->[Lock,Unlock],setNewInitialHeadLocked->[setHeadLocked],identifyOnce->[Lock,Unlock],setHeadConflictResolvedLocked->[setHeadLocked],handleUnflushedEditNotifications->[id,makeEditNotifications],Truncate->[startOp,endOp,signalWrite,checkNodeForWrite,Truncate,getMDForRead],invalidateAllNodes->[Lock,Unlock,invalidateAllNodesLocked],getMDForRead->[getTrustedHead,identifyOnce],PushConnectionStatusChange->[getEditMonitoringChannel],getMostRecentFullyMergedMD->[getMDForReadHelper,getJournalPredecessorRevision,id],syncDirUpdateOrSignal->[signalWrite],renameLocked->[checkForUnlinkedDir,notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,unrefEntryLocked,getMDForWriteLockedForFilename,nowUnixNano],Stat->[statEntry,deferLogIfErr],forceStuckConflictForTesting->[isUnmergedLocked,id,Lock,getHead,Unlock],handleMDFlush->[commitFlushedMD,id,Lock,setLatestMergedRevisionLocked,handleEditNotifications,Unlock,goTracked],FolderStatus->[startOp,endOp],getMDForReadNeedIdentifyOnMaybeFirstAccess->[Lock,Unlock,getMDForRead,getMDForWriteOrRekeyLocked],UnstageForTesting->[startOp,endOp,isUnmerged,unstageLocked,doMDWriteWithRetry,goTracked],undoMD
UpdatesLocked->[Lock,setHeadPredecessorLocked,notifyOneOpLocked,getCurrMDRevisionLocked,Unlock],Unlock->[Unlock],String]
checkDisallowedPrefixes returns a non-nil error if the name starts with a disallowed prefix, unless the context explicitly allows that exact name.
Does this include `.kbfs_fileinfo...` things that we wanted to obfuscate?
@@ -979,6 +979,15 @@ class Either(ParameterizedProperty): raise ValueError("Could not transform %r" % value) + def from_json(self, json, models=None): + for tp in self.type_params: + try: + return tp.from_json(json, models) + except DeserializationError: + pass + else: + raise DeserializationError("%s couldn't deserialize %s" % (self, json)) + def __str__(self): return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
[Tuple->[from_json->[from_json,DeserializationError],validate->[is_valid]],Array->[__init__->[_validate_type_param]],RelativeDelta->[__init__->[Enum]],Property->[__set__->[__get__,matches,validate,transform,is_valid],__get__->[_get],is_valid->[validate]],List->[from_json->[from_json,DeserializationError],validate->[is_valid],__init__->[_validate_type_param]],MetaHasProps->[__new__->[__new__,autocreate]],Either->[validate->[is_valid],transform->[transform]],Color->[__init__->[Tuple,Regex,Enum]],Range->[validate->[is_valid],__init__->[_validate_type_param,validate]],HasProps->[properties_containers->[accumulate_from_subclasses],properties_with_refs->[accumulate_from_subclasses],properties_with_values->[properties],changed_vars->[properties_containers,properties_with_refs],pprint_props->[properties_with_values],class_properties->[accumulate_from_subclasses],changed_properties_with_values->[changed_properties],changed_properties->[changed_vars]],DashPattern->[__init__->[List,Regex,Enum]],Instance->[from_json->[from_json,DeserializationError,instance_type,lookup]],ColorSpec->[_get->[isconst],to_dict->[_formattuple,isconst]],Dict->[from_json->[from_json,DeserializationError],validate->[is_valid],__init__->[_validate_type_param]]]
Attempt to deserialize the JSON value with each type parameter in turn, returning the first successful result; raise DeserializationError if none succeed.
OK, maybe I am missing something here but... Suppose you have x number of elements in `self.type_params`, and the first one can not be deserialized, then it goes toward the except and it is "passed", then the `for` loop follows and all the other x-1 elements can be deserialized successfully. At the last cycle of the for loop, if the last element is deserialized correctly, it `return`s and the else branch is never executed... but we actually had a deserialization problem at the first element...
@@ -22,11 +22,13 @@ package io.druid.query.dimension; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; import com.metamx.common.StringUtils; import io.druid.query.extraction.ExtractionFn; import io.druid.segment.DimensionSelector; import java.nio.ByteBuffer; +import java.util.List; /** */
[ExtractionDimensionSpec->[getCacheKey->[getCacheKey],hashCode->[hashCode],preservesOrdering->[preservesOrdering],equals->[equals]]]
Creates a dimension spec which can be used to extract a single dimension from a single segment. The constructor for the object.
Backward compat? cc @gianm
@@ -9,7 +9,7 @@ import {CanRender} from '#preact/contextprops'; import {ActionInvocation} from '#service/action-impl'; -import {waitFor} from '#testing/test-helper'; +import {waitFor} from '#testing/helpers/service-helpers'; describes.realWin( 'amp-accordion:1.0',
[No CFG could be retrieved]
Test setup for amp-accordion: creates a real window fixture and uses waitFor to await rendering of elements with expanded and collapsed attributes.
micro-nit: I like context based hierarchies so `#testing/helpers/service` might be a better name.
@@ -59,6 +59,18 @@ public class DbConnector ); } + public static void createWorkerSetupTable(final DBI dbi, final String workerTableName) + { + createTable( + dbi, + workerTableName, + String.format( + "CREATE table %s (minVersion TINYTEXT NOT NULL, minNumWorkers SMALLINT NOT NULL, nodeData LONGTEXT NOT NULL, userData LONGTEXT NOT NULL, securityGroupIds LONGTEXT NOT NULL, keyName TINYTEXT NOT NULL)", + workerTableName + ) + ); + } + public static void createTable( final DBI dbi, final String tableName,
[DbConnector->[createTable->[withHandle->[info,format,isEmpty,select,execute],withHandle,warn],getDatasource->[getDatabaseConnectURI,getDatabaseUser,getDatabasePassword,setUrl,setPassword,BasicDataSource,setUsername],createSegmentTable->[createTable,format],createRuleTable->[createTable,format],DBI,Logger,getDatasource]]
Creates the rule table if it does not exist.
Are we going to have multiple rows in this table? It seems to me like it might turn into just a single row, but I'm not sure. If it is just a single row, a part of me things we might want to just create a "config" table and use more of a key-value store style schema (and just put all the config in a payload).
@@ -52,6 +52,11 @@ func ApplyOptions(optsGetter Getter, store *registry.Store, isNamespaced bool, t return fmt.Errorf("store for %s must have CreateStrategy set", store.QualifiedResource.String()) } + isNamespaced := store.CreateStrategy.NamespaceScoped() + if isNamespaced != oldIsNamespaced { // TODO(soltysh): oldIsNamespaced should be completely removed in #12541 + return fmt.Errorf("CreateStrategy has %v for namespace scope but user specified %v as namespace scope", isNamespaced, oldIsNamespaced) + } + opts, err := optsGetter.GetRESTOptions(store.QualifiedResource) if err != nil { return fmt.Errorf("error building RESTOptions for %s store: %v", store.QualifiedResource.String(), err)
[NamespaceKeyRootFunc,GetRESTOptions,Empty,NoNamespaceKeyFunc,String,Errorf,NewFunc,NamespaceKeyFunc,HasPrefix,Decorator]
Apply options to a generic storage.
until we remove the bool, compare the passed bool to the strategy and error
@@ -407,7 +407,7 @@ export class AmpA4A extends AMP.BaseElement { ); this.uiHandler = new AMP.AmpAdUIHandler(this); - this.uiHandler.validateStickyAd(); + this.uiHandler.adFormatHandler.validate(); // Disable crypto key fetching if we are not going to use it in no-signing path. // TODO(ccordry): clean up with no-signing launch.
[AmpA4A->[whenWithinViewport->[whenWithinViewport]]]
Build callback for AMP.
maybe optional chaining? not sure if this can be null
@@ -308,6 +308,11 @@ class DataDrivenTestCase: pycache = os.path.join(path, '__pycache__') if os.path.isdir(pycache): shutil.rmtree(pycache) + # As a somewhat nasty hack, ignore any dirs with .mypy_cache in the path, + # to allow test cases to intentionally corrupt the cache without provoking + # the test suite when there are still files left over. + if '/.mypy_cache' in path: + continue try: rmdir(path) except OSError as error:
[MypyDataCase->[runtest->[setup],setup->[setup],teardown->[teardown]],DataDrivenTestCase->[add_dirs->[add_dirs]],parse_test_data->[TestItem],MypyDataSuite->[collect->[parse_test_cases]]]
Remove all files and directories that are in the clean_up list.
I wonder if the `/` might cause trouble on Windows.
@@ -267,6 +267,16 @@ class RemoteRegistry(object): else: self._output.warn("The URL is empty. It must contain scheme and hostname.") + @contextmanager + def _editables_metadata_from_cache(self): + """ + Hide editable packages to get the cache layout instead of the editable one + """ + editables = self._cache.editable_packages + self._cache.editable_packages = {} + yield + self._cache.editable_packages = editables + def load_remotes(self): if not os.path.exists(self._filename): self._output.warn("Remotes registry file missing, "
[Remotes->[dumps->[values],loads->[loads,Remotes],_get_by_url->[values],_upsert->[items],_add_update->[_get_by_url,items],items->[items],values->[values],save->[dumps,save,items],get->[get],__nonzero__->[__bool__],clear->[clear],add->[_upsert],defaults->[Remotes],rename->[items]],migrate_registry_file->[load_registry_txt,load_old_registry_json,add_ref_remote,add_pref_remote],RemoteRegistry->[prefs_list->[items],define->[values,save],update->[_validate_url,load_remotes,save,update],load_remotes->[loads,defaults,save],remove->[values,save,load_remotes],add->[values,save,load_remotes,add,_validate_url],clear->[values,save,load_remotes,clear],rename->[values,save,load_remotes,rename]]]
Check if URL contains protocol and hostname and address and if it contains a it will return.
The member `editable_packages` is not a dictionary, it is an instance of `EditablePackages` class, we should override the `_edited_refs` member. I would like to keep the same type, one never knows if there is a `isinstance` check somewhere.
@@ -1762,7 +1762,7 @@ migrate_one_epoch_object(daos_handle_t oh, daos_epoch_range_t *epr, num = KDS_NUM; daos_anchor_set_flags(&dkey_anchor, DIOF_TO_LEADER | DIOF_WITH_SPEC_EPOCH | - DIOF_TO_SPEC_SHARD); + DIOF_TO_SPEC_GROUP); retry: rc = dsc_obj_list_obj(oh, epr, NULL, NULL, &size, &num, kds, &sgl, &anchor,
[No CFG could be retrieved]
Migrates objects for one epoch: enumerates object keys via dsc_obj_list_obj with the configured anchor flags, retrying the listing as needed.
for EC, seems should use SPEC_SHARD, @wangdi1 ? although in this patch, the SPEC_GROUP keep the same behavior as SPEC_SHARD so probably fine, but seems some other change needed later (assume Di will do?)
@@ -34,7 +34,7 @@ const mergeOptions = function (child, parent, visited) { if (key in child && key !== 'webPreferences') continue const value = parent[key] - if (typeof value === 'object') { + if (typeof value === 'object' && !Array.isArray(value)) { child[key] = mergeOptions(child[key] || {}, value, visited) } else { child[key] = value
[No CFG could be retrieved]
Create a security context object that can be used to create a child window. Inherit options from the parent window if it is a BrowserWindow or a webview.
This was a bug I found relevant to this change. `additionalArguments` is an array and wasn't being inherited properly
@@ -1048,10 +1048,10 @@ dfuse_mmap(void *address, size_t length, int prot, int flags, int fd, rc = vector_get(&fd_table, fd, &entry); if (rc == 0) { - DFUSE_LOG_INFO("mmap(address=%p, length=%zu, prot=%d, flags=%d," - " fd=%d, offset=%zd) " - "intercepted, disabling kernel bypass ", address, - length, prot, flags, fd, offset); + DFUSE_LOG_DEBUG("mmap(address=%p, length=%zu, prot=%d, flags=%d," + " fd=%d, offset=%zd) " + "intercepted, disabling kernel bypass ", address, + length, prot, flags, fd, offset); if (entry->fd_pos != 0) __real_lseek(fd, entry->fd_pos, SEEK_SET);
[No CFG could be retrieved]
Intercepts mmap on a tracked file descriptor: logs the call, restores the saved file position via __real_lseek when needed, and disables kernel bypass for that descriptor.
(style) line over 80 characters
@@ -1133,6 +1133,10 @@ def prepare_and_parse_args(plugins, args, detect_defaults=False): # pylint: dis ["testing", "standalone"], "--http-01-address", dest="http01_address", default=flag_default("http01_address"), help=config_help("http01_address")) + helpful.add( + ["testing", "apache", "nginx"], "--https-port", type=int, + default=flag_default("https_port"), + help=config_help("https_port")) helpful.add( "testing", "--break-my-certs", action="store_true", default=flag_default("break_my_certs"),
[_add_all_groups->[add_group],_plugins_parsing->[add_plugin_args,add,add_group,flag_default],HelpfulArgumentParser->[add_group->[HelpfulArgumentGroup],set_test_server->[flag_default],remove_config_file_domains_for_renewal->[_Default],modify_kwargs_for_default_detection->[_Default],__init__->[flag_default],add_deprecated_argument->[add_deprecated_argument],add->[add_argument],parse_args->[possible_deprecation_warning,remove_config_file_domains_for_renewal,parse_args],add_plugin_args->[add_group],_usage_string->[_list_subcommands]],_Default->[__nonzero__->[__bool__]],_paths_parser->[config_help,add,flag_default],set_by_cli->[set_by_cli],prepare_and_parse_args->[_add_all_groups,HelpfulArgumentParser,flag_default,add_deprecated_argument,add,parse_args,config_help],_create_subparsers->[add,flag_default],option_was_set->[set_by_cli,has_default_value],flag_default]
Prepares and parses command-line arguments: registers argument groups and flags (including plugin and testing options) and returns the parsed configuration.
Why are these changes in both this PR and #6857? This value or `--tls-sni-01-port` doesn't seem to be used in the Apache plugin so I think these changes should be removed from this PR and left in the Nginx one.
@@ -1065,7 +1065,7 @@ TransportSendStrategy::send(TransportQueueElement* element, bool relink) if (max_message_size) { // fragmentation enabled const size_t avail = this->space_available(); if (element_length > avail) { - VDBG_LVL((LM_TRACE, "(%P|%t) DBG: Fragmenting\n"), 0); + VDBG_LVL((LM_TRACE, "(%P|%t) DBG: Fragmenting %u > %u\n", element_length, avail), 0); ElementPair ep = element->fragment(avail); element = ep.first; element_length = element->msg()->total_length();
[No CFG could be retrieved]
Adds the given element to the packet queue, fragmenting it first when its length exceeds the space available in the current packet.
`%B` is used for `size_t` logging. It does seem to be needed so that the `va_arg` is 64-bit
@@ -69,3 +69,11 @@ SUPPORTED_PLATFORM_NORMALIZED_NAMES = { ('darwin', '16'): ('mac', '10.12'), ('darwin', '17'): ('mac', '10.13'), } + + +def get_closest_mac_host_platform_pair(darwin_version_upper_bound, + platform_name_map=SUPPORTED_PLATFORM_NORMALIZED_NAMES): + """Return the (host, platform) pair for the highest known darwin version less than the bound.""" + darwin_versions = [int(x[1]) for x in platform_name_map if x[0] == 'darwin'] + max_darwin_version = str(max(v for v in darwin_versions if v <= int(darwin_version_upper_bound))) + return platform_name_map[('darwin', max_darwin_version)]
[get_normalized_os_name->[get_os_name,normalize_os_name]]
Return the (host, platform) pair for the highest known darwin version that does not exceed the given upper bound.
Using `get` and raising a more helpful error here would be good (or having the caller handle None and fall through to the other helpful error you added).
@@ -50,6 +50,8 @@ class BinaryToolBase(Subsystem): # Subclasses may set this to provide extra register() kwargs for the --version option. extra_version_option_kwargs = None + allow_version_override = True + @classmethod def subsystem_dependencies(cls): sub_deps = super(BinaryToolBase, cls).subsystem_dependencies() + (BinaryUtil.Factory,)
[BinaryToolBase->[_select_for_version->[_make_binary_request,select],_make_binary_request->[get_external_url_generator,_name_to_fetch,_get_archiver,get_support_dir],_name_to_fetch->[_get_name],get_support_dir->[_get_name]],XZ->[_executable_location->[select]],Script->[hackily_snapshot->[select]]]
Returns a list of subsystem dependencies for this binary.
I might maybe just change this to a `@classproperty` named something like `hardcoded_version` which defaults to `None`? That would mean the zinc native-image factory subsystem could just set `hardcoded_version` instead of having to override `version()`.
@@ -16,11 +16,6 @@ public interface IMessenger { */ void send(Serializable msg, INode to); - /** - * Send a message to all nodes. - */ - void broadcast(Serializable msg); - /** * Listen for messages. */
[No CFG could be retrieved]
Broadcasts a message to all nodes.
I'm surprised this method isn't used. How does chat broadcast the messages if not via this method?
@@ -22,7 +22,7 @@ import { UserRouteAccessService } from 'app/core'; import { SettingsComponent } from './settings.component'; export const settingsRoute: Route = { - path: 'settings', + path: 'account/settings', component: SettingsComponent, data: { authorities: ['ROLE_USER'],
[No CFG could be retrieved]
Exports the settings route definition.
I think we can move `account` path prefix to `account.route.ts`.
@@ -57,6 +57,7 @@ module Repository # Static method: Creates a new Git repository at # location 'connect_string' def self.create(connect_string) + if GitRepository.repository_exists?(connect_string) raise RepositoryCollision.new( "There is already a repository at #{connect_string}")
[GitRevision->[initialize->[get_repos]],GitRepository->[delete_bulk_permissions->[add_user],expand_path->[expand_path],remove_user->[add_user],add_file->[path_exists_for_latest_revision?],latest_revision_number->[get_revision_number],remove_file->[commit_options,create],create->[commit_options,create],access->[open],make_directory->[make_directory,expand_path],close->[close],get_revision_by_timestamp->[get_revision,get_revision_number,get_repos],add_user->[close],make_file->[commit_options,create,open]]]
Creates a new Git repository at the given path; raises RepositoryCollision if a repository already exists there.
Extra empty line detected at method body beginning.
@@ -1100,7 +1100,11 @@ class TestDependencies(TestBase): enable_dep_inference: bool = False, ) -> None: target = self.request_single_product(WrappedTarget, requested_address).target - args = ["--dependency-inference"] if enable_dep_inference else [] + args = ( + ["--python-infer-imports", "--backend-packages=pants.backend.python"] + if enable_dep_inference + else [] + ) result = self.request_single_product( Addresses, Params(
[TestDependencies->[test_dependency_injection->[assert_injected],test_normal_resolution->[assert_dependencies_resolved],test_dependency_inference->[assert_dependencies_resolved],test_explicit_file_dependencies->[assert_dependencies_resolved]],GraphTest->[test_resolve_generated_subtarget->[MockTarget],test_cycle_direct->[assert_failed_cycle],test_transitive_targets->[get_target],test_cycle_self->[assert_failed_cycle],test_cycle_indirect->[assert_failed_cycle]],TestCodegen->[test_generate_sources->[AvroSources,GenerateSmalltalkFromAvroRequest],test_works_with_subclass_fields->[CustomAvroSources],test_cannot_generate_language->[AvroSources]],generate_smalltalk_from_avro->[generate_fortran],test_validate_explicit_file_dep->[assert_raises],infer_smalltalk_dependencies->[infer],TestFindValidFieldSets->[test_find_valid_field_sets->[InvalidTarget,find_valid_field_sets,FortranTarget]],TestSources->[test_expected_file_extensions->[ExpectedExtensionsSources],test_default_globs->[DefaultSources],test_output_type->[SourcesSubclass],test_expected_num_files->[hydrate]]]
Assert that the specified node dependencies are resolved.
Bump that this is unnecessary. You can delete this line and delete the bool flag `enable_dep_inference`.
@@ -1,7 +1,5 @@ <?php -if (!$os) { - if (strstr($sysObjectId, '.1.3.6.1.4.1.5596.130.6.4.1')) { - $os = 'vcs'; - } +if (str_contains('.1.3.6.1.4.1.5596.130.6.4.1', $sysObjectId)) { + $os = 'vcs'; }
[No CFG could be retrieved]
Sets $os to 'vcs' when the sysObjectId matches the expected OID prefix.
Wrong way round :)
@@ -193,10 +193,13 @@ class ArgTypeExpander: formal_name = (set(actual_type.items.keys()) - self.kwargs_used).pop() self.kwargs_used.add(formal_name) return actual_type.items[formal_name] - elif (isinstance(actual_type, Instance) - and (actual_type.type.fullname == 'builtins.dict')): - # Dict **arg. - # TODO: Handle arbitrary Mapping + elif ( + isinstance(actual_type, Instance) and + len(actual_type.args) > 1 and + is_subtype(actual_type, self.context.mapping_type) + ): + # Only `Mapping` type can be unpacked with `**`. + # Other types will produce an error somewhere else. return actual_type.args[1] else: return AnyType(TypeOfAny.from_error)
[map_formals_to_actuals->[map_actuals_to_formals]]
Expands the actual type of a tuple or typed dict with the given kinds. The unknown type for the object.
Wouldn't this introduce a new bug if the generic args aren't the same as for Mapping? I guess that's #11138.
@@ -46,8 +46,10 @@ func StorageOptions(options configapi.MasterConfig) (restoptions.Getter, error) {Resource: "netnamespaces"}: "registry/sdnnetnamespaces", {Resource: "netnamespaces", Group: "network.openshift.io"}: "registry/sdnnetnamespaces", }, - // storage versions: no overrides anymore - map[schema.GroupResource]schema.GroupVersion{}, + // storage versions: use v1alpha1 instead of v1 for accessrestrictions + map[schema.GroupResource]schema.GroupVersion{ + {Group: "authorization.openshift.io", Resource: "accessrestrictions"}: {Group: "authorization.openshift.io", Version: "v1alpha1"}, + }, // quorum resources: map[schema.GroupResource]struct{}{ {Resource: "oauthauthorizetokens"}: {},
[NewConfigGetter]
The list of resources in the registry that are not part of the network.
This looks wrong. Find the code that is computing wrong and fix taht.
@@ -0,0 +1 @@ +#include "augio.h"
[No CFG could be retrieved]
No Summary Found.
Please rename this file to lowercase.
@@ -209,13 +209,8 @@ public class TestPlanMatchingFramework */ private void assertFails(Runnable runnable) { - try { - runnable.run(); - fail("Plans should not have matched!"); - } - catch (AssertionError e) { - //ignored - } + assertThatThrownBy(runnable::run) + .isInstanceOf(AssertionError.class); } @Test
[TestPlanMatchingFramework->[testStrictOutputExtraSymbols->[assertFails],testStrictTableScanExtraSymbols->[assertFails],testStrictProjectExtraSymbols->[assertFails]]]
Assert that the given runnable does not match any of the conditions in the plan.
message starting with can be validated
@@ -137,8 +137,8 @@ module Api unless params[:submission_rule_type].nil? submission_rule = get_submission_rule(params) if submission_rule.nil? - render 'shared/http_status', :locals => {:code => '500', :message => - HttpStatusHelper::ERROR_CODE['message']['500']}, :status => 500 + render 'shared/http_status', locals: {code: '500', message: + HttpStatusHelper::ERROR_CODE['message']['500']}, status: 500 return else # If it's a valid submission rule, replace the existing one
[AssignmentsController->[create->[nil?,render,new,process_attributes,submission_rule,has_missing_params?,get_submission_rule,find_by_short_identifier,build_assignment_stat,save],show->[nil?,xml,render,respond_to,to_json,fields_to_render,json,to_xml,find_by_id],process_attributes->[nil?,new,post?,each,delete],update->[nil?,render,attributes,process_attributes,submission_rule,get_submission_rule,find_by_short_identifier,blank?,valid?,destroy,save,find_by_id],get_submission_rule->[new,periods],index->[xml,render,respond_to,to_json,fields_to_render,json,to_xml,get_collection]]]
Updates a single node in the system.
Align the elements of a hash literal if they span more than one line.<br>Space inside } missing.
@@ -311,6 +311,7 @@ func (p *processMetadata) toMap() common.MapStr { "pid": p.pid, "ppid": p.ppid, "start_time": p.startTime, + "username": p.username, }, } }
[enrich->[Wrapf,HasKey,Put,New,getContainerID,GetValue,Clone,Errorf,GetProcessMetadata,Debugf],Close->[StopJanitor],getContainerID->[GetCid],String->[Sprintf],Run->[Wrapf,enrich],With,Wrapf,Unpack,Int,NewLogger,New,ValueOf,Wrap,StartJanitor,Uint,Debugf,RegisterPlugin,Errorf,Inc,NewCacheWithRemovalListener,getMappings,Atoi,Debug]
toMap returns a MapStr representation of the process metadata.
I suggest not to set this field when p.username is the empty string.
@@ -1,11 +1,11 @@ class CheckinsController < ApplicationController permits :comment, :shared_twitter, :shared_facebook - before_filter :authenticate_user!, only: [:new, :create, :create_all, :edit, :update, :destroy] - before_filter :set_work, only: [:new, :create, :create_all, :show, :edit, :update, :destroy] - before_filter :set_episode, only: [:new, :create, :show, :edit, :update, :destroy] - before_filter :set_checkin, only: [:show, :edit, :update, :destroy] - before_filter :redirect_to_top, only: [:edit, :update, :destroy] + before_action :authenticate_user!, only: [:new, :create, :create_all, :edit, :update, :destroy] + before_action :set_work, only: [:new, :create, :create_all, :show, :edit, :update, :destroy] + before_action :set_episode, only: [:new, :create, :show, :edit, :update, :destroy] + before_action :set_checkin, only: [:show, :edit, :update, :destroy] + before_action :redirect_to_top, only: [:edit, :update, :destroy] def new
[CheckinsController->[destroy->[destroy],show->[new],create->[new],create_all->[create],new->[new]]]
Creates a new object.
Unnecessary spacing detected.
@@ -29,4 +29,15 @@ public class CommonTypeConverters { }; } + /** + * @return a converter that transforms an string to a enum. + */ + public static TypeConverter<String, Enum> stringToEnumConverter(Class<? extends Enum> enumType) { + checkArgument(enumType != null, "enumType cannot be null"); + return enumAsString -> { + checkArgument(enumAsString != null, "enumAsString cannot be null"); + return Enum.valueOf(enumType, enumAsString); + }; + } + }
[CommonTypeConverters->[stringToClassConverter->[RuntimeException,loadClass]]]
A TypeConverter that converts a String to a Class.
an string to a enum. > a String into an Enum
@@ -35,10 +35,10 @@ type tsoRequest struct { logical int64 } -type regionRequest struct { - key []byte +type metaRequest struct { + pbReq *pdpb.GetMetaRequest done chan error - region *metapb.Region + pbResp *pdpb.GetMetaResponse } type rpcWorker struct {
[stop->[Wait],work->[NewWriterSize,NewReaderSize,Infof,NewDeadlineWriter,NewReadWriter,NewDeadlineReader,Warnf,DialTimeout,Close,handleRequests,After,Done],getRegionFromRemote->[GetRegion,Flush,Uint64,WriteMessage,New,NewV4,Enum,ReadMessage,Errorf,Bytes,GetGetMeta],handleRequests->[Error,GetPhysical,GetLogical,getRegionFromRemote,Errorf,getTSFromRemote],getTSFromRemote->[Flush,Uint64,GetTimestamps,WriteMessage,New,NewV4,Enum,ReadMessage,GetTso,Errorf,Bytes,Uint32],AddUint64,work,Add]
pd import imports a package containing the functions related to the protocol. work is the main loop for the rpcWorker. It is run in a goroutine. It.
I think It is strange to put a response variable member in a struct called XXRequest. Any suggestion? @siddontang
@@ -58,4 +58,8 @@ public final class TopologyTestDriverContainer { public Topic getSinkTopic() { return sinkTopic; } + + public Set<String> getSourceTopicNames() { + return sourceTopics.stream().map(Topic::getName).collect(Collectors.toSet()); + } } \ No newline at end of file
[TopologyTestDriverContainer->[of->[TopologyTestDriverContainer]]]
Returns the sink topic.
nit: do this in the constructor to only create the set once instead of every time we call `getSourceTopicNames`
@@ -72,12 +72,11 @@ public class BattleCalculatorTest { final Collection<Unit> planes = bomber(data).create(5, british(data)); final Collection<Unit> defendingAa = territory("Germany", data).getUnits().getMatches(Matches.unitIsAaForAnything()); - final ScriptedRandomSource randomSource = new ScriptedRandomSource(0, ScriptedRandomSource.ERROR); - bridge.setRandomSource(randomSource); + whenGetRandom(bridge).thenReturn(new int[] {0}); final Collection<Unit> casualties = BattleCalculator.getAaCasualties(false, planes, planes, defendingAa, defendingAa, roll, bridge, null, null, null, territory("Germany", data), null, false, null).getKilled(); assertEquals(1, casualties.size()); - assertEquals(1, randomSource.getTotalRolled()); + thenGetRandomShouldHaveBeenCalled(bridge, times(1)); } @Test
[BattleCalculatorTest->[testAaCasualtiesLowLuckMixedWithChooseAaCasualtiesRoll->[givenRemotePlayerWillSelectStrategicBombersForCasualties],testAaCasualtiesLowLuckMixedWithChooseAaCasualties->[givenRemotePlayerWillSelectStrategicBombersForCasualties]]]
Test if a casualties are low - luck. a roll bridge bridge.
For this assertion instead of an IllegalStateException
@@ -1,10 +1,10 @@ class EmailSubscriptionsController < ApplicationController PREFERRED_EMAIL_NAME = { - email_digest_periodic: "#{ApplicationConfig['COMMUNITY_NAME']} digest emails", + email_digest_periodic: "#{SiteConfig.community_name} digest emails", email_comment_notifications: "comment notifications", email_follower_notifications: "follower notifications", email_mention_notifications: "mention notifications", - email_connect_messages: "#{ApplicationConfig['COMMUNITY_NAME']} connect messages", + email_connect_messages: "#{SiteConfig.community_name} connect messages", email_unread_notifications: "unread notifications", email_badge_notifications: "badge notifications" }.freeze
[EmailSubscriptionsController->[unsubscribe->[fetch,verify,find,render,update,current],freeze]]
Unsubscribe a user if it is expired.
I just did find and replace, but I suspect this may cause a problem in terms of `SiteConfig` being available yet here in this context, not entirely sure. Will check back on this tomorrow.
@@ -385,6 +385,12 @@ public class Reviewer extends AbstractFlashcardViewer { return true; } } + if (keyCode == KeyEvent.KEYCODE_BUTTON_Y || keyCode == KeyEvent.KEYCODE_BUTTON_X + || keyCode == KeyEvent.KEYCODE_BUTTON_B || keyCode == KeyEvent.KEYCODE_BUTTON_A) + { + displayCardAnswer(); + return true; + } if (keyPressed == 'e') { editCard(); return true;
[Reviewer->[SuspendProvider->[hasSubMenu->[dismissNoteAvailable]],onStop->[onStop],initControls->[initControls],setTitle->[setTitle],onActivityResult->[onActivityResult],onCollectionLoaded->[onCollectionLoaded],onKeyUp->[onKeyUp],BuryProvider->[hasSubMenu->[dismissNoteAvailable]],onOptionsItemSelected->[onOptionsItemSelected],onCreate->[onCreate],displayCardQuestion->[displayCardQuestion],onWindowFocusChanged->[onWindowFocusChanged],fillFlashcard->[fillFlashcard],onCreateOptionsMenu->[onCreateOptionsMenu,setTitle,setCustomButtons],restorePreferences->[restorePreferences]]]
This method is called when the user presses the key up.
this should only happen if `sDisplayAnswer` is False right?
@@ -195,6 +195,16 @@ class CompilationUnit(object): len(buffer), None) self.driver.check_error(err, 'Failed to add module') + def lazy_add_module(self, buffer): + """ + Lazily add an NVVM IR module to a compilation unit. + The buffer should contain NVVM module IR either in the bitcode + representation or in the text representation. + """ + err = self.driver.nvvmLazyAddModuleToProgram(self._handle, buffer, + len(buffer), None) + self.driver.check_error(err, 'Failed to add module') + def compile(self, **options): """Perform Compilation
[llvm_to_ptx->[LibDevice,llvm_replace,CompilationUnit,add_module,get,compile],set_cuda_kernel->[get],NVVM->[__new__->[__new__]],llvm100_to_34_ir->[parse_out_leading_type],ir_numba_atomic_minmax->[ir_cas],find_closest_arch->[get_supported_ccs],get_arch_option->[find_closest_arch],ir_numba_atomic_binary->[ir_cas],ir_cas->[NVVM],ir_numba_atomic_dec->[ir_cas],CompilationUnit->[get_log->[check_error],add_module->[check_error],__init__->[NVVM,check_error],__del__->[NVVM,check_error],_try_error->[check_error]],llvm_replace->[NVVM,ir_numba_atomic_minmax,_replace_datalayout,ir_numba_atomic_dec,ir_numba_atomic_binary,ir_numba_atomic_inc],get_supported_ccs->[get_version],ir_numba_atomic_inc->[ir_cas],add_ir_version->[NVVM],compile]
Add a module level NVVM IR to the program. read - read .
Is there benefit in exposing the `name` parameter? May be for debug?
@@ -735,6 +735,12 @@ type ScheduleConfig struct { // is overwritten, the value is fixed until it is deleted. // Default: manual StoreLimitMode string `toml:"store-limit-mode" json:"store-limit-mode"` + + // Controls the time interval between write hot regions info into leveldb. + HotRegionsWriteInterval typeutil.Duration `toml:"hot-regions-write-interval" json:"hot-regions-write-interval"` + + // The day of hot regions data to be reserved.0 means close + HotRegionsResevervedDays int64 `toml:"hot-regions-reserved-days" json:"hot-regions-reserved-days"` } // Clone returns a cloned scheduling configuration.
[MigrateDeprecatedFlags->[migrateConfigurationMap],Parse->[Parse],RewriteFile->[GetConfigFile],IsDefined->[IsDefined],migrateConfigurationFromFile->[IsDefined],Adjust->[CheckUndecoded,Parse,IsDefined,Validate,Adjust,Child],parseDeprecatedFlag->[IsDefined],adjustLog->[IsDefined],adjust->[Child,Validate,adjust,IsDefined],Parse]
Clone clones the schedule config.
Why not use `typeutil.Duration`?
@@ -463,7 +463,7 @@ class Jetpack_Plugin_Search { $links[] = '<a id="plugin-select-settings" class="jetpack-plugin-search__primary button jetpack-plugin-search__configure" - href="' . esc_url( $plugin['configure_url'] ) . '" + href="' . $this->get_configure_url( $plugin['module'], $plugin['configure_url'] ) . '" data-module="' . esc_attr( $plugin['module'] ) . '" data-track="configure" >' . esc_html__( 'Configure', 'jetpack' ) . '</a>';
[Jetpack_Plugin_Search->[inject_jetpack_module_suggestion->[get_extra_features,is_not_dismissed],insert_module_related_links->[get_configure_url,get_upgrade_url],is_not_dismissed->[get_dismissed_hints]]]
Adds link to the module related menu This function is called by Jetpack when a module is activated. Link to the learn more link.
Not a huge fan of escaping within this function instead of right here on display as it could introduce an issue if `get_configure_url` is refactored in the future and we don't patch up the escaping right, but since it is escaped I don't want to block on that.
@@ -63,6 +63,18 @@ class ObjectsModel(ListModel): is_outside_build_area = node.isOutsideBuildArea() else: is_outside_build_area = False + + #check if we already have an instance of the object based on name + duplicate = False + for n in namecount: + if name == n["name"]: + name = "{0}({1})".format(name, n["count"]) + node.setName(name) + n["count"] = n["count"]+1 + duplicate = True + + if not duplicate: + namecount.append({"name" : name, "count" : 1}) nodes.append({ "name": name,
[ObjectsModel->[createObjectsModel->[ObjectsModel]]]
Updates the nodes in the scene.
Some mixed tabs and spaces here. Could you turn that into spaces?
@@ -292,7 +292,6 @@ class ShellCommandTimeout(IntField): class ShellCommandToolsField(StringSequenceField): alias = "tools" - required = True help = ( "Specify required executable tools that might be used.\n\n" "Only the tools explicitly provided will be available on the search PATH, "
[ShellSourcesGeneratorTarget->[git_url],ShellSourcesGeneratingSourcesField->[tuple],generate_targets_from_shunit2_tests->[Get,SourcesPathsRequest,generate_file_level_targets],generate_targets_from_shell_sources->[Get,SourcesPathsRequest,generate_file_level_targets],Shunit2Shell->[binary_path_test->[match,BinaryPathTest],parse_shebang->[group,cls,decode,splitlines,match]],Shunit2TestTimeoutField->[compute_value->[InvalidFieldException,super]],ShellCommand->[dedent],rules->[collect_rules,UnionRule],ShellCommandTimeout->[compute_value->[InvalidFieldException,super]]]
Shell command to execute. ShellCommandTimeout - Timeout for the command execution of any external tool for its side effects.
Made the `tools` field optional, as when invoked with `./pants run`, we have the users `PATH` already... It will still throw if missing in the code gen case, though.
@@ -314,6 +314,18 @@ class Evoked(object): plot_evoked(self, picks=picks, unit=unit, show=show, ylim=ylim, proj=proj, xlim=xlim) + def to_nitime(self, picks=None): + """ Export Evoded object to NiTime + """ + from nitime import TimeSeries + + if picks is None: + picks = np.arange(self.data.shape[0]) + + evoked_ts = TimeSeries(self.data[picks], sampling_rate=self.info['sfreq']) + + return evoked_ts + def resample(self, sfreq, npad=100, window='boxcar'): """Resample preloaded data
[Evoked->[resample->[resample]],read_evoked->[Evoked]]
Plot evoked data with optional limits.
by doing this "self.data[picks]" you do a copy even if picks is None. Is that on purpose?
@@ -394,7 +394,13 @@ func (o *LoginOptions) SaveConfig() (bool, error) { } if err := kclientcmd.ModifyConfig(o.PathOptions, *configToWrite, true); err != nil { - return false, err + if !os.IsPermission(err) { + return false, err + } + + out := &bytes.Buffer{} + cmderr.PrintError(errors.ErrKubeConfigNotWriteable(o.PathOptions.GetDefaultFilename(), o.PathOptions.IsExplicitFile(), err), out) + return false, fmt.Errorf("%v", out) } created := false
[GatherInfo->[gatherAuthInfo,gatherProjectInfo],gatherAuthInfo->[ClientConfig,Has,Fprintf,NewDefaultClientConfig,RequestToken,Fprint,tokenProvided,New,IsUnauthorized,getClientConfig,Errorf,usernameProvided,passwordProvided],whoAmI->[New],SaveConfig->[IsNotExist,Dir,ModifyConfig,Stat,Errorf,MakeAbs,Getwd,RelativizeClientConfigPaths,GetDefaultFilename,MergeConfig,CreateConfig],getClientConfig->[PromptForBool,PromptForStringWithDefault,Fprintln,Error,IsEmpty,IsCertificateAuthorityUnknown,Sprintf,NormalizeServerURL,Do,New,serverProvided,AbsPath,IsTerminal,Errorf,Get,GetPrettyMessageFor],gatherProjectInfo->[Has,Fprintf,Fprintln,Projects,whoAmI,New,List,IsForbidden,Insert,Errorf,IsNotFound,Get],Get,Compare,Insert,Users]
SaveConfig saves the config to disk.
Is the only error that could come back from this a file permission error
@@ -59,7 +59,7 @@ module GobiertoBudgets def total_budget_per_inhabitant(requested_year = year, kind = GobiertoBudgets::BudgetLine::EXPENSE) amount = total_budget_updated(fallback: true).to_f - population = population(requested_year) || population(requested_year - 1) || population(requested_year - 2) + population = population(requested_year) || population(requested_year - 1) || population(requested_year - 2) || population(requested_year - 3) || population(requested_year - 4) (amount / population).to_f end
[SiteStats->[latest_available->[has_data?],total_budget_executed_percentage->[total_budget_updated,total_budget_executed],debt_level->[debt],total_income_budget->[total_budget],income_execution_percentage->[total_income_budget_updated,total_income_budget_executed],total_income_budget_executed->[total_budget_executed],budgets_execution_summary->[total_budget_executed_percentage],total_income_budget_updated->[total_budget_updated],has_available?->[has_data?]]]
Returns the total budget per inhabitant for the specified year.
Line is too long. [181/180]
@@ -295,6 +295,11 @@ const EXPERIMENTS = [ id: 'amp-ima-video', name: 'IMA-integrated Video Player', }, + { + id: 'a4a-measure-get-ad-urls', + name: 'DoubleClick Fast Fetch measure delay from after first slot ad url ' + + 'generation to last slot as precursor for SRA support', + }, ]; if (getMode().localDev) {
[No CFG could be retrieved]
Construct the expriments tbale. Builds the row of the experiments table.
Nit: I would add a : after "Fast Fetch".
@@ -313,8 +313,9 @@ class DomainLanguage: return_type = signature.return_annotation # TODO(mattg): this might need to just call PredicateType.get_type, or something - what if # one of these is a function? - argument_nltk_types: List[PredicateType] = [BasicType(arg_type) for arg_type in argument_types] - return_nltk_type = BasicType(return_type) + argument_nltk_types: List[PredicateType] = [PredicateType.get_type(arg_type) + for arg_type in argument_types] + return_nltk_type = PredicateType.get_type(return_type) function_nltk_type = PredicateType.get_function_type(argument_nltk_types, return_nltk_type) self._functions[name] = function self._function_types[name] = function_nltk_type
[nltk_tree_to_logical_form->[nltk_tree_to_logical_form],DomainLanguage->[_get_function_transitions->[_get_transitions,ParsingError],_construct_node_from_actions->[_construct_node_from_actions,ParsingError],add_predicate->[BasicType,get_function_type],__init__->[BasicType],_execute_expression->[ExecutionError,_execute_expression],action_sequence_to_logical_form->[nltk_tree_to_logical_form,ParsingError],_get_transitions->[_get_transitions,ParsingError],add_constant->[BasicType],logical_form_to_action_sequence->[ParsingError]]]
Adds a predicate to this domain language.
This TODO is done, right? The code below calls ``PredicateType.get_type``
@@ -23,6 +23,8 @@ def shape_usecase(x): def npyufunc_usecase(x): return np.cos(np.sin(x)) +def astype_usecase(x, dtype): + return x.astype(dtype) def identity(x): return x
[npyufunc_usecase->[cos,sin],TestInterface->[test_interface->[assertTrue,all,arange,SmartArray,assertIsInstance,ones]],print_usecase->[print],len_usecase->[len],TestJIT->[test_identity->[arange,SmartArray,jit,assertIs,cfunc],test_getitem->[list,assertEqual,SmartArray,jit,slice,assertIsInstance,assertPreciseEqual,int32,cfunc],test_len->[arange,SmartArray,jit,assertPreciseEqual,cfunc],test_shape->[arange,SmartArray,jit,assertPreciseEqual,cfunc],test_ufunc->[get,SmartArray,jit,assertIsInstance,sin,assertPreciseEqual,int32,cos,cfunc]],main]
Use the NPYU function to compute the NPYU function.
Is this dead code?
@@ -88,7 +88,10 @@ class SGD(object): event_handler = default_event_handler __check_train_args__(**locals()) - updater = self.__optimizer__.create_local_updater() + if self.__is_local__: + updater = self.__optimizer__.create_local_updater() + else: + updater = self.__optimizer__.create_remote_updater(num_passes) updater.init(self.__gradient_machine__) self.__gradient_machine__.start()
[__check_train_args__->[callable,TypeError,isinstance,reader],SGD->[train->[start,sum,finishPass,eval,finish,createArguments,getNonStaticParameters,xrange,create_local_updater,enumerate,BeginIteration,event_handler,init,EndIteration,locals,startPass,update,len,feeder,isinstance,BeginPass,startBatch,__check_train_args__,makeEvaluator,reader,forwardBackward,finishBatch,DataFeeder,EndPass],__init__->[TypeError,randParameters,proto,Topology,isinstance,createFromConfigProto,enable_types,append_gradient_machine,data_type],test->[makeEvaluator,forward,reader,start,len,feeder,DataFeeder,sum,eval,finish,createArguments,TestResult]]]
Train the neural network using a reader. Finishes all the events in the sequence.
if else optimizer train()~
@@ -14,7 +14,7 @@ class SiteConfig < RailsSettings::Base field :social_networks_handle, type: :string, default: "thepracticaldev" # images - field :main_social_image, type: :string, default: "https://thepracticaldev.s3.amazonaws.com/i/6hqmcjaxbgbon8ydw93z.png" + field :main_social_image, type: :string, default: "https://#{ApplicationConfig['AWS_BUCKET_NAME']}.s3.amazonaws.com/i/6hqmcjaxbgbon8ydw93z.png" field :favicon_url, type: :string, default: "favicon.ico" field :logo_svg, type: :string, default: ""
[SiteConfig->[cache_prefix,field,table_name]]
Site configuration based on Rails settings Get a list of all tags in the system.
he're we're definitely breaking the main social image for a pristine installation
@@ -166,6 +166,10 @@ public class KafkaEmitter implements Emitter if (!alertQueue.offer(objectContainer)) { alertLost.incrementAndGet(); } + } else if (event instanceof RequestLogEvent && config.getRequestTopic() != null) { + if (!requestQueue.offer(objectContainer)) { + requestLost.incrementAndGet(); + } } else { invalidLost.incrementAndGet(); }
[KafkaEmitter->[sendAlertToKafka->[setProducerCallback],close->[close],flush->[flush],sendMetricToKafka->[setProducerCallback]]]
emit an event which can be null if there is no event in the queue.
one question, why not still use `requestLost` as the counter for lost events due to a null topic name instead of putting them in the bucket of `invalid` events which seems to previously have been there just for cases where the object isn't a supported event type?
@@ -44,6 +44,7 @@ #include <platform/timer.h> #include <platform/platform.h> #include <platform/clk.h> +#include <sof/drivers/timer.h> #define trace_sa(__e) trace_event_atomic(TRACE_CLASS_SA, __e) #define trace_sa_value(__e) trace_value_atomic(__e)
[sa_init->[rzalloc,work_init,clock_ms_to_ticks,trace_sa_value,platform_timer_get,trace_sa,work_schedule_default],uint64_t->[trace_sa,trace_sa_value,platform_timer_get,panic],sa_enter_idle->[platform_timer_get]]
PUBLIC FUNCTIONS ARE DEVELOPING IN THE SYSTEM OF THE SOFTWARE AND - - - - - - - - - - - - - - - - - -.
> agent.c:58:18: error: implicit declaration of function 'platform_timer_get'; did you mean 'platform_init'? [-Werror=implicit-function-declaration] > sa->last_idle = platform_timer_get(platform_timer); platform_timer_get is defined in sof/drivers/timer.h
@@ -9,12 +9,12 @@ import org.ray.api.id.UniqueId; public interface RuntimeContext { /** - * Get the current Driver ID. + * Get the current Job ID. * - * If called in a driver, this returns the driver ID. If called in a worker, this returns the ID - * of the associated driver. + * If called in a driver, this returns the job ID of this driver. If called in a worker, + * this returns the ID of the associated job. */ - UniqueId getCurrentDriverId(); + UniqueId getCurrentJobId(); /** * Get the current actor ID.
[No CFG could be retrieved]
Returns the unique identifier for the current driver.
This explanation doesn't seem necessary now. As the concept of "job id" is the same to both drivers and workers.
@@ -188,9 +188,9 @@ class ParticleSolver: # # -def CreateSolver(model_part1, model_part2, new_element, config, geometry_element, num_par): +def CreateSolver(model_part1, model_part2, model_part3, new_element, config, geometry_element, number_particle): - structural_solver = ParticleSolver(model_part1, model_part2,new_element, config.domain_size,geometry_element, num_par) + structural_solver = ParticleSolver(model_part1, model_part2, model_part3, new_element, config.domain_size,geometry_element, number_particle) #Explicit scheme parameters if(hasattr(config, "max_delta_time")):
[CreateSolver->[DefineBufferSize,ParticleSolver],ParticleSolver->[Check->[Check],SetRestart->[Initialize]]]
Creates a ParticleSolver object. This function is used to set the values of the structure structures that are used by the struct Construct a new particle solver object.
my advice would be to give a more meaningful name to the different model parts. not a blocker...but it would save your time later on
@@ -132,10 +132,6 @@ describe('Popover', () => { onPopperUpdate: wrapperInstance.onPopperUpdate, placement: wrapper.state().placement, }); - expect(tetherProps.anchorRef).toBe(wrapperInstance.anchorRef.current); - expect(tetherProps.arrowRef).toBe(wrapperInstance.arrowRef.current); - expect(tetherProps.popperRef).toBe(wrapperInstance.popperRef.current); - // // Manually emit a popper update (normally popper does this by itself) const offsets = { popper: {top: 10, left: 10},
[No CFG could be retrieved]
Updates the popover with the given id and content. expects the popover body to have a property with placement leftTop and arrow offset.
We should keep these tests to make sure it functions as expected.
@@ -246,6 +246,7 @@ class BalanceProof(object): lock_encoded = bytes(lock.as_bytes) lock_hash = sha3(lock_encoded) merkle_proof = [lock_hash] + # Why is the return value not used here? merkleroot(merkletree, merkle_proof) return UnlockProof(
[ChannelExternalState->[unlock->[unlock],update_transfer->[update_transfer],settle->[settle]],BalanceProof->[register_direct_transfer->[merkleroot_for_unclaimed],register_secret->[is_pending],register_locked_transfer->[unclaimed_merkletree,is_known],claim_lock_by_secret->[is_unclaimed,is_pending]],Channel->[create_timeouttransfer_for->[is_pending],channel_closed->[get_known_unlocks,unlock,update_transfer],blockalarm_for_settle->[_settle->[settle]],register_transfer_from_to->[is_pending,compute_merkleroot_with,unclaimed_merkletree,InvalidLocksRoot,merkleroot_for_unclaimed,InvalidNonce,register_locked_transfer,distributable,register_direct_transfer,InsufficientBalance],claim_lock->[get_lock_by_hashlock,claim_lock,is_known],balance->[balance],isopen->[isopen],__init__->[callback_on_settled,callback_on_closed,callback_on_opened],create_refundtransfer_for->[create_lockedtransfer,is_pending],register_secret->[register_secret,get_lock_by_hashlock,is_known],distributable->[distributable],create_lockedtransfer->[compute_merkleroot_with,distributable],create_mediatedtransfer->[create_lockedtransfer],locked->[locked],create_directtransfer->[merkleroot_for_unclaimed,distributable],outstanding->[locked]],ChannelEndState->[claim_lock->[claim_lock_by_secret],compute_merkleroot_with->[unclaimed_merkletree],register_locked_transfer->[register_locked_transfer],__init__->[BalanceProof],register_secret->[register_secret],distributable->[locked,balance],register_direct_transfer->[register_direct_transfer],locked->[locked]]]
Compute the unlock proof for a given lock.
Merkleroot is a destructive function that changes the `merkle_proof` in place, so the function is being executed just for the side effects in the list.
@@ -131,10 +131,18 @@ class Netcdf(Package): LDFLAGS.append("-L%s/lib" % spec['szip'].prefix) LIBS.append("-l%s" % "sz") + # PnetCDF support + if '+parallel-netcdf' in spec: + config_args.append('--enable-pnetcdf') + config_args.append('CC=%s' % spec['mpi'].mpicc) + CPPFLAGS.append("-I%s/include" % spec['parallel-netcdf'].prefix) + LDFLAGS.append("-L%s/lib" % spec['parallel-netcdf'].prefix) + # Fortran support # In version 4.2+, NetCDF-C and NetCDF-Fortran have split. # Use the netcdf-fortran package to install Fortran support. + config_args.append('CFLAGS=%s' % ' '.join(CFLAGS)) config_args.append('CPPFLAGS=%s' % ' '.join(CPPFLAGS)) config_args.append('LDFLAGS=%s' % ' '.join(LDFLAGS)) config_args.append('LIBS=%s' % ' '.join(LIBS))
[Netcdf->[install->[satisfies,append,join,configure,RuntimeError,make],patch->[join_path,TypeError,int,filter,format,FileFilter],variant,depends_on,version]]
Installs a new package with the given specification. This function is called to configure the CPPFLAGS LDFLAGS and LIBS for.
What happens when someone tries to build `netcdf+parallel-netcdf~mpi`?
@@ -117,12 +117,15 @@ export class AmpStoryPlayer { /** @private {!IframePool} */ this.iframePool_ = new IframePool(); - /** @private {!Object<string, !Promise>} */ + /** @private {!Object<number, !Promise>} */ this.messagingPromises_ = map(); /** @private {number} */ this.currentIdx_ = 0; + /** @private {boolean} */ + this.isAutoplaying_ = !!this.element_.attributes.autoplay; + /** @private {!SwipingState} */ this.swipingState_ = SwipingState.NOT_SWIPING;
[No CFG could be retrieved]
A class that exports a single AMPStoryPlayer. External callback for all of the elements that are needed to build the player.
Let's use `this.element_.getAttribute()` to be consistent with the codebase.
@@ -199,7 +199,7 @@ public abstract class AbstractPipeline extends AbstractFlowConstruct implements if (messageSource != null) { // Wrap chain to decouple lifecycle - messageSource.setListener(new AbstractInterceptingMessageProcessor() + messageSource.setListener(new MessageProcessor() { @Override public MuleEvent process(MuleEvent event) throws MuleException
[AbstractPipeline->[configureMessageProcessors->[getName->[getName],getMessageProcessors],validateConstruct->[setProcessingStrategy,validateConstruct],isMessageSourceCompatibleWithAsync->[isMessageSourceCompatibleWithAsync],addMessageProcessorPathElements->[getMessageProcessors,addMessageProcessorPathElements],doStop->[doStop],doDispose->[doDispose],doInitialise->[process->[process],createPipeline,doInitialise],createFlowMap->[getName],ProcessIfPipelineStartedMessageProcessor->[handleUnaccepted->[getName]],doStart->[doStart]]]
Initialises the MulePipeline.
Why was this change required?
@@ -236,6 +236,14 @@ public abstract class AbstractInboundFileSynchronizer<F> F[] files = session.list(remoteDirectory); if (!ObjectUtils.isEmpty(files)) { List<F> filteredFiles = filterFiles(files); + if (maxFetchSize >= 0 && filteredFiles.size() > maxFetchSize) { + rollbackFromFileToListEnd(filteredFiles, filteredFiles.get(maxFetchSize)); + List<F> newList = new ArrayList<>(maxFetchSize); + for (int i = 0; i < maxFetchSize; i++) { + newList.add(filteredFiles.get(0)); + } + filteredFiles = newList; + } for (F file : filteredFiles) { try { if (file != null) {
[AbstractInboundFileSynchronizer->[filterFiles->[filterFiles],copyFileToLocalDirectory->[close],close->[close],synchronizeToLocalDirectory->[doInSession->[filterFiles]]]]
Synchronize remote files to local directory.
`get(i)`? Otherwise we deal only with the first item.
@@ -741,6 +741,10 @@ export class AmpAdNetworkDoubleclickImpl extends AmpA4A { return { PAGEVIEWID: () => Services.documentInfoForDoc(this.element).pageViewId, HREF: () => this.win.location.href, + TGT: () => { + const json = tryParseJson(this.element.getAttribute('data-json')); + return json ? json['targeting'] : ''; + }, ATTR: name => { if (!whitelist[name.toLowerCase()]) { dev().warn('TAG', `Invalid attribute ${name}`);
[AmpAdNetworkDoubleclickImpl->[postTroubleshootMessage->[dev,dict,stringify,now,userAgent],extractSize->[height,extractAmpAnalyticsConfig,get,Number,setGoogleLifecycleVarsFromHeaders,width],getBlockParameters_->[serializeTargeting_,dev,user,isInManualExperiment,Number,assign,join,googleBlockParameters,getMultiSizeDimensions,map],constructor->[resolver,experimentFeatureEnabled,extensionsFor,rejector,getMode,promise,SRA],tearDownSlot->[promise,rejector,removeElement,resolver],shouldPreferentialRenderWithoutCrypto->[dev,experimentFeatureEnabled,CANONICAL_HLDBK_EXP,isCdnProxy],connectFluidMessagingChannel->[dev,stringify,dict],initLifecycleReporter->[googleLifecycleReporterFactory],rewriteRtcKeys_->[keys],onCreativeRender->[customElementExtensions,height,dev,user,addCsiSignalsToAmpAnalyticsConfig,getRefreshManager,installAnchorClickInterceptor,insertAnalyticsElement,getEnclosingContainerTypes,isReportingEnabled,setStyles,width],getCustomRealTimeConfigMacros_->[toLowerCase,documentInfoForDoc,dev],generateAdKey_->[getAttribute,domFingerprintPlain,stringHash32],buildCallback->[getExperimentBranch,getIdentityToken,dev,getVisibilityState,PAUSED,addExperimentIdToElement,onVisibilityChanged,viewerForDoc,EXPERIMENT,randomlySelectUnsetExperiments],receiveMessageForFluid_->[parseInt],populateAdUrlState->[tryParseJson,Number],registerListenerForFluid_->[keys],getSlotSize->[Number],layoutCallback->[is3pThrottled,registerFluidAndExec,timerFor,throttleFn],onFluidResize_->[dev,stringify,dict],getLocationQueryParameterValue->[parseQueryString],fireDelayedImpressions->[split,dev,dict,isSecureUrl,createElementWithAttributes],getA4aAnalyticsConfig->[getCsiAmpAnalyticsConfig],isLayoutSupported->[FLUID,isLayoutSizeDefined],getAdUrl->[resolve,timerFor,all,googleAdUrl,dev,now,assign],groupSlotsForSra->[groupAmpAdsByType],maybeRemoveListenerForFluid->[keys],onNetworkFailure->[dev,maybeAppendErrorParameter],idleRenderOutsideViewport->[isNaN,parseInt],getPreconnectUrls->[push],delayAdRequestEnab
led->[experimentFeatureEnabled,DELAYED_REQUEST],getA4aAnalyticsVars->[getCsiAmpAnalyticsVariables],unlayoutCallback->[dev],mergeRtcResponses_->[TIMEOUT,error,RTC_SUCCESS,NETWORK_FAILURE,callout,deepMerge,keys,MALFORMED_JSON_RESPONSE,response,RTC_FAILURE,join,forEach,rtcTime,push],getAdditionalContextMetadata->[dict,stringify],initiateSraRequests->[all,dev,shift,lineDelimitedStreamer,attemptCollapse,SAFEFRAME,map,metaJsonCreativeGrouper,hasAdPromise,resetAdUrl,element,length,isCancellation,sraResponseRejector,keys,xhrFor,checkStillCurrent,assignAdUrlToError,sraResponseResolver,constructSRARequest_,forEach,utf8Encode],getNonAmpCreativeRenderingMethod->[SAFEFRAME]],origin,dev,isInManualExperiment,join,encodeURIComponent,map,isArray,googlePageParameters,registerElement,initialSize_,tryParseJson,getAttribute,extension,truncAndTimeUrl,instance,adKey_,buildIdentityParams_,getData,constructSRABlockParameters,now,assign,connectionEstablished,element,length,serializeItem_,push,split,serializeTargeting_,keys,getFirstInstanceValue_,jsonTargeting_,extractFn,forEach,combiner]
Get the custom real - time config macros for the element.
TGT: () => (tryParseJson(this.element.getAttribute('data-json')) || {})['targeting'] || '',
@@ -249,6 +249,17 @@ class TestPrivacy(TestCase): assert pq(r.content)('.meta .view-stats').length == 0, ( 'Only add-on authors can view stats') + def test_contributer(self): + with override_settings(COLLECTION_FEATURED_THEMES_ID=self.c.id): + self.c.listed = False + self.c.save() + self.assertLoginRedirects(self.client.get(self.url), self.url) + user = UserProfile.objects.get(email='fligtar@gmail.com') + self.grant_permission(user, 'Collections:Contribute') + self.client.login(email='fligtar@gmail.com') + response = self.client.get(self.url) + assert response.status_code == 200 + class TestVotes(TestCase): fixtures = ['users/test_backends']
[TestCollectionViewSetPatch->[test_different_account->[get_url,send],test_admin_patch->[check_data,get_url,send],test_basic_patch->[check_data,send]],TestCollectionAddonViewSetDetail->[test_with_slug->[test_basic],test_deleted->[test_basic]],TestCollectionAddonViewSetPatch->[test_basic->[check_response,send],test_deleted->[test_basic],test_cant_change_addon->[check_response,send]],TestCollectionAddonViewSetList->[test_sorting->[check_result_order],test_with_deleted_or_with_hidden->[send]],TestCollectionAddonViewSetDelete->[test_basic->[check_response,send],test_deleted->[test_basic]],TestCollectionViewSetDelete->[test_different_account_fails->[get_url],test_admin_delete->[get_url],setUp->[get_url]],TestCollectionAddonViewSetCreate->[test_with_slug->[check_response,send],test_fail_when_no_addon->[send],test_basic->[check_response,send],test_fail_when_not_public_addon->[send],test_uniqueness_message->[send],test_fail_when_invalid_addon->[send],test_add_with_comments->[check_response,send]],TestCollectionViewSetDetail->[test_not_listed_admin->[_get_url]],TestViews->[test_collection_directory_redirects->[check_response],test_legacy_redirects->[check_response],test_collection_directory_redirects_with_login->[check_response],test_legacy_redirects_edit->[check_response]],CollectionViewSetDataMixin->[test_no_auth->[send],test_slug_unique->[send],test_update_name_invalid->[send],test_slug_valid->[send],test_biography_no_links->[send]],CollectionAddonViewSetMixin->[test_basic->[check_response,send],test_not_listed_not_logged_in->[send],test_not_listed_different_user->[send],test_not_listed_self->[check_response,send],test_not_listed_admin->[check_response,send]],TestCollectionViewSetCreate->[test_create_numeric_slug->[send],test_admin_create_fails->[get_url,send],test_different_account->[get_url,send],test_create_minimal->[send],test_create_cant_set_readonly->[send],test_basic_create->[check_data,send]],TestCRUD->[test_edit_addons_get->[create_collection],test_edit_descriptio
n->[create_collection],test_delete->[create_collection],test_no_changing_owners->[create_collection,login_regular,login_admin],test_fix_slug->[create_collection],test_edit_no_description->[create_collection],test_forbidden_edit->[create_collection,login_regular],test_acl_collections_edit->[login_regular],test_edit_addons_post->[create_collection],test_edit_spaces->[create_collection],test_edit_post->[create_collection],test_edit->[create_collection],test_no_xss_in_delete_confirm_page->[create_collection],test_no_xss_in_edit_page->[create_collection]],TestVotes->[test_downvote->[check],test_up_then_up->[check],test_down_then_up->[check],test_upvote->[check]],TestChangeAddon->[test_remove_success->[check_redirect],test_add_existing->[check_redirect],test_remove_nonexistent->[check_redirect],test_add_success->[check_redirect]]]
Tests privacy of collection pages.
It'd be much more realistic if you could login the user already for the first `client.get()` call and only after you granted the permission try again, with the same user then.
@@ -24,11 +24,16 @@ from olympia.lib.remote_settings import RemoteSettings from olympia.zadmin.models import get_config, set_config +STATSD_PREFIX = 'blocklist.cron.upload_mlbf_to_remote_settings.' + + @freeze_time('2020-01-01 12:34:56') @override_switch('blocklist_mlbf_submit', active=True) class TestUploadToRemoteSettings(TestCase): def setUp(self): - addon_factory() + addon = addon_factory() + version_factory(addon=addon) + version_factory(addon=addon) self.block = Block.objects.create( addon=addon_factory( version_kw={'version': '1.2b3'},
[test_auto_import_blocklist_waffle->[assert_not_called,assert_called_with,CommandError,auto_import_blocklist,patch,assert_called,override_switch],TestUploadToRemoteSettings->[test_stash_because_previous_mlbf->[upload_mlbf_to_remote_settings,dump,int,assert_not_called,assert_called_with,datetime,join,set_config,open,get_config],test_stash_because_many_mlbf->[upload_mlbf_to_remote_settings,dump,int,assert_not_called,assert_called_with,datetime,join,set_config,open,get_config],test_waffle_off_disables_publishing->[upload_mlbf_to_remote_settings,assert_called,assert_not_called,get_config],test_force_base_option->[dump,upload_mlbf_to_remote_settings,assert_not_called,timestamp,update,str,assert_called_once,now,join,exists,set_config,assert_called,open,get_config],test_reset_base_because_over_reset_threshold->[upload_mlbf_to_remote_settings,dump,int,assert_not_called,assert_called_with,datetime,str,assert_called_once,join,exists,set_config,open,get_config],setUp->[create,start,object,addon_factory,user_factory,addCleanup],test_no_block_changes->[,create,assert_not_called,timedelta,join,int,set_config,get_blocklist_last_modified_time,tick,user_factory,get_config,upload_mlbf_to_remote_settings,frozen_time,datetime,update,assert_called_once,open,delete,str,addon_factory,dump],test_no_previous_mlbf->[upload_mlbf_to_remote_settings,int,assert_not_called,assert_called_with,datetime,str,assert_called_once,getsize,join,exists,get_config],object,override_switch,freeze_time],override_switch,patch,freeze_time]
Sets up the test fixtures and mock objects for the blocklist upload tests.
Is this meant to be called twice?
@@ -2114,7 +2114,7 @@ class Form } // Units if (! empty($conf->global->PRODUCT_USE_UNITS)) { - $sql .= ', u.label as unit_long, u.short_label as unit_short'; + $sql .= ", u.label as unit_long, u.short_label as unit_short, p.weight, p.weight_units, p.length, p.length_units, p.width, p.width_units, p.height, p.height_units, p.surface, p.surface_units, p.volume, p.volume_units"; $selectFields .= ', unit_long, unit_short'; }
[Form->[form_users->[select_dolusers],textwithpicto->[textwithtooltip],select_dolusers_forevent->[select_dolusers],formSelectAccount->[select_comptes],formInputReason->[selectInputReason,loadCacheInputReason],form_availability->[load_cache_availability,selectAvailabilityDelay],showFilterAndCheckAddButtons->[showFilterButtons,showCheckAddButtons],select_types_paiements->[load_cache_types_paiements],form_contacts->[select_contacts],select_type_fees->[load_cache_types_fees],showCategories->[select_all_categories,multiselectarray],select_conditions_paiements->[load_cache_conditions_paiements],form_thirdparty->[select_company],form_conditions_reglement->[load_cache_conditions_paiements,select_conditions_paiements],form_remise_dispo->[select_remises],selectAvailabilityDelay->[load_cache_availability],selectInputReason->[loadCacheInputReason],formSelectShippingMethod->[selectShippingMethod],load_tva->[load_cache_vatrates],form_modes_reglement->[load_cache_types_paiements,select_types_paiements]]]
Builds the SQL to select the product list, optionally including unit and dimension fields, filtered by the given search criteria (including supplier reference).
Also add new SELECT field to $selectFields else you get groupby sql error
@@ -1484,13 +1484,13 @@ RtpsUdpDataLink::RtpsReader::process_data_i(const RTPS::DataSubmessage& data, } else if (writer->recvd_.contains(seq)) { if (Transport_debug_level > 5) { - GuidConverter writer(src); - GuidConverter reader(id_); + GuidConverter wc(src); + GuidConverter rc(id_); ACE_DEBUG((LM_DEBUG, ACE_TEXT("(%P|%t) RtpsUdpDataLink::process_data_i(DataSubmessage) -") ACE_TEXT(" data seq: %q from %C being DROPPED from %C because it's ALREADY received\n"), seq.getValue(), - OPENDDS_STRING(writer).c_str(), - OPENDDS_STRING(reader).c_str())); + OPENDDS_STRING(wc).c_str(), + OPENDDS_STRING(rc).c_str())); } link->receive_strategy()->withhold_data_from(id_);
[No CFG could be retrieved]
Processes an incoming DataSubmessage for a reader, dropping data that has already been received.
Could use `LogGuid`
@@ -37,7 +37,7 @@ class MainController < ApplicationController refresh_timeout current_user.set_api_key # set api key in DB for user if not yet set # redirect to last visited page or to main page - redirect_to( uri || { :action => 'index' } ) + redirect_to( uri || { action: 'index' } ) return else @login_error = flash[:error][0]
[MainController->[reset_api_key->[reset_api_key]]]
Checks whether the user has an API key, sets one in the DB if not yet set, then redirects to the last visited page or the main page.
Space inside parentheses detected.
@@ -1298,7 +1298,7 @@ class URL implements Serializable { } public static String buildKey(String path, String group, String version) { - return BaseServiceMetadata.buildServiceKey(path, group, version); + return BaseServiceMetadata.buildServiceKey(path, group, version).intern(); } public String getProtocolServiceKey() {
[URL->[getUrlParameter->[getParameterAndDecoded,valueOf],getMethodPositiveParameter->[getMethodParameter],getIp->[getIp],hasMethodParameter->[getMethodParameterStrict,hasMethodParameter],toMap->[getParameters,getUsername,getPort,getProtocol,getHost,getPath,getPassword,valueOf],getBooleanParameter->[getParameter],isLocalHost->[getHost,isLocalHost,getParameter],toInetSocketAddress->[getHost,getPort],getBackupAddress->[getBackupAddress,appendDefaultPort,getAddress],getPositiveParameter->[getParameter],setServiceInterface->[addParameter],getPathKey->[getPath],getCategory->[getCategory,getParameter],getParameterAndDecoded->[getParameterAndDecoded,decode],setPath->[setPath],buildParameters->[getParameters],setPort->[setPort],append->[append,getParameter],toServiceString->[buildString],getServiceParameter->[getServiceParameter,getParameter],hasServiceMethodParameter->[hasMethodParameter],addParametersIfAbsent->[addParametersIfAbsent],toJavaURL->[toString,URL],getMethodParameterAndDecoded->[decode],getServiceMethodNumbers->[getMethodNumbers],getPassword->[getPassword],encode->[encode],getServiceMethodParameter->[getCachedNumber,updateCachedNumber,getServiceMethodParameter,getMethodParameter],toConfiguration->[getParameters],getProtocolServiceKey->[getServiceKey,getProtocol],getVersion->[getVersion,getParameter],getRawParameter->[getParameter,getUsername,getPort,getProtocol,getHost,getPath,getPassword,valueOf],getServiceMethodParameterAndDecoded->[decode],getApplication->[getApplication,getParameter],hasParameter->[getParameter],buildString->[buildString,buildParameters,getUsername,getPort,getProtocol,getHost,getIp,toString,getPath,getPassword],getRemoteApplication->[getParameter],getPort->[getPort],toParameterString->[toString,toParameterString],getGroup->[getGroup,getParameter],removeParameters->[removeParameters],addParameterIfAbsent->[addParameterIfAbsent],returnURL->[newURL],getUsername->[getUsername],getPositiveIntParameter->[getPositiveParameter],getMethodBooleanParam
eter->[getMethodParameter],addParameters->[addParameters],clearParameters->[clearParameters],setProtocol->[setProtocol],getMethodPositiveIntParameter->[getMethodPositiveParameter],isAnyHost->[getHost,getParameter],setAddress->[setAddress,getPort],getMethodParameter->[getServiceMethodParameter,updateCachedNumber,getCachedNumber,getMethodParameter],getPath->[getPath],addParameter->[addParameter,valueOf],getProtocol->[getProtocol],getHost->[getHost],getServiceInterface->[getPath,getParameter],getIntParameter->[getParameter],getParameter->[getParameter],getServiceMethodParameterStrict->[getMethodParameterStrict],addParameterString->[addParameters],addParameterAndEncoded->[encode],valueOf->[valueOf],equals->[getUrlParam,getUrlAddress,equals],getAddress->[getAddress],getBackupUrls->[setAddress],getSide->[getParameter,getSide],setUsername->[setUsername],decode->[decode],getParameters->[getParameters],hasServiceParameter->[getServiceParameter],getConcatenatedParameter->[getParameter],toServiceStringWithoutResolving->[buildString],getServiceParameters->[getParameters],getColonSeparatedKey->[toString],getAnyMethodParameter->[getAnyMethodParameter],setHost->[setHost],getAbsolutePath->[getPath],getMethodIntParameter->[getMethodParameter],getMethodParameterStrict->[getMethodParameterStrict],getPositiveServiceParameter->[getServiceParameter],getServiceNumbers->[getNumbers],setPassword->[setPassword],getAuthority->[getUsername,getPassword]]]
Build a unique service key.
use intern() in BaseServiceMetadata.buildServiceKey?
@@ -24,6 +24,9 @@ @@TimeFreqLSTMCell @@GridLSTMCell +### ESN cells +@@ESNCell + ### RNNCell wrappers @@AttentionCellWrapper """
[No CFG could be retrieved]
A base package for the RNNCells.
### Echo State Networks
@@ -93,11 +93,16 @@ class OptionsBootstrapper: return bootstrap_options @classmethod - def create(cls, env: Mapping[str, str], args: Sequence[str]) -> "OptionsBootstrapper": + def create( + cls, env: Mapping[str, str], args: Sequence[str], *, allow_pantsrc: bool = False + ) -> "OptionsBootstrapper": """Parses the minimum amount of configuration necessary to create an OptionsBootstrapper. :param env: An environment dictionary, or None to use `os.environ`. :param args: An args array, or None to use `sys.argv`. + :param allow_pantsrc: True to allow pantsrc files to be used. This defaults to false because + of the prevelance of OptionsBootstrapper usage in tests, but should be enabled for + production usecases. """ env = {k: v for k, v in env.items() if k.startswith("PANTS_")} args = tuple(args)
[OptionsBootstrapper->[get_full_options->[_full_options],bootstrap_options->[parse_bootstrap_options],create->[filecontent_for->[FileContent],filecontent_for,get_config_file_paths,parse_bootstrap_options],_full_options->[get_bootstrap_options,create]]]
Parses the minimum amount of configuration necessary to create an OptionsBootstrapper. Load a from the config files and return a new Config object.
It's possible that this arg should not have a default, to make it harder to accidentally leave off?
@@ -28,7 +28,11 @@ module PhoneConfirmation end def matches_code?(candidate_code) - Devise.secure_compare(candidate_code, code) + return Devise.secure_compare(candidate_code, code) if code.nil? || candidate_code.nil? + + crockford_candidate_code = Base32::Crockford.normalize(candidate_code) + crockford_code = Base32::Crockford.normalize(code) + Devise.secure_compare(crockford_candidate_code, crockford_code) end def expired?
[ConfirmationSession->[matches_code?->[secure_compare],expired?->[now],to_h->[to_i],from_h->[at,new,to_sym],start->[call,now,new],regenerate_otp->[call,now,new],initialize->[to_sym],attr_reader]]
Checks if the code matches the candidate code.
how about just a to_s on these and drop the line above?
@@ -181,6 +181,11 @@ public class HiveDatasetFinder implements IterableDatasetFinder<HiveDataset> { throw new RuntimeException(e); } this.jobConfig = ConfigUtils.propertiesToConfig(properties); + + String rejectedFormatSerDe = properties.getProperty(REJECTED_DATA_FORMAT_KEY, StringUtils.EMPTY); + Optional<HiveSerDeWrapper.BuiltInHiveSerDe> serDe = + Enums.getIfPresent(HiveSerDeWrapper.BuiltInHiveSerDe.class, rejectedFormatSerDe.toUpperCase()); + this.rejectFormat = serDe.isPresent() ? serDe.get() : null; } protected static HiveMetastoreClientPool createClientPool(Properties properties) throws IOException {
[HiveDatasetFinder->[getDatasetConfig->[apply],createHiveDataset->[createHiveDataset]]]
Creates a client pool from the given properties.
Why not support a list of rejected data formats? Also, how about extracting out the SerDe class name as a string when an enum match is found, but if one is not found then use the string as provoided? This will allow SerDes not in BuiltInHiveSerDe to be supported.
@@ -215,7 +215,7 @@ public class KeycloakDevServicesProcessor { private StartResult startContainer() { if (!capturedDevServicesConfiguration.enabled) { // explicitly disabled - LOG.error("Not starting devservices for Keycloak as it has been disabled in the config"); + LOG.debug("Not starting devservices for Keycloak as it has been disabled in the config"); return null; } if (!isOidcTenantEnabled()) {
[KeycloakDevServicesProcessor->[FixedPortOidcContainer->[configure->[configure]],startContainer->[close->[close]],createRealm->[close]]]
Starts a new container using the configuration.
It was `debug` in my original copy of a similar code but I changed it to `error` while I was looking at some PR problem and forgot to change it back
@@ -587,6 +587,18 @@ func (b *APIBackend) GetSuperCommittees() (*quorum.Transition, error) { quorum.NewRegistry(stakedSlotsThen), quorum.NewRegistry(stakedSlotsNow) + then.MedianStake, then.SlotPurchases = + committee.ComputeMedianRawStakeForShardCommittee( + b.hmy.BlockChain(), + prevCommittee.StakedValidators().Addrs, + ) + + now.MedianStake, now.SlotPurchases = + committee.ComputeMedianRawStakeForShardCommittee( + b.hmy.BlockChain(), + nowCommittee.StakedValidators().Addrs, + ) + for _, comm := range prevCommittee.Shards { decider := quorum.NewDecider(quorum.SuperMajorityStake, comm.ShardID) if _, err := decider.SetVoters(&comm, prevCommittee.Epoch); err != nil {
[GetElectedValidatorAddresses->[CurrentBlock],SubscribeRemovedLogsEvent->[SubscribeRemovedLogsEvent],GetLastCrossLinks->[CurrentBlock],SubscribeChainSideEvent->[SubscribeChainSideEvent],GetBalance->[GetBalance,StateAndHeaderByNumber],NetVersion->[NetVersion],SubscribeLogsEvent->[SubscribeLogsEvent],GetTransactionsHistory->[GetTransactionsHistory],GetValidatorInformation->[CurrentBlock],GetTransactionsCount->[GetTransactionsCount],SubscribeChainEvent->[SubscribeChainEvent],GetStakingTransactionsCount->[GetStakingTransactionsCount],SubscribeNewTxsEvent->[SubscribeNewTxsEvent],GetAccountNonce->[StateAndHeaderByNumber],GetValidators->[GetShardID],GetNodeMetadata->[CurrentBlock,ChainConfig,GetShardID,IsLeader],SubscribeChainHeadEvent->[SubscribeChainHeadEvent],GetStakingTransactionsHistory->[GetStakingTransactionsHistory]]
GetSuperCommittees returns the next and previous committee transitions.
Actually no need to be this complicated. Just add a field in the final result: { "is-harmony-slot": false, "earning-account": "one1x8kgyyqgslzrm7jrlj4m7rpg285etdywjk7gez", "bls-public-key": "5e8635085788492a87d3eab69732ae72a55087e95c210ad20a726dd169a69b5792cb94616f6eccc4ea0a54a4a77ca88c", "voting-power-unnormalized": "0.026785021562679984", "voting-power-%": "0.008571206900057595", "effective-stake": "5920773850000000000000000.000000000000000000" "raw-stake": NEW_FIELD, }, The raw-stake is just validatorSnap.TotalStake / size(validatorSnap.BLSKeys). No need to add new func like: ComputeMedianRawStakeForShardCommittee. Just loop through all the validators in the decider result and for each validator address, fetch the snapshot and compute the raw-stake and put it in the result.
@@ -118,6 +118,8 @@ class Phist(CMakePackage): % ('ON' if '+trilinos' in spec else 'OFF'), '-DPHIST_USE_PRECON_TPLS:BOOL=%s' % ('ON' if '+trilinos' in spec else 'OFF'), + '-DXSDK_BUILD_Fortran:BOOL=%s' + % ('ON' if '+fortran' in spec else 'OFF'), ] return args
[Phist->[test_install->[working_dir,make],check->[working_dir,make],cmake_args->[],depends_on,version,on_package_attributes,variant,run_after]]
Return a list of arguments for the Cmake command.
Will a `@:1.6.99` version of `phist` just ignore this argument? Or do you need to guard it with an if statement?
@@ -58,7 +58,10 @@ int tls13_enc(SSL *s, SSL3_RECORD *recs, size_t n_recs, int sending) if (s->early_data_state == SSL_EARLY_DATA_WRITING || s->early_data_state == SSL_EARLY_DATA_WRITE_RETRY) { - alg_enc = s->session->cipher->algorithm_enc; + if (s->session != NULL && s->session->ext.max_early_data > 0) + alg_enc = s->session->cipher->algorithm_enc; + else + alg_enc = s->psksession->cipher->algorithm_enc; } else { /* * To get here we must have selected a ciphersuite - otherwise ctx would
[tls13_enc->[EVP_CipherUpdate,EVP_CIPHER_CTX_iv_length,memcpy,RECORD_LAYER_get_read_sequence,EVP_CipherInit_ex,ossl_assert,EVP_CipherFinal_ex,memmove,RECORD_LAYER_get_write_sequence,EVP_CIPHER_CTX_ctrl]]
Encrypts or decrypts a TLS 1.3 record, handling the authentication tag; returns -1 if the tag is not present.
We ossl_assert that the psksession is valid if the "normal" session is not, in early_data_count_ok(), but not here; is there a qualitative difference between the two?
@@ -12,8 +12,11 @@ def send_password_reset_email(context, recipient): reset_url = build_absolute_uri( reverse( 'account:reset-password-confirm', - kwargs={'uidb64': context['uid'], 'token': context['token']})) - context['reset_url'] = reset_url + kwargs={ + 'uidb64': context['uid'], + 'token': context['token']})) + context = get_email_base_context() + context.update({'reset_url': reset_url}) send_templated_mail( template_name='account/password_reset', from_email=settings.DEFAULT_FROM_EMAIL,
[send_password_reset_email->[build_absolute_uri,send_templated_mail,reverse],send_account_delete_confirmation_email->[get_current,send_templated_mail,reverse,build_absolute_uri]]
Sends a password reset email to the specified recipient.
This update is unnecessary as we can modify the dict directly.
@@ -66,6 +66,9 @@ static bool softap_config_equal(const softap_config& lhs, const softap_config& r if(lhs.ssid_hidden != rhs.ssid_hidden) { return false; } + if(lhs.max_connection != rhs.max_connection) { + return false; + } return true; }
[bool->[strcmp],softAPgetStationNum->[wifi_softap_get_station_num],softAPConfig->[wifi_softap_set_dhcps_lease_time,wifi_softap_dhcps_start,wifi_softap_dhcps_stop,wifi_softap_set_dhcps_lease,wifi_softap_set_dhcps_offer_option,enableAP,toString,DEBUG_WIFI,wifi_set_ip_info,wifi_get_ip_info],softAPdisconnect->[ETS_UART_INTR_ENABLE,ETS_UART_INTR_DISABLE,enableAP,wifi_softap_set_config_current,wifi_softap_set_config,DEBUG_WIFI],softAPmacAddress->[String,sprintf,wifi_get_macaddr],softAPIP->[wifi_get_ip_info,IPAddress],softAP->[ETS_UART_INTR_ENABLE,wifi_get_ip_info,softAPConfig,wifi_softap_get_config,softap_config_equal,strlen,ETS_UART_INTR_DISABLE,wifi_softap_dhcps_start,strcpy,enableAP,wifi_softap_set_config_current,wifi_softap_set_config,DEBUG_WIFI,wifi_softap_dhcps_status]]
Returns true if the two softap_configs are equal.
Please check indentation of this line
@@ -61,10 +61,10 @@ public class RandomAccessDataTest { CoderProperties.coderSerializable(RandomAccessDataCoder.of()); CoderProperties.structuralValueConsistentWithEquals( RandomAccessDataCoder.of(), streamA, streamB); - assertTrue(RandomAccessDataCoder.of().isRegisterByteSizeObserverCheap(streamA, Context.NESTED)); - assertTrue(RandomAccessDataCoder.of().isRegisterByteSizeObserverCheap(streamA, Context.OUTER)); - assertEquals(4, RandomAccessDataCoder.of().getEncodedElementByteSize(streamA, Context.NESTED)); - assertEquals(3, RandomAccessDataCoder.of().getEncodedElementByteSize(streamA, Context.OUTER)); + assertTrue(RandomAccessDataCoder.of().isRegisterByteSizeObserverCheap(streamA)); + assertTrue(RandomAccessDataCoder.of().isRegisterByteSizeObserverCheap(streamA)); + assertEquals(4, RandomAccessDataCoder.of().getEncodedElementByteSize(streamA)); + assertEquals(3, RandomAccessDataCoder.of().getEncodedElementByteSize(streamA)); } @Test
[RandomAccessDataTest->[testReadFrom->[size,copyOf,assertArrayEquals,RandomAccessData,readFrom,close,ByteArrayInputStream,array],testThatRandomAccessDataGrowsWhenResettingToPositionBeyondEnd->[assertArrayEquals,resetTo,RandomAccessData,array],testThatRandomAccessDataGrowsWhenReading->[copyOf,assertArrayEquals,RandomAccessData,readFrom,ByteArrayInputStream,array],testLexicographicalComparator->[assertTrue,write,commonPrefixLength,compare,RandomAccessData,assertEquals],testIncrement->[RandomAccessData,increment,assertEquals,assertSame],testEqualsAndHashCode->[write,assertNotEquals,RandomAccessData,assertEquals,hashCode],testResetTo->[resetTo,write,size,copyOf,assertArrayEquals,RandomAccessData,assertEquals,array],testCoder->[coderDeterministic,assertTrue,write,of,coderConsistentWithEquals,coderSerializable,RandomAccessData,structuralValueConsistentWithEquals,getEncodedElementByteSize,isRegisterByteSizeObserverCheap,coderDecodeEncodeEqual,assertEquals],testCoderWithPositiveInfinityIsError->[expectMessage,encode,expect,ByteArrayOutputStream],testWriteTo->[write,toByteArray,assertArrayEquals,RandomAccessData,ByteArrayOutputStream,close,writeTo],testAsInputStream->[write,read,RandomAccessData,close,asInputStream,assertEquals],none]]
Test if the Coder is consistent with the data.
Remove one of the assertTrue since its a duplicate Remove assertEquals(3, ...) since it will never pass.
@@ -98,6 +98,8 @@ public interface KsqlExecutionContext { KsqlConfig ksqlConfig, Map<String, Object> overriddenProperties); + List<PersistentQueryMetadata> getPersistentQueries(); + /** * Holds the union of possible results from an {@link #execute} call. *
[ExecuteResult->[of->[ExecuteResult,empty,of],requireNonNull]]
Execute a statement with a sequence of KSQL statements.
Couple of points on this one: - nit: how about some java docs, given every other method in this interface has them? - nit: would you mind moving this 'persistentQuery' related method next to the other persistent query methods, eg. next to `getPersistentQuery`. - suggestion: to keep the interface succinct, if we're adding `getPersistentQueries()` then can we remove `numberOfPersistentQueries` and just have client code call `getPersistentQueries().size()`.
@@ -1006,6 +1006,10 @@ daos_event_init(struct daos_event *ev, daos_handle_t eqh, evx->evx_sched = &eqx->eqx_sched; daos_eq_putref(eqx); } else { + if (daos_sched_g.ds_udata == NULL) { + D_ERROR("Event queue is not initialized\n"); + return -DER_EQ_UNINIT; + } evx->evx_ctx = daos_eq_ctx; evx->evx_sched = &daos_sched_g; }
[No CFG could be retrieved]
Initializes a DAOS event and adds it to the parent event list; on teardown, events are unlinked from the parent list, child list, and event queue hash list, then destroyed.
At a high level, the segmentation fault is caused due to the DAOS client library is not initialized. In other words, `daos_init()` must be called before calling `daos_obj_open().` Having said that, I suggest changing the message to something like `D_ERROR("The DAOS client library is not initialized"DF_RC"\n", DP_RC(-DER_EQ_UNINIT));`
@@ -299,6 +299,14 @@ class Listing { ]) const listings = [] searchResponse.hits.hits.forEach(hit => { + console.log( + `${hit._score.toFixed(3)}\t${hit._source.scoreMultiplier.toFixed(1)}\t${ + hit._id + }\t${hit._source.title} ${(hit._source.price || {}).amount} ${ + (hit._source.price || {}).currency.id + }` + ) + // console.log(hit) listings.push({ id: hit._id, title: hit._source.title,
[No CFG could be retrieved]
Get a list of all cluster items in the system.
Remove this debug statement or replace with logger.debug
@@ -954,6 +954,7 @@ class RepoResolveDependencies(JSONController): # Scope: Actions # POST: Resolve and return dependencies for one or more units + @auth_required(EXECUTE) def POST(self, repo_id): # Params params = self.params()
[RepoResource->[GET->[_merge_related_objects]],RepoSearch->[GET->[_process_repos],POST->[_process_repos]],RepoCollection->[GET->[_process_repos],_process_repos->[_merge_related_objects]]]
Resolve dependencies for a given repository.
Hmmm, since this one is finding dependencies for units, should it be a READ instead of an EXECUTE?
@@ -98,7 +98,6 @@ def test_plot_epochs_colors(epochs, browse_backend): def test_plot_epochs_scale_bar(epochs, browse_backend): """Test scale bar for epochs.""" fig = epochs.plot() - fig._fake_keypress('s') # default is to not show scalebars ax = fig.mne.ax_main # only empty vline-text, mag & grad in this instance assert len(ax.texts) == 3
[test_plot_psdtopo_nirs->[plot_psd_topomap,len],test_plot_psd_epochs_ctf->[plot_psd_topomap,raises,drop_channels,Epochs,plot_psd,make_fixed_length_events,warns],test_plot_epochs_scale_bar->[plot,get_text,len,tuple,_fake_keypress],test_plot_epochs_image->[plot_image,dict,get_fignums,subplots,min,arange,len,close,raises,figure,warns],test_plot_epochs_scalings->[plot],test_plot_overlapping_epochs_with_events->[plot,column_stack,EpochsArray,len,zeros,create_info,get_segments],test_plot_epochs_keypresses->[_fake_keypress,drop_bad,plot,dict,len,traces,_fake_click],test_plot_epochs_colors->[raises,range,plot,len],test_plot_epochs_basic->[get_xticklabels,plot,_close_all,get_text,len,epochs,readouterr,_fake_keypress,raises,warns],test_plot_epochs_ctf->[_fake_scroll,plot,_resize_by_factor,pick_channels,_close_all,_fake_keypress,Epochs,make_fixed_length_events],test_plot_drop_log->[raises,plot_drop_log,drop_bad,close],test_plot_psd_epochs->[plot_psd_topomap,all,get_data,len,close,raises,plot_psd,warns],test_epochs_plot_sensors->[plot_sensors],test_plot_epochs_not_preloaded->[plot],test_plot_epochs_clicks->[_fake_scroll,plot,get_yticklabels,len,_click_ch_name,readouterr,_fake_keypress,_fake_click,_close_event,traces],test_plot_epochs_nodata->[plot,EpochsArray,RandomState,raises,create_info],dict,parametrize]
Test scale bar for epochs.
default is True now, so we don't need the keypress anymore.
@@ -206,7 +206,7 @@ def main(): continue for device, dev_arg in device_args.items(): - print('Test case #{}/{}:'.format(test_case_index, device), + print('\nTest case #{}/{}:'.format(test_case_index, device), ' '.join(shlex.quote(str(arg)) for arg in dev_arg + case_args)) print(flush=True) try:
[main->[option_to_args->[resolve_arg],collect_result,option_to_args,temp_dir_as_path,prepare_models,parse_args],parse_args->[parse_args],main]
Entry point for the missing - nag - sequence command. Check if a missing key is found in the model_info. Exit if there is no n - ary case in the device.
Why do you have that change?