patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -69,7 +69,7 @@ namespace ProtoCore.Utils // // 2.3 Otherwise, classScope == kInvalidIndex && functionScope == kInvalidIndex // Return public member in derived class, or public member in base classes - public static int GetSymbolIndex(ClassNode classNode, string name, int classScope, int functionScope, int blockId, List<CodeBlock> codeblockList, out bool hasThisSymbol, out ProtoCore.DSASM.AddressType addressType) + public static int GetSymbolIndex(ClassNode classNode, string name, int classScope, int functionScope, int blockId, SortedDictionary<int, CodeBlock> codeblocks, out bool hasThisSymbol, out ProtoCore.DSASM.AddressType addressType) { hasThisSymbol = false; addressType = ProtoCore.DSASM.AddressType.Invalid;
[ClassUtils->[GetUpcastCountTo->[GetClassUpcastChain]]]
Get the symbol index for a given name. This function is used to find a free variable in the hierarchy tree.
@pinzart I'm sorry I missed this - you can't change this public signature - please follow the same pattern of obsoleting this and adding an overload.
@@ -54,6 +54,9 @@ public class WaitTest extends BaseTest { @Test public void testWaitInWorker() { + // Call a task in advance to warm up the cluster to avoid too slow to start workers. + warmUpCluster(); + RayObject<Object> res = Ray.call(WaitTest::waitInWorker); res.get(); }
[WaitTest->[waitInWorker->[testWait],testWaitInDriver->[testWait]]]
Test wait in worker.
we should put this in `testWait`. `testWaitInDriver` needs this as well.
@@ -64,7 +64,7 @@ public class FloatColumnSerializer implements GenericColumnSerializer { writer = CompressionFactory.getFloatSerializer( ioPeon, - String.format("%s.float_column", filenameBase), + StringUtils.safeFormat("%s.float_column", filenameBase), byteOrder, compression );
[FloatColumnSerializer->[writeToChannel->[writeToChannel],create->[FloatColumnSerializer],getSerializedSize->[getSerializedSize],close->[close],open->[open]]]
Open the nanoseconds file.
Probably should crash if bad format string
@@ -329,8 +329,14 @@ .ConvertFromInvariantString(value.PropertyValue.ToString()); propertyInfo.SetValue(sagaEntity, convertedValue); - } + sagaEntity.Id = sagaIdGenerator.Generate(value.PropertyName, value.PropertyValue, metadata, context.Extensions); + } + else + { + sagaEntity.Id = sagaIdGenerator.Generate(null, null, metadata, context.Extensions); + } + return sagaEntity; }
[SagaPersistenceBehavior->[IsMessageAllowedToStartTheSaga->[IsMessageAllowedToStartTheSaga]]]
Creates a new saga entity.
I don't really like the approach of passing null values. That will be hard to keep in mind for custom implementations.
@@ -60,7 +60,16 @@ class WPCom_GHF_Markdown_Parser extends MarkdownExtra_Parser { */ public function __construct() { $this->use_code_shortcode = class_exists( 'SyntaxHighlighter' ); - $this->preserve_shortcodes = function_exists( 'get_shortcode_regex' ); + /** + * Allow processing shortcode contents. + * + * @module markdown + * + * @since 4.4.0 + * + * @param boolean $preserve_shortcodes Defaults to $this->preserve_shortcodes. + */ + $this->preserve_shortcodes = apply_filters( 'wpcom_markdown_preserve_shortcodes', $this->preserve_shortcodes ) && function_exists( 'get_shortcode_regex' ); $this->preserve_latex = function_exists( 'latex_markup' ); $this->strip_paras = function_exists( 'wpautop' );
[WPCom_GHF_Markdown_Parser->[_doFencedCodeBlocks_callback->[restore_leading_hash]]]
Constructor for the class.
Could you add a period at the end of this description, as well as at the end of the `@param` declaration, to avoid errors for those of us using a code sniffer? Thanks!
@@ -1024,6 +1024,7 @@ int load_key_certs_crls(const char *uri, int maybe_stdin, end: OSSL_STORE_close(ctx); + BIO_free(bio); if (failed == NULL) { int any = 0;
[No CFG could be retrieved]
Check if a key is expected or not. Check if the unknown extensions are found.
@t8m - does this change look ok to you? Took me a while to figure out .
@@ -116,7 +116,7 @@ def _should_cache( has determined a wheel needs to be built. """ if not should_build_for_install_command( - req, check_binary_allowed=check_binary_allowed + req, check_binary_allowed=_always_true ): # never cache if pip install would not have built # (editable mode, etc)
[should_build_for_wheel_command->[_should_build],_should_cache->[should_build_for_install_command,_contains_egg_info],_get_cache_dir->[_should_cache],build->[_build_one,_get_cache_dir],should_build_for_install_command->[_should_build]]
Checks if a built InstallRequirement can be stored in the persistent wheel cache assuming the wheel cache.
Side note: this test actually boils down to not caching wheels built from editable requirements.
@@ -1,5 +1,11 @@ <% content_for :title, "#{@assignment.short_identifier} #{SplitPdfLog.model_name.human.pluralize}" %> +<% if flash[:split] %> + <script type="text/javascript"> + poll_job("<%= flash[:split] %>", undefined, function() {window.location.reload()}); + </script> +<% end %> + <% @heading_buttons = [ { link_text: t('exam_templates.back_to_exam_template_page'), link_path: assignment_exam_templates_path }
[No CFG could be retrieved]
Renders the list of all log entries for a given node.
This is what I meant by "good basic idea, but use `session` instead of `flash`."
@@ -179,6 +179,16 @@ func (t *transferLeader) Step(r *RaftEngine) { t.finished = true r.SetRegion(region) r.recordRegionChange(region) + fromPeerID := t.fromPeer.GetId() + toPeerID := t.peer.GetId() + _, ok := r.schedulerStats.taskStats.transferLeader[fromPeerID] + if ok { + r.schedulerStats.taskStats.transferLeader[fromPeerID][toPeerID]++ + } else { + m := make(map[uint64]int) + m[toPeerID]++ + r.schedulerStats.taskStats.transferLeader[fromPeerID] = m + } } func (t *transferLeader) RegionID() uint64 {
[Step->[GetPeer,GetPeers,GetRegion,Equal,GetStoreId,RemoveStorePeer,AddPeer,Errorf,decUsedSize,incUsedSize,SetRegion,GetId,recordRegionChange],Desc->[GetStoreId,Sprintf,GetId],GetPeer,GetRegion,GetChangePeer,GetTransferLeader,GetMerge,GetChangeType,GetRegionId,GetTarget,GetRegionEpoch]
Step moves the leader to the next state.
Can we let the Stats just `Observe` the task and the statistical behavior is handled in `observe`?
@@ -321,5 +321,5 @@ main(void) teardown), }; - return cmocka_run_group_tests(tests, NULL, NULL); + return cmocka_run_group_tests_name("control_nvme_control_ut", tests, NULL, NULL); }
[No CFG could be retrieved]
Run all the tests in the group and return the ID of the last one that was run.
(style) line over 80 characters
@@ -337,6 +337,16 @@ class _EnvReloader(object): CUDA_ARRAY_INTERFACE_SYNC = _readenv("NUMBA_CUDA_ARRAY_INTERFACE_SYNC", int, 1) + # Path of the directory that the CUDA driver libraries are located + CUDA_DRIVER = _readenv("NUMBA_CUDA_DRIVER", str, '') + + # Buffer size for logs produced by CUDA driver operations (e.g. + # linking) + CUDA_LOG_SIZE = _readenv("NUMBA_CUDA_LOG_SIZE", int, 1024) + + # Whether to generate verbose log messages when JIT linking + CUDA_VERBOSE_JIT_LOG = _readenv("NUMBA_CUDA_VERBOSE_JIT_LOG", int, 1) + # Compute contiguity of device arrays using the relaxed strides # checking algorithm. NPY_RELAXED_STRIDES_CHECKING = _readenv(
[reload_config->[update],_EnvReloader->[process_environ->[avx_default->[_os_supports_avx],_readenv]],_EnvReloader]
Process the environment variables and return a base - level object. Check if a node - level object is available in the system. Get all of the basic environment variables related to the nagios - core base on environment Get a specific object from the user s config.
With moving to `numba.config` I suppose it's possible for users to end up with broken code if they were manipulating the env vars that used to provide this functionality from within python itself. Once `numba.config` is in use, as soon as `numba` is imported I think these configurations are strongly bound to the environment and subsequent manipulation with e.g. `os.environ` will have no effect. The impact is probably minimal and pretty easy to resolve, but just something of which to be aware.
@@ -550,7 +550,7 @@ public abstract class AbstractLazyLoadRunMap<R> extends AbstractMap<Integer,R> i private static final Comparator<Integer> COMPARATOR = new Comparator<Integer>() { @Override public int compare(Integer o1, Integer o2) { - return o2 - o1; + return -o1.compareTo(o2); } };
[AbstractLazyLoadRunMap->[headMap->[subMap],copy->[Index],getByNumber->[get],load->[put,get,Index,load],subMap->[subMap],purgeCache->[Index],reset->[put,getNumberOf,Index,createReference],putAll->[put],entrySet->[baseDirInitialized],get->[get],unwrap->[get],getIdOf->[getNumberOf],createReference->[getIdOf],put->[put],removeValue->[copy,getNumberOf,removeValue],_put->[put],tailMap->[subMap]]]
Compares two integer values.
Could alternately have used `o2.compareTo(o1)`.
@@ -1347,6 +1347,8 @@ bool Game::createClient(const GameStartData &start_data) /* Skybox */ sky = new Sky(-1, m_rendering_engine, texture_src, shader_src); + if (g_settings->getBool("enable_dynamic_shadows")) + sky->setSkyBodyOrbitTilt(rangelim(g_settings->getFloat("shadow_sky_body_orbit_tilt"), 0.0f, 60.0f)); scsf->setSky(sky); skybox = NULL; // This is used/set later on in the main run loop
[No CFG could be retrieved]
Creates a single cloud object. Set window caption of the device.
i don't understand why it's done there, just do it directly in the sky constructor, it's better to understand how sky is inited
@@ -45,3 +45,12 @@ class AssignHomepageCollectionForm(forms.ModelForm): class Meta: model = SiteSettings fields = ('homepage_collection',) + + def clean(self): + cleaned_data = super().clean() + homepage_collection = cleaned_data['homepage_collection'] + if homepage_collection and not homepage_collection.is_published: + self.add_error('homepage_collection', pgettext_lazy( + 'Homepage collection assign form error', + 'Selected collection is not published')) + return cleaned_data
[CollectionForm->[save->[slugify,super,unidecode],Meta->[pgettext_lazy],__init__->[all,SeoDescriptionField,super,SeoTitleField,fields],all,pgettext_lazy,AjaxSelect2MultipleChoiceField,reverse_lazy]]
The default meta class.
I think is more suitable for a `clean_homepage_collection` functio. What's more, we could change field's queryset to get only collections that are published, and in such case this function would be unnecessary
@@ -15,10 +15,17 @@ class AccountErrorCode(Enum): INVALID = "invalid" INVALID_PASSWORD = "invalid_password" NOT_FOUND = "not_found" - NO_PERMISSION = "no_permission" PASSWORD_ENTIRELY_NUMERIC = "password_entirely_numeric" PASSWORD_TOO_COMMON = "password_too_common" PASSWORD_TOO_SHORT = "password_too_short" PASSWORD_TOO_SIMILAR = "password_too_similar" REQUIRED = "required" UNIQUE = "unique" + + +class PermissionGroupErrorCode(Enum): + ASSIGN_NON_STAFF_MEMBER = "assign_non_staff_member" + CANNOT_ADD_AND_REMOVE = "cannot_add_and_remove" + NO_PERMISSION = "no_permission" + REQUIRED = "required" + UNIQUE = "unique"
[No CFG could be retrieved]
Get the name of the resource from the config file.
This enum should be more specific as now it may be misinterpreted as `PermissionsDenied` exception. This enum indeed means that you cannot do something, but it's rather a validation error. I would suggest something like `OUT_OF_SCOPE_PERMISSION` which I would understand as "I cannot use this permission because it's out of my permissions scope". What do you think?
@@ -319,7 +319,10 @@ public class SecurityConfiguration extends WebSecurityConfigurerAdapter { // each scope as a GrantedAuthority, which we don't care about. if (authority instanceof OidcUserAuthority) { OidcUserAuthority oidcUserAuthority = (OidcUserAuthority) authority; - mappedAuthorities.addAll(SecurityUtils.extractAuthorityFromClaims(oidcUserAuthority.getUserInfo().getClaims())); + OidcUserInfo userInfo = oidcUserAuthority.getUserInfo(); + if (userInfo != null) { + mappedAuthorities.addAll(SecurityUtils.extractAuthorityFromClaims(userInfo.getClaims())); + } } }); return mappedAuthorities;
[No CFG could be retrieved]
Provides a converter that maps the ID Token to Spring Security Authorities. Set the claim set converter.
In which scenario you should get null over here? Isn't the UserInfo endpoint a requirement of `OIDC`?
@@ -13,7 +13,7 @@ RSpec.describe 'smoke test: SP initiated sign in' do visit_idp_from_oidc_sp sign_in_and_2fa(monitor.config.login_gov_sign_in_email) - if page.has_content?('You are now signing in for the first time') + if on_consent_screen? click_on 'Agree and continue' end
[login_gov_sign_in_email,new,let,describe,oidc_sp_is_usajobs?,it,to,have_content,before,filter_if,sign_in_and_2fa,include,has_content?,saml_sp_url,match,click_on,context,remote?,setup,filter_unless]
Describe the page that is being signed in This function is called to get the list of all the node - level errors from the SAML.
rubocop doesn't like this ... ACAB
@@ -156,7 +156,11 @@ class RestCommonMethods(object): response.charset = "utf-8" # To be able to access ret.text (ret.content are bytes) raise get_exception_from_error(response.status_code)(response.text) - result = json.loads(decode_text(response.content)) + try: # This can fail, if some proxy returns 200 and an html message + result = json.loads(decode_text(response.content)) + except Exception: + raise ConanException("Remote responded with unexpected message: %s" + % decode_text(response.content)) if not isinstance(result, dict): raise ConanException("Unexpected server response %s" % result) return result
[handle_return_deserializer->[handle_return->[inner->[get_exception_from_error]]],RestCommonMethods->[search_packages->[search_packages,get_json,server_info],auth->[JWTAuth],upload_recipe->[check_credentials],remove_conanfile->[check_credentials],get_json->[get_exception_from_error],search->[get_json,search],upload_package->[check_credentials],handle_return_deserializer],get_exception_from_error->[_base_error]]
Get a single from the given URL. Check if the local recipe has a and if so upload it.
The issue is produced because of a message that it is not a json, right? The right way to check this is to check if `application/json` header is present. I know you would think that is risky, but a server returning a json without a json header is a bug. The protocols need to be followed. If our own test's with the server fails (I hope they don't) we need to fix it in the server but I agree we cannot break old servers, in that case, narrow the exception capturing to the json decoding, please.
@@ -1530,4 +1530,10 @@ class VariantImageUnassign(BaseMutation): variant_image.delete() variant = ChannelContext(node=variant, channel_slug=None) + + with transaction.atomic(): + transaction.on_commit( + lambda: info.context.plugins.product_variant_deleted(variant) + ) + return VariantImageUnassign(product_variant=variant, image=image)
[CollectionCreate->[save->[save],Arguments->[CollectionCreateInput],perform_mutation->[CollectionCreate]],ProductImageDelete->[perform_mutation->[ProductImageDelete]],ProductImageReorder->[perform_mutation->[save,ProductImageReorder]],ProductImageCreate->[Arguments->[ProductImageCreateInput],perform_mutation->[ProductImageCreate]],VariantImageAssign->[perform_mutation->[VariantImageAssign]],CollectionReorderProducts->[perform_mutation->[CollectionReorderProducts]],ProductTypeUpdate->[Arguments->[ProductTypeInput]],CollectionDelete->[perform_mutation->[CollectionDelete]],ProductVariantReorder->[perform_mutation->[save,ProductVariantReorder]],ProductUpdate->[save->[save],Arguments->[ProductInput]],CollectionUpdate->[save->[save],Arguments->[CollectionInput]],VariantImageUnassign->[perform_mutation->[VariantImageUnassign]],ProductVariantDelete->[success_response->[save]],ProductImageUpdate->[Arguments->[ProductImageUpdateInput],perform_mutation->[save,ProductImageUpdate]],CategoryCreate->[save->[save],Arguments->[CategoryInput]],ProductVariantSetDefault->[perform_mutation->[save,ProductVariantSetDefault]],ProductVariantUpdate->[Arguments->[ProductVariantInput]],CollectionAddProducts->[perform_mutation->[CollectionAddProducts]],CategoryUpdate->[Arguments->[CategoryInput]],ProductCreate->[save->[save],Arguments->[ProductCreateInput],clean_attributes->[clean_input],clean_input->[clean_attributes]],ProductTypeCreate->[Arguments->[ProductTypeInput]],ProductVariantCreate->[save->[save],Arguments->[ProductVariantCreateInput],clean_attributes->[clean_input],clean_input->[validate_duplicated_attribute_values,clean_attributes]],CollectionRemoveProducts->[perform_mutation->[CollectionRemoveProducts]]]
Perform a mutation on a product variant.
The same situation as above, IMO `product_variant_deleted` shouldn't be called for `VariantImageUnassign` mutation. Rather, `product_variant_updated`.
@@ -2189,6 +2189,10 @@ namespace System.Windows.Forms { inheritedCellStyleTmp.ForeColor = columnStyle.ForeColor; } + else if ((this is DataGridViewButtonCell || this is DataGridViewCheckBoxCell) && !DataGridView.IsDefaultCellStyleChanged) + { + inheritedCellStyleTmp.ForeColor = DataGridView.ForeColor; + } else { inheritedCellStyleTmp.ForeColor = dataGridViewStyle.ForeColor;
[DataGridViewCell->[GetContrastedWindowsPens->[ColorDistance],MouseDoubleClickUnsharesRowInternal->[MouseDoubleClickUnsharesRow],ContentDoubleClickUnsharesRowInternal->[ContentDoubleClickUnsharesRow],OnMouseUpInternal->[OnMouseUp],PaintBorder->[GetContrastedPens,GetContrastedWindowsPens],PaintWork->[GetEditedFormattedValue,Paint,GetValue,GetErrorText],MouseLeaveUnsharesRowInternal->[MouseLeaveUnsharesRow],OnMouseEnterInternal->[OnMouseEnter],TextFitsInBounds->[MeasureTextHeight],EnterUnsharesRowInternal->[EnterUnsharesRow],GetToolTipText->[GetInternalToolTipText,GetDefaultToolTipText],OnContentDoubleClickInternal->[OnContentDoubleClick],MeasureTextHeight->[MeasureTextHeight],MouseDownUnsharesRowInternal->[MouseDownUnsharesRow],ToString->[ToString],OnCellErrorAreaMouseEnterInternal->[GetErrorText],OnEnterInternal->[OnEnter],OnMouseMoveInternal->[OnCellDataAreaMouseEnterInternal,OnMouseMove,OnCellDataAreaMouseLeaveInternal,OnCellErrorAreaMouseLeaveInternal,OnCellErrorAreaMouseEnterInternal],GetEditedFormattedValue->[GetEditedFormattedValue],KeyDownUnsharesRowInternal->[KeyDownUnsharesRow],OnClickInternal->[OnClick],GetValue->[GetValue],OnContentClickInternal->[OnContentClick],MouseUpUnsharesRowInternal->[MouseUpUnsharesRow],GetFormattedValue->[GetFormattedValue],MouseClickUnsharesRowInternal->[MouseClickUnsharesRow],MouseEnterUnsharesRowInternal->[MouseEnterUnsharesRow],Rectangle->[Size],GetContrastedPens->[ColorDistance],SetValue->[GetValue],OnDoubleClickInternal->[OnDoubleClick],DataGridViewCellAccessibleObject->[AccessibleObject->[GetChildCount],SetFocus->[SetFocus],IsPatternSupported->[IsPatternSupported],GetPropertyValue->[GetPropertyValue],ToString,ParseFormattedValue,GetFormattedValue],OnMouseClickInternal->[OnMouseClick],PaintInternal->[Paint],OnMouseLeaveInternal->[OnCellErrorAreaMouseLeaveInternal,OnMouseLeave,OnCellDataAreaMouseLeaveInternal],LeaveUnsharesRowInternal->[LeaveUnsharesRow],GetClipboardContentInternal->[GetClipboardContent],GetValueInternal->[G
etValue],MouseMoveUnsharesRowInternal->[MouseMoveUnsharesRow],GetHeight->[GetHeight],KeyUpUnsharesRowInternal->[KeyUpUnsharesRow],OnKeyUpInternal->[OnKeyUp],GetClipboardContent->[FormatPlainText,FormatPlainTextAsHtml],OnLeaveInternal->[OnLeave],SetValueInternal->[SetValue],OnMouseDownInternal->[OnMouseDown],PositionEditingControl->[Size],ContentClickUnsharesRowInternal->[ContentClickUnsharesRow],KeyPressUnsharesRowInternal->[KeyPressUnsharesRow],ContextMenuStrip->[ContextMenuStrip],OnCellDataAreaMouseEnterInternal->[GetPreferredWidth,GetValue,GetToolTipText,GetPreferredHeight,GetPreferredTextHeight,GetEditedFormattedValue],Clone->[CloneInternal],Bitmap->[Dispose],OnMouseDoubleClickInternal->[OnMouseDoubleClick],TruncateToolTipText->[ToString],Size->[GetHeight],ClickUnsharesRowInternal->[ClickUnsharesRow],OnDataGridViewChanged->[OnDataGridViewChanged],PaintErrorIcon->[PaintErrorIcon],DoubleClickUnsharesRowInternal->[DoubleClickUnsharesRow],IsBeingTabbedTo->[IsBeingTabbedTo],Dispose->[Dispose]]]
This method returns the style of the cell with the specified row and column index. This method is used to determine which back color and fore color are used for the row and This method is called to set the selection back color and fore color of the cell.
This seems to be breaking how dataGridViewStyle works - i should be applied to all cells unless the column or the row overrides the style. Is it possible to adjust ForeColor property on DefaultCellStyle instead?
@@ -23,7 +23,7 @@ <br> <br> <div class="sm-col sm-col-2 padding-right-2"> - <%= image_tag(asset_url('alert/icon-lock-alert-important.svg'), alt: '', size: '80') %> + <%= image_tag(asset_url('alert/icon-lock-alert-important.svg'), alt: 'Lock Icon with red exclamation point', size: '80') %> </div> <div class="sm-col sm-col-10"> <div class="bold margin-bottom-0">
[No CFG could be retrieved]
Displays a list of all personal key locks. The default key - page - controller is the clipboard.
We should make sure this is translated based on the current locale.
@@ -1230,7 +1230,7 @@ btr_root_resize(struct btr_context *tcx, struct btr_trace *trace, return rc; } trace->tr_node = root->tr_node = nd_off; - memcpy(btr_off2ptr(tcx, nd_off), nd, old_size); + umem_tx_memcpy(btr_umm(tcx), btr_off2ptr(tcx, nd_off), nd, old_size); /* NB: Both of the following routines can fail but neither presently * returns an error code. For now, ignore this fact. DAOS-2577 */
[dbtree_lookup->[dbtree_fetch],btr_probe_rc->[btr_probe_valid],dbtree_iterate->[dbtree_iter_next,dbtree_iter_prev,dbtree_iter_finish,dbtree_iter_probe,dbtree_iter_prepare,dbtree_iter_fetch],int->[btr_root_grow,btr_root_start],dbtree_iter_prepare->[btr_context_addref],dbtree_open_inplace->[dbtree_open_inplace_ex]]
This function is called by the trace allocator when a trace is being resized.
(style) 'nd' may be misspelled - perhaps 'and'? (style) 'nd' may be misspelled - perhaps 'and'?
@@ -128,8 +128,10 @@ void GenericSmallStrainIsotropicPlasticity<TConstLawIntegratorType>::CalculateMa if( r_constitutive_law_options.Is( ConstitutiveLaw::U_P_LAW ) ) { predictive_stress_vector = rValues.GetStressVector(); } else { - // S0 = r_constitutive_matrix:(E-Ep) - predictive_stress_vector = prod(r_constitutive_matrix, r_strain_vector - plastic_strain); + // S0 = Elastic stress with strain (E-Ep) + Vector aux_stress = ZeroVector(VoigtSize); + BaseType::CalculatePK2Stress(r_strain_vector - plastic_strain, aux_stress, rValues); + noalias(predictive_stress_vector) = aux_stress; } // Initialize Plastic Parameters
[No CFG could be retrieved]
This function calculates the from the given r - values and the corresponding indices. - - - - - - - - - - - - - - - - - -.
why the case of U_P_LAW is different?
@@ -122,8 +122,8 @@ public class GameRunner { {TRIPLEA_GAME_PROPERTY, TRIPLEA_SERVER_PROPERTY, TRIPLEA_CLIENT_PROPERTY, TRIPLEA_HOST_PROPERTY, TRIPLEA_PORT_PROPERTY, TRIPLEA_NAME_PROPERTY, TRIPLEA_SERVER_PASSWORD_PROPERTY, TRIPLEA_STARTED, TRIPLEA_LOBBY_PORT_PROPERTY, - LOBBY_HOST, LOBBY_GAME_COMMENTS, LOBBY_GAME_HOSTED_BY, TRIPLEA_ENGINE_VERSION_BIN, HttpProxy.PROXY_HOST, - HttpProxy.PROXY_PORT, TRIPLEA_DO_NOT_CHECK_FOR_UPDATES, MAP_FOLDER}; + LOBBY_HOST, LOBBY_GAME_COMMENTS, LOBBY_GAME_HOSTED_BY, TRIPLEA_ENGINE_VERSION_BIN, + TRIPLEA_DO_NOT_CHECK_FOR_UPDATES, MAP_FOLDER}; /**
[GameRunner->[getChat->[getChat],joinGame->[showConfirmDialog,joinGame,showMessageDialog],setServerObserverJoinWaitTime->[getServerObserverJoinWaitTime],showConfirmDialog->[showConfirmDialog],Title->[of->[Title]],setServerStartGameSyncWaitTime->[getServerStartGameSyncWaitTime],checkForLatestEngineVersionOut->[showMessageDialog],quitGame->[dispatchEvent],clientLeftGame->[showMainFrame],setupLogging->[dispatchEvent->[dispatchEvent]],hasChat->[getChat],showMessageDialog->[showMessageDialog]]]
The main method of the game client. Reads the command line arguments and checks if the command line arguments are valid. If the command.
We do not run bots or lobby with a proxy arg, given it can be updated via UI, we really do not need command line args for it. Simplifying here by removing them. Proxy configuration was stored in the engine preferences window, to move this to the settings window required for me to go through the proxy code. There are some further updates/simplfications to the proxy code
@@ -33,6 +33,10 @@ class RAneufinder(RPackage): depends_on('r-ggplot2', type=('build', 'run')) depends_on('r-reshape2', type=('build', 'run')) depends_on('r-ggdendro', type=('build', 'run')) + depends_on('r-ggrepel', type=('build', 'run')) depends_on('r-reordercluster', type=('build', 'run')) depends_on('r-mclust', type=('build', 'run')) - depends_on('r-ggrepel', type=('build', 'run')) + + depends_on('r-ecp', when='@1.8.0:', type=('build', 'run')) + + depends_on('r@3.5:', when='@1.10.2:', type=('build', 'run'))
[RAneufinder->[depends_on,version]]
requires_on method for all build - specific functions.
It doesn't look like `r-biocgenerics` is a dependency.
@@ -28,7 +28,7 @@ export function truncateAddress(address = '') { + address.substr(address.length - backChars) } -export function abbreviateName(party, defaultName = '') { +export function abbreviateName(party = {}, defaultName = '') { const { profile = {}, fullName } = party const { firstName = '', lastName = '' } = profile const lastNameLetter = lastName.length ? `${lastName.charAt(0)}.` : ''
[No CFG could be retrieved]
Abbreviate name of .
In case the user was not passed by the conversation component
@@ -139,8 +139,8 @@ static int load_setup_config(struct comp_dev *dev, void *cfg, uint32_t size) (uintptr_t)cfg, size); ret = -EINVAL; goto end; - } else if (size <= sizeof(struct ca_config)) { - comp_err(dev, "load_setup_config(): no codec config available."); + } else if (size < sizeof(struct ca_config)) { + comp_err(dev, "load_setup_config(): no codec config available, size %d", size); ret = -EIO; goto end; }
[No CFG could be retrieved]
This function loads the configuration data from the specified device. Load the codec specific part of the config file and return the number of the component.
Commit message "dummy" -> "passthrough".
@@ -206,9 +206,10 @@ Rails.application.routes.draw do match '/manage/phone/:id' => 'users/edit_phone#update', via: %i[patch put] delete '/manage/phone/:id' => 'users/edit_phone#destroy' get '/manage/personal_key' => 'users/personal_keys#show', as: :manage_personal_key - post '/account/personal_key' => 'users/personal_keys#create', as: :create_new_personal_key post '/manage/personal_key' => 'users/personal_keys#update' + post '/account/personal_key' => 'accounts/personal_keys#create', as: :create_new_personal_key + get '/otp/send' => 'users/two_factor_authentication#send_code' get '/two_factor_options' => 'users/two_factor_authentication_setup#index' patch '/two_factor_options' => 'users/two_factor_authentication_setup#create'
[draw,namespace,root,scope,enable_gpo_verification?,redirect,get,enable_test_routes,post,devise_scope,join,patch,devise_for,each,match,put,delete]
Route to the users. This gem is a list of all the possible backup code management gem names.
the fact that there were two separate prefixes `/manage` and `/account` was a hint to me that some things could be separated
@@ -29,7 +29,7 @@ namespace NServiceBus /// <summary> /// Configures transport for receiving. /// </summary> - protected internal override void ConfigureForReceiving(TransportReceivingConfigurationContext context) + protected internal override TransportReceivingConfigurationResult ConfigureForReceiving(TransportReceivingConfigurationContext context) { new CheckMachineNameForComplianceWithDtcLimitation().Check();
[MsmqTransport->[TransactionSupport->[Distributed],ReceiveStrategy->[Distributed,None],ConfigureForSending->[SetDispatcherFactory,RetrieveSettings,Empty,Check,TryGet],ConfigureForReceiving->[IsolationLevel,RetrieveSettings,SetQueueCreatorFactory,ConnectionString,SelectReceiveStrategy,SetMessagePumpFactory,Check,Settings,TransactionTimeout],ToTransportAddress->[TransportDiscriminator,MachineName,UserDiscriminator,Append,Qualifier,ToString],GetDiscriminatorForThisEndpointInstance->[MachineName],OutboundRoutingPolicy->[DirectSend]]]
Configure for receiving.
Why not just change it to? `TransportReceivingConfigurationContext ConfigureForReceiving();`
@@ -25,6 +25,16 @@ final class DebugMenu extends JMenu { .setMnemonic(KeyEvent.VK_X); } + players.stream() + .filter(PlayerDebug.class::isInstance) + .forEach( + player -> { + final JMenu playerDebugMenu = new JMenu(player.getName()); + add(playerDebugMenu); + + ((PlayerDebug) player).addDebugMenuItems(frame, playerDebugMenu::add); + }); + add(SwingAction.of("Show Console", () -> ClientSetting.showConsole.setValueAndFlush(true))) .setMnemonic(KeyEvent.VK_C); }
[DebugMenu->[anyMatch,getLocalPlayers,initialize,setMnemonic]]
Add the menu items to the menu.
I think we should prefer to avoid using class types to infer properties of an object. It can get odd in terms of nesting interfaces. Second, the menus suffer from a lot of side effect calls with methods like "addMenu(menu)", this makes code a lot harder to follow. What would you think about adding a method to the `Player` type, something like `List<JMenuItem> getDebugMenutItems()`. Then in this method, when iterating over each player, all you'd need to filter on is that the debug menus items is no non-null, and then you could add the menu times. This would avoid the 'void' return method and instanceOf check.
@@ -15,7 +15,7 @@ namespace System.ComponentModel.Design /// <summary> /// <para>Describes and represents inherited properties in an inherited class.</para> /// </summary> - internal class InheritedPropertyDescriptor : PropertyDescriptor + internal sealed class InheritedPropertyDescriptor : PropertyDescriptor { private PropertyDescriptor propertyDescriptor; private object _defaultValue;
[InheritedPropertyDescriptor->[GetValue->[GetValue],ShouldSerializeValue->[GetValue,ShouldSerializeValue],CanResetValue->[CanResetValue],FillAttributes->[FillAttributes],ReadOnlyCollectionConverter->[ConvertTo->[ConvertTo]],ResetValue->[ResetValue],SetValue->[SetValue],InitInheritedDefaultValue->[ClonedDefaultValue,GetValue]]]
Creates an instance of the inherited property descriptor class.
What is the reasoning behind sealed? Are you using it whereever it's internal + does not currently have subclassing (for some possible small performance improvement)? Are we doing this generally?
@@ -143,6 +143,7 @@ function ScrollerWithRef({children, loop, restingIndex, setRestingIndex}, ref) { ref={containerRef} onScroll={handleScroll} class={`${classes.scrollContainer} ${classes.hideScrollbar} ${classes.horizontalScroll}`} + snap={String(snap)} tabindex={0} > {slides}
[No CFG could be retrieved]
Displays a single in the DOM. When the user stops scrolls the user s slides.
I made a note on JSS, but IMHO we should just translate this into a class.
@@ -21,7 +21,7 @@ public class UnbufferedBase64InputStream extends FilterInputStream { private byte[] decoded; private int pos; private final DataInputStream din; - private static final Base64.Decoder decoder = Base64.getMimeDecoder(); + private static final Base64.Decoder decoder = Base64.getDecoder(); public UnbufferedBase64InputStream(InputStream in) { super(in);
[UnbufferedBase64InputStream->[skip->[read],read->[read]]]
This class implements a base64 decoder that decodes base64 bytes without buffering. return r or n.
This fixes the change from #2958
@@ -332,6 +332,17 @@ namespace System.Windows.Forms // variable here because we hit this a lot private UICuesStates _uiCuesState; + // Stores scaled font from Dpi changed values. This is required to distinguish the Font change from + // Dpi changed events and explicit Font change/assignment. Caching Font values for each Dpi is complex. + // ToDO: Look into caching Dpi and control bounds for each Dpi to improve perf. + // https://github.com/dotnet/winforms/issues/5047 + private Font _scaledControlFont; + private FontHandleWrapper _scaledFontWrapper; + + // ContainerControls like 'PropertyGrid' scale their children when they resize. + // no explicit scaling of children required in such cases. They have specific logic. + internal bool _doNotScaleChildren; + #if DEBUG internal int LayoutSuspendCount {
[Control->[OnSystemColorsChanged->[OnSystemColorsChanged,Invalidate],UpdateRoot->[GetTopLevel],OnFontChanged->[GetAnyDisposingInHierarchy,Font,DisposeFontHandle,GetStyle,Invalidate],AccessibilityNotifyClients->[AccessibilityNotifyClients],OnParentFontChanged->[OnFontChanged],AutoValidate->[AutoValidate],OnParentBackColorChanged->[OnBackColorChanged],WmKeyChar->[ProcessKeyMessage,DefWndProc],WmWindowPosChanging->[ActiveXUpdateBounds,DefWndProc],WmMouseHover->[OnMouseHover,DefWndProc],AdjustWindowRectEx->[AdjustWindowRectEx],ScaleBitmapLogicalToDevice->[ScaleBitmapLogicalToDevice],OnDragOver->[OnDragOver],GetNeighboringToolsRectangles->[GetNeighboringToolsRectangles],CreateControl->[CreateHandle,CreateControl],WmParentNotify->[ReflectMessage,DefWndProc],ScaleFont->[DisposeFontHandle],OnParentEnabledChanged->[OnEnabledChanged,GetState],Refresh->[Invalidate],WmNotifyFormat->[ReflectMessage,DefWndProc],ResetPadding->[ResetPadding],DefWndProc->[DefWndProc],SetVisibleCore->[SelectNextIfFocused,SetState,OnVisibleChanged,GetTopLevel,CreateControl],CanShowToolTipsNow->[CanShowToolTipsNow],OnForeColorChanged->[GetAnyDisposingInHierarchy,Invalidate],OnRightToLeftChanged->[GetAnyDisposingInHierarchy],OnDragLeave->[OnDragLeave],SetAutoSizeMode->[SetAutoSizeMode],WmCtlColorControl->[InitializeDCForWmCtlColor,DefWndProc],SetAcceptDrops->[GetState],PreProcessControlState->[GetExtendedState,IsInputKey,PreProcessMessage,IsInputChar,OnPreviewKeyDown],WmPrintClient->[OnPrint],UpdateStyles->[OnStyleChanged],ProcessKeyMessage->[ProcessKeyEventArgs],OnDragDrop->[OnDragDrop],InitLayout->[InitLayout],WmPaint->[Graphics,PaintWithErrorHandling,GetStyle],OnParentRightToLeftChanged->[OnRightToLeftChanged],PerformContainerValidation->[PerformControlValidation,GetStyle,PerformContainerValidation],SelectNextControl->[Select],WmCaptureChanged->[OnMouseCaptureChanged,DefWndProc],OnVisibleChanged->[GetAnyDisposingInHierarchy,OnParentBecameInvisible,OnParentVisibleChanged,CreateControl],OnParentChanged
->[OnTopMostActiveXParentChanged],ShouldPerformContainerValidation->[GetStyle],WmKillFocus->[InvokeLostFocus,DefWndProc],CreateHandle->[CreateHandle],Invoke->[Invoke],ProcessKeyPreview->[ProcessKeyPreview],WmDpiChangedAfterParent->[OnDpiChangedAfterParent,DefWndProc],SendToBack->[GetTopLevel],WmGetControlName->[MarshalStringToMessage],WmOwnerDraw->[ReflectMessage,DefWndProc],Dispose->[DestroyHandle,Dispose,ResetBindings],PrintToMetaFile_SendPrintMessage->[GetStyle],WndProc->[WmDestroy,WmGetControlType,ReflectMessage,WmNotify,WmKeyChar,WmGetControlName,WmEraseBkgnd,WmWindowPosChanging,WmClose,WmMouseUp,WmMouseHover,WmUpdateUIState,WmCommand,WmMove,WmParentNotify,WmNotifyFormat,InvokeMarshaledCallbacks,WmHelp,WmMouseMove,DefWndProc,SetState,OnNotifyMessage,GetStyle,WmCtlColorControl,WmShowWindow,WmCreate,WmMouseDown,WmPrintClient,WmPaint,WmCaptureChanged,WmMouseLeave,WmKillFocus,WmDisplayChange,WmGetObject,WmSetCursor,WmMouseWheel,WmQueryNewPalette,WmSetFocus,WmContextMenu,WmDpiChangedBeforeParent,WmDpiChangedAfterParent,WmWindowPosChanged,WmMouseEnter,WmOwnerDraw],ResumeLayout->[InitLayout,ResumeLayout,PerformLayout,OnLayoutResuming,GetState],Load->[Load],GetCaptionForTool->[GetCaptionForTool],PaintTransparentBackground->[InvokePaintBackground,Control,PaintTransparentBackground,InvokePaint,Graphics],WmMouseUp->[OnMouseDoubleClick,DefWndProc,SetState,GetStyle,OnMouseClick,OnClick,OnMouseUp,OnDoubleClick,GetState],DisposeAxControls->[DisposeAxControls],OnHandleCreated->[GetExtendedState,SetRegion,GetStyle,ListenToUserPreferenceChanged,GetTopLevel,GetState],ProcessKeyEventArgs->[OnKeyDown,OnKeyUp,OnKeyPress],WmMove->[UpdateBounds,DefWndProc],OnMove->[Invalidate],WmHelp->[OnHelpRequested,DefWndProc],HRESULT->[OnFrameWindowActivate,Load,IsInputKey,GetStyle,ProcessMnemonic,OnHelpRequested],OnParentBindingContextChanged->[OnBindingContextChanged],ScaleCore->[AssertLayoutSuspendCount,Scale,ResumeLayout],PreProcessMessage->[IsInputKey,GetExtendedState,IsInputChar],UpdateStyle
sCore->[Invalidate,SetState],OnTopMostActiveXParentChanged->[OnTopMostActiveXParentChanged],CanProcessMnemonic->[CanProcessMnemonic,TraceCanProcessMnemonic],WmShowWindow->[DefWndProc,SetState,OnVisibleChanged,GetState,GetTopLevel,CreateControl],ListenToUserPreferenceChanged->[GetExtendedState],WmMouseDown->[GetExtendedState,DefWndProc,SetState,OnMouseDown,GetStyle,Focus],OnPrint->[DefWndProc,GetStyle],OnResize->[Invalidate,GetState],UnhookMouseEvent->[SetState],OnBackgroundImageChanged->[GetAnyDisposingInHierarchy,Invalidate],UserPreferenceChanged->[OnSystemColorsChanged],WmMouseLeave->[OnMouseLeave,DefWndProc],Select->[Select],SetTopLevelInternal->[GetExtendedState,SetParentHandle,SetState,ListenToUserPreferenceChanged,GetTopLevel,CreateControl],OnBackgroundImageLayoutChanged->[GetAnyDisposingInHierarchy,Invalidate],WmSetFocus->[InvokeGotFocus,DefWndProc],IsHoveredWithMouse->[IsHoveredWithMouse],OnParentBackgroundImageChanged->[OnBackgroundImageChanged],Scale->[AssertLayoutSuspendCount,Scale,ResumeLayout],WmWindowPosChanged->[UpdateChildControlIndex,UpdateBounds,DefWndProc],UpdateBounds->[OnSizeChanged,OnClientSizeChanged,AdjustWindowRectEx,OnLocationChanged,UpdateBounds,GetTopLevel],WmDestroy->[DefWndProc,UnhookMouseEvent,SetState,ReleaseUiaProvider,OnHandleDestroyed,OnMouseLeave,GetState],WmGetControlType->[MarshalStringToMessage],MarshaledInvoke->[WaitForWaitHandle,InvokeMarshaledCallbacks],HookMouseEvent->[GetState],WmNotify->[ReflectMessage,DefWndProc],OnQueryContinueDrag->[OnQueryContinueDrag],ResetMouseEventArgs->[HookMouseEvent,GetState],CheckParentingCycle->[CheckParentingCycle],LogicalToDeviceUnits->[LogicalToDeviceUnits],InitializeDCForWmCtlColor->[GetStyle],OnHandleDestroyed->[GetAnyDisposingInHierarchy,ListenToUserPreferenceChanged,GetState],WmCommand->[ReflectMessage,DefWndProc],OnParentInvalidated->[Invalidate],OnSizeChanged->[OnResize],InvokeMarshaledCallbacks->[InvokeMarshaledCallback],PaintWithErrorHandling->[PaintException,GetStyle,OnPaint,Invali
date,OnPaintBackground,GetState],ScaleChildControls->[Scale],OnDragEnter->[OnDragEnter],OnChildLayoutResuming->[OnChildLayoutResuming],WmCreate->[DefWndProc,GetStyle,UpdateChildZOrder,UpdateBounds,OnHandleCreated],DrawToBitmap->[CreateHandle],OnEnabledChanged->[GetAnyDisposingInHierarchy,Invalidate,GetStyle],WmDisplayChange->[DefWndProc,Invalidate],OnInvalidated->[ActiveXViewChanged],WmGetObject->[DefWndProc],WmMouseWheel->[OnMouseWheel,DefWndProc],Font->[Font],ProcessCmdKey->[ProcessCmdKey],WmQueryNewPalette->[DefWndProc,Invalidate],ScaleControl->[Size,ScaleControl,AdjustWindowRectEx],PaintBackground->[RenderColorTransparent,PaintBackground],OnGiveFeedback->[OnGiveFeedback],WmContextMenu->[WmContextMenu,DefWndProc,Contains],PerformLayout->[GetAnyDisposingInHierarchy,GetExtendedState,OnLayout,PerformLayout,GetState],ShouldSerializeEnabled->[GetState],RecreateHandleCore->[OnParentHandleRecreated,DestroyHandle,OnParentHandleRecreating,Focus,CreateControl,GetState,CreateHandle],ChildGotFocus->[ChildGotFocus,ActiveXOnFocus],SetBounds->[Size,SetBoundsCore,InitScaling],ProcessDialogChar->[ProcessDialogChar],DestroyHandle->[DestroyHandle],OnLayout->[ActiveXViewChanged],WmEraseBkgnd->[PaintWithErrorHandling,DefWndProc,GetStyle],OnParentCursorChanged->[OnCursorChanged],WmClose->[DefWndProc],WmUpdateUIState->[OnChangeUICues,Invalidate,DefWndProc],EndUpdateInternal->[EndUpdateInternal],SetBoundsCore->[InitScaling,InitLayout,ResumeLayout,GetState],Save->[Save],SetParentHandle->[GetTopLevel,RecreateHandle],OnGotFocus->[ChildGotFocus,ActiveXOnFocus],WmMouseMove->[OnMouseMove,DefWndProc,GetStyle],SetClientSizeCore->[OnClientSizeChanged],OnPaddingChanged->[Invalidate,GetStyle],OnLostFocus->[ActiveXOnFocus],ShowsOwnToolTip->[ShowsOwnToolTip],Rectangle->[Size],OnParentBecameInvisible->[OnParentBecameInvisible],OnBackColorChanged->[GetAnyDisposingInHierarchy,Invalidate,GetState],PrintToMetaFileRecursive->[PrintToMetaFileRecursive],PerformControlValidation->[NotifyValidated,NotifyValid
ating],SuspendLayout->[OnLayoutSuspended],AllowsToolTip->[AllowsToolTip],AllowsChildrenToShowToolTips->[AllowsChildrenToShowToolTips],WmSetCursor->[DefWndProc],SelectNextIfFocused->[SelectNextControl],ProcessDialogKey->[ProcessDialogKey],EndInvoke->[WaitForWaitHandle],Size->[Size,LogicalToDeviceUnits,GetStyle,AdjustWindowRectEx],OnParentForeColorChanged->[OnForeColorChanged],UpdateWindowFontIfNeeded->[GetStyle],WmMouseEnter->[DefWndProc,OnMouseEnter],WmDpiChangedBeforeParent->[OnDpiChangedBeforeParent,Size,RescaleConstantsForDpi,DefWndProc],Invalidate->[Invalidate],AddReflectChild,AssertLayoutSuspendCount,Control,Size,DisposeFontHandle,GetState]]
Private helper methods for handling the control; part of the base class for the Control object.
There are multiple version of casing for `DPI`. Please pick one version and use it consistently.
@@ -44,7 +44,11 @@ class SettingsHandler extends ManagementHandler { */ function workflow($args, $request) { parent::workflow($args, $request); - TemplateManager::getManager($request)->display('management/workflow.tpl'); + + $templateMgr = TemplateManager::getManager($request); + $context = $request->getContext(); + + $templateMgr->display('management/workflow.tpl'); } /**
[SettingsHandler->[distribution->[getTemplateVars,fetch,getCSRFToken,getContext,addField,addGroup,display,url,assign,getConfig,getId,getDispatcher,getSupportedFormLocales,getPath,getRouter],workflow->[display],__construct->[addRoleAssignment]]]
Show the workflow page.
I _think_ this code is unnecessary...
@@ -951,10 +951,10 @@ function contact_block() { dbesc(implode(",", $contacts))); if (DBM::is_result($r)) { - $contacts = sprintf(tt('%d Contact','%d Contacts', $total),$total); + $contacts = sprintf(L10n::tt('%d Contact', '%d Contacts', $total), $total); $micropro = []; foreach ($r as $rr) { - $micropro[] = micropro($rr,true,'mpfriend'); + $micropro[] = micropro($rr, true, 'mpfriend'); } } }
[dlogger->[save_timestamp],get_intltext_template->[save_timestamp],get_plink->[remove_baseurl],replace_macros->[save_timestamp,template_engine,getMessage,replaceMacros],text_highlight->[factory,setRenderer,highlight],load_view_file->[save_timestamp],logger->[save_timestamp],get_markup_template->[save_timestamp,template_engine,getMessage,getTemplateFile]]
contact_block - builds the contact block listing a user's contacts.
Can you please replace all the `sprintf(tt('...', '...', $arg), $arg)` constructs with `L10n::tt('...', '...', $arg)` ?
@@ -82,8 +82,9 @@ import io.confluent.ksql.util.KsqlException; import io.confluent.ksql.util.Pair; import io.confluent.ksql.util.PersistentQueryMetadata; import io.confluent.ksql.util.QueryMetadata; +import io.confluent.ksql.util.StreamsTopologyUtil; -public class KsqlEngine implements Closeable, QueryTerminator { +public class KsqlEngine implements Closeable { private static final Logger log = LoggerFactory.getLogger(KsqlEngine.class);
[KsqlEngine->[createQueries->[parseQueries,planQueries],close->[close],terminateAllQueries->[close]]]
KsqlEngine - creates, terminates, and closes KSQL queries.
You've removed the implementation of `QueryTerminator` but you've left the unused interface around. Further, this changes the behaviour of the `DropStream` and `DropTable` commands in that any queries against them are not dropped. Either this is not valid or I think these commands should check that all queries have been terminated before a corresponding `DropXxxx` Command is run
@@ -272,12 +272,15 @@ def run_yara_query_rule(query_rule_pk): # Build the workflow using a group of tasks dealing with 250 files at a # time, chained to a task that marks the query as completed. chunk_size = 250 + chunked_tasks = create_chunked_tasks_signatures( + run_yara_query_rule_on_versions_chunk, list(pks), chunk_size, + task_args=(query_rule_pk,)) workflow = ( - create_chunked_tasks_signatures( - run_yara_query_rule_on_versions_chunk, list(pks), chunk_size, - task_args=(query_rule_pk,)) | + chunked_tasks | mark_yara_query_rule_as_completed_or_aborted.si(query_rule_pk) ) + log.info('Running workflow of %s tasks for run_yara_query_rule on rule %s', + (len(chunked_tasks), rule.pk)) # Fire it up. workflow.apply_async()
[run_customs->[run_scanner],_run_yara_query_rule_on_version->[_run_yara_for_path],run_wat->[run_scanner]]
Run a specific ScannerQueryRule on multiple versions.
I find this notation quite confusing actually (the `|`) but that was there before so
@@ -231,6 +231,10 @@ public class IndexTask extends AbstractBatchIndexTask implements ChatHandler throw new UOE("partitionsSpec[%s] is not supported", tuningConfig.getPartitionsSpec().getClass().getName()); } } + InputSource inputSource = getIngestionSchema().getIOConfig().getNonNullInputSource( + getIngestionSchema().getDataSchema().getParser() + ); + inputSource.validateAllowDenyPrefixList(securityConfig); return determineLockGranularityAndTryLock(taskActionClient, ingestionSchema.dataSchema.getGranularitySpec()); }
[IndexTask->[makeGroupId->[makeGroupId],getSegmentGranularity->[getSegmentGranularity],IndexIngestionSpec->[getInputFormat],getLiveReports->[doGetRowStats],generateAndPublishSegments->[getTaskCompletionReports,getType],isReady->[getType],determineShardSpecs->[getType],createShardSpecsFromInput->[getType],IndexTuningConfig->[withPartitionsSpec->[IndexTuningConfig],getNumShards->[getNumShards],getMaxTotalRows->[getMaxTotalRows],getPartitionDimensions->[getPartitionDimensions],equals->[equals],withBasePersistDirectory->[IndexTuningConfig],getMaxRowsPerSegment->[getMaxRowsPerSegment],getPartitionsSpec],makeGroupId]]
Checks if the lock is ready for the current ingestion schema.
This should be done in `ParallelIndexSupervisorTask.isReady()` as well.
@@ -14,8 +14,10 @@ class Jetpack_RelatedPosts_Module { /** * Class variables + * + * @var $instance */ - private static $__instance = null; + private static $instance = null; /** * Singleton implementation
[No CFG could be retrieved]
A class that provides related posts and loads Customizer controls.
It also complains about `* Additional Search Queries: related, jetpack related posts, related posts for wordpress,` since WordPress isn't capitalized - I'm not sure if it is lower case intentionally for the `Additional Search Queries` field and if I should ignore it.
@@ -2,13 +2,13 @@ import torch from torch import nn, Tensor from torch.nn.modules.utils import _pair -from torch.jit.annotations import List +from torch.jit.annotations import List, BroadcastingList2 from ._utils import convert_boxes_to_roi_format def roi_align(input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1): - # type: (Tensor, Tensor, int, float, int) -> Tensor + # type: (Tensor, Tensor, BroadcastingList2[int], float, int) -> Tensor """ Performs Region of Interest (RoI) Align operator described in Mask R-CNN
[RoIAlign->[forward->[roi_align]],roi_align->[roi_align]]
Performs the Region of Interest (RoI) Align operator described in Mask R-CNN.
oh, so we indeed need to add `BroadcastingList2` here. A similar fix is needed for `roi_pool` as well I believe
@@ -83,7 +83,6 @@ func resourceAwsCodeBuildProject() *schema.Resource { }, }, }, - Set: resourceAwsCodeBuildProjectArtifactsHash, }, "cache": { Type: schema.TypeList,
[StringLenBetween,StringValueSlice,NewSet,BoolValue,CreateProject,StringInSlice,NonRetryableError,Set,Add,Itoa,FormatBool,Int64Value,MatchString,GetOk,IntAtLeast,HasChange,Sequence,Errorf,SetId,MustCompile,RetryableError,Bool,BatchGetProjects,Id,Int64,Get,StringMatch,Printf,StringValue,Sprintf,List,String,IntBetween,UpdateProject,WriteString,DeleteProject,Retry]
Artifacts is a list of artifacts that can be read from the resource. A helper to create a schema for a list of cache keys.
I don't see it in the diff, but we can probably get rid of the actual function definition as it is no longer being used.
@@ -121,6 +121,11 @@ class StructureFormMetadataLoader implements FormMetadataLoaderInterface, CacheW foreach ($this->locales as $locale) { foreach ($structuresMetadataByTypes as $structureType => $structuresMetadata) { $structure = $this->mapStructureMetadata($structuresMetadata, $locale); + + foreach ($structure->getForms() as $formMetadata) { + $this->validateFormMetadata($formMetadata); + } + $configCache = $this->getConfigCache($structureType, $locale); $configCache->write( \serialize($structure),
[StructureFormMetadataLoader->[enhanceBlockMetadata->[enhanceBlockMetadata]]]
Warm up the configuration cache.
I have no idea how to test this, because this is in the `warmUp()` method. Calling this method in a test doesn't throw an exception
@@ -414,6 +414,10 @@ func FollowTagReference(stream *ImageStream, tag string) (finalTag string, ref * return tag, &tagRef, multiple, nil } + if tagRef.From.Namespace != "" && tagRef.From.Namespace != stream.ObjectMeta.Namespace { + return tag, nil, multiple, ErrCrossImageStreamReference + } + // The reference needs to be followed with two format patterns: // a) sameis:sometag and b) sometag if strings.Contains(tagRef.From.Name, ":") {
[Exact->[NameString],String->[Exact],RegistryHostPort->[DockerClientDefaults],DaemonMinimal->[Minimal],Exact,Equal,MostSpecific]
DockerImageReferenceForStream walks through the stream and looks for a Docker image reference. LatestImageTagEvent returns the most recent TagEvent and the tag for the specified image.
Could you fix the docstring of the error as well?
@@ -323,7 +323,7 @@ function notifier_run(&$argv, &$argc){ dbesc(NETWORK_DFRN) ); if (dbm::is_result($r)) - foreach($r as $rr) + foreach ($r as $rr) $recipients_followup[] = $rr['id']; } }
[notifier_run->[get_curl_code,set_baseurl,get_hostname]]
This is the main function invoked when the notifier runs. It fetches a single record from the database, finds all contacts that are not self, and avoids race conditions with deletion of items.
Can you please add braces to the `if` and subsequent `foreach`?
@@ -56,7 +56,7 @@ import org.springframework.util.Assert; * @author Misagh Moayyed * @since 3.1 */ -public final class SimpleHttpClient implements HttpClient, Serializable, DisposableBean { +final class SimpleHttpClient implements HttpClient, Serializable, DisposableBean { /** Unique Id for serialization. */ private static final long serialVersionUID = -4949380008568071855L;
[SimpleHttpClient->[isValidEndPoint->[isValidEndPoint]]]
Creates a new SimpleHttpClient object. The acceptable status codes and the HTTP client are used to make the request.
what does removing public have to do with this pull request?
@@ -59,7 +59,11 @@ public abstract class AbstractRemoteFileOutboundGatewayParser extends AbstractCo else { builder.addConstructorArgValue(element.getAttribute("command")); if (element.hasAttribute(EXPRESSION_ATTRIBUTE)) { - builder.addConstructorArgValue(element.getAttribute(EXPRESSION_ATTRIBUTE)); + String expression = element.getAttribute(EXPRESSION_ATTRIBUTE); + if ("++xsd.expression.default++".equals(expression)) { + expression = null; + } + builder.addConstructorArgValue(expression); } } IntegrationNamespaceUtils.setValueIfAttributeDefined(builder, element, "command-options", "options");
[AbstractRemoteFileOutboundGatewayParser->[parseHandler->[getAttribute,hasAttribute,setValueIfAttributeDefined,createExpressionDefinitionFromValueOrExpression,postProcessBuilder,genericBeanDefinition,addConstructorArgReference,getTemplateClass,getGatewayClassName,parseRemoteFileTemplate,addConstructorArgValue,addPropertyValue,configureFilter,setReferenceIfAttributeDefined],registerPatternFilter->[getSimplePatternFileListFilterClassName,getName,equals,genericBeanDefinition,getBeanDefinition,addConstructorArgValue,addPropertyValue],registerRegexFilter->[getName,equals,genericBeanDefinition,getBeanDefinition,addConstructorArgValue,addPropertyValue,getRegexPatternFileListFilterClassName],registerExpressionFilter->[addPropertyValue,getBeanDefinition],configureFilter->[getAttribute,hasText,error,registerPatternFilter,registerRegexFilter,registerExpressionFilter,addPropertyReference]]]
Parses the given element and creates a BeanDefinitionBuilder for the gateway. This method is called to build the necessary information for the FileExists object.
Why is just this not enough? I mean we just call `builder.addConstructorArgValue(element.getAttribute(EXPRESSION_ATTRIBUTE));`. Nothing more. What am I missing with `null` propagation and deferring logic into the `AbstractRemoteFileOutboundGateway`? Probably the point is that `expression` was not optional in the past, so we left that `if (element.hasAttribute(EXPRESSION_ATTRIBUTE)) {` logic in the parser. But it turns out that we just need to always call `builder.addConstructorArgValue(expression);`, so we get the proper ctor resolution even if `expression` is `null`.
@@ -394,7 +394,8 @@ static int evp_set_default_properties(OPENSSL_CTX *libctx, return 0; } -int EVP_set_default_properties(OPENSSL_CTX *libctx, const char *propq) +int evp_set_default_properties_intern(OPENSSL_CTX *libctx, const char *propq, + int loadconfig) { OSSL_PROPERTY_LIST *pl = NULL;
[evp_generic_do_all->[ossl_algorithm_do_all],evp_generic_fetch->[libctx_descriptor,inner_evp_generic_fetch,ERR_raise,ERR_raise_data],int->[EVPerr,EVP_set_default_properties,evp_set_default_properties,ossl_namemap_name2num_n,ossl_namemap_stored,evp_method_id,strlen,ossl_method_store_add,ossl_parse_query,ossl_property_free,ossl_property_is_enabled,strchr,ossl_property_merge,get_evp_method_store,ossl_method_store_flush_cache,ossl_ctx_global_properties],uint32_t->[ossl_assert],const->[openssl_ctx_is_default,openssl_ctx_is_global_default],OSSL_METHOD_STORE->[openssl_ctx_get_data],evp_method_store_flush->[get_evp_method_store,ossl_method_store_flush_cache],EVP_set_default_properties->[evp_set_default_properties,EVPerr,ossl_parse_query],evp_names_do_all->[ossl_provider_library_context,ossl_namemap_stored,ossl_namemap_doall_names],EVP_default_properties_is_fips_enabled->[evp_default_property_is_enabled],char->[ossl_provider_library_context,ossl_namemap_stored,ossl_namemap_num2name],evp_generic_fetch_by_number->[ERR_raise_data,ossl_namemap_num2name,ossl_namemap_stored,inner_evp_generic_fetch,ERR_raise,libctx_descriptor],EVP_default_properties_enable_fips->[evp_default_properties_merge],void->[ossl_provider_library_context,ossl_namemap_name2num,ossl_method_store_cache_get,ossl_namemap_stored,ossl_namemap_name2num_n,evp_method_id,strlen,ossl_method_store_fetch,ossl_method_store_new,ossl_namemap_add_names,ossl_assert,ossl_method_store_cache_set,strchr,ossl_method_construct,get_evp_method_store,ossl_method_store_free],evp_is_a->[ossl_provider_library_context,ossl_namemap_stored,ossl_namemap_name2num]]
This method is called from OpenSSL library code to set the default properties of the EVP.
It is really confusing to have evp_set_default_properties, EVP_set_default_properties, and evp_set_default_properties_intern. Perhaps rename evp_set_default_properties to evp_set_parsed_default_properties and (optionally) evp_set_default_properties_intern to evp_set_default_properties.
@@ -1015,6 +1015,18 @@ define([ * @exception {RuntimeError} image with id must be in the atlas. */ BillboardCollection.prototype.update = function(context, frameState, commandList) { + var startTime = getTimestamp(); + var timeSlice = this._clampTimeSlice; + var endTime = startTime + timeSlice; + + var clampList = this._clampToTerrainList; + while (clampList.length > 0) { + Billboard._clampPosition(clampList.shift(), frameState.mode, frameState.mapProjection); + if (getTimestamp() >= endTime) { + break; + } + } + removeBillboards(this); var billboards = this._billboards;
[No CFG could be retrieved]
Updates the collection of billboards and adds commands to the command list. Throws a RuntimeError when an image id is not present in the texture atlas.
For later as we discussed offline: #2655
@@ -336,7 +336,7 @@ class spell_pal_ardent_defender : public SpellScriptLoader // Max heal when defense skill denies critical hits from raid bosses // Formula: max defense at level + 140 (raiting from gear) uint32 reqDefForMaxHeal = victim->getLevel() * 5 + 140; - float pctFromDefense = (defenseSkillValue - victim->getLevel()*5.0f) / 140.0f; + float pctFromDefense = (defenseSkillValue = victim->getLevel()*5.0f) / 140.0f; if (pctFromDefense < 0.0f) pctFromDefense = 0.0f; else if (pctFromDefense > 1.0f)
[No CFG could be retrieved]
Handles the AuraEffect when the victim is a player. Registers the aura script that is responsible for a specific aura.
to be honest I don't get the aim of this. Why are you assigning `victim->getLevel()*5.0f` to `defenseSkillValue` if this variable will not be used anywhere ?
@@ -133,6 +133,7 @@ class OverflowMenu extends PureComponent<Props, State> { <RaiseHandButton { ...buttonProps } /> <LobbyModeButton { ...buttonProps } /> <ScreenSharingButton { ...buttonProps } /> + <MuteEveryoneButton { ...buttonProps } /> <MoreOptionsButton { ...moreOptionsButtonProps } /> <Collapsible collapsed = { !showMore }> <ToggleCameraButton { ...buttonProps } />
[No CFG could be retrieved]
A component that renders a menu of buttons. Renders the menu toggle in the bottom sheet header area.
Let's put it inside the collapsible section please.
@@ -111,6 +111,7 @@ public class PerfsToBigQueryTest { expectedRows.add(row1); TableRow row2 = new TableRow() + .set("timestamp", start.getMillis()) .set("runtimeSec", nexmarkPerf2.runtimeSec) .set("eventsPerSec", nexmarkPerf2.eventsPerSec) // when read using TableRowJsonCoder the row field is boxed into an Integer, cast it to int
[PerfsToBigQueryTest->[before->[setStreaming,getBigQueryDataset,setBigQueryTable,createDataset,setTempLocation,setProject,setUp,setResourceNameMode,setBigQueryDataset,setRunner,getAbsolutePath,as,getProject],testSavePerfsToBigQuery->[getAllRows,getTableId,size,NexmarkConfiguration,toArray,tableSpec,NexmarkPerf,containsInAnyOrder,set,savePerfsToBigQuery,add,assertThat,assertEquals,put,valueOf,getBigQueryDataset,getProject],FakeDatasetService,withJobService,TemporaryFolder,FakeJobService]]
Simulates the save of the given Nexmark configuration and the rows in the BigQuery table. When read using TableRowJsonCoder, the row field is boxed into an Integer.
`TableRow.set()` accepts `Object` so I'm a little bit afraid of runtime errors in case it doesn't understand long. :) Is BigQuery able to convert long to TIMESTAMP when we pass `start.getMillis()` (long value) in a table row?
@@ -247,8 +247,9 @@ public class LocalStreamManagerImpl<K, V> implements LocalStreamManager<K> { operation.setSupplier(() -> getStream(cacheEntrySet, parallelStream, segments, keysToInclude, keysToExclude)); operation.handleInjection(registry); R value = operation.performOperation(); - rpc.invokeRemotely(Collections.singleton(origin), factory.buildStreamResponseCommand(requestId, true, - Collections.emptySet(), value), rpc.getDefaultRpcOptions(true)); + CompletableFuture<Map<Address, Response>> completableFuture = rpc.invokeRemotelyAsync(Collections.singleton(origin), + factory.buildStreamResponseCommand(requestId, true, Collections.emptySet(), value), rpc.getDefaultRpcOptions(true)); + handleResponseError(completableFuture, requestId, origin); } @Override
[LocalStreamManagerImpl->[dataRehashed->[lostSegments],getRehashStream->[lostSegments,localSegments,getStream],streamOperationRehashAware->[getRehashStream,SegmentListener],streamOperation->[getStream]]]
Streams the given operation on the given segments. This method is called when a change request is received. It is called by the cache manager.
If `trace` is not on, you could even use `RpcManager.sendTo`. Not sure about the elegance of non-trivial differences in `if (trace) { ... }`
@@ -617,9 +617,8 @@ func apiLegacyV1(all map[string]rest.Storage) *genericapiserver.APIGroupInfo { VersionedResourcesStorageMap: map[string]map[string]rest.Storage{}, Scheme: kapi.Scheme, // version.ParameterCodec = runtime.NewParameterCodec(kapi.Scheme) - ParameterCodec: kapi.ParameterCodec, - NegotiatedSerializer: kapi.Codecs, - SubresourceGroupVersionKind: map[string]schema.GroupVersionKind{}, + ParameterCodec: kapi.ParameterCodec, + NegotiatedSerializer: kapi.Codecs, } // TODO, just create this with lowercase names
[mutate->[mutate],withAppsAPIServer->[Complete],withQuotaAPIServer->[Complete],withTemplateAPIServer->[Complete],withUserAPIServer->[Complete],withNetworkAPIServer->[Complete],withOAuthAPIServer->[Complete],New->[mutate,New],withImageAPIServer->[Complete],withSecurityAPIServer->[Complete],withRouteAPIServer->[Complete],withProjectAPIServer->[Complete],withBuildAPIServer->[Complete],withAuthorizationAPIServer->[Complete],Complete->[Complete]]
apiLegacyV1 returns the resources and codec for the given object. Route adds a route to the given mux.
SubresourceGroupVersionKind for scale is not needed anymore?
@@ -38,6 +38,5 @@ class ProductFieldEnum(graphene.Enum): CHARGE_TAXES = "charge taxes" PRODUCT_IMAGES = "product images" VARIANT_SKU = "variant sku" - COST_PRICE = "cost price" VARIANT_WEIGHT = "variant weight" VARIANT_IMAGES = "variant images"
[ExportScope->[description->[ValueError]],to_enum]
Get the names of the properties of the necessary resource.
Are we removing the ability to export cost price?
@@ -506,6 +506,16 @@ class DependencyVisitor(TraverserVisitor): # Helpers + def add_type_alias_deps(self, o: DepNode) -> None: + """Add dependencies from type aliases to the current target.""" + if o in self.alias_deps: + for alias in self.alias_deps[o]: + self.add_dependency(make_trigger(alias)) + if isinstance(o, FuncItem): + self.add_dependency(make_trigger(alias), make_trigger(o.fullname())) + elif isinstance(o, ClassDef): + self.add_dependency(make_trigger(alias), make_trigger(o.fullname)) + def add_dependency(self, trigger: str, target: Optional[str] = None) -> None: """Add dependency from trigger to a target.
[dump_all_dependencies->[get_dependencies],DependencyVisitor->[visit_member_expr->[process_global_ref_expr],add_attribute_dependency->[add_dependency],add_attribute_dependency_for_expr->[add_attribute_dependency],add_iter_dependency->[add_attribute_dependency],visit_for_stmt->[process_lvalue],add_operator_method_dependency_for_type->[add_operator_method_dependency_for_type],visit_operator_assignment_stmt->[process_lvalue],add_type_dependencies->[add_dependency],process_lvalue->[process_lvalue],visit_name_expr->[process_global_ref_expr],attribute_triggers->[attribute_triggers]],TypeTriggersVisitor->[visit_union_type->[get_type_triggers],visit_callable_type->[get_type_triggers],visit_typeddict_type->[get_type_triggers],visit_instance->[get_type_triggers],visit_tuple_type->[get_type_triggers],visit_type_type->[get_type_triggers],visit_overloaded->[get_type_triggers]]]
Add a dependency from trigger to a target.
This doesn't look right -- `o.fullname()` might not be a valid target (e.g., it could be a nested function, which is not a separate target from the enclosing function).
@@ -118,9 +118,10 @@ public abstract class AbstractSiteToSiteReportingTask extends AbstractReportingT return properties; } - @OnScheduled - public void setup(final ConfigurationContext context) throws IOException { - siteToSiteClient = SiteToSiteUtils.getClient(context, getLogger()); + public void setup(final ReportingContext reportContext) throws IOException { + if (siteToSiteClient != null) { + siteToSiteClient = SiteToSiteUtils.getClient(reportContext, getLogger()); + } } @OnStopped
[AbstractSiteToSiteReportingTask->[JsonRecordReader->[getRawNodeValue->[getRawNodeValue],close->[close],convertField->[convertJsonNodeToRecord,convertField]]]]
Gets the list of supported property descriptors.
I believe this is a negative-logic error, as the real `siteToSiteClient` never gets set. If I run this with a `SiteToSiteProvenanceReportingTask`, then as soon as a provenance event is generated and the reporting task runs, it fails with a NullPointerException. The unit tests did not catch it because the `MockSiteToSiteProvenanceReportingTask.getClient()` method does not use this one. For consistency of behavior, I'm thinking with the new `setup()` method in `AbstractSiteToSiteReportingTask` (as well as the `getClient()` method for testing), `MockSiteToSiteProvenanceReportingTask` should not override `getClient()` but instead override `setup()` and create the mock there. That still won't catch the above logic error, but that will be hard to do with a unit test anyway as that method is trying to create a real client. We could include the same logic in the mocked `setup()` method though, to check if the client is null and create it if it is.
@@ -376,6 +376,7 @@ public class WindmillStateCacheTest { cache.forComputation("comp1").forKey(computationKey("comp1", "key1", SHARDING_KEY), 0L, 0L); WindmillStateCache.ForKeyAndFamily family1 = keyCache.forFamily("family1"); WindmillStateCache.ForKeyAndFamily family2 = keyCache.forFamily("family2"); + @SuppressWarnings("unused") WindmillStateCache.ForKeyAndFamily family3 = keyCache.forFamily("family3"); TestState state1 = new TestState("g1");
[WindmillStateCacheTest->[testBasic->[TestStateTag,windowNamespace,triggerNamespace,TestState],testStaleWorkItem->[TestStateTag,windowNamespace,TestState],TestState->[equals->[equals]],testMultipleShardsOfKey->[TestStateTag,TestState],TestStateTag->[equals->[equals]],testEviction->[TestStateTag,windowNamespace,triggerNamespace,TestState],testExplicitInvalidation->[TestStateTag,TestState],testMultipleKeys->[TestStateTag,TestState],testMultipleFamilies->[TestStateTag,computationKey,TestState],testInvalidation->[TestStateTag,TestState],testBadCoderEquality->[TestStateTagWithBadEquality,TestState]]]
Test if there are multiple families with the same key.
Can this be removed?
@@ -40,15 +40,3 @@ func Convert_quota_ResourceQuotasStatusByNamespace_To_v1_ResourceQuotasStatusByN return nil } - -func addConversionFuncs(scheme *runtime.Scheme) { - err := scheme.AddConversionFuncs( - Convert_quota_ResourceQuotasStatusByNamespace_To_v1_ResourceQuotasStatusByNamespace, - Convert_v1_ResourceQuotasStatusByNamespace_To_quota_ResourceQuotasStatusByNamespace, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - -}
[AddConversionFuncs,Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus,Next,Front,OrderedKeys,Insert,Get,Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus]
Adds conversion functions to the scheme, panicking immediately if any of them is malformed.
this being unused seems like a bug, doesn't it?
@@ -24,10 +24,15 @@ class NodeModule(NodePackage): # tasks. The reality is likely to be though that both pants will never cover all cases, and a # back door to execute new tools during development will be desirable and supporting conversion # of pre-existing package.json files as node_module targets will require this. + package_manager = 'yarnpkg' if package_manager == 'yarn' else package_manager + if package_manager and package_manager not in ['npm', 'yarnpkg']: + raise RuntimeError('Unknown package manager: %s' % package_manager) payload = payload or Payload() payload.add_fields({ 'sources': self.create_sources_field(sources=sources, sources_rel_path=address.spec_path, key_arg='sources'), + 'package_manager': PrimitiveField(package_manager), }) + logger.debug('NodeModule payload: %s', payload.fields) super(NodeModule, self).__init__(address=address, payload=payload, **kwargs)
[NodeModule->[__init__->[super,create_sources_field,Payload,add_fields]]]
Initialize a node module with a sequence of sources.
xx: Supporting multiple names here doesn't seem like a good idea... just failing if it isn't what you wanted would be better.
@@ -103,6 +103,10 @@ LOGGING['loggers'].update({ 'z.pool': {'level': logging.ERROR}, }) +# Update the logger name used for mozlog +LOGGING['formatters']['json']['logger_name'] = 'http_app_addons_stage' + + # This is used for `django-cache-machine` REDIS_BACKEND = env('REDIS_BACKENDS_CACHE')
[email_url,env,bool,db,LOGGING,get_redis_settings,lazy,dict,join,items,lower,cache]
Configures logging, database, and cache settings for the stage environment.
I expect you did it deliberately but calling out this is a change - previously stage didn't override the name so used the same as prod used.
@@ -19,8 +19,11 @@ def test_dataframe_correct_columns(result): def test_dataframe_correct_length(result): - # no idea what this check truly does - should we just remove it? - assert len(result.index) == 14397 + # FIX: no idea what this check truly does - should we just remove it? + # magic needs to change everytime testdata/* is updated. + # Perhaps change result fixture to use BTC_UNITEST instead? + magic = 14397 + assert len(result.index) == magic def test_populates_buy_trend(result):
[test_get_signal_handles_exceptions->[get_signal,Exception,MagicMock,patch],test_returns_latest_buy_signal->[get_signal,DataFrame,patch,utcnow,MagicMock],test_parse_ticker_dataframe->[tolist,parse_ticker_dataframe],test_dataframe_correct_length->[len],test_populates_sell_trend->[populate_sell_trend,Strategy,populate_indicators],test_returns_latest_sell_signal->[get_signal,DataFrame,patch,utcnow,MagicMock],test_get_signal_empty_dataframe->[int,get_signal,log_has,set_level,DataFrame,patch],test_get_signal_exception_valueerror->[int,get_signal,log_has,set_level,ValueError,patch],test_populates_buy_trend->[populate_buy_trend,Strategy,populate_indicators],test_dataframe_correct_columns->[tolist],test_get_signal_old_dataframe->[int,get_signal,log_has,set_level,DataFrame,timedelta,patch,utcnow],test_get_signal_empty->[int,get_signal,log_has,set_level,patch]]
Test dataframe to ensure that the dataframe is correct length.
this definitely should use BTC_UNITTEST. The test is just to check the total length of the dataframe that should match the length of ticker in the json file.
@@ -57,6 +57,8 @@ class Probe { $ssl_url = "https://".$host."/.well-known/host-meta"; $url = "http://".$host."/.well-known/host-meta"; + $baseurl = "http://".$host; + $xrd_timeout = Config::get('system','xrd_timeout', 20); $redirects = 0;
[Probe->[get_feed_link->[query,loadHTMLFile],pumpio_profile_data->[item,loadHTMLFile],poll_hcard->[item,query,loadHTML]]]
This function fetches the XRD metadata from the specified host and is used to add the lrdd-xml and lrdd-json attributes.
Why not directly do `self::$baseurl = "http://".$host;` here and removing the subsequent added line later?
@@ -146,8 +146,7 @@ class SimpleCache { _elgg_services()->datalist->set('simplecache_enabled', 0); _elgg_services()->config->set('simplecache_enabled', 0); - // purge simple cache - _elgg_rmdir(_elgg_services()->config->getDataPath() . "views_simplecache"); + $this->invalidate(); } }
[SimpleCache->[init->[get],registerView->[canonicalizeViewName],getRoot->[get],isEnabled->[get],invalidate->[set],enable->[invalidate,set],getUrl->[canonicalizeViewName,registerView,getRoot],disable->[getDataPath,set,get]]]
disable simple cache.
@mrclay This line and the similar one below.
@@ -436,7 +436,9 @@ func printOldNewDiffs( if diff := olds.Diff(news); diff != nil { printObjectDiff(b, *diff, planning, indent, summary, debug) } else { - printObject(b, news, planning, indent, op, true, debug) + // If there's no diff, report the op as Same - there's no diff to render + // so it should be rendered as if nothing changed. + printObject(b, news, planning, indent, deploy.OpSame, true, debug) } }
[IsObject,IsAssets,ArrayValue,NewArchiveProperty,DiffLinesToChars,Assertf,BoolValue,DiffMain,IsBool,PropertyKey,IsNull,Diff,IsOutput,Strings,Color,Itoa,NewAssetProperty,IgnoreError,New,GetAssets,ParseReference,StableKeys,Assert,Len,NumberValue,Prefix,IsComputed,TypeString,TrimSpace,ID,Failf,TypeOf,Name,IsAsset,IsText,AssetValue,GetPath,DiffCharsToLines,Split,IsString,StringValue,IsURI,IsArchive,GetURI,Sprintf,ObjectValue,IsNumber,ArchiveValue,Keys,RawPrefix,IsArray,String,WriteString,URN,MassageIfUserProgramCodeAsset]
assetOrArchiveToPropertyValue converts an asset or archive element into a property value. print prints the contents of the object.
I'm thinking it's potentially the case that `printObject` doesn't need a `StepOp` parameter anymore, but I didn't want to go too crazy refactoring this whole file for a bug fix. I'm happy to try to get rid of it if anyone wants me to.
@@ -1319,10 +1319,9 @@ class _BaseSurfaceSourceEstimate(_BaseSourceEstimate): subjects_dir : string, or None Path to SUBJECTS_DIR if it is not set in the environment. buffer_size : int - Morph data in chunks of `buffer_size` time instants. - Saves memory when morphing long time intervals. + Deprecated. Will be ignored. n_jobs : int - Number of jobs to run in parallel. + Deprecated. Will be ignored. subject_from : string Name of the original subject as named in the SUBJECTS_DIR. If None, self.subject will be used.
[SourceEstimate->[center_of_mass->[_center_of_mass],save->[_write_stc,_write_w],extract_label_time_course->[extract_label_time_course]],_make_stc->[_get_src_type],VectorSourceEstimate->[normal->[SourceEstimate],magnitude->[SourceEstimate]],_BaseSourceEstimate->[__imul__->[_verify_source_estimate_compat,_remove_kernel_sens_data_],__neg__->[_remove_kernel_sens_data_],__iadd__->[_verify_source_estimate_compat,_remove_kernel_sens_data_],data->[_remove_kernel_sens_data_],__isub__->[_verify_source_estimate_compat,_remove_kernel_sens_data_],resample->[_remove_kernel_sens_data_,resample],bin->[copy],__idiv__->[_verify_source_estimate_compat,_remove_kernel_sens_data_],mean->[mean],transform->[copy,transform_data],__abs__->[_remove_kernel_sens_data_],__ipow__->[_remove_kernel_sens_data_]],spatial_dist_connectivity->[spatio_temporal_dist_connectivity],VolSourceEstimate->[save->[_write_stc,_write_w],__init__->[__init__]],_read_w->[_read_3],_morph_sparse->[copy,_sparse_argmax_nnz_row],_gen_extract_label_time_course->[mean,_get_label_flip,sqrt],spatial_tris_connectivity->[spatio_temporal_tris_connectivity],compute_morph_matrix->[_get_subject_sphere_tris,_morph_buffer],_write_w->[_write_3],_center_of_mass->[mean,sqrt],grade_to_vertices->[sqrt],morph_data_precomputed->[SourceEstimate,VectorSourceEstimate],spatial_src_connectivity->[spatio_temporal_src_connectivity],MixedSourceEstimate->[plot_surface->[SourceEstimate],__init__->[__init__]],morph_data->[_get_subject_sphere_tris,SourceEstimate,VectorSourceEstimate],_BaseSurfaceSourceEstimate->[in_label->[_hemilabel_stc],expand->[copy,_remove_kernel_sens_data_],__init__->[__init__]],_spatio_temporal_src_connectivity_vol->[_get_vol_mask],_save_stc_as_volume->[save,_get_src_type],read_source_estimate->[_read_stc,_read_w],spatio_temporal_src_connectivity->[_spatio_temporal_src_connectivity_vol,_spatio_temporal_src_connectivity_surf],_morph_buffer->[_morph_buffer],extract_label_time_course->[_gen_extract_label_time_course]]
Morph a source estimate from one subject to another. Morph a source estimate for a single missing node.
@TommyClausner did you bench that it's not faster to allow n_jobs > 1?
@@ -898,6 +898,7 @@ public class IndexTask extends AbstractTask private static final boolean DEFAULT_GUARANTEE_ROLLUP = false; private static final boolean DEFAULT_REPORT_PARSE_EXCEPTIONS = false; private static final long DEFAULT_PUBLISH_TIMEOUT = 0; + private static final Period DEFAULT_LOCK_TIMEOUT = new Period("PT5m"); static final int DEFAULT_TARGET_PARTITION_SIZE = 5000000;
[IndexTask->[IndexTuningConfig->[withBasePersistDirectory->[IndexTuningConfig]],createNonExtendableShardSpecs->[getShardSpec->[getShardSpec]],generateAndPublishSegments->[isGuaranteedRollup,updateShardSpec,getShardSpec],determineShardSpecs->[isGuaranteedRollup],makeId,makeDataSource]]
Gets the firehoseFactory. This parameter is left for compatibility with Druid 0. 12.
Is that intended that this is a separate constant with the one in HadoopTuningConfig?
@@ -116,6 +116,7 @@ type defaultHost struct { ctx *Context // the shared context for this host. config ConfigSource // the source for provider configuration parameters. events Events // optional callbacks for plugin load events + runtimeOptions map[string]bool // options to pass to the language plugins. analyzerPlugins map[tokens.QName]*analyzerPlugin // a cache of analyzer plugins and their processes. languagePlugins map[string]*languagePlugin // a cache of language plugins and their processes. resourcePlugins map[tokens.Package]*resourcePlugin // a cache of resource plugins and their processes.
[GetRequiredPlugins->[LanguageRuntime,GetRequiredPlugins],EnsurePlugins->[LanguageRuntime,Analyzer,Provider],LanguageRuntime->[loadPlugin],SignalCancellation->[SignalCancellation],Close->[Close],Analyzer->[loadPlugin],Provider->[loadPlugin]]
The main entry point for the protocol. This is a helper function to send a message to the diagnostic log.
Are we sure that all options we'll want to support here are going to be Booleans?
@@ -72,8 +72,7 @@ public: */ ScriptApiBase::ScriptApiBase() : - m_luastackmutex(), - m_gamedef(NULL) + m_luastackmutex() { #ifdef SCRIPTAPI_LOCK_DEBUG m_lock_recursion_count = 0;
[loadScript->[getStack,ModError,luaL_loadfile,lua_pcall,c_str,PUSH_ERROR_HANDLER,lua_pop,lua_tostring],scriptError->[getStack,c_str,script_error],stackDump->[lua_gettop,lua_type,snprintf,lua_toboolean,lua_tostring,lua_typename,lua_tonumber], m_luastackmutex->[lua_atpanic,lua_setglobal,luaL_newstate,lua_pushcfunction,luaJIT_setmode,luaL_openlibs,lua_rawseti,lua_newtable,lua_pop,lua_pushlightuserdata,FATAL_ERROR_IF,lua_pushstring],realityCheck->[script_get_backtrace,lua_gettop,stackDump,LuaError], lua_close->[lua_close],luaPanic->[str,lua_tostring,FATAL_ERROR],addObjectReference->[lua_pushvalue,lua_settable,lua_getfield,lua_gettop,lua_pushnumber,lua_getglobal,luaL_checktype,getId],ModNameStorer->[ L->[c_str,lua_rawseti,lua_pushstring], lua_pushnil->[lua_pushnil,lua_rawseti]],loadMod->[loadScript],objectrefGetOrCreate->[push_objectRef,getId],removeObjectReference->[lua_settable,lua_getfield,lua_gettop,lua_pushnumber,lua_pop,luaL_checktype,lua_gettable,getId,lua_pushnil],setOriginFromTableRaw->[getStack,lua_istable,getstringfield_default],runCallbacksRaw->[getStack,scriptError,lua_getfield,lua_gettop,lua_insert,lua_pushnumber,lua_pcall,lua_remove,PUSH_ERROR_HANDLER,lua_getglobal,assert,FATAL_ERROR_IF]]
This function returns a ScriptApiBase object. This function is called from ScriptApiSecurity.
this constructor is useless you can remove it
@@ -112,7 +112,7 @@ module Verify end def initialize_idv_session - idv_session.params.merge!(profile_params) + idv_session.params = profile_params.to_h idv_session.applicant = idv_session.vendor_params end
[SessionsController->[idv_form->[new],step->[new]]]
Initialize the idv_session and add vendor and params if needed.
I guess it's safe to overwrite here because this is the first step?
@@ -42,8 +42,8 @@ if (GETPOST('action', 'alpha') == 'set') { $db->begin(); - $res = dolibarr_set_const($db, "TAKEPOS_HEADER", GETPOST('TAKEPOS_HEADER', 'alpha'), 'chaine', 0, '', $conf->entity); - $res = dolibarr_set_const($db, "TAKEPOS_FOOTER", GETPOST('TAKEPOS_FOOTER', 'alpha'), 'chaine', 0, '', $conf->entity); + $res = dolibarr_set_const($db, "TAKEPOS_HEADER", GETPOST('TAKEPOS_HEADER', 'none'), 'chaine', 0, '', $conf->entity); + $res = dolibarr_set_const($db, "TAKEPOS_FOOTER", GETPOST('TAKEPOS_FOOTER', 'none'), 'chaine', 0, '', $conf->entity); $res = dolibarr_set_const($db, "TAKEPOS_RECEIPT_NAME", GETPOST('TAKEPOS_RECEIPT_NAME', 'alpha'), 'chaine', 0, '', $conf->entity); $res = dolibarr_set_const($db, "TAKEPOS_SHOW_CUSTOMER", GETPOST('TAKEPOS_SHOW_CUSTOMER', 'alpha'), 'chaine', 0, '', $conf->entity); $res = dolibarr_set_const($db, "TAKEPOS_AUTO_PRINT_TICKETS", GETPOST('TAKEPOS_AUTO_PRINT_TICKETS', 'int'), 'int', 0, '', $conf->entity);
[textwithpicto,Create,rollback,loadLangs,trans,begin,close,commit,selectyesno]
Set page for TakePos module Set the values for the cashdesk.
'none' is dangerous (it should be used in very special cases). Can you try instead 'restricthtml' (so html is allowed but limited).
@@ -44,14 +44,6 @@ class Git throw new \InvalidArgumentException('The source URL '.$url.' is invalid, ssh URLs should have a port number after ":".'."\n".'Use ssh://git@example.com:22/path or just git@example.com:path if you do not want to provide a password or custom port.'); } - if (!$initialClone) { - // capture username/password from URL if there is one - $this->process->execute('git remote -v', $output, $cwd); - if (preg_match('{^(?:composer|origin)\s+https?://(.+):(.+)@([^/]+)}im', $output, $match)) { - $this->io->setAuthentication($match[3], urldecode($match[1]), urldecode($match[2])); - } - } - // public github, autoswitch protocols if (preg_match('{^(?:https?|git)://'.self::getGitHubDomainsRegex($this->config).'/(.*)}', $url, $match)) { $protocols = $this->config->get('github-protocols');
[Git->[throwException->[getErrorOutput,execute],getGitHubDomainsRegex->[get],runCommand->[ask,throwException,getErrorOutput,setAuthentication,isInteractive,getAuthentication,write,askAndHideAnswer,get,authorizeOAuthInteractively,authorizeOAuth,storeAuth,hasAuthentication,removeDirectory,execute]]]
Runs a command on the remote or from a GitHub repository. Checks if a git repository can be cloned. Checks if a user has authentication and stores it in the auth cache.
Any particular reason you want to ignore existing credentials?
@@ -3911,9 +3911,9 @@ inline void Game::limitFps(FpsControl *fps_timings, f32 *dtime) else fps_timings->busy_time = 0; - u32 frametime_min = 1000 / (g_menumgr.pausesGame() - ? g_settings->getFloat("pause_fps_max") - : g_settings->getFloat("fps_max")); + u32 frametime_min = 1000 / (device->isWindowFocused() + ? g_settings->getFloat("fps_max") + : g_settings->getFloat("fps_max_unfocused")); if (fps_timings->busy_time < frametime_min) { fps_timings->sleep_time = frametime_min - fps_timings->busy_time;
[No CFG could be retrieved]
Updates the profiler graph and updates the device time based on the specified frame time. This is a private function that is called by the showOverlay method of the Game class.
Why is `g_menumgr.pausesGame()` no longer needed?
@@ -25,8 +25,14 @@ module Payments end def call - process_purchase - create_credits if success? + # we expect at least one of :stripe_token, :organization_id, or :selected_card to process + if purchase_options.compact_blank.present? + process_purchase + create_credits if success? + else + self.error = "Please select a payment method" + end + self end
[ProcessCreditPurchase->[call->[call,success?],create_credits->[find,new,size,update,current,purchaser,present?,id,insert_all],process_purchase->[error,update_user_stripe_info,create_charge,message,success,find_or_create_card],update_user_stripe_info->[nil?,id,update_column],create_charge->[id,charge],find_or_create_customer->[email,stripe_id_code,create,get],cost_per_credit->[credit_prices_in_cents],find_or_create_card->[get_source,id,create_source],initialize->[user,success,purchase_options,credits_count],attr_reader,attr_writer,attr_accessor]]
Create credits for a single node.
Since organization id is passed in the `purchase_options` hash but not used during purchase (only after purchase in `create_credits`) we need to either slice `:stripe_token, :selected_card` here or just check at least one of those are non-blank.
@@ -88,15 +88,7 @@ class SmartContentContainer implements ArrayableInterface private $stopwatch; /** - * @param ContentQueryExecutorInterface $contentQuery - * @param ContentQueryBuilderInterface $contentQueryBuilder - * @param TagManagerInterface $tagManager - * @param array $params - * @param string $webspaceKey - * @param string $languageCode - * @param string $segmentKey - * @param bool $preview - * @param Stopwatch $stopwatch + * Constructor */ public function __construct( ContentQueryExecutorInterface $contentQueryExecutor,
[SmartContentContainer->[__get->[getConfig,getData],toArray->[getConfig],getData->[getConfig]]]
Constructs a new instance of the class.
Why did you remove all this type hints?
@@ -17,7 +17,11 @@ class Cdo(AutotoolsPackage): maintainers = ['skosukhin'] - version('1.9.8', sha256='f2660ac6f8bf3fa071cf2a3a196b3ec75ad007deb3a782455e80f28680c5252a', url='https://code.mpimet.mpg.de/attachments/download/20286/cdo-1.9.8.tar.gz') + version('1.9.9.rc2', sha256='2328299c43ecd10f8283056b6a65e6f205fb64e988ce360fc2b30672e7491e66', + url='https://code.mpimet.mpg.de/attachments/download/21529/cdo-1.9.9rc2.tar.gz') + version('1.9.8', sha256='f2660ac6f8bf3fa071cf2a3a196b3ec75ad007deb3a782455e80f28680c5252a', + url='https://code.mpimet.mpg.de/attachments/download/20286/cdo-1.9.8.tar.gz', + preferred=True) version('1.9.7.1', sha256='3771952e065bcf935d43e492707370ed2a0ecb59a06bea24f9ab69d77943962c', url='https://code.mpimet.mpg.de/attachments/download/20124/cdo-1.9.7.1.tar.gz') version('1.9.6', sha256='b31474c94548d21393758caa33f35cf7f423d5dfc84562ad80a2bdcb725b5585', url='https://code.mpimet.mpg.de/attachments/download/19299/cdo-1.9.6.tar.gz')
[Cdo->[configure_args->[append,enable_or_disable,with_or_without,satisfies],variant,conflicts,depends_on,version]]
This function returns a list of all the climate and nwp model data for a given This file contains all the information about the Cdo. 2016 - 03 - 28.
`cdo` is supposed to work with `PROJ6` starting version `1.9.7` (by switching to the old API). Starting version `1.9.8` `cdo` officially supports the new API of `PROJ6`. Therefore, we don't really need the release candidate. Could you, please, remove it?
@@ -114,9 +114,10 @@ func (node *Node) StartServer(port string) { // Disable this temporarily. // node.blockchain = syncing.StartBlockSyncing(node.Consensus.GetValidatorPeers()) } - fmt.Println("going to start server on port:", port) - //node.log.Debug("Starting server", "node", node, "port", port) - node.listenOnPort(port) + p2pv2.InitHost(node.SelfPeer.Ip, port) + p2pv2.BindHandler(node.NodeHandler) + // Hang forever + <-make(chan struct{}) } func (node *Node) SetLog() *Node {
[ConnectBeaconChain->[SendMessage,SerializeNode,ConstructIdentityMessage],addPendingTransactions->[Lock,Unlock,Debug],AddPeers->[Copy,Store,AddPeers,Sprintf,Info,Load],SerializeNode->[Encode,NewEncoder,Println,Bytes],SetLog->[New],String->[String],StartServer->[listenOnPort,Println],JoinShard->[SendMessage,Debug,ConstructPingMessage,NewExpBackoff,Sleep,NewPingMessage],addCrossTxsToReturn->[Lock,Unlock,Debug],listenOnPort->[NodeHandler,Error,Listen,Accept,JoinHostPort,Close,NewExpBackoff,Sleep],getTransactionsForNewBlock->[SelectTransactionsForNewBlock,Lock,Unlock,Debug],Root,NewBlockChain,MustCommit,Error,NewMemDatabase,GetAddressFromInt,New,NewCoinbaseTX,NewBuffer,GenerateKey,PubkeyToAddress,NewGenesisBlock,NewDecoder,NewTxPool,NewFaker,Println,Decode,NewInt,CreateUTXOPoolFromGenesisBlock]
StartServer starts the server on the given port.
Why do we have to delete this? Can we keep the old version and the new version at the same time until the full p2p is done?
@@ -55,7 +55,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri private long recordsWritten = 0; private long insertRecordsWritten = 0; private long recordsDeleted = 0; - private Iterator<HoodieRecord<T>> recordIterator; + private Map<String, HoodieRecord<T>> recordMap; private boolean useWriterSchema = false; public HoodieCreateHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable,
[HoodieCreateHandle->[close->[close],write->[write],canWrite->[canWrite]]]
Creates a HoodieWriteHandle object. Reads the Hoodie partition metadata and creates a marker file.
need to ensure that having this be a map won't affect normal inserts.
@@ -102,11 +102,12 @@ EOT $input->getOption('repository-url'), $input->getOption('no-custom-installers'), $input->getOption('no-scripts'), + $input->getOption('no-progress'), $input->getOption('keep-vcs') ); } - public function installProject(IOInterface $io, $packageName, $directory = null, $packageVersion = null, $stability = 'stable', $preferSource = false, $preferDist = false, $installDevPackages = false, $repositoryUrl = null, $disableCustomInstallers = false, $noScripts = false, $keepVcs = false) + public function installProject(IOInterface $io, $packageName, $directory = null, $packageVersion = null, $stability = 'stable', $preferSource = false, $preferDist = false, $installDevPackages = false, $repositoryUrl = null, $disableCustomInstallers = false, $noScripts = false, $noProgress = false, $keepVcs = false) { $config = Factory::createConfig();
[CreateProjectCommand->[createDownloadManager->[createDownloadManager]]]
Installs a package. Finds a package and returns it if it is a many - node package. Checks if the project is installed and if so removes the VCS metadata if it is. returns 0 if not found.
why don't you add it in last place to preserve BC?
@@ -203,7 +203,8 @@ class SquaredDifferenceTest(test_util.TensorFlowTestCase): @test_util.run_in_graph_and_eager_modes() def testSquaredDifference(self): - for dtype in [np.int32, np.float16]: + for dtype in [np.float16, np.float32, np.float64, + np.int32, np.int64]: x = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) y = np.array([-3, -2, -1], dtype=dtype) z = (x - y) * (x - y)
[DivAndModTest->[testTruncateModInt->[intTestData],testFloorModInt->[intTestData],testFloorModFloat->[floatTestData],testRealDiv->[floatTestData],testConsistent->[intTestData],testDivideInt->[intTestData],testTruncateModFloat->[floatTestData]]]
Test squared difference of two sets of elements.
normally when we talk about "squared difference" we are referring to the L2 metric in a vector space. Therefore, this function should compute conj(x-y)*(x-y). In other words, you need to modify the implementation as well by adding a conjugation to the first input for complex types.
@@ -267,6 +267,14 @@ public class HiveMetaStoreBasedRegister extends HiveRegister { } } + private String stringifyPartition(Partition partition) { + if (log.isDebugEnabled()) { + return partition.toString(); + } else { + return Arrays.toString(partition.getValues().toArray()); + } + } + @Override public Optional<HiveTable> getTable(String dbName, String tableName) throws IOException { try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
[HiveMetaStoreBasedRegister->[createTableIfNotExists->[createTableIfNotExists],getPartition->[getPartition],getTable->[getTable],alterPartition->[getPartition],createDbIfNotExists->[createDbIfNotExists],alterTable->[getTable],close->[close]]]
Add or alter a partition.
Can't you just add `log.debug` after line 263? Or is this for skipping the Azkaban default log4j properties?
@@ -413,13 +413,9 @@ class PackageFinder(object): for location in url_locations: logger.debug('* %s', location) - formats = set(["source"]) - if self.use_wheel: - formats.add("binary") - search = Search( - project_name.lower(), - pkg_resources.safe_name(project_name).lower(), - frozenset(formats)) + canonical_name = pkg_resources.safe_name(project_name).lower() + formats = fmt_ctl_formats(self.format_control, canonical_name) + search = Search(project_name.lower(), canonical_name, formats) find_links_versions = self._package_versions( # We trust every directly linked archive in find_links (Link(url, '-f', trusted=True) for url in self.find_links),
[PackageFinder->[_sort_locations->[sort_path],_get_index_urls_locations->[mkurl_pypi_url],find_requirement->[_find_all_versions,InstallationCandidate,_sort_versions],_link_package_versions->[_log_skipped_link,InstallationCandidate],_find_all_versions->[_sort_locations,_validate_secure_origin,_get_index_urls_locations],_package_versions->[_sort_links]],Link->[splitext->[splitext],ext->[splitext]],Link]
Find all available versions of a given project name. Find packages that are not available in the system.
It didn't catch it on the previous PR but it seems strange to set `project_name.lower()` as the user supplied name (since the user obviously supplied `project_name`).
@@ -62,7 +62,11 @@ Rails.application.routes.draw do delete :remove_admin end end - resources :reactions, only: [:update] + resources :reactions do + collection do + post "update_reaction" + end + end resources :response_templates, only: %i[index new edit create update destroy] resources :chat_channels, only: %i[index create update] resources :reports, only: %i[index show], controller: "feedback_messages" do
[new,authenticate,authenticated,redirect,devise_scope,mount,draw,has_role?,resources,member,root,use,scope,post,set,controllers,require,secrets,class_eval,use_doorkeeper,resource,patch,devise_for,each,production?,delete,resolve,namespace,collection,get,session_options,tech_admin?]
The dashboard_resource controller Updates a user s identity.
Leaving a note to update this back to the previous version :)
@@ -39,13 +39,10 @@ hostname_regex = re.compile( r"^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*[a-z]+$", re.IGNORECASE) -@zope.interface.implementer(interfaces.IPlugin) -class Plugin: +class Plugin(AbstractPlugin, metaclass=ABCMeta): # pylint: disable=abstract-method """Generic plugin.""" - # provider is not inherited, subclasses must define it on their own - # @zope.interface.provider(interfaces.IPluginFactory) - def __init__(self, config, name): + def __init__(self, config, name): # pylint: disable=super-init-not-called self.config = config self.name = name
[install_version_controlled_file->[_install_current_file->[_write_current_hash],_write_current_hash,_install_current_file],dir_setup->[expanded_tempdir],Addr->[__eq__->[normalized_tuple],get_ipv6_exploded->[_normalize_ipv6]],Installer->[finalize_checkpoint->[finalize_checkpoint],rollback_checkpoints->[rollback_checkpoints],revert_temporary_config->[revert_temporary_config],recovery_routine->[recovery_routine]],Plugin->[conf->[dest],option_namespace->[option_namespace],inject_parser_options->[add->[option_namespace],add_parser_arguments],dest_namespace->[dest_namespace]]]
Initialize the object.
Is there a problem with calling `super` here? This code feels a little odd to me. I wonder if we should: 1. Call `super().__init__(config, name)` here. 2. Make `interfaces.Plugin` call `super().__init__()`. What do you think?
@@ -207,6 +207,7 @@ final class Configuration implements ConfigurationInterface ->info('Enable the tags-based cache invalidation system.') ->canBeEnabled() ->children() + ->scalarNode('purger')->defaultValue('varnish')->cannotBeEmpty()->info('The name of the purger to use.')->end() ->arrayNode('varnish_urls') ->defaultValue([]) ->prototype('scalar')->end()
[Configuration->[addExceptionToStatusSection->[end],addFormatSection->[end],getConfigTreeBuilder->[root,addExceptionToStatusSection,addFormatSection,end]]]
Returns a TreeBuilder instance for the API platform configuration. This function is used to enable or disable partial data according to serialization groups. This function is used to add some basic information to the configuration tree.
IMO, it should be directly the name of the service instead of a tricky concatenation (i.e. `api_platform.http_cache.purger.$purger`). Or did I miss something specific? So, create an implementation of your new interface for Varnish whose service name will be the default value here. WDYT?
@@ -133,7 +133,7 @@ class ConfigOptionParser(CustomOptionParser): def __init__(self, *args, **kwargs): self.name = kwargs.pop('name') self.isolated = kwargs.pop("isolated", False) - self.config = Configuration() + self.config = Configuration(self.isolated) assert self.name optparse.OptionParser.__init__(self, *args, **kwargs)
[PrettyHelpFormatter->[__init__->[__init__]],UpdatingDefaultsHelpFormatter->[expand_default->[expand_default]],ConfigOptionParser->[_update_defaults->[check_default],get_default_values->[_update_defaults],__init__->[__init__]]]
Initialize the object with name and optional parameters.
Is it still useful to store the `self.isolated` on `ConfigOptionParser` ?
@@ -350,7 +350,7 @@ function message_content(App $a) { $o .= $header; $r = q("SELECT count(*) AS `total` FROM `mail` - WHERE `mail`.`uid` = %d GROUP BY `parent-uri` ORDER BY `created` DESC", + WHERE `mail`.`uid` = %d GROUP BY `parent-uri`, `created` ORDER BY `created` DESC", intval(local_user()) );
[message_content->[set_pager_total]]
message_content - Renders the message content. This function is called when the user is about to cancel the confirmation; it deletes the message. It is also called when a new message is received from a user, and it generates a link to a private message that can be sent to a user.
Same problem, I suggest using `MAX(created) AS created` in the SELECT instead.
@@ -194,6 +194,10 @@ module.exports = JhipsterServerGenerator.extend({ this.languages = ['en', 'fr']; } + if(this.applicationType == 'gateway') { + this.skipUserManagement = true; + } + this.log(chalk.green('This is an existing project, using the configuration from your .yo-rc.json file \n' + 'to re-generate the project...\n'));
[No CFG could be retrieved]
This function is called by the server to generate a new key for the application. The serverPort parameter is the port number on which a server can be run.
can we do this in `app/index.js` itself similar to how we set it for microservice
@@ -44,6 +44,8 @@ AC_CONFIG_HEADERS([zfs_config.h], [ (mv zfs_config.h zfs_config.h.tmp && awk -f ${ac_srcdir}/config/config.awk zfs_config.h.tmp >zfs_config.h && rm zfs_config.h.tmp) || exit 1]) +AC_CACHE_VAL([lt_cv_sys_lib_dlsearch_path_spec],[ + lt_cv_sys_lib_dlsearch_path_spec="\$libdir `rpm --eval '/%{_lib}'`"]) AC_PROG_INSTALL AC_PROG_CC
[No CFG could be retrieved]
Caches the libtool dynamic-library search path (lt_cv_sys_lib_dlsearch_path_spec) so that it includes $libdir and the rpm %{_lib} directory.
> /sbin/zdb: error while loading shared libraries: libnvpair.so.1: cannot open shared object file: No such file or directory And this of course won't work: RPATH="/lib64" is the only thing that is making ZFS binaries work where "/lib64" is not in `ld` search path, taking this away allow us to compile but then we can't run the result.
@@ -338,7 +338,7 @@ public class ClientSessionFactoryImpl implements ClientSessionFactoryInternal, C return createSessionInternal(null, null, xa, autoCommitSends, autoCommitAcks, preAcknowledge, serverLocator.getAckBatchSize()); } - // ConnectionLifeCycleListener implementation -------------------------------------------------- + // ClientConnectionLifeCycleListener implementation -------------------------------------------------- @Override public void connectionCreated(final ActiveMQComponent component,
[ClientSessionFactoryImpl->[DelegatingFailureListener->[connectionFailed->[handleConnectionFailure,connectionFailed]],finalize->[finalize,close],establishNewConnection->[DelegatingFailureListener,connect,createTransportConnection,schedulePing,addFailureListener],cleanup->[interruptConnectAndCloseAllSessions],schedulePing->[run],handleConnectionFailure->[close,handleConnectionFailure],close->[interruptConnectAndCloseAllSessions],SessionFactoryTopologyHandler->[nodeDisconnected->[CloseRunnable],notifyNodeUp->[notifyNodeUp],notifyNodeDown->[notifyNodeDown]],checkCloseConnection->[cancelScheduledTasks,close],failoverOrReconnect->[lockFailover,close,failoverOrReconnect],DelegatingBufferHandler->[bufferReceived->[bufferReceived]],createSessionInternal->[close],createTransportConnection->[openTransportConnection,close,instantiateConnectorFactory,createConnector],createConnector->[createConnector],CloseRunnable->[stop->[causeExit]],isClosed->[isClosed],callSessionFailureListeners->[callSessionFailureListeners],openTransportConnection->[close],ActualScheduledPinger->[run->[run]]]]
Create a new session with a specific sequence number.
where is the deprecation? I see this a fix, but I'm not sure why you're calling this a deprecation?
@@ -239,10 +239,8 @@ public class KeyValueBlobTransientStore implements TransientStoreProvider { BlobProvider bp = getBlobProvider(); BinaryGarbageCollector gc = bp.getBinaryManager().getGarbageCollector(); gc.start(); - keyStream().map(this::getBlobs) // + keyStream().map(this::getBlobKeys) // .flatMap(Collection::stream) - .map(ManagedBlob.class::cast) - .map(ManagedBlob::getKey) .forEach(gc::mark); gc.stop(true); // delete computeStorageSize();
[KeyValueBlobTransientStore->[atomicUpdate->[getKeyValueStore],removeCompleted->[getKeyValueStore],remove->[removeCompleted,removeBlobs,removeParameters],removeAll->[getKeyValueStore,doGC],setReleaseTTL->[jsonToList,getKeyValueStore,jsonToMap],putParameters->[putParameter],getParameters->[jsonToList,getKeyValueStore,getParameter],isCompleted->[getKeyValueStore],putParameter->[toJson,atomicUpdate,jsonToList,getKeyValueStore,markEntryExists],doGC->[computeStorageSize,getBlobProvider],exists->[getKeyValueStore],computeStorageSize->[getKeyValueStore],putBlobs->[toJson,getKeyValueStore,addStorageSize,doGC,getStorageSize,getBlobProvider,markEntryExists],keyStream->[getKeyValueStore],setCompleted->[getKeyValueStore],release->[doGC,getStorageSize],getSize->[getKeyValueStore,jsonToMap],getStorageSize->[getKeyValueStore],markEntryExists->[getKeyValueStore],getBlobs->[getBlobProvider,getKeyValueStore,jsonToMap],getParameter->[getKeyValueStore],getKeyValueStore->[getKeyValueStore],removeBlobs->[addStorageSize,getKeyValueStore,jsonToMap],removeParameters->[jsonToList,getKeyValueStore]]]
Runs garbage collection on the transient store: marks every blob key still referenced by an entry, deletes unreferenced binaries, and recomputes the storage size.
Consider invoking gc.stop(true) in finally block to ensure gc is always stopped. Also, be sure to handle potential concurrent blobKey removal between this.getBlobKeys() and gc.mark().
@@ -90,8 +90,12 @@ public class PipelineReportHandler implements try { processPipelineReport(report, dn, publisher); } catch (IOException e) { - LOGGER.error("Could not process pipeline report={} from dn={}.", - report, dn, e); + // Avoid NotLeaderException logging which happens when processing + // pipeline report on followers. + if (!(e instanceof NotLeaderException)) { + LOGGER.error("Could not process pipeline report={} from dn={}.", + report, dn, e); + } } } }
[PipelineReportHandler->[processPipelineReport->[ClosePipelineCommand,fireEvent,debug,isHealthy,setPipelineLeaderId,getPipelineID,getFromProtobuf,getUuid,getReplicationConfig,setTerm,openPipeline,getPipelineState,isDebugEnabled,getPipeline,getTermOfLeader,getInSafeMode,setReportedDatanode,getId],onMessage->[getDatanodeDetails,getReport,isTraceEnabled,error,trace,getPipelineReportList,checkNotNull,processPipelineReport],setReportedDatanode->[reportDatanode],setPipelineLeaderId->[setLeaderId,getReplicationConfig,getUuid,incNumPipelineBytesWritten,getBytesWritten,getIsLeader,hasFactor],checkNotNull,getLogger,getBoolean,create]]
Process a pipeline report from a datanode.
SonarLint: Replace the usage of the `instanceof` operator by a catch block.
@@ -346,11 +346,6 @@ int write_image(struct dt_imageio_module_data_t *data, if (imgid > 0) { gboolean use_icc = FALSE; -#if AVIF_VERSION >= 800 - image->colorPrimaries = AVIF_COLOR_PRIMARIES_UNKNOWN; - image->transferCharacteristics = AVIF_TRANSFER_CHARACTERISTICS_UNKNOWN; - image->matrixCoefficients = AVIF_MATRIX_COEFFICIENTS_UNSPECIFIED; - switch (over_type) { case DT_COLORSPACE_SRGB: image->colorPrimaries = AVIF_COLOR_PRIMARIES_BT709;
[No CFG could be retrieved]
Writes an AVIF image, setting the color primaries (e.g. AVIF_COLOR_PRIMARIES_BT709), transfer characteristics and matrix coefficients according to the selected color space.
I don't think you want to remove those 3 lines, it ensure that the fields are initialized with meaningful values as the case below as a path without initialization.
@@ -21,11 +21,13 @@ import ( "time" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/outputs" ) type Pipeline interface { PipelineConnector SetACKHandler(PipelineACKHandler) error + GetOutputGroup() outputs.Group // FIXME! Causes circular import } // PipelineConnector creates a publishing Client. This is typically backed by a Pipeline.
[No CFG could be retrieved]
Returns a new instance of the type. This is the main entry point for the publisher pipeline.
I'd like some guidance on how to fix this (again, assuming this PR is heading in the right direction, of course).
@@ -172,12 +172,12 @@ def pow(x, y, name=None): x = paddle.to_tensor([1, 2, 3]) y = 2 res = paddle.pow(x, y) - print(res.numpy()) # [1 4 9] + print(res) # [1 4 9] # example 2: y is a Tensor y = paddle.full(shape=[1], fill_value=2, dtype='float32') res = paddle.pow(x, y) - print(res.numpy()) # [1 4 9] + print(res) # [1 4 9] """ # in dynamic graph mode
[broadcast_shape->[broadcast_shape],multiply->[_elementwise_op_in_dygraph,_elementwise_op],tanh->[tanh],clip->[clip],inverse->[inverse,_check_input],pow->[pow],divide->[_elementwise_op_in_dygraph,_elementwise_op],log2->[log2],trace->[trace,__check_input],add_n->[sum],mm->[__check_input],increment->[increment],minimum->[_elementwise_op_in_dygraph,_elementwise_op],sign->[sign],log1p->[log1p],log10->[log10],floor_divide->[_elementwise_op_in_dygraph,_elementwise_op],logsumexp->[logsumexp],addmm->[addmm],maximum->[_elementwise_op_in_dygraph,_elementwise_op],remainder->[_elementwise_op_in_dygraph,_elementwise_op],kron->[kron],add->[_elementwise_op_in_dygraph,_elementwise_op],cumsum->[cumsum]]
Compute the power of tensor elements, raising x element-wise to the power of y.
The following code affects this version of the paddle.pow API. This block of code is meant to align PaddlePaddle's pow API functionality and behavior with NumPy's power API, but it slows down this API call by 90%.
@@ -12,7 +12,6 @@ from mne.utils import run_tests_if_main, slow_test base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(base_dir, 'test_raw.fif') event_name = op.join(base_dir, 'test-eve.fif') -evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif') event_id, tmin, tmax = 1, -0.2, 0.5 event_id_2 = 2
[_load_data->[read_events,dict,catch_warnings,read_raw_fif,Epochs,pick_types],test_interpolation->[all,hasattr,pick_channels,assert_raises,assert_allclose,corrcoef,interpolate_bads,get_data,assert_equal,_load_data,dot,_get_channel_positions,assert_array_equal,len,normalize_proj,index,average,_make_interpolation_matrix,RawArray,assert_true,ones],run_tests_if_main,dirname,join]
Load data from a file in a base directory, returning the raw data and the epochs of a given recording.
@Eric89GXL any idea what this file was for?
@@ -49,15 +49,6 @@ feature 'Changing authentication factor' do end end end - - scenario 'deleting account' do - visit account_delete_path - - expect(page).to have_content t('help_text.no_factor.delete_account') - complete_2fa_confirmation - - expect(current_path).to eq account_delete_path - end end def complete_2fa_confirmation
[complete_2fa_confirmation_without_entering_otp->[fill_in,login_two_factor_path,to,click_button,otp_delivery_preference,eq,t],submit_current_password_and_totp->[fill_in,to,click_button,generate_totp_code,eq,t],enter_incorrect_otp_code->[fill_in],visit,phone,let,describe,feature,manage_phone_path,first,it,travel,to,have_content,return,before,click_link,scenario,with,t,require,login_two_factor_path,from_now,to_i,context,eq,and_call_original,after,direct_otp]
Completes the 2FA confirmation page using the last OTP code.
cc @jmhooper this spec checked that we were prompted for 2fa before deleting account, basically testing that the controller inherited from the Reauth controller. I figured that by prompting for the password we'd be OK without, but I think that controller also checks another factor. Do you think it's still OK to remove this?
@@ -598,6 +598,12 @@ internal static class Sdl X2Mask = 1 << 4 } + public enum ButtonState : byte + { + Released = 0, + Pressed = 1, + } + public enum SystemCursor { Arrow,
[Sdl->[Window->[GetDisplayIndex->[GetError],SetFullscreen->[GetError],IntPtr->[GetError]],IntPtr->[GetError],Haptic->[NewEffect->[GetError],IntPtr->[GetError],UpdateEffect->[GetError],RumbleSupported->[GetError],RunEffect->[GetError],RumblePlay->[GetError],RumbleInit->[GetError],StopAll->[GetError]],GameController->[IntPtr->[GetError]],GL->[SetAttribute->[GetError],IntPtr->[GetError]],Joystick->[NumJoysticks->[GetError],NumButtons->[GetError],NumAxes->[GetError],IntPtr->[GetError],NumHats->[GetError]],Mouse->[IntPtr->[GetError]],Display->[GetDisplayName->[GetError],GetClosestDisplayMode->[GetError],GetNumVideoDisplays->[GetError],GetDisplayMode->[GetError],GetCurrentDisplayMode->[GetError],GetWindowDisplayIndex->[GetError],GetNumDisplayModes->[GetError],GetBounds->[GetError]],GetError->[GetError]]]
SDL bindings: defines enums (e.g. ButtonState, SystemCursor) and wraps native SDL calls, reporting failures via GetError.
Add ButtonOrdinal: `public enum ButtonOrdinal : byte { Left = 1, Middle = 2, Right = 3, X1 = 4, X2 = 5, }`
@@ -6,6 +6,7 @@ from azure.data.tables._models import TableServiceStats from ._entity import TableEntity, EntityProperty, EdmType +from ._error import RequestEntityTooLargeError from ._table_shared_access_signature import generate_table_sas, generate_account_sas from ._table_client import TableClient from ._table_service_client import TableServiceClient
[No CFG could be retrieved]
Re-exports the public Tables API, including the generate_table_sas and generate_account_sas shared-access-signature helpers.
I would prefer `RequestTooLargeError` `Entity` already means something different in the Tables context :)
@@ -12224,6 +12224,11 @@ ParseNodePtr Parser::ParseDestructuredArrayLiteral(tokens declarationType, bool Error(ERRDestructNoOper); } + if (seenRest) // Rest must be in the last position. + { + Error(ERRDestructRestLast); + } + m_pscan->Scan(); // break if we have the trailing comma as well, eg. [a,]
[No CFG could be retrieved]
Reads a variable declaration from the input stream and returns a node that can be used to parse it, then captures the next node in the array.
What was the issue that required this change?