patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -84,6 +84,8 @@ def write_hdf5(fname, data, overwrite=False, compression=4, keys in data. This does not apply to the top level name (title). If 'error', '/' is not allowed in any lower-level keys. """ + from ...utils import _validate_type + h5py = _check_h5py() mode = 'w' if op.isfile(fname):
[read_hdf5->[_check_h5py],_create_pandas_dataset->[_check_h5py],list_file_contents->[_check_h5py,_list_file_contents],_triage_write->[_triage_write,_create_titled_dataset,_create_titled_group],_triage_read->[_check_h5py,_triage_read],write_hdf5->[_create_pandas_dataset,_check_h5py],object_diff->[_sort_keys,object_diff],_TempDir->[__new__->[__new__]]]
Write a python object to an HDF5 file. Missing key value in the data.
do not change things in `mne/externals` we try to keep these as verbatim copies of the upstream versions
@@ -486,8 +486,10 @@ public class KsqlResourceTest { public void shouldReturn5xxOnSystemError() { // Given: givenMockEngine(mockEngine -> - EasyMock.expect(mockEngine.parseStatements(EasyMock.anyString())) - .andThrow(new RuntimeException("internal error"))); + { + EasyMock.expect(mockEngine.parseStatements(EasyMock.anyString())) + .andThrow(new RuntimeException("internal error")); + }); // When: final KsqlErrorMessage result = makeFailingRequest(
[KsqlResourceTest->[givenKsqlConfigWith->[setUpKsqlResource],givenCommandStore->[setUpKsqlResource],makeSingleRequest->[makeSingleRequest],validateQueryDescription->[validateQueryDescription]]]
Checks if there is a system error in the database.
nit: back out this change as the only change is adding unnecessary braces.
@@ -0,0 +1,3 @@ +<?php +$serial = snmp_walk($device, ".1.3.6.1.4.1.32285.11.1.1.2.1.1.1.16", "-OQv"); +$hardware = snmp_walk($device, ".1.3.6.1.4.1.32285.11.1.1.2.1.1.1.18", "-OQv");
[No CFG could be retrieved]
No Summary Found.
You should use snmp_getnext_multi() for this
@@ -760,11 +760,14 @@ struct step { }; static struct step steps[] = { + /** Pre-test checks */ + { 0, "Dumping AttachInfo", attachinfo, 100 }, + /** Set up */ - { 0, "Initializing DAOS", init , 100 }, - { 1, "Connecting to pool", pconnect, 99 }, - { 2, "Creating container", ccreate, 98 }, - { 3, "Opening container", copen, 97 }, + { 1, "Initializing DAOS", init, 100 }, + { 2, "Connecting to pool", pconnect, 99 }, + { 3, "Creating container", ccreate, 98 }, + { 4, "Opening container", copen, 97 }, /** Layout generation tests */ { 10, "Generating 1M S1 layouts", oS1, 96 },
[inline->[va_start,clock,duration,strlen,sprintf,vfprintf,va_end,step_print,fprintf],pool_autotest_hdlr->[assert,step_new,step_init,fprintf],int->[new_oid,daos_obj_close,uuid_generate,daos_fini,daos_event_init,daos_obj_open,daos_pool_connect,daos_kv_open,daos_cont_destroy,daos_init,daos_cont_close,kv_get,daos_eq_create,d_errdesc,DP_UUID,daos_kv_put,kv_put,daos_kv_get,D_ALLOC,D_FREE,daos_obj_generate_oid,sprintf,daos_obj_punch,daos_cont_create,step_fail,memset,daos_pool_disconnect,step_success,daos_kv_close,daos_eq_destroy,daos_eq_poll,daos_cont_open]]
This function returns - 1 if the object is not in the system and - 1 if the The main entry point for the list command.
This should verify that the agent is correctly running instead.
@@ -48,7 +48,7 @@ $pdf->setTextShadow(array('enabled' => false, 'depth_w' => 0.2, 'depth_h' => 0.2 if (isset($_GET['report']) && !empty($_GET['report'])) { $report = mres($_GET['report']); - $pdf->SetHeaderData('../../'.$config['title_image'], 40, ucfirst($report), $config['project_name'], array(0, 0, 0), array(0, 64, 128)); + $pdf->SetHeaderData('../../../../../html/'.$config['title_image'], 40, ucfirst($report), $config['project_name'], array(0, 0, 0), array(0, 64, 128)); include_once "includes/reports/$report.pdf.inc.php"; } else { $report = 'report';
[setFooterFont,SetCreator,SetHeaderMargin,SetDefaultMonospacedFont,SetHeaderData,Output,setHeaderFont,setTextShadow,SetFooterMargin,setImageScale,SetMargins,setFooterData,SetAuthor,setFontSubsetting,SetAutoPageBreak,SetFont]
Adds a report to the PDF.
Why not use $config['install_dir']? That way you aren't using a relative directory.
@@ -35,8 +35,8 @@ namespace System.Net.Sockets.Tests await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.ReceiveAsync(s, new ArraySegment<byte>(buffer), SocketFlags.None)); await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.ReceiveAsync(s, buffer.AsMemory(), SocketFlags.None)); await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.ReceiveAsync(s, new ArraySegment<byte>[] { new ArraySegment<byte>(buffer) }, SocketFlags.None)); - await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.ReceiveFromAsync(s, new ArraySegment<byte>(buffer), SocketFlags.None, badEndPoint)); - await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.ReceiveMessageFromAsync(s, new ArraySegment<byte>(buffer), SocketFlags.None, badEndPoint)); + await Assert.ThrowsAsync<InvalidOperationException>(async () => await SocketTaskExtensions.ReceiveFromAsync(s, new ArraySegment<byte>(buffer), SocketFlags.None, badEndPoint)); + await Assert.ThrowsAsync<InvalidOperationException>(async () => await SocketTaskExtensions.ReceiveMessageFromAsync(s, new ArraySegment<byte>(buffer), SocketFlags.None, badEndPoint)); await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.SendAsync(s, new ArraySegment<byte>(buffer), SocketFlags.None)); await Assert.ThrowsAsync<SocketException>(async () => await SocketTaskExtensions.SendAsync(s, buffer.AsMemory(), SocketFlags.None));
[SocketTaskExtensionsTest->[Task->[Stream,ReceiveFromAsync,AsMemory,ReceiveMessageFromAsync,None,ReceiveAsync,AcceptAsync,Port,SendToAsync,SendAsync,Tcp,ConnectAsync,Assert,Address,InterNetwork]]]
This method ensures that all methods in SocketTaskExtensions are callable. Assert that asynchronous call is completed.
Do you understand why this exception changed? It's weird that this PR made it different.
@@ -27,6 +27,7 @@ urlpatterns = patterns( url(r'^order/', include(order_urls, namespace='order')), url(r'^products/', include(product_urls, namespace='product')), url(r'^profile/', include(userprofile_urls, namespace='profile')), + url(r'^selectable/', include('selectable.urls')), url(r'', include('payments.urls')) )
[autodiscover,patterns,include,url]
Returns a url that can serve static files.
This line is not related with issue. Why it is here?
@@ -63,6 +63,12 @@ class ElementStream: """Returns the variable named that defined this PCollection.""" return self._var + def cache_key(self): + # type: () -> str + + """Returns the cache key for this stream.""" + return self._cache_key + def display_id(self, suffix): # type: (str) -> str
[RecordingManager->[record->[Recording,_watch,clear],clear->[clear]],Recording->[_mark_all_computed->[is_done],__init__->[ElementStream],computed->[is_computed],cancel->[cancel],uncomputed->[is_computed],is_computed->[is_computed]],ElementStream->[read->[read]]]
Returns the variable name that defined this PCollection.
Generally doing this is not necessary. You can name the attribute `self.cache_key`, and users can access the attribute directly, unless there's a strong reason not to.
@@ -183,10 +183,6 @@ feature 'SP-initiated authentication with login.gov', :user_flow do end end end - - # context 'when choosing to sign in' do - # TODO: duplicate scenarios from Create Account here - # end end context 'when LOA1' do
[complete_phone_form_with_valid_phone->[fill_in,valid_for_country?,cell_phone],visit,email,password,create,feature,first_name,select_option,it,fill_in_credentials_and_submit,city,find_with_email,before,click_button,click_link,xit,select,t,require,sample,include,each,safe_email,click_on,choose,zip_code,fill_in,context,last_name,direct_otp]
fill_in user_password - fill - in user_email - fill - in user - - - - - - - - - - - - - - - - - -.
We already have a scenario for signing in at the bottom of this file. Furthermore, it doesn't look like anyone is using this feature anymore because it hasn't been updated in a while and no one has complained about it being broken.
@@ -172,6 +172,14 @@ public class DefaultExtensionSchemaGeneratorTestCase extends AbstractMuleTestCas .collect(toList()); } + private boolean shouldUpdateExpectedFilesOnError() { + // Utility to batch fix input files when severe model changes are introduced. + // Use carefully, not a mechanism to get away with anything. + // First check why the generated json is different and make sure you're not introducing any bugs. + // This should NEVER be committed as true + return false; + } + @Before public void setup() throws IOException { expectedSchema = getResourceAsString("schemas/" + expectedXSD, getClass());
[DefaultExtensionSchemaGeneratorTestCase->[generate->[generate]]]
Provides a collection of all schema generators that can generate the schema. This test method creates a new extension model based on the extension class and the schema.
excess of chetness
@@ -195,10 +195,10 @@ namespace Dynamo.Tests // reference to specific testing nodes in test graph string[] testingNodeGUIDS = new string[] { - "845d532fdf874d939f2ed66509413ea6", - "cb037a9debd54ce79a4007b6ea11de25", - "a9bb1b12fbbd4aa19299f0d30c9f99b2", - "b6bd3049034f488a9bed0373f05fd021" + "845d532f-df87-4d93-9f2e-d66509413ea6", + "cb037a9d-ebd5-4ce7-9a40-07b6ea11de25", + "a9bb1b12-fbbd-4aa1-9299-f0d30c9f99b2", + "b6bd3049-034f-488a-9bed-0373f05fd021" }; // get test nodes
[PythonEditTests->[GetLibrariesToPreload->[GetLibrariesToPreload]]]
Verify that the n - th node is loaded from Core location and that it is loaded from.
@QilongTang This test was not asserting the truth values as it doesn't match the GUID pattern. Had to insert the '-' character.
@@ -91,9 +91,9 @@ class BaseTestPrebucklingAnalysis(KratosUnittest.TestCase): { "max_iteration" : 1000, "tolerance" : 1e-6, - "number_of_eigenvalues" : 2, + "number_of_eigenvalues" : 1, "echo_level" : 0, - "normalize_eigenvectors": true + "normalize_eigenvectors": false } """)
[TestPrebucklingAnalysis->[test_dynamic_eigenvalue_analysis->[_check_load_multiplier,_solve_prebuckling_problem,_set_up_system]],BaseTestPrebucklingAnalysis->[_set_up_system->[_apply_material_properties,_apply_Bcs_Symmetry,_create_nodes,_set_conditions,_apply_Bcs_Full,_add_variables,_create_elements,_add_dofs],_apply_Bcs_Full->[_apply_BCs_simple_vertical,_apply_BCs_simple_horizontal],_apply_Bcs_Symmetry->[_apply_BCs_simple_vertical,_apply_BCs_sym_horizontal,_apply_BCs_sym_vertical,_apply_BCs_simple_horizontal]]]
Solve a prebuckling problem.
are these changed on purpose?
@@ -70,12 +70,12 @@ func TestPodMetadataDeDot(t *testing.T) { }, }, meta: common.MapStr{ - "pod": common.MapStr{"name": "", "uid": "005f3b90-4b9d-12f8-acf0-31020a840133"}, - "namespace": "", - "node": common.MapStr{"name": "test"}, - "labels": common.MapStr{"a": common.MapStr{"value": "bar", "key": "foo"}}, + "pod": common.MapStr{"name": ""}, + "uid": "005f3b90-4b9d-12f8-acf0-31020a840133", + "node": common.MapStr{"name": "test"}, + "labels": common.MapStr{"a": common.MapStr{"value": "bar", "key": "foo"}}, }, - config: withPodUID, + config: withUID, }, { pod: &Pod{
[Equal,NewConfigFrom,Fatal,NewConfig,PodMetadata]
withPodUID is a helper function to create a list of all the objects that are managed Labels returns a map of labels and names of the nodes that are not covered by any.
As `uid` has be already around before, we should not change anything here so this is only a note from my side. I often use `id` instead of `uuid` or `uid` for consistency. And as long as there is only one `id` I expect it to be unique.
@@ -97,7 +97,7 @@ class EvalbBracketingScorer(Metric): self._correct_predicted_brackets += numeric_line[5] self._gold_brackets += numeric_line[6] self._predicted_brackets += numeric_line[7] - + shutil.rmtree(tempdir) @overrides def get_metric(self, reset: bool = False): """
[EvalbBracketingScorer->[compile_evalb->[system,format],__init__->[join],clean_evalb->[system,format,join],__call__->[write,float,gettempdir,len,strip,format,join,run,exists,open,pformat,ConfigurationError],get_metric->[reset]],abspath,dirname,register,realpath,join]
Evaluate the NLTK and predict the NLTK. Returns the average precision recall and f1 measure of a .
Blank lines are off here.
@@ -15,10 +15,17 @@ class AccountErrorCode(Enum): INVALID = "invalid" INVALID_PASSWORD = "invalid_password" NOT_FOUND = "not_found" - NO_PERMISSION = "no_permission" PASSWORD_ENTIRELY_NUMERIC = "password_entirely_numeric" PASSWORD_TOO_COMMON = "password_too_common" PASSWORD_TOO_SHORT = "password_too_short" PASSWORD_TOO_SIMILAR = "password_too_similar" REQUIRED = "required" UNIQUE = "unique" + + +class PermissionGroupErrorCode(Enum): + ASSIGN_NON_STAFF_MEMBER = "assign_non_staff_member" + CANNOT_ADD_AND_REMOVE = "cannot_add_and_remove" + NO_PERMISSION = "no_permission" + REQUIRED = "required" + UNIQUE = "unique"
[No CFG could be retrieved]
Get the name of the resource from the config file.
This enum should be more specific as now it may be misinterpreted as `PermissionsDenied` exception. This enum indeed means that you cannot do something, but it's rather a validation error. I would suggest something like `OUT_OF_SCOPE_PERMISSION` which I would understand as "I cannot use this permission because it's out of my permissions scope". What do you think?
@@ -57,7 +57,6 @@ defineSuite([ var texturedBoxUrl = './Data/Models/Box-Textured/CesiumTexturedBoxTest.gltf'; var texturedBoxSeparateUrl = './Data/Models/Box-Textured-Separate/CesiumTexturedBoxTest.gltf'; var texturedBoxCustomUrl = './Data/Models/Box-Textured-Custom/CesiumTexturedBoxTest.gltf'; - var texturedBoxBinaryUrl = './Data/Models/Box-Textured-Binary/CesiumTexturedBoxTest.bgltf'; var texturedBoxKhrBinaryUrl = './Data/Models/Box-Textured-Binary/CesiumTexturedBoxTest.glb'; var boxRtcUrl = './Data/Models/Box-RTC/Box.gltf'; var cesiumAirUrl = './Data/Models/CesiumAir/Cesium_Air.gltf';
[No CFG could be retrieved]
The constructor of the object is the base class for all of the components of the object. Missing models - > Facebook.
Also delete this file (use `git rm`).
@@ -85,6 +85,7 @@ class Openmpi(Package): variant('verbs', default=_verbs_dir() is not None, description='Build support for OpenFabrics verbs.') variant('mxm', default=False, description='Build Mellanox Messaging support') + variant('cxx', default=False, description='Enable deprecated C++ bindings.') variant('thread_multiple', default=False, description='Enable MPI_THREAD_MULTIPLE support')
[_verbs_dir->[which,dirname,ibv_devices],Openmpi->[install->[ver,satisfies,append,getattr,_verbs_dir,filter_compilers,extend,configure,make],url_for_version->[up_to],filter_compilers->[join_path,filter_file,format,islink],setup_dependent_environment->[join_path,set],setup_dependent_package->[join_path,format],setup_environment->[unset,warn],verbs->[satisfies],depends_on,_verbs_dir,patch,version,provides,variant]]
Create a list of all packages in the OpenMPi. Create a new instance of the object with the given name.
any reason for this variant? why not have it always `on`? If you really need an option to turn it `off`, i would keep it `True` by default so that the default installation is the same as before this PR.
@@ -799,6 +799,9 @@ namespace Internal.JitInterface case CorInfoHelpFunc.CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT: id = ReadyToRunHelper.ReversePInvokeExit; break; + case CorInfoHelpFunc.CORINFO_HELP_STRCNS_CURRENT_MODULE: + id = ReadyToRunHelper.GetString; + break; case CorInfoHelpFunc.CORINFO_HELP_INITCLASS: case CorInfoHelpFunc.CORINFO_HELP_INITINSTCLASS:
[CorInfoImpl->[classMustBeLoadedBeforeCodeIsRun->[classMustBeLoadedBeforeCodeIsRun],VerifyMethodSignatureIsStable->[MethodSignatureIsUnstable],EncodeFieldBaseOffset->[HasLayoutMetadata,PreventRecursiveFieldInlinesOutsideVersionBubble],getFieldInfo->[IsClassPreInited],getCallInfo->[ceeInfoGetCallInfo,UpdateConstLookupWithRequiresRuntimeJitSymbolIfNeeded,VerifyMethodSignatureIsStable],UpdateConstLookupWithRequiresRuntimeJitSymbolIfNeeded->[MethodSignatureIsUnstable],IsGenericTooDeeplyNested->[IsGenericTooDeeplyNested],embedGenericHandle->[ceeInfoEmbedGenericHandle],setEHinfo->[classMustBeLoadedBeforeCodeIsRun],ceeInfoGetCallInfo->[IsGenericTooDeeplyNested,IsTypeSpecForTypicalInstantiation],ceeInfoEmbedGenericHandle->[ComputeRuntimeLookupForSharedGenericToken],CompileMethod->[ShouldSkipCompilation]],MethodWithToken->[Equals->[Equals],GetHashCode->[GetHashCode],CompareTo->[CompareTo]],GenericContext->[GetHashCode->[GetHashCode]]]
Get the symbol node that is cached for the given CorInfoHelpFunc. Get the correct object for a given node type. private static int MAX_NUMBER_OF_CORINFO_SUPPLEMENTARY_FUNCTION region CorInfoHelpFunc. region Private methods Get the id of a node in the list of known coroutines.
I do not think it is as simple as this. There needs to be a code to add the current module argument for the helper (it is what `ZapLazyHelperThunk` does in crossgen1.
@@ -46,7 +46,11 @@ func pulumiAPICall(method string, path string, body []byte, accessToken string) return nil, fmt.Errorf("getting Pulumi API endpoint: %v", err) } - url := fmt.Sprintf("%s/api%s", apiEndpoint, path) + // Normalize URL components + apiEndpoint = strings.TrimSuffix(apiEndpoint, "/") + path = strings.TrimPrefix(path, "/") + + url := fmt.Sprintf("%s/api/%s", apiEndpoint, path) req, err := http.NewRequest(method, url, bytes.NewBuffer(body)) if err != nil { return nil, fmt.Errorf("creating new HTTP request: %v", err)
[NewBuffer,IgnoreClose,Sprintf,NewRequest,Do,Marshal,Unmarshal,Errorf,ReadAll,Getenv,Set]
pulumiAPICall makes a HTTP request to the Pulumi Cloud API and returns pulumiRESTCall is a wrapper for the REST call that stores the credentials in req.
Sort of wish here we just contract.Required that callers (e.g. us) did the right thing. But that's a small concern and just maybe the way I view the world. LGTM either way...
@@ -626,8 +626,8 @@ function conversation(App $a, $items, $mode, $update, $preview = false) { $owner_name = ''; $sparkle = ''; - if($mode === 'search' || $mode === 'community') { - if(((activity_match($item['verb'],ACTIVITY_LIKE)) || (activity_match($item['verb'],ACTIVITY_DISLIKE))) + if ($mode === 'search' || $mode === 'community') { + if (((activity_match($item['verb'],ACTIVITY_LIKE)) || (activity_match($item['verb'],ACTIVITY_DISLIKE))) && ($item['id'] != $item['parent'])) continue; $nickname = $item['nickname'];
[localize_item->[attributes],get_responses->[get_id],conversation->[add_thread,get_template_data]]
The conversation function JS AJAX for the user s n - index page. The main page of the network. This function renders the conversation template This function is used to generate a link to the activity. This function is used to render a single item in a profile.
Standards: Please add a space after commas.
@@ -86,7 +86,8 @@ func newVersionCmd() *cobra.Command { }, } - versionCmdDescription := fmt.Sprintf("Output format to use: %s", outputFormatOptions) + versionCmdDescription := fmt.Sprintf("Output format. Allowed values: %s", + strings.Join(outputFormatOptions, ", ")) versionCmd.Flags().StringVarP(&outputFormat, "output", "o", "human", versionCmdDescription)
[JSONMarshalIndent,Println,Sprintf,StringVarP,Fatalf,Flags]
Run executes the command.
Is `outputFormatOptions` superfluous? Should we use `getVersionsOutputFormatOptions` everywhere?
@@ -269,6 +269,8 @@ def _check_complete_surface(surf, copy=False, incomplete='raise', extra=''): ', '.join(str(f) for f in fewer), extra)) if incomplete == 'raise': raise RuntimeError(msg) + elif incomplete == 'ignore': + pass else: warn(msg) return surf
[make_sphere_model->[ConductorModel,_fwd_eeg_fit_berg_scherg],_lin_pot_coeff->[_calc_beta],_check_surfaces->[_assert_complete_surface,_assert_inside],make_scalp_surfaces->[_check_file,check_seghead,_surfaces_to_bem,write_bem_surfaces],_write_bem_solution_fif->[_write_bem_surfaces_block],_fwd_bem_homog_solution->[_fwd_bem_multi_solution],_fwd_eeg_fit_berg_scherg->[_compute_linear_parameters,_fwd_eeg_get_multi_sphere_model_coeffs],make_bem_model->[_surfaces_to_bem],_one_step->[_compose_linear_fitting_data],read_bem_surfaces->[_check_complete_surface],write_head_bem->[_surfaces_to_bem,write_bem_surfaces],make_bem_solution->[_fwd_bem_linear_collocation_solution],make_watershed_bem->[_surfaces_to_bem],_prepare_env->[copy],_ensure_bem_surfaces->[ConductorModel,copy,read_bem_surfaces],read_bem_solution->[ConductorModel],_fit_sphere_to_headshape->[get_fitting_dig],_surfaces_to_bem->[_check_complete_surface,_ico_downsample,_check_surfaces,_check_thicknesses,_order_surfaces,_check_surface_size],get_fitting_dig->[get_fitting_dig],_symlink->[copy],_fwd_bem_linear_collocation_solution->[_check_complete_surface,_fwd_bem_multi_solution,_fwd_bem_ip_modify_solution,_fwd_bem_lin_pot_coeff,_fwd_bem_homog_solution],_check_origin->[fit_sphere_to_headshape],_read_bem_solution_fif->[read_bem_surfaces],_fwd_bem_lin_pot_coeff->[_correct_auto_elements,_lin_pot_coeff],convert_flash_mris->[_prepare_env],_compute_linear_parameters->[_compose_linear_fitting_data],make_flash_bem->[_prepare_env]]
Check if a surface has a topological defect.
You should use `_on_missing` here
@@ -310,6 +310,10 @@ func validate(errs binding.Errors, data map[string]interface{}, f Form, l macaro } data["HasError"] = true + // If the field with name errs[0].FieldNames[0] is not found in form + // somehow, some code later on will panic on Data["ErrorMsg"].(string). + // So initialize it to some default. + data["ErrorMsg"] = l.Tr("form.unknown_error") AssignForm(f, data) typ := reflect.TypeOf(f)
[ToSnakeCase,GetAccessTokenBySHA,Now,ValueOf,CreateUser,Fields,GetUserByID,HasPrefix,GetOAuth2GrantByID,Field,Error,TimeStampNow,NewV4,Len,Trace,UserSignIn,Tr,BasicAuthDecode,Unix,Contains,TypeOf,ParseOAuth2Token,ToLower,IsErrUserNotExist,Get,GetUserByName,Split,Query,SetNameMapper,IsErrAccessTokenNotExist,IsErrAccessTokenEmpty,Kind,Elem,String,Interface,NumField,UpdateAccessToken]
GetRuleBody returns the body of the rule that should be used to generate the rule. form. field. name.
This change is not really needed for my pull request. Before I made changes below, the code was unable to find the field that failed validation inside embedded anonymous struct, and I got panic due ErrorMsg being not set. I decided to add this so the code won't crash in similar cases (which should not happen unless there's a bug). In my opinion, the entire function is somewhat sloppy and needs rewriting. I only modified it as much as I needed, and didn't try to refactor it.
@@ -1621,7 +1621,9 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting) * other direct or indirect hold on the dnode must first drop the dnode * handle. */ +#ifdef ZFS_DEBUG ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread); +#endif /* NOTE: the DNODE_DNODE does not have a dn_dbuf */ if (refs == 0 && db != NULL) {
[No CFG could be retrieved]
Adds a reference to the dnode and returns TRUE if the dnode is already held. The dnode_hold_impl function is called by the dnode_hold_impl.
Why was this needed? Are the new ASSERT definitions not happy with this when !ZFS_DEBUG?
@@ -55,6 +55,7 @@ public class SharedLimiterKey implements SharedResourceKey { @Override public String toConfigurationKey() { - return this.resourceLimited; + // remove leading "/" + return this.resourceLimitedPath.substring(1).replace("/", "."); } }
[SharedLimiterKey->[toString->[toConfigurationKey]]]
Returns the configuration key for this resource limiting.
Doesn't `substring(1)` already remove the leading "/"?
@@ -3838,6 +3838,16 @@ def try_fuse(equiv_set, parfor1, parfor2): def is_equiv(x, y): return x == y or equiv_set.is_equiv(x, y) + def get_user_varname(v): + """get original variable name by user if possible""" + if not isinstance(v, ir.Var): + return v + v = v.name + if "var_rename_map" in metadata and v in metadata["var_rename_map"]: + user_varname = metadata["var_rename_map"][v] + return user_varname + return v + for i in range(ndims): nest1 = parfor1.loop_nests[i] nest2 = parfor2.loop_nests[i]
[ConvertReducePass->[_reduce_to_parfor->[_mk_parfor_loops,Parfor,dump],_mk_reduction_body->[_make_index_var]],simplify_parfor_body_CFG->[simplify_parfor_body_CFG],remove_dead_parfor->[list_vars],repr_arrayexpr->[repr_arrayexpr],get_parfor_array_accesses->[unwrap_parfor_blocks,wrap_parfor_blocks],get_parfor_params_inner->[get_parfor_params],ParforPassStates->[__init__->[ParforDiagnostics]],prod_parallel_impl->[prod_1->[init_prange,internal_prange]],min_parallel_impl->[min_1->[init_prange,internal_prange]],linspace_parallel_impl->[linspace_3->[init_prange,internal_prange]],get_parfor_writes->[get_parfor_writes],_combine_params_races_for_ssa_names->[unversion],_lower_parfor_sequential_block->[_lower_parfor_sequential_block],_update_parfor_get_setitems->[list_vars],mean_parallel_impl->[mean_1->[init_prange,internal_prange]],get_copies_parfor->[unwrap_parfor_blocks,wrap_parfor_blocks],has_cross_iter_dep->[list_vars],visit_vars_parfor->[visit_parfor_pattern_vars],dotvm_parallel_impl->[init_prange,internal_prange],argmin_parallel_impl->[init_prange,internal_prange],arange_parallel_impl->[arange_4->[init_prange,internal_prange]],push_call_vars->[list_vars,process_assign,push_call_vars],argmax_parallel_impl->[init_prange,internal_prange],sum_parallel_impl->[sum_1->[init_prange,internal_prange]],_arrayexpr_tree_to_ir->[_arrayexpr_tree_to_ir],_can_reorder_stmts->[list_vars,expand_aliases],ConvertInplaceBinop->[_inplace_binop_to_parfor->[Parfor,dump]],_mk_parfor_loops->[LoopNest],dotmv_parallel_impl->[init_prange,internal_prange],ConvertNumpyPass->[_numpy_map_to_parfor->[_make_index_var,_mk_parfor_loops,Parfor,dump],_arrayexpr_to_parfor->[_make_index_var,_mk_parfor_loops,Parfor,dump]],_get_saved_call_nodes->[rename_global_or_getattr],ConvertSetItemPass->[_setitem_to_parfor->[LoopNest,_type_getitem,Parfor,dump]],max_parallel_impl->[max_1->[init_prange,internal_prange]],lower_parfor_sequential->[run],var_parallel_impl->[var_1->[init_prange,internal_prange]],ParforPass->[_pre_run-
>[run],fuse_recursive_parfor->[run,fuse_parfors],run->[find_indexed_arrays->[],find_mask_from_size->[],_pre_run,ConvertReducePass,setup,run,ConvertInplaceBinop,ConvertNumpyPass,ConvertLoopPass,ConvertSetItemPass,validate_params],_mk_parfor_loops->[_mk_parfor_loops],_find_mask->[_find_mask]],parfor_find_max_label->[unwrap_parfor_blocks,wrap_parfor_blocks],parfor_insert_dels->[DummyFuncIR],get_parfor_call_table->[unwrap_parfor_blocks,wrap_parfor_blocks],get_parfor_tuple_table->[unwrap_parfor_blocks,wrap_parfor_blocks],fill_parallel_impl->[fill_1->[init_prange,internal_prange]],Parfor->[list_vars->[list_vars],get_shape_classes->[get_shape_classes],dump->[dump_graph_indented->[print_graph->[print_g->[]]],print_unoptimised->[print_fuse->[],print_nest->[print_g->[]]],print_optimised->[print_fuse->[],print_nest->[print_g->[]]],dump]],parfor_typeinfer->[unwrap_parfor_blocks,wrap_parfor_blocks],ParforDiagnostics->[reachable_nodes->[reachable_nodes],_get_parfors->[_get_nested_parfors],get_parfors->[_get_parfors],sort_pf_by_line->[sort_pf_by_line,compute_graph_info],get_stats->[count_root->[count_root],count_root],dump->[dump_graph_indented->[print_graph->[print_g->[print_g,print_wrapped],print_g,print_wrapped],print_graph,compute_graph_info],print_unoptimised->[print_fuse->[reachable_nodes,print_wrapped],print_nest->[print_g->[reachable_nodes,print_g,print_wrapped],print_g,print_wrapped],print_fuse,print_nest],print_optimised->[print_fuse->[reachable_nodes,print_wrapped],print_nest->[print_g->[reachable_nodes,print_g,print_wrapped],print_g,print_wrapped],print_fuse,print_wrapped,print_nest],dump_graph_indented,get_parfors,sort_pf_by_line,print_wrapped,count_parfors,print_unoptimised,print_optimised,compute_graph_info]],apply_copies_parfor->[unwrap_parfor_blocks,wrap_parfor_blocks],get_reduce_nodes->[lookup->[lookup],list_vars,supported_reduction,lookup],get_parfor_reductions->[list_vars,get_parfor_reductions],ConvertLoopPass->[run->[find_indexed_arrays->[list_vars],find_mask_
from_size->[find_indexed_arrays],LoopNest,Parfor,_mk_parfor_loops,_make_index_var,dump],_get_prange_init_block->[list_vars,_is_parallel_loop],_replace_loop_access_indices->[unver,_replace_loop_access_indices]],parfor_add_offset_to_labels->[unwrap_parfor_blocks,wrap_parfor_blocks],dotvv_parallel_impl->[init_prange,internal_prange],try_fuse->[is_equiv->[is_equiv],is_equiv]]
try to fuse parfors and return a fused parfor if it can be found parfor1 and parfor2 must have a non - cross - iteration dependency.
I think that this ought to go into a `parfors` key in the `metadata` as it's reasonably likely that other `parfors` pass specific things could exist in the future. i.e. access via `metadata["parfors"]["var_rename_map"]`
@@ -92,6 +92,8 @@ public class DocumentsMarshaller implements JsonMarshaller<Documents> { docs.setCurrentPageIndex(jp.getIntValue()); } else if ("entries".equals(key)) { readDocumentEntries(jp, docs); + } else if ("aggregations".equals(key)) { + docs.setHasAggregates(true); } tok = jp.nextToken(); }
[DocumentsMarshaller->[readDocuments->[readDocumentEntries],read->[readPaginableDocuments,readDocuments],readPaginableDocuments->[readDocumentEntries]]]
Reads PaginableDocuments from the JSON stream.
This part doesn't seem tested in this PR. This is automation-client so I can understand if it's too complex. Can this be "aggregates" instead of "aggregations" for consistency?
@@ -84,7 +84,7 @@ func GetBuild() *Build { BuildDate: BuildDate, BuildNumber: BuildNumber, State: State, - PluginVersion: MaxPluginVersion, + PluginVersion: feature.MaxPluginVersion, } }
[ShortVersion->[Sprintf],String->[Sprintf,Version],IsOlder->[Equal,Atoi,Errorf],IsNewer->[IsOlder,Equal],Sprintf,HasPrefix,String,BoolVar]
String returns a string representation of a build.
(MaxPluginVersion - 1) should be used as current version, because the plugin is used to migrate data from (X-1) to X. if we use MaxPluginVersion directly here, plugin should work as X => (X+1). I think current logic is easy to understand, so please keep that, and make sure current version is always same to latest plugin version.
@@ -347,7 +347,7 @@ func (mod *modContext) genAwaitableType(w io.Writer, obj *schema.ObjectType) str baseName := pyClassName(tokenToName(obj.Token)) // Produce a class definition with optional """ comment. - fmt.Fprintf(w, "class %s:\n", baseName) + fmt.Fprintf(w, "\nclass %s:\n", baseName) printComment(w, obj.Comment, " ") // Now generate an initializer with properties for all inputs.
[gen->[genHeader,add],genResource->[genHeader,has,add],genFunction->[genHeader,genAwaitableType],genPropertyConversionTables->[genHeader],genInit->[genHeader,submodulesExist],genNestedStructureBullets->[genNestedStructureBullets],genConfig->[genHeader],genHeader,genPropertyConversionTables,add,gen]
genAwaitableType generates a string that represents a awaitable type. Print a type - specific awaitable object to the given writer.
Nit: Instead of printing the `\n` in front of `class`, what do you think of having a single `fmt.Fprintf(w, "\n")` call before the call to `mod.genAwaitableType` on line 686 below?
@@ -444,11 +444,6 @@ module Engine @revenue_to_render ||= @stops.map(&:revenue_to_render) end - # Used to set label for a recently placed tile - def label=(label_name) - @label = Part::Label.new(label_name) - end - def restore_borders(edges = nil) edges ||= ALL_EDGES
[Tile->[compute_city_town_edges->[compute_loc],add_reservation!->[add_reservation!],dup->[decode],paths_are_subset_of?->[rotate],paths->[rotate],restore_borders->[restore_borders],from_code->[decode]]]
Revenue to render.
why did you change this
@@ -35,7 +35,7 @@ ob_start(); ?> <div class="elgg-page-messages"> <?php echo $messages ?> </div> - <div class="$wg_body_class"> + <div class="<?php echo $wg_body_class; ?>"> <?php echo $content ?> </div> </div>
[No CFG could be retrieved]
Print a single .
You can use short tags: `<?= ... ?>` We're PHP 5.5+!
@@ -750,6 +750,13 @@ export class ViewportBindingNatural_ { if (this.win.document.defaultView) { waitForBody(this.win.document, () => { this.win.document.body.style.overflow = 'visible'; + if (isExperimentOn(this.win, 'amp-ios-overflow-x') && + this.viewer_.getParam('webview') === '1') { + setStyles(this.win.document.body, { + overflowX: 'hidden', + overflowY: 'visible', + }); + } }); }
[No CFG could be retrieved]
Provides a class which is responsible for handling the native window s scroll and resize events. Updates the paddingTop and size of the window.
We also need to test that this is actually iOS.
@@ -593,9 +593,7 @@ class PhpcrMapper extends RlpMapper // route already exists and referenced on contentNode return true; } else { - throw new ResourceLocatorAlreadyExistsException(sprintf( - 'Resource locator "%s" already exists. Route node at path "%s"', $resourceLocator, $routeNode->getPath() - )); + throw new ResourceLocatorAlreadyExistsException($resourceLocator, $routeNode->getPath()); } }
[PhpcrMapper->[move->[save],loadHistoryByContentUuid->[iterateRouteNodes],getResourceLocator->[getWebspaceRouteNodeBasePath],getParentPath->[loadByContent],deleteByNode->[iterateRouteNodes,deleteByNode],save->[save],loadByContentUuid->[loadByContent],restoreByPath->[iterateRouteNodes,save],getPath->[getWebspaceRouteNodeBasePath]]]
Checks if resource locator is unique.
Instead of adapting the standard constructor of the exception, I'd rather add a static factory method, such as `ResourceLocatorAlreadyExistsException::forResourceLocator()`. This is a generally useful practice that allows to add additional factory methods or subclassing the exception, while still being able to call the standard constructor and passing a custom message.
@@ -84,7 +84,11 @@ namespace Dynamo.Engine public FunctionDescriptor(FunctionDescriptorParams funcDescParams) { - summary = funcDescParams.Summary; + if (!String.IsNullOrEmpty(funcDescParams.Summary)) + { + summary = funcDescParams.Summary; + } + pathManager = funcDescParams.PathManager; Assembly = funcDescParams.Assembly; ClassName = funcDescParams.ClassName;
[FunctionDescriptor->[Equals->[Equals],GetHashCode->[GetHashCode]]]
Creates a new FunctionDescriptor object. The type of the block that contains the block of code.
Fixed bug with DSFunction description. It didn't work.
@@ -187,7 +187,9 @@ int PKCS12_set_mac(PKCS12 *p12, const char *pass, int passlen, ASN1_OCTET_STRING *macoct; if (!md_type) - md_type = EVP_sha1(); + md_type = EVP_sha256(); + if (!iter) + iter = PKCS12_DEFAULT_ITER; if (PKCS12_setup_mac(p12, iter, salt, saltlen, md_type) == PKCS12_ERROR) { ERR_raise(ERR_LIB_PKCS12, PKCS12_R_MAC_SETUP_ERROR); return 0;
[int->[X509_ALGOR_get0,HMAC_Update,ASN1_INTEGER_get,EVP_MD_type,OPENSSL_cleanse,PKCS5_PBKDF2_HMAC,HMAC_Final,memcpy,HMAC_CTX_new,X509_SIG_get0,HMAC_Init_ex,EVP_get_digestbyobj,pkcs12_gen_gost_mac_key,ossl_safe_getenv,EVP_MD_size,ERR_raise,HMAC_CTX_free,PKCS7_type_is_data],PKCS12_get0_mac->[X509_SIG_get0],PKCS12_set_mac->[EVP_sha1,PKCS12_setup_mac,ASN1_OCTET_STRING_set,X509_SIG_getm,pkcs12_gen_mac,ERR_raise],PKCS12_setup_mac->[EVP_MD_type,ASN1_INTEGER_new,memcpy,ASN1_INTEGER_set,X509_ALGOR_set0,PKCS12_MAC_DATA_free,OBJ_nid2obj,RAND_bytes,X509_SIG_getm,OPENSSL_malloc,PKCS12_MAC_DATA_new,ERR_raise],PKCS12_verify_mac->[ASN1_STRING_length,CRYPTO_memcmp,X509_SIG_get0,ASN1_STRING_get0_data,pkcs12_gen_mac,ERR_raise],PKCS12_gen_mac->[pkcs12_gen_mac]]
Private helper methods.
Fetch instead of the legacy implementation?
@@ -101,7 +101,15 @@ public class EncryptorInitializer { encryptorPasswordPropFile = getPropertyFromEnv(PASSWORD_KEY, ""); // Creates a random encryptor password if the password is empty if (StringUtils.isEmpty(encryptorPasswordPropFile)) { - Log.info(LOG_MODULE, "Generating a random password for the database password encryptor"); + if (!firstInitialSetupFlag) { + Log.error(LOG_MODULE, String.format( + "Password database encryptor initialization error - could not locate encryptor password via encryption " + + "file %s or supplied properties/environment variable (%s). " + + "GeoNetwork can not decrypt passwords already stored in the database. " + + "Either recover the previous password and restart the application or manually null all existing encrypted " + + "passwords in the database and re-enter passwords via the application", conf.getPath(), PASSWORD_KEY)); + } + Log.info(LOG_MODULE, "Generating a new random password for the database password encryptor"); encryptorPasswordPropFile = RandomStringUtils.randomAlphanumeric(10); } encryptorPassword = encryptorPasswordPropFile;
[EncryptorInitializer->[init->[getMessage,equals,setAlgorithm,getEncryptorPropertiesFile,info,initialize,getProperty,updateDb,getPropertyFromEnv,getInstance,setProperty,isEmpty,setPassword,error,getCause,format,registerPBEStringEncryptor,RuntimeException,setHeader,isNotEmpty,randomAlphanumeric,StandardPBEStringEncryptor,save,getPath],updateDb->[execute,debug,error,get,encryptDatabaseValuesIntegerId,createStatement,getConnection,printStackTrace,executeQuery,keySet,commit,encryptDatabaseValuesStringId],getPropertyFromEnv->[isEmpty,replace,getProperty,getenv],encryptDatabaseValuesIntegerId->[decrypt,isNotEmpty,getString,getInt,encrypt,put,next],getEncryptorPropertiesFile->[PropertiesConfiguration,resolve,createFile,exists,toFile],encryptDatabaseValuesStringId->[isNotEmpty,decrypt,getString,encrypt,put,next]]]
Initializes the encryptor configuration. Initialize the password encryptor. This method creates a new HibernateEncryptor object if it is not already registered.
it seems to be working fine, but this message is displayed with a new setup, which can be a bit confusing as it's not really an error in this case.
@@ -45,7 +45,17 @@ class UselessPackageInfoCheckTest { @Test void notAPackageInfo() { JavaCheckVerifier.newVerifier() - .onFile(testSourcesPath("checks/UselessPackageInfoCheck/packageWithNoOtherFilesButNotPackageInfo/HelloWorld.java")) + .onFiles( + testSourcesPath("checks/UselessPackageInfoCheck/packageWithNoOtherFilesButNotPackageInfo/HelloWorld1.java"), + testSourcesPath("checks/UselessPackageInfoCheck/packageWithNoOtherFilesButNotPackageInfo/HelloWorld2.java")) + .withCheck(new UselessPackageInfoCheck()) + .verifyNoIssues(); + } + + @Test + void defaultPackage() { + JavaCheckVerifier.newVerifier() + .onFile(testSourcesPath("DefaultPackage.java")) .withCheck(new UselessPackageInfoCheck()) .verifyNoIssues(); }
[UselessPackageInfoCheckTest->[notAPackageInfo->[verifyNoIssues],withOtherFile->[verifyNoIssues],withNoOtherFile->[verifyIssueOnFile]]]
Tests if there is a package with no other files but not package info.
To avoid the silent corruption of one of the test files, we should probably repeat the test with a call to singular `onFile`.
@@ -69,10 +69,10 @@ public class StaticEmailAlertSender implements AlertSender { private void sendEmail(String emailAddress, Stream stream, AlertCondition.CheckResult checkResult, List<Message> backlog) throws TransportConfigurationException, EmailException { LOG.debug("Sending mail to " + emailAddress); if(!configuration.isEnabled()) { - throw new TransportConfigurationException("Email transport is not enabled!"); + throw new TransportConfigurationException("Email transport is not enabled in configuration file!"); } - Email email = new SimpleEmail(); + final Email email = new SimpleEmail(); email.setHostName(configuration.getHostname()); email.setSmtpPort(configuration.getPort()); if (configuration.isUseSsl()) {
[StaticEmailAlertSender->[sendEmails->[sendEmails,sendEmail]]]
Send an email to the specified email address.
I would change the exclamation mark to a full-stop. Sounds less aggressive/panicky.
@@ -134,8 +134,6 @@ public class TestDataflowRunner extends PipelineRunner<DataflowPipelineJob> { throw new RuntimeException(errorMessage(job, messageHandler)); } - // If there is no reason to immediately fail, run the success matcher. - assertThat(job, testPipelineOptions.getOnSuccessMatcher()); return job; }
[TestDataflowRunner->[run->[run],fromOptions->[TestDataflowRunner],getJobMetrics->[getJobMetrics],ErrorMonitorMessagesHandler->[getErrorMessage->[toString],process->[process]],fromOptionsAndClient->[TestDataflowRunner],CancelOnError->[call->[hasSeenError]]]]
Runs a dataflow job with the given pipeline and runner.
@iemejia given that we need to deprecate `TestPipelineOptions` and not remove it, is it ok to remove the few places where the matchers are consumed (in `TestDataflowRunner` and `TestSparkRunner`), or is there a concern that that will break some user tests?
@@ -14,7 +14,7 @@ * limitations under the License. */ -import {Messaging, WindowPortEmulator, parseMessage} from './messaging'; +import {Messaging, WindowPortEmulator, parseMessage} from 'messaging'; import {TouchHandler} from './touch-handler'; import {getAmpDoc} from '../../../src/ampdoc'; import {isIframed} from '../../../src/dom';
[No CFG could be retrieved]
Creates an object that represents a single unique identifier for a window. Initializes the object.
Can you explain how "messaging" name here is getting resolved? Also, what will this look like when imported from the "amp-viewer" repo?
@@ -55,11 +55,13 @@ func TestAuthenticateUserByToken(t *testing.T) { var user models.User token, err := user.GenerateAuthToken() + assert.NoError(t, err, "failed when generate auth token") ok, err := models.AuthenticateUserByToken(token, &user) require.NoError(t, err) assert.True(t, ok, "authentication must be successful") _, err = user.GenerateAuthToken() + assert.NoError(t, err, "failed to generate auth token") ok, err = models.AuthenticateUserByToken(token, &user) require.NoError(t, err) assert.False(t, ok, "authentication must fail with past token")
[Error,Equal,NewUser,AuthenticateUserByToken,HashPassword,NotEmpty,False,Parallel,NoError,GenerateAuthToken,NotEqual,Run,True]
TestAuthenticateUserByToken tests that the user has a valid token.
`failed to generate auth token`
@@ -183,7 +183,7 @@ maven-compile: maven-test: stage: test script: - - ./mvnw -ntp verify -Dmaven.repo.local=$MAVEN_USER_HOME + - ./mvnw verify -P-webpack -Dmaven.repo.local=$MAVEN_USER_HOME artifacts: reports: junit: target/test-results/**/TEST-*.xml
[No CFG could be retrieved]
Exports all of the dependencies of the given package manager. Maven agent reports.
Did the `-ntp` got lost on purpose here?
@@ -443,6 +443,11 @@ class UserActivation extends Component { ) } + refetchQueries() { + this.props.identityRefetch() + this.props.client.reFetchObservableQueries() + } + onDeployComplete = () => { clearVerifiedAccounts() this.clearStoredUserData()
[No CFG could be retrieved]
Displays a hidden block of code that can be generated by the user. Displays a hidden input for the user s unique identifier.
do we need both of these? Does reFetchObservableQueries also cause an identity refetch?
@@ -365,7 +365,7 @@ class RequestController < ApplicationController end # This automatically saves dependent objects, such as @outgoing_message, in the same transaction @info_request.save! - # XXX send_message needs the database id, so we send after saving, which isn't ideal if the request broke here. + # TODO: send_message needs the database id, so we send after saving, which isn't ideal if the request broke here. @outgoing_message.send_message flash[:notice] = _("<p>Your {{law_used_full}} request has been <strong>sent on its way</strong>!</p> <p><strong>We will email you</strong> when there is a response, or after {{late_number_of_days}} working days if the authority still hasn't
[RequestController->[get_attachment_internal->[new],check_batch_requests_and_user_allowed->[render_hidden,new],show_response->[new],authenticate_attachment->[new],get_attachment_as_html->[new],render_new_compose->[new],make_request_summary_file->[new,assign_variables_for_show_template]]]
Displays a new if the user has not submitted any undescribed requests and if This action creates a new non - nil non - nil non - nil non - nil non finds the n - node object and sends it to the client.
Line is too long. [121/80]
@@ -69,12 +69,10 @@ func TestVerifyNewBlock(t *testing.T) { } node := New(host, consensus, testDBFactory, false) - selectedTxs, selectedStakingTxs := node.getTransactionsForNewBlock(common.Address{}) - node.Worker.CommitTransactions( - selectedTxs, selectedStakingTxs, common.Address{}, - func([]types.RPCTransactionError) {}, - ) - + txs := make(map[common.Address]types.Transactions) + stks := types2.StakingTransactions{} + node.Worker.CommitTransactions(txs, stks, common.Address{}, + func([]types.RPCTransactionError) {}) block, _ := node.Worker.FinalizeNewBlock([]byte{}, []byte{}, 0, common.Address{}, nil, nil) if err := node.VerifyNewBlock(block); err != nil {
[NewDecider,VerifyNewBlock,Error,CurrentBlock,Blockchain,AddNewBlock,New,NewHost,GetPublicKey,RandPrivateKey,GenKeyP2P,Fatalf,CommitTransactions,FinalizeNewBlock,Errorf,getTransactionsForNewBlock,SetNetworkType,NumberU64]
createNewBlock creates a new block in the consensus chain and commits all transactions.
we don't have staking tx in s3, right?
@@ -1212,6 +1212,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, private boolean doMaintain(final long hostId) { final HostVO host = _hostDao.findById(hostId); + s_logger.info("Maintenance: attempting maintenance of host " + host.getUuid()); + ResourceState hostState = host.getResourceState(); + if (hostState == ResourceState.PrepareForMaintenance || hostState == ResourceState.ErrorInPrepareForMaintenance || + hostState == ResourceState.Maintenance || hostState == ResourceState.ErrorInMaintenance) { + throw new CloudRuntimeException("Cannot perform maintain when resource state is " + hostState + ", hostId = " + hostId); + } + final MaintainAnswer answer = (MaintainAnswer)_agentMgr.easySend(hostId, new MaintainCommand()); if (answer == null || !answer.getResult()) { s_logger.warn("Unable to send MaintainCommand to host: " + hostId);
[ResourceManagerImpl->[getGPUDevice->[listAvailableGPUDevice],updateClusterPassword->[doUpdateHostPassword],createHostAndAgentDeferred->[markHostAsDisconnected,getNewHost,isFirstHostInCluster,createHostVO],setHostIntoMaintenance->[resourceStateTransitTo],propagateResourceEvent->[getPeerName],createHostVOForConnectedAgent->[createHostVO],dispatchToStateAdapters->[deleteHost],umanageHost->[doUmanageHost],getAvailableHypervisor->[getSupportedHypervisorTypes,getDefaultHypervisor],doMaintain->[resourceStateTransitTo],checkAndMaintain->[isHostInMaintenance],updateHost->[resourceStateTransitTo],deleteHost->[doDeleteHost],addHost->[createHostAndAgent],updateHostPassword->[doUpdateHostPassword],setHostIntoErrorInMaintenance->[resourceStateTransitTo,configureVncAccessForKVMHostFailedMigrations],maintenanceFailed->[resourceStateTransitTo],doCancelMaintenance->[resourceStateTransitTo],configureVncAccessForKVMHostFailedMigrations->[setKVMVncAccess],listAllUpAndEnabledNonHAHosts->[listAllUpAndEnabledNonHAHosts],executeUserRequest->[doCancelMaintenance,doDeleteHost,doMaintain],fillRoutingHostVO->[checkIPConflicts],createHostAndAgent->[markHostAsDisconnected,createHostAndAgent,getNewHost,createHostVO],discoverHostsFull->[processResourceEvent],createHostVO->[checkCIDR,dispatchToStateAdapters,getCluster,resourceStateTransitTo,getNewHost],maintain->[processResourceEvent,maintain,doMaintain],isHostInMaintenance->[setHostIntoMaintenance,setHostIntoErrorInMaintenance],registerResourceEvent->[insertListener],cancelMaintenance->[doCancelMaintenance,cancelMaintenance,processResourceEvent]]]
This method is called to send a MaintainCommand to a host. Checks if the VM is a user VM.
Same as before, using `Array/list::contains()` or equivalent. If this is frequently used code, refactor it as a separate internal method and re-use it wherever needed.
@@ -196,6 +196,15 @@ public final class CentralAuthenticationServiceImpl implements CentralAuthentica throw new UnauthorizedSsoServiceException(); } + //CAS-1019 + final List<Authentication> authns = ticketGrantingTicket.getChainedAuthentications(); + if(authns.size() > 1) { + if (!registeredService.isAllowedToProxy()) { + log.warn("ServiceManagement: Service Attempted to Proxy, but is not allowed. Service: [" + service.getId() + "]"); + throw new UnauthorizedProxyingException(); + } + } + if (credentials != null) { try { final Authentication authentication = this.authenticationManager
[CentralAuthenticationServiceImpl->[grantServiceTicket->[grantServiceTicket]]]
Grant a service ticket. Create a service ticket with a unique identifier for the given service and user.
This log message might be even more helpful if it also logged the registeredService. That way if the service's not being permitted to proxy is a surprise, it's easier to determine what registration matched and caused the disallow.
@@ -1495,11 +1495,13 @@ class OutScaleForTrainingPass(object): Args: scope(fluid.Scope): The scope is used to initialize these new parameters. - place(fluid.CPUPlace|fluid.CUDAPlace): The place is used to initialize new parameters. + place(fluid.CPUPlace|fluid.CUDAPlace|str): The place is used to initialize new parameters. + If it's string, It can be ``cpu``, and ``gpu:x``, where ``x`` is the + index of the GPUs or XPUs. moving_rate(float): The decay coefficient of moving average. The default value is 0.9. """ self._scope = scope - self._place = place + self._place = _get_paddle_place(place) self._moving_rate = moving_rate self._is_test = None self._teller_set = _out_scale_op_list
[_is_input_all_not_persistable->[_get_op_input_var_names],AddQuantDequantPass->[apply->[_is_input_all_not_persistable,_get_op_input_var_names],_inser_quant_dequant_moving_average_abs_max_op->[_init_var_node]],ConvertToInt8Pass->[apply->[_remove_unused_var_nodes],_convert_to_int8->[_load_var]],OutScaleForTrainingPass->[apply->[_get_op_output_var_names,_init_var_node]],OutScaleForInferencePass->[apply->[_get_output_name_index,_scale_name,_get_op_output_var_names]],QuantizationTransformPass->[_insert_quant_range_abs_max_op->[_init_var_node],_insert_func->[_copy_graph],_create_global_step->[_init_var_node],_is_skip_quant->[_is_input_all_not_persistable],_insert_channel_quant_op->[_init_var_node],_insert_quant_abs_max_op->[_init_var_node],_copy_graph->[_copy_graph,_create_new_node],_insert_quant_moving_average_abs_max_op->[_init_var_node],apply->[_transform_backward,_quant_preprocess,_transform_forward]],QuantizationFreezePass->[_insert_post_channel_dequant_op->[_dequantized_var_name,_init_var_node],_quant->[_clip],_insert_post_dequant_op->[_dequantized_var_name],apply->[_check_grandchild_op_node]]]
Initialize a base based on a sequence of networkx variables. This method creates a persistent node that represents the maximum absolute value of the values of the link to the given node.
The docstring says it can be ``cpu`` and ``gpu:x``, where ``x`` is the index of the GPUs or XPUs, but ``place`` also accepts XPU places; it should read: It can be ``cpu``, ``gpu:x`` or ``xpu:x``.
@@ -205,8 +205,8 @@ def add_myunit(driver: Driver) -> None: mod = file_to_module(f) if '.codec.test.' in mod: # myunit is Python3 only. - driver.add_python_mod('unittest %s' % mod, 'unittest', mod) - driver.add_python2('unittest %s' % mod, '-m', 'unittest', mod) + driver.add_python_mod('unittest %s' % mod, 'unittest', '-q', mod) + driver.add_python2('unittest %s' % mod, '-m', 'unittest', '-q', mod) elif mod == 'mypy.test.testpythoneval': # Run Python evaluation integration tests separetely since they are much slower # than proper unit tests.
[add_imports->[add_python_string,find_files,add_mypy_string,add_flake8,file_to_module],add_myunit->[find_files,add_python_mod,file_to_module,add_python2],Driver->[add_python->[allow],add_both_string->[add_python_string,add_mypy_string],add_python_string->[allow],add_python2->[allow],__init__->[get_versions],add_both->[add_python,add_mypy],add_both_mod->[add_python_mod,add_mypy_mod],add_mypy_string->[allow],add_python_mod->[allow],add_mypy_mod->[allow],add_mypy->[allow],add_flake8->[allow]],add_samples->[find_files,add_mypy,add_mypy_string],main->[sanity,add_imports,add_myunit,list_tasks,Driver,prepend_path,add_samples,add_stubs,add_basic,usage,add_libpython],add_stubs->[find_files,file_to_module,add_mypy_string],add_basic->[add_mypy,add_flake8,add_mypy_mod],add_libpython->[find_files,file_to_module,add_mypy_mod],get_versions,main]
Add a module to the driver for myunit.
I'd like to see this in a separate pull request, or a mention in the description of the pull request, as this is logically independent from the rest of this PR. No need to do anything about this right now, but maybe next time.
@@ -954,5 +954,16 @@ namespace Dynamo.ViewModels Handled = false; } } + + /// <summary> + /// Extension method to check if a model exists in a group + /// </summary> + internal static class Extensions + { + public static bool CheckIfModelExistsInAGroup(this ObservableCollection<AnnotationModel> groups, Guid nodeGuid) + { + return (groups.SelectMany(m => m.SelectedModels).Any(m => m.GUID == nodeGuid)); + } + } }
[NodeViewModel->[ShowRename->[OnRequestShowNodeRename],ShowHelp->[OnRequestShowNodeHelp],Select->[OnRequestsSelection],ValidateConnections->[ValidateConnections]]]
Handle the necessary types.
A few things: This feels buried. I'd recommend giving it a more precise name and moving it to a separate file. While you're at it, it would probably be more flexible and accurate to use `IEnumerable<AnnotationModel>`.
@@ -365,15 +365,12 @@ define([ property = dynamicCone.intersectionColor; if (typeof property !== 'undefined') { - intersectionColor = property.getValue(time, intersectionColor); - if (typeof intersectionColor !== 'undefined') { - cone.intersectionColor = intersectionColor; - } + property.getValue(time, cone.intersectionColor); } property = dynamicCone.intersectionWidth; if (typeof property !== 'undefined') { - intersectionWidth = property.getValue(time, intersectionWidth); + var intersectionWidth = property.getValue(time, intersectionWidth); if (typeof intersectionWidth !== 'undefined') { cone.intersectionWidth = intersectionWidth; }
[No CFG could be retrieved]
Creates a single object with properties that can be visualized. DynamicConeVisualizerUsingCustomSensor - A helper to create a DynamicConeVisual.
`intersectionWidth` is always undefined on the right-hand side here and should be omitted.
@@ -19,10 +19,11 @@ class Gzip(object): 'gzip' not in accept_encoding.lower() or \ 'Content-Encoding' in response.headers: return response - print 'GZIPPING' + print('GZIPPING') response.direct_passthrough = False gzip_buffer = StringIO() - gzip_file = gzip.GzipFile(mode='wb', compresslevel=self.compress_level, fileobj=gzip_buffer) + gzip_file = gzip.GzipFile(mode='wb', compresslevel=self.compress_level, + fileobj=gzip_buffer) gzip_file.write(response.data) gzip_file.close() response.data = gzip_buffer.getvalue()
[Gzip->[after_request->[write,get,len,getvalue,close,StringIO,GzipFile,lower],__init__->[after_request]]]
This method is called after the request is sent to the client.
I'm curious, is this easier for you to read and/or work with?
@@ -93,6 +93,17 @@ namespace Content.Server.Chat _mommiLink.SendOOCMessage(player.SessionId.ToString(), message); } + public void SendDeadChat(IPlayerSession player, string message) + { + var clients = _playerManager.GetPlayersBy(x => x.AttachedEntity != null && x.AttachedEntity.HasComponent<GhostComponent>()).Select(p => p.ConnectedClient);; + + var msg = _netManager.CreateNetMessage<MsgChatMessage>(); + msg.Channel = ChatChannel.Dead; + msg.Message = message; + msg.MessageWrap = $"DEAD: {player.AttachedEntity.Name}: {{0}}"; + _netManager.ServerSendToMany(msg, clients.ToList()); + } + public void SendHookOOC(string sender, string message) { var msg = _netManager.CreateNetMessage<MsgChatMessage>();
[ChatManager->[EntityMe->[Channel,ServerSendToMany,Message,Uid,MessageWrap,Name,ConnectedClient,Select,SenderEntity,ToList,Emotes,_netManager,GridPosition,CanEmote],SendHookOOC->[Channel,Message,MessageWrap,OOC,_netManager,ServerSendToAll],EntitySay->[Channel,ServerSendToMany,Local,Message,Uid,MessageWrap,Name,ConnectedClient,Select,SenderEntity,ToList,_netManager,GridPosition,CanSpeak],Initialize->[_netManager,NAME],DispatchServerMessage->[Channel,Message,MessageWrap,ServerSendMessage,ConnectedClient,Server,_netManager],DispatchServerAnnouncement->[Channel,Message,MessageWrap,Server,_netManager,ServerSendToAll],SendOOC->[Channel,SessionId,Message,MessageWrap,SendOOCMessage,OOC,_netManager,ToString,ServerSendToAll]]]
Send OOC message to all players in the channel.
This should be localized.
@@ -854,8 +854,8 @@ def get_current_balanceproof(end_state: NettingChannelEndState) -> BalanceProofD else: locksroot = EMPTY_MERKLE_ROOT nonce = 0 - transferred_amount = 0 - locked_amount = 0 + transferred_amount: typing.TokenAmount = 0 + locked_amount: typing.Balance = 0 return (locksroot, nonce, transferred_amount, locked_amount)
[get_current_balanceproof->[get_amount_locked],register_secret_endstate->[is_lock_locked],handle_block->[is_deposit_confirmed,get_status],send_refundtransfer->[create_sendlockedtransfer],handle_send_directtransfer->[get_current_balanceproof,get_status,send_directtransfer,get_distributable],send_directtransfer->[create_senddirecttransfer],is_valid_refund->[refund_transfer_matches_received,valid_lockedtransfer_check],register_onchain_secret_endstate->[_del_lock,is_lock_locked],is_balance_proof_usable_onchain->[is_valid_signature,is_balance_proof_safe_for_onchain_operations],get_distributable->[get_current_balanceproof,get_amount_locked,get_balance],create_sendlockedtransfer->[get_next_nonce,compute_merkletree_with,get_amount_locked,get_distributable,get_status],handle_channel_closed->[get_status,set_closed],valid_lockedtransfer_check->[is_balance_proof_usable_onchain],handle_channel_newbalance->[is_transaction_confirmed],create_senddirecttransfer->[get_next_nonce,get_status,get_amount_locked,get_distributable],apply_channel_newbalance->[update_contract_balance],send_unlock->[create_unlock,get_lock,_del_lock],handle_receive_refundtransfercancelroute->[handle_receive_lockedtransfer],create_unlock->[get_next_nonce,get_amount_locked,compute_merkletree_without,is_lock_pending,get_status],is_valid_lock_expired->[is_lock_expired,is_balance_proof_usable_onchain],send_lockedtransfer->[create_sendlockedtransfer],register_onchain_secret->[register_onchain_secret_endstate],handle_refundtransfer->[is_valid_refund],events_for_expired_lock->[get_next_nonce,get_amount_locked,compute_merkletree_without],delete_secrethash_endstate->[compute_merkletree_without,is_lock_locked],is_valid_directtransfer->[is_balance_proof_usable_onchain],handle_channel_batch_unlock->[get_status],handle_channel_settled->[set_settled,get_batch_unlock],get_secret->[is_secret_known],handle_receive_directtransfer->[get_current_balanceproof,is_valid_directtransfer],get_number_of_pending_transfers->[_merkletree_wi
dth],events_for_close->[get_status],_del_lock->[is_lock_pending],state_transition->[handle_action_close,handle_channel_updated_transfer,handle_block,handle_channel_batch_unlock,handle_channel_closed,handle_channel_newbalance,handle_receive_directtransfer,handle_send_directtransfer,handle_channel_settled],handle_action_close->[events_for_close],handle_unlock->[_del_lock,is_valid_unlock],handle_receive_lock_expired->[delete_secrethash_endstate,is_valid_lock_expired],register_secret->[register_secret_endstate],is_valid_unlock->[is_balance_proof_usable_onchain],handle_receive_lockedtransfer->[is_valid_lockedtransfer]]
Get the current balance proof data.
Humm, I think we should use `TokenAmount` for this one
@@ -205,9 +205,12 @@ module.exports = JhipsterServerGenerator.extend({ prompting: { askForModuleName: function () { - if (this.baseName) return; + //need this for having a concrete name in gateway app to build urls like "/jhipster-uaa/api/account" + // @TODO read this application name from gateway during its generation + if (this.baseName || this.applicationType == 'uaa') return; this.askModuleName(this, currentQuestion++, totalQuestions); + }, askForServerSideOpts: function () {
[No CFG could be retrieved]
The main entry point for the command line interface. Missing configuration options for the Nexus application.
`this.applicationType == 'uaa'` check seems redundant as its done in the following method already, can we keep it either here or in method only
@@ -1053,6 +1053,10 @@ void setup() { #if HAS_FILAMENT_SENSOR SETUP_RUN(runout.setup()); #endif + + #if HAS_TMC220x + SETUP_RUN(tmc_serial_begin()); + #endif #if ENABLED(PSU_CONTROL) SETUP_LOG("PSU_CONTROL");
[No CFG could be retrieved]
Creates a new system with the given name. The main entry point for the exception handling system.
Does this have to occur ahead of the PSU control? As I understand it, TMC stepper drivers require 12V before they can be configured. But perhaps this doesn't matter to the initial serial connection here.
@@ -56,6 +56,15 @@ public class ExtensionPointImpl implements ExtensionPoint { @XParent public RegistrationInfo ri; + // potential registry class declaration + @XNode(value = "registry@class") + protected String registryKlass; + + // final operating registry class + protected Registry registry; + + protected static final Registry NULL_REGISTRY = new NullRegistry(); + @Override public Class<?>[] getContributions() { return contributions;
[ExtensionPointImpl->[loadContributions->[getName,getContributions]]]
Gets the list of all the contributions of this node.
Could we have it outside of this class, like in `ComponentManager`? The reason is that this object should be a POJO imho.
@@ -10,13 +10,13 @@ ## \author Stefan Wunsch import ROOT -from ROOT.VecOps import RVec, Take, Combinations +from ROOT.VecOps import Take, Combinations # RVec can be sorted in Python with the inbuilt sorting function because # PyROOT implements a Python iterator -v1 = RVec("double")(3) +v1 = ROOT.RVecD(3) v1[0], v1[1], v1[2] = 1, 2, 3 -v2 = RVec("double")(2) +v2 = ROOT.RVecD(2) v2[0], v2[1] = -4, -5 # To get the indices, which result in all combinations, you can call the
[print,len,range,format,Combinations,Take,RVec]
\ macro description Find unique triples in v4 and v5.
I understand that in Python tutorials this is nicer than having to do the explicit instantiation. I really don't like it for C++ code and tutorials in general. Just my personal preference though, not blocking this PR definitely
@@ -43,14 +43,14 @@ public class LdapConfiguration implements Configurable{ private ConfigurationDao _configDao; @Inject - private LdapManager _ldapManager; + private LdapConfigurationDao _ldapConfigurationDao; public LdapConfiguration() { } - public LdapConfiguration(final ConfigurationDao configDao, final LdapManager ldapManager) { + public LdapConfiguration(final ConfigurationDao configDao, final LdapConfigurationDao ldapConfigurationDao) { _configDao = configDao; - _ldapManager = ldapManager; + _ldapConfigurationDao = ldapConfigurationDao; } public String getAuthentication() {
[LdapConfiguration->[getReturnAttributes->[getLastnameAttribute,getFirstnameAttribute,getEmailAttribute]]]
getAuthentication - returns the authentication of the connection.
Why inject and pass on in the constructor. Do both have usage scenarios?
@@ -14,6 +14,9 @@ def nms(boxes, scores, iou_threshold): IoU greater than iou_threshold with another (higher scoring) box. + When all the values of the scores are the same, the results are + not guaranteed to be the same between CPU and GPU. + Parameters ---------- boxes : Tensor[N, 4])
[box_iou->[box_area],batched_nms->[nms],nms->[nms]]
Performs non - maximum suppression on the boxes according to their intersection - over - union.
I think I would phrase it slightly differently, because this is a subset of the cases where this inconsistency happens. I would maybe say something like: > If multiple boxes have the exact same score and satisfy the IoU criterion with respect to a reference box, the selected box is not guaranteed to be the same between CPU and GPU. This is similar to the behavior of argsort in PyTorch when repeated values are present.
@@ -246,6 +246,7 @@ func (s *State) CreateRule(_ context.Context, rule *storage.Rule) (*storage.Rule if err := s.rules.Add(rule.ID, rule, cache.NoExpiration); err != nil { return nil, storage_errors.ErrConflict } + s.bumpPolicyVersion() return rule, nil }
[AddPolicyMembers->[bumpPolicyVersion],PurgeSubjectFromPolicies->[bumpPolicyVersion],Reset->[bumpPolicyVersion],CreateRole->[bumpPolicyVersion],RemovePolicyMembers->[bumpPolicyVersion],DeletePolicy->[bumpPolicyVersion,GetPolicy],DeleteRule->[GetRule],DeleteProject->[GetProject],CreatePolicy->[bumpPolicyVersion],UpdatePolicy->[bumpPolicyVersion],UpdateRole->[bumpPolicyVersion],DeleteRole->[GetRole,bumpPolicyVersion],ReplacePolicyMembers->[bumpPolicyVersion]]
CreateRule creates a new rule in the state.
`bumpPolicyVersion` is used to signal other servers that the OPA cache needs refreshing. A. Are you now using it to also signal that the separate rules cache needs refreshing? If so, should probably rename to e.g., `bumpCacheContentsVersion`. B. If you are not, how is the rules cache update being signaled in the HA environment? ... Looks like you are reusing it for both, so the rename would be helpful.
@@ -112,8 +112,16 @@ export class Xhr { * @private */ fetchAmpCors_(input, opt_init) { + const init = opt_init || {}; input = this.getCorsUrl(this.win, input); - return this.fetch_(input, opt_init).then(response => { + // For some same origin requests, add AMP-Same-Origin: true header to allow + // publishers to validate that this request came from their own origin + // when Origin header is not going to be set by the browser. + if (!this.willSetOriginHeader_(input, init)) { + init['headers'] = init['headers'] || {}; + init['headers']['AMP-Same-Origin'] = 'true'; + } + return this.fetch_(input, init).then(response => { const allowSourceOriginHeader = response.headers.get( ALLOW_SOURCE_ORIGIN_HEADER); if (allowSourceOriginHeader) {
[No CFG could be retrieved]
Fetches a single from the server. Fetches and constructs JSON object based on the fetch polyfill.
Could we simplify this (origin + method + browser checking) to "if same origin then set AMP-Same-Origin header"? It's simpler, and it can also be surprising that not all AMP same origin XHRs contain the AMP-Same-Origin: true header (there are no security consequences, just weird).
@@ -343,6 +343,7 @@ public final class OzoneConsts { // Default OMServiceID for OM Ratis servers to use as RaftGroupId public static final String OM_SERVICE_ID_DEFAULT = "omServiceIdDefault"; + public static final String OM_DEFAULT_NODE_ID = "om1"; // Dummy OMNodeID for OM Clients to use for a non-HA OM setup public static final String OM_NODE_ID_DUMMY = "omNodeIdDummy";
[OzoneConsts->[key,of,compile]]
Creates a unique name for a node. region Block Key Name.
I assume this `omNodeIdDummy` will only be use if someone disables Ratis?
@@ -79,7 +79,7 @@ public class ProducerView extends ActiveMQAbstractView<ServerProducer> { case "user": return session.getUsername(); case "clientID": - return session.getRemotingConnection().getClientID(); + return session.getMetaData("jms-client-id") == null ? session.getRemotingConnection().getClientID() : session.getMetaData("jms-client-id"); case "protocol": return session.getRemotingConnection().getProtocolName(); case "address":
[ProducerView->[getField->[getRemoteAddress,IllegalArgumentException,getName,getAddress,getSessionID,getDefaultAddress,getCreationTime,getUsername,getClientID,getLocalAddress,getSessionByID,getProtocolName,getID],toJson->[getSessionID,getCreationTime,toString,add,getSessionByID],ProducerFilterPredicate]]
This method returns the field of the given producer.
this should not be needed, if anything the session and producer models should be re-worked to allow for a client id to be set at session level generically. And ditto around solution with consumer
@@ -6,10 +6,6 @@ class RulesOfUseForm attr_reader :terms_accepted - def self.model_name - ActiveModel::Name.new(self, nil, 'User') - end - def initialize(user) @user = user end
[RulesOfUseForm->[validate_terms_accepted->[add,t],process_successful_submission->[call,success],model_name->[new],submit->[success,new,valid?],attr_reader,validate,include,attr_accessor]]
missing - user - name - missing - terms - accepted - name.
this isn't used implicitly for error messages?
@@ -110,9 +110,8 @@ import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_ public class ThriftHiveMetastore implements HiveMetastore { - private final ThriftHiveMetastoreStats stats; + private final ThriftHiveMetastoreStats stats = new ThriftHiveMetastoreStats(); private final MetastoreLocator clientProvider; - private final Function<Exception, Exception> exceptionMapper; private final double backoffScaleFactor; private final Duration minBackoffDelay; private final Duration maxBackoffDelay;
[ThriftHiveMetastore->[dropRole->[dropRole],listTablePrivileges->[getTable],updateTableStatistics->[getTableStatistics],alterTable->[getTable,alterTable],dropDatabase->[dropDatabase],getPartitionsByNames->[getPartitionsByNames],storePartitionColumnStatistics->[setPartitionColumnStatistics],getTableColumnStatistics->[getTableColumnStatistics],deletePartitionColumnStatistics->[deletePartitionColumnStatistics],deleteTableColumnStatistics->[deleteTableColumnStatistics],createDatabase->[createDatabase],getDatabase->[getDatabase],dropPartition->[dropPartition],addPartitionsWithoutStatistics->[addPartitions],dropTable->[dropTable],getAllDatabases->[getAllDatabases],setTableColumnStatistics->[setTableColumnStatistics],createTable->[createTable],getPartition->[getPartition],getTable->[getTable],createRole->[createRole],setPartitionColumnStatistics->[setPartitionColumnStatistics],dropExtraColumnStatisticsAfterAlterPartition->[deletePartitionColumnStatistics],alterDatabase->[alterDatabase],getFields->[getFields],alterPartitionWithoutStatistics->[alterPartition],getMetastorePartitionColumnStatistics->[getPartitionColumnStatistics],getSupportedColumnStatistics->[getSupportedColumnStatistics],revokeRole->[revokeRole],getAllTables->[getDatabase,getAllTables],getPartitionNames->[getPartitionNames],listRoleGrants->[listRoleGrants],grantRole->[grantRole]]]
Creates a new ThriftHiveMetastore object. Constructor for metastore.
nit: unrelated change (it can stay though)
@@ -72,6 +72,10 @@ DATABASES = { ) } +DATABASES["default"]["HOST"] = ( + DATABASES["default"]["HOST"].replace("%3a", ":").replace("%3A", ":") +) + DEFAULT_AUTO_FIELD = "django.db.models.AutoField" TIME_ZONE = "UTC"
[get_list->[strip,split],get_bool_from_env->[literal_eval,ValueError,format],get_host->[get_current],hasattr,timedelta,join,config,warn,CeleryIntegration,int,init,get_list,iter_entry_points,parse,append,dirname,get_random_secret_key,__import__,setdefault,ignore_logger,format,normpath,Config,ImproperlyConfigured,DjangoIntegration,get,get_bool_from_env]
Get the value of the variable name in the environment. Returns a list of all known alphabetical characters.
Does it have to be done by `replace` here or could we use `urllib.parse.unquote` instead? Are there any escape sequences we don't want to decode?
@@ -156,6 +156,14 @@ class ParlaiParser(argparse.ArgumentParser): help='number of HITs that can be launched at the same time, 0 is ' 'unlimited.' ) + mturk.add_argument( + '--min-messages', dest='min-messages', + default=0, type=int, + help='number of messages required to be sent by MTurk agent when ' + 'considering whether to approve a HIT in the event of a ' + 'partner disconnect. I.e. if the number of messages ' + 'exceeds this number, the turker can submit the HIT.' + ) mturk.set_defaults(is_sandbox=True) mturk.set_defaults(is_debug=False)
[ParlaiParser->[add_parlai_args->[add_parlai_data_path],add_model_args->[class2str],print_args->[parse_args]]]
Add MTurk arguments based on the passed - in flags. Adds command line options for a specific MTurk worker.
dest should be `min_messages`
@@ -107,13 +107,13 @@ abstract class VcsDriver implements VcsDriverInterface protected function getBaseComposerInformation($identifier) { - $composerFileContent = $this->getFileContent('composer.json', $identifier); + $composerFileContent = $this->getFileContent(ltrim(\Composer\Factory::getComposerFile(),'/.'), $identifier); if (!$composerFileContent) { return null; } - $composer = JsonFile::parseJson($composerFileContent, $identifier . ':composer.json'); + $composer = JsonFile::parseJson($composerFileContent, $identifier . ':' . ltrim(\Composer\Factory::getComposerFile(),'/.')); if (empty($composer['time']) && $changeDate = $this->getChangeDate($identifier)) { $composer['time'] = $changeDate->format(DATE_RFC3339);
[VcsDriver->[getContents->[getContents],getComposerInformation->[shouldCache],hasComposerFile->[getComposerInformation]]]
Get base composer information.
This is really bad. Changing the name of the composer.json used locally must absolutely not impact the name used for repositories (the whole ecosystem will not have duplicated their config with your custom name)
@@ -27,12 +27,13 @@ class DvcIgnore(object): raise NotImplementedError -class DvcIgnoreFromFile(DvcIgnore): - def __init__(self, ignore_file_path, ignore_handler): +class DvcIgnorePatterns(DvcIgnore): + def __init__(self, ignore_file_path): self.ignore_file_path = ignore_file_path self.dirname = os.path.normpath(os.path.dirname(ignore_file_path)) - self.ignore_spec = ignore_handler.read_patterns(ignore_file_path) + with open(ignore_file_path, encoding="utf-8") as fobj: + self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj) def __call__(self, root, dirs, files): files = [f for f in files if not self.matches(root, f)]
[DvcIgnoreFromFile->[__init__->[read_patterns]],DvcIgnoreFilter->[__call__->[update],update->[DvcIgnoreFromFile],_process_ignores_in_parent_dirs->[get_repo_root],__init__->[DvcIgnoreFile,DvcIgnoreDir]]]
Initialize an object.
Judging by `__eq__`, we expect only abs path here, right? Should we put a check/assert here? Otherwise it might get nasty with `__eq__` saying True for two relative paths.
@@ -242,6 +242,13 @@ def _use_remote_execution(oauth_token_path: str) -> List[str]: ] +def _use_remote_cache(oauth_token_path: str) -> List[str]: + return [ + "--pants-config-files=pants.remote-cache.toml", + f"--remote-oauth-bearer-token-path={oauth_token_path}", + ] + + def _run_command( command: List[str], *,
[_run_command->[check_pants_pex_exists],_test_command->[_use_remote_execution],run_platform_specific_tests->[_run_command,_test_command],run_clippy->[_run_command],run_githooks->[_run_command],run_lint->[_run_command,_use_remote_execution],run_python_tests->[_run_command,_test_command],run_smoke_tests->[run_check,check_pants_pex_exists],main]
Run a command in the Travis section.
Btw, you can delete the remote_execution code. It's stale and should have probably been deleted earlier.
@@ -52,8 +52,11 @@ class SimpleLanguageModelingDatasetReader(DatasetReader): sentence: str) -> Instance: # pylint: disable=arguments-differ tokenized = self._tokenizer.tokenize(sentence) + tokenized_with_ends = [Token(ELMoCharacterMapper.bos_token)] + tokenized_with_ends.extend(tokenized) + tokenized_with_ends.append(Token(ELMoCharacterMapper.eos_token)) return_instance = Instance({ - 'source': TextField(tokenized, self._token_indexers), + 'source': TextField(tokenized_with_ends, self._token_indexers), }) return return_instance
[SimpleLanguageModelingDatasetReader->[_read->[text_to_instance]]]
Return a single instance of the class with the specified text.
This part would be good to get a sanity check on. Any implications to just throwing these bos and eos tags back in?
@@ -195,6 +195,8 @@ class FileSink(iobase.Sink): The returned file handle is passed to ``write_[encoded_]record`` and ``close``. """ + if self._file_system is None: + self._file_system = get_filesystem(temp_path) return self._file_system.create(temp_path, self.mime_type, self.compression_type)
[FileSink->[close->[close],finalize_write->[_rename_batch->[exists,rename]]],ChannelFactory->[exists->[exists],rename_batch->[rename],rename->[rename],open->[open]],FileSinkWriter->[close->[close],write->[write_record],__init__->[open]]]
Opens a temporary file and returns an opaque file handle object.
Should not this be `get_filesystem(self.file_path_prefix.get())`? Otherwise the type of `self._file_system` might be different depending on which function is called first and its parameters.
@@ -46,7 +46,7 @@ class NegotiationTeacher(Teacher): def __init__(self, opt, shared=None): super().__init__(opt, shared) self.datatype = opt['datatype'].split(':')[0] - self.random = self.datatype == 'train' + self.random = self.datatype == 'train' and 'ordered' not in opt['datatype'] build(opt) filename = 'val' if self.datatype == 'valid' else self.datatype
[NegotiationTeacher->[num_examples->[get_tag],_start_dialogue->[get_tag,_split_dialogue]]]
Initialize a sequence of n-tuple objects.
in retrospect, i don't think this is the patch. pre-split should be the right choice here...
@@ -103,6 +103,11 @@ func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) er // Don't try and re-create the deployer pod. break } + + if _, ok := deployment.Annotations[deployapi.DeploymentIgnorePodAnnotation]; ok { + return nil + } + // Generate a deployer pod spec. podTemplate, err := c.makeDeployerPod(deployment) if err != nil {
[Handle->[getDeployerPodsFor,LabelForDeployment,HandleError,getPod,makeDeployerPod,CanTransitionPhase,Errorf,decodeConfig,DeploymentStatusFor,Infof,cancelDeployerPods,createPod,deletePod,V,DeploymentNameFor,IsDeploymentCancelled,updateDeployment,DeployerPodNameForDeployment,Sprintf,IsNotFound,emitDeploymentEvent],getPod->[getPodFunc],cancelDeployerPods->[Infof,V,getDeployerPodsFor,Errorf,emitDeploymentEvent,LabelForDeployment,HandleError,updatePod],makeDeployerPod->[makeContainer,MergeInto,DeployerPodNameForDeployment,decodeConfig],createPod->[createPodFunc],deletePod->[deletePodFunc],getDeployerPodsFor->[getDeployerPodsForFunc],getDeployment->[getDeploymentFunc],emitDeploymentEvent->[Sprintf,Eventf,decodeConfig],updatePod->[updatePodFunc],updateDeployment->[updateDeploymentFunc]]
Handle handles the deployment Creates a deployer pod if the existing pod does not exist in the deployment This function is used to get the deployer pod from the deployment client and if the deploy This function deletes all the deployer pods that have the specified label in the deployment cleanUpAll checks if all pods in the deployment are in the same status as the next.
Can you talk more about this? Callers are expected to manually run deployers? Can you add an example using it? Will we automate setting the annotation for users (maybe a `oc deploy` subcommand) ?
@@ -32,13 +32,8 @@ import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; -public class ObjectFlatteners +public abstract class ObjectFlatteners { - private ObjectFlatteners() - { - // No instantiation. - } - public static <T> ObjectFlattener<T> create( final JSONPathSpec flattenSpec, final FlattenerMaker<T> flattenerMaker
[ObjectFlatteners->[create->[flatten->[size->[size],get->[get],keySet->[keySet,isEmpty]]]]]
Create a new object flattener that flattens the given JSON path spec and flat Remove all entries with a key.
This seems like an unneeded change, I guess it achieves the same thing, but I think it's a bit more confusing because I at least expect a class to be abstract because it's going to be implemented somewhere, whereas previously it was obviously just trying to be a static class
@@ -169,7 +169,7 @@ def main(): new_model.to(device) new_model.load_state_dict(torch.load('pruned_vgg19_cifar10.pth')) test(new_model, device, test_loader) - # top1 = 93.61% + # top1 = 93.74% if __name__ == '__main__':
[train->[train,updateBN],main->[train,vgg,test],main]
Train the unpruned VGG-19 model and test the model. Learning efficient convolutional networks through network slimming.
does this value will change by some random seed? If will, we should not assume the top1 value is a specific value.
@@ -49,7 +49,7 @@ client = WebPubSubServiceClient(credential=DefaultAzureCredential(), endpoint=en # Send a json message to everybody on the given hub... try: # Raise an exception if the service rejected the call - client.send_to_all('Hub', message={'Hello': 'all'}) + client.send_to_all('Hub', message={'Hello': 'all'}, content_type='application/json') print('Successfully sent a JSON message') except HttpResponseError as e: print('Failed to send JSON message: {}'.format(e.response.json()))
[send_to_all,error,print,getLogger,basicConfig,json,BytesIO,WebPubSubServiceClient,DefaultAzureCredential,format,exit]
This function builds a client through AAD and sends a message to everybody on the hub.
I don't think you actually need to specify content type here, it should default to application/json
@@ -16,4 +16,14 @@ class T::Private::DeclState def reset! self.active_declaration = nil end + + def self.without_on_method_added + begin + old_value = current.skip_on_method_added + current.skip_on_method_added = true + yield + ensure + current.skip_on_method_added = old_value + end + end end
[reset!->[active_declaration],current->[new,current],attr_accessor,current]
Resets the current node's node ID to nil.
Sorry, I misspoke. I think this should be an instance method, so that it's `current.without`
@@ -118,7 +118,7 @@ for n_train in samples_epochs: for cov in noise_covs: inverse_operator = make_inverse_operator(evokeds[0].info, forward, cov, loose=0.2, depth=0.8, - rank=274) + rank=dict(meg=274)) stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM", pick_ori=None) for e in evokeds) stc = stc_a - stc_b
[,std,filter,show_view,make_inverse_operator,make_forward_solution,enumerate,get_xaxis,screenshot,imshow,methods_ordered,dict,compute_covariance,set_title,fill_between,crop,set,argsort,stcs,list,plot,append,read_raw_ctf,print,find_events,data_path,mean,len,axis,get_yaxis,format,concatenate,zip,close,subplots_adjust,subplots,apply_inverse,legend,equalize_event_counts,epochs_train,Epochs,pick_types]
Plots the results of a single . Plot the worst and worst - worst components of the .
any idea why we need to specify it manually? it should not be necessary here I think
@@ -45,4 +45,12 @@ public class JupyterUtilTest { Note n = new JupyterUtil().getNote(new InputStreamReader(resource), "%python", "%md"); } + @Test + public void getNote2() { + InputStream resource = getClass().getResourceAsStream("/iruby.ipynb"); + Note n = new JupyterUtil().getNote(new InputStreamReader(resource), "%python", "%md"); + Gson gson = new Gson(); + System.out.println(gson.toJson(n)); + } + }
[JupyterUtilTest->[getNbFormat->[assertTrue,get,getResourceAsStream,InputStreamReader,getNbformat],getNote->[InputStreamReader,getNote,getResourceAsStream]]]
Get note from resource.
this `/iruby.ipynb` seems to be absent, may need to add it
@@ -20,9 +20,12 @@ """Counters collect the progress of the Worker for reporting to the service.""" from __future__ import absolute_import +from __future__ import division import math import random +from builtins import hex +from builtins import object from apache_beam.utils import counters from apache_beam.utils.counters import Counter
[SideInputReadCounter->[__init__->[update_current_step]],TransformIOCounter->[__exit__->[__exit__],__enter__->[__enter__]],OperationCounters->[_observable_callback->[_observable_callback_inner->[update]],_should_sample->[_compute_next_sample],update_from->[update],do_sample->[SumAccumulator,_observable_callback,update],should_sample->[_should_sample],update_collect->[value,update]]]
Creates a new instance of an object. Update the current step.
Let's add # cython: language_level=3, since this file will be cythonized.
@@ -103,6 +103,14 @@ func TestAgentConfig(t *testing.T) { assert.Equal(t, configured2, settings) assert.NotEqual(t, etags[url], resp.Header.Get("Etag")) } + + t.Log("waiting for the agent_config metrics to be published...") + waitForLogMessage(t, srv, "bulk request completed: 2 indexed", 1) + + result := systemtest.Elasticsearch.ExpectMinDocs(t, 2, "metrics-apm.internal-*", + estest.TermQuery{Field: "metricset.name", Value: "agent_config"}, + ) + systemtest.ApproveEvents(t, t.Name(), result.Hits.Hits[:2], "@timestamp") } func queryAgentConfig(t testing.TB, serverURL, serviceName, serviceEnvironment, etag string) (map[string]string, *http.Response) {
[NewRequest,NewUnstartedServer,Close,Encode,Is,Set,DeleteAgentConfig,Stop,NewTimer,CleanupElasticsearch,Empty,Start,NotEqual,After,Logf,Equal,Do,NoError,Fatalf,CreateAgentConfig,Get,NewDecoder,Decode,String,Fatal,Parse,Helper]
queryAgentConfig queries the agent config for the given service name and environment. apm-server does not return an ETag on read.
ExpectMinDocs will retry for up to 10 seconds. Is it needed/useful to wait for the metrics to be published first?
@@ -19,7 +19,7 @@ public class TooltipProperties { private static final String TOOLTIP = "tooltip"; private static final String UNIT = "unit"; private static TooltipProperties ttp = null; - private static long timestamp = 0; + private static Instant timestamp = Instant.now(); private final Properties properties = new Properties(); protected TooltipProperties() {
[TooltipProperties->[getInstance->[TooltipProperties]]]
package for testing purposes get the tooltip of the unit.
Should the right-hand side be `Instant#EPOCH`?
@@ -43,4 +43,12 @@ module.exports = { }; validator.description = 'Builds and tests the AMP validator.'; +validator.flags = { + 'update_tests': 'Updates validation test output files', +}; + validatorWebui.description = 'Builds and tests the AMP validator web UI.'; +validatorWebui.flags = { + 'update_tests': 'Updates validation test output files', +}; +
[No CFG could be retrieved]
Description of the AMP validator.
Add two spaces between `'` and `Updates` to make the output of `gulp help` more readable.
@@ -84,7 +84,7 @@ class ShippingZone(models.Model): def countries_display(self): countries = self.countries if self.default: - from ..dashboard.shipping.forms import get_available_countries + from .utils import get_available_countries countries = get_available_countries() if countries and len(countries) <= 3:
[ShippingMethodQueryset->[applicable_shipping_methods_for_instance->[applicable_shipping_methods],applicable_shipping_methods->[_applicable_price_based_methods,_applicable_weight_based_methods]],ShippingMethod->[__repr__->[_get_weight_type_display],get_type_display->[_get_weight_type_display,_get_price_type_display]]]
Returns a string describing the number of countries available in the system.
Are you sure that circular imports still exist? Maybe we could move this import at the beginning of the file?
@@ -15,7 +15,17 @@ def Factory(settings, model): ) return TemporalStatisticsProcess(model, settings["Parameters"]) +''' + Temporal Statistics process + This process calculates temporal statistics for given input variables in given container, and outputs to chosen variables + and chosen container. + + This is compatible in OpenMP and MPI + + Note: When this process is used with restarting, please don't use restarting start timestep and "statistics_start_point_control_value" time step + same. This will have an error in averaging, once simulation is restarted. +''' class TemporalStatisticsProcess(Kratos.Process): def __init__(self, model, settings): Kratos.Process.__init__(self)
[TemporalStatisticsProcess->[__init__->[__init__]]]
Factory method for creating a Kratos process object. Initialize the object with the current statistics control variable.
I don't know the details but it would be nice if this can work in restarts without manual modifications ;) Speaking from experience it is difficult keeping track of what needs to be changed where (for restarts)
@@ -151,7 +151,7 @@ public class JoltTransformRecord extends AbstractProcessor { .displayName("Custom Transformation Class Name") .description("Fully Qualified Class Name for Custom Transformation") .required(false) - .expressionLanguageSupported(ExpressionLanguageScope.NONE) + .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) .build();
[JoltTransformRecord->[customValidate->[customValidate],normalizeJoltObjects->[normalizeJoltObjects],createTransform->[getTransform],transform->[transform],normalizeRecordObjects->[normalizeRecordObjects]]]
Default value for chainr.
This property is ignored unless the transformation type is CUSTOMR right? If so, it should have a `.dependsOn()` call in the builder, same goes for Custom Module Directory below.
@@ -124,11 +124,11 @@ trait RangeFilterTrait /** * Normalize the value. * - * @return int|float|null + * @return int|float|string | null */ private function normalizeValue(string $value, string $operator) { - if (!is_numeric($value)) { + if (!is_numeric($value) && !UuidV6::isValid($value)) { $this->getLogger()->notice('Invalid filter ignored', [ 'exception' => new InvalidArgumentException(sprintf('Invalid value for "[%s]", expected number', $operator)), ]);
[getDescription->[getFilterDescription,getProperties,getFieldNames,isPropertyMapped],normalizeBetweenValues->[notice],normalizeValue->[notice],getFilterDescription->[normalizePropertyName],normalizeValues->[notice]]
Normalizes the value to the right type.
We don't want to depend on an external vendor, like Ramsey or even Symfony UID component. Maybe we should have our own check?
@@ -115,11 +115,11 @@ func (r *Recurring) Stop() { // AddJob looks for "cron" initiators, adds them to cron's schedule // for execution when specified. func (r *Recurring) AddJob(job models.JobSpec) { - for _, initr := range job.InitiatorsFor(models.InitiatorCron) { - cronStr := string(initr.Schedule) + for _, i := range job.InitiatorsFor(models.InitiatorCron) { + initr := i if !job.Ended(r.Clock.Now()) { - r.Cron.AddFunc(cronStr, func() { - _, err := BeginRun(job, r.store, models.RunResult{}) + r.Cron.AddFunc(string(initr.Schedule), func() { + _, err := BeginRun(job, initr, models.RunResult{}, r.store) if err != nil && !expectedRecurringError(err) { logger.Error(err.Error()) }
[Start->[AddJob,New,Start,Errorf,Jobs],AddJob->[Ended,AddFunc,Error,InitiatorsFor,AddJob,Now,RunJobAt],Stop->[Wait,Stop],RunJobAt->[After,DurationFromNow,Error],New]
AddJob adds a job to the recurring schedule.
way to watch out for the closure trap.
@@ -272,7 +272,10 @@ public class VirtualRoutingResource { _port = NumbersUtil.parseInt(value, 3922); value = (String)params.get("router.aggregation.command.each.timeout"); - _eachTimeout = Duration.standardSeconds(NumbersUtil.parseInt(value, 10)); + _eachTimeout = Duration.standardSeconds(NumbersUtil.parseInt(value, (int)VRScripts.VR_SCRIPT_EXEC_TIMEOUT.getStandardSeconds())); + if (s_logger.isDebugEnabled()){ + s_logger.debug("The router.aggregation.command.each.timeout in seconds is set to " + _eachTimeout.getStandardSeconds()); + } if (_vrDeployer == null) { throw new ConfigurationException("Unable to find the resource for VirtualRouterDeployer!");
[VirtualRoutingResource->[connect->[connect],execute->[generateCommandCfg,applyConfigToVR],applyConfigToVR->[applyConfigToVR],applyConfig->[applyConfigToVR]]]
Configures the virtual router deployer.
@borisstoyanov however, this fix makes sense: it uses the default value when the setting is not parseable.
@@ -134,5 +134,5 @@ public interface CEQueueStatus { /** * Time spent processing batch reports since startup. */ - long getProcessingTime(); + long getProcessingTimeInMs(); }
[No CFG could be retrieved]
Returns the processing time of the current task.
I don't think the `InMs` is useful. Is `Date.getTime()` named `Date.getTimeInMs()` ? In addition, `addError` and `addSuccess` take a long which is a time in ms
@@ -43,7 +43,7 @@ class KratosFreeSurfaceGeneralTests(KratosUnittest.TestCase): self.checkResults() def setUp(self): - self.check_tolerance = 1e-6 + self.check_tolerance = 1e-5 self.print_reference_values = False def setUpProblem(self):
[KratosFreeSurfaceGeneralTests->[runTest->[AddDofs,ModelPartIO,SetBufferSize,EdgeBasedLevelSetSolver,int,GetSolutionStepValue,SetNumThreads,ReduceTimeStep,ReadModelPart,AddVariables,Solve,CloneTimeStep,OpenMPUtils,SetSolutionStepValue,Initialize,EstimateTimeStep,CreateModelPart,Vector,Model],ExecuteReservoirTests->[runTest,checkResults,WorkFolderScope,setUp,tearDown,setUpProblem],checkResults->[rstrip,write,GetSolutionStepValue,assertAlmostEqual,format,readline,float,open,WorkFolderScope,fail],testReservoir3D->[ExecuteReservoirTests,Vector],testReservoir2D->[ExecuteReservoirTests,Vector]],GetFilePath->[dirname]]
Set up the object for the test.
@joaquinirazabal may I ask why you required to change the tolerance? I think the changes in this PR must not affect the results of the tests.
@@ -1,11 +1,11 @@ class ServiceProviderRequest attr_accessor :uuid, :issuer, :url, :ial, :aal, :requested_attributes - def initialize(uuid: nil, issuer: nil, url: nil, ial: nil, aal: nil, requested_attributes: []) + def initialize(uuid: nil, issuer: nil, url: nil, loa: nil, ial: nil, aal: nil, requested_attributes: []) @uuid = uuid @issuer = issuer @url = url - @ial = ial + @ial = ial || loa @aal = aal @requested_attributes = requested_attributes&.map(&:to_s) end
[ServiceProviderRequest->[initialize->[map],to_json,attr_accessor]]
Initialize a new object with the given arguments.
What about adding a `logger.info` or `logger.warn` if we detect an `loa` param, so that way when we stop seeing them in the logs, we know it's safe to remove?
@@ -122,8 +122,8 @@ public class NumericValueWidget extends Composite fireEvent(new EnsureVisibleEvent()); textBox_.getElement().focus(); RStudioGinjector.INSTANCE.getGlobalDisplay().showErrorMessage( - "Error", - label_ + " must be a valid number.", + constants_.errorCaption(), + label_ + " " + constants_.rStudioGinjectorErrorMessage() , textBox_); return false; }
[NumericValueWidget->[addValueChangeHandler->[addValueChangeHandler],setValue->[setValue],setEnabled->[setEnabled],getValue->[getValue],setWidth->[setWidth]]]
Validate the .
Use a placeholder in the string, not string concatenation.
@@ -395,8 +395,7 @@ class ShareOperations(object): # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') # Construct and send request
[ShareOperations->[create_snapshot->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,put,header,StorageErrorException],set_metadata->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,put,header,StorageErrorException],create->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,put,header,StorageErrorException],set_access_policy->[query,_deserialize,map_error,pop,body,get,url,format_url,run,cls,put,header,StorageErrorException],create_permission->[query,_deserialize,map_error,pop,body,get,url,format_url,run,cls,put,header,StorageErrorException],get_access_policy->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,StorageErrorException,header],set_quota->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,put,header,StorageErrorException],get_properties->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,StorageErrorException,header],get_statistics->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,StorageErrorException,header],get_permission->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,StorageErrorException,header],delete->[query,_deserialize,map_error,pop,get,url,format_url,run,cls,StorageErrorException,header,delete]]]
Returns the permission for a given key. Get a single lease object from the server.
This means permissions is now required, did you check this doesn't break anything and our surface API already was making this required?
@@ -2,13 +2,13 @@ # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) - -from spack import * - import os import re +import spack.package + +@spack.package.detectable class Automake(AutotoolsPackage, GNUMirrorPackage): """Automake -- make file builder part of autotools"""
[Automake->[setup_dependent_package->[_make_executable]]]
Create a new object of type with the given name. Create a class method for the Seaborn - related classes.
What does this get us? Does it speed things up when running `spack external find` or `spack external list`? Will this have problems like the ordering of `@run_after` and `@on_packages_attributes` if we ever add another decorator?
@@ -10,13 +10,12 @@ import ( "github.com/harmony-one/harmony/core/types" "github.com/harmony-one/harmony/internal/utils" "github.com/harmony-one/harmony/shard" - "github.com/harmony-one/harmony/staking/slash" staking "github.com/harmony-one/harmony/staking/types" ) // Constants of proposing a new block const ( - PeriodicBlock = 200 * time.Millisecond + PeriodicBlock = 20 * time.Millisecond IncomingReceiptsLimit = 6000 // 2000 * (numShards - 1) )
[proposeNewBlock->[ShardID,SetCoinbase,proposeReceiptsProof,Msgf,IsStaking,FinalizeNewBlock,Blockchain,Beaconchain,Error,GetViewID,CommitReceipts,BlockNum,ReadCrossLink,CommitTransactions,Len,LastCommitSig,UpdateCurrent,Logger,Debug,GetCurrentHeader,Pending,ReadPendingCrossLinks,IsCrossLink,Err,Config,Msg,IsPreStaking,SuperCommitteeForNextEpoch,GetAddress,VerifyCrossLink,GetECDSAFromCoinbase,Epoch,SetBytes],proposeReceiptsProof->[ShardID,Unlock,Msgf,Blockchain,Error,HasCrossTxFields,Cmp,ValidateCXReceiptsProof,Lock,Logger,Debug,GetCurrentHeader,SliceStable,IsSpent,Uint64,Contains,GetPendingCXKey,Err,Config,Msg,Epoch,Interface,Validator,Number],WaitForConsensusReadyV2->[Before,Msg,Int,Add,CurrentBlock,Uint64,IncomingReceipts,IsLeader,Transactions,Now,Len,Blockchain,proposeNewBlock,Sleep,Err,Logger,NumberU64,Debug]]
WaitForConsensusReadyV2 waits for a new block to be probed and generates a proposeNewBlock proposes a new block.
What is the basis of reducing the period to 20ms from 200ms?
@@ -17,7 +17,9 @@ const colors = require('ansi-colors'); const debounce = require('debounce'); const fs = require('fs-extra'); +const globby = require('globby'); const log = require('fancy-log'); +const pathmod = require('path'); const wrappers = require('../compile/compile-wrappers'); const { extensionAliasBundles,
[No CFG could be retrieved]
Creates an AMPHTML build for a given node id. This function is a utility function to provide a default implementation of an extension that is not supported.
Another `path` clash rears its head For consistency with other parts of the codebase, could we call this `pathModule` instead of `pathmod`? A future cleanup is forthcoming.
@@ -34,14 +34,3 @@ using System.Reflection; [assembly: AssemblyProduct("http://www.dnnsoftware.com")] [assembly: AssemblyCopyright("DotNetNuke is copyright 2002-2018 by DNN Corporation. All Rights Reserved.")] [assembly: AssemblyTrademark("DNN")] -// Version information for an assembly consists of the following four values: -// -// Major Version -// Minor Version -// Build Number -// Revision -// -// You can specify all the values or you can default the Build and Revision Numbers -// by using the '*' as shown below: -[assembly: AssemblyVersion("9.2.2.0")] -[assembly: AssemblyFileVersion("9.2.2.0")]
[No CFG could be retrieved]
Assembly options for the DNA version.
Existing versions removed, because they are essentially unnecessary and would be confusing