patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -1234,6 +1234,7 @@ class TestVideoReader: with pytest.raises(RuntimeError): io.read_video('foo.mp4') + @PY39_SKIP def test_audio_present_pts(self): """Test if audio frames are returned with pts unit.""" backends = ['video_reader', 'pyav']
[TestVideoReader->[test_compare_read_video_from_memory_and_file->[compare_decoding_result,check_separate_decoding_result,_get_video_tensor],test_read_video_from_file->[check_separate_decoding_result,compare_decoding_result,_decode_frames_by_av_module],test_read_video_from_memory_scripted->[_get_video_tensor],test_probe_video_from_memory->[check_probe_result,_get_video_tensor],test_read_video_in_range_from_memory->[_pts_convert,_get_timebase_by_av_module,_get_video_tensor,compare_decoding_result,_decode_frames_by_av_module],test_read_video_from_memory->[compare_decoding_result,check_separate_decoding_result,_get_video_tensor,_decode_frames_by_av_module],test_probe_video_from_memory_script->[check_meta_result,_get_video_tensor],test_probe_video_from_file->[check_probe_result],test_read_video_from_memory_get_pts_only->[compare_decoding_result,_get_video_tensor]],_decode_frames_by_av_module->[_read_from_stream,_fraction_to_tensor]]
Test if a file is invalid.
The linux job was segfaulting on 3.9, so instead of skipping the tests should we instead check for the python version in the `setup.py`? I.e. follow the same logic as for the `sys.platform` check? Skipping the test while allowing to build will make the user believe that the build went fine, but in reality they still can't use the ffmpeg functionalities
@@ -123,7 +123,7 @@ class Sorbet::Private::HiddenMethodFinder 'tc', # Make sure we don't load a sorbet/config in your cwd '--no-config', - '--print=symbol-table-json', + '--print=symbol-table-full-json', # Method redefined with mismatched argument is ok since sometime # people monkeypatch over method '--error-black-list=4010',
[serialize_values->[real_name],serialize_class->[real_name,serialize_constants],main->[main],symbols_id_to_name_real->[symbols_id_to_name_real],require_everything->[require_everything],main]
Writes the constant table of a single node in the RBI file. Returns true if the result of the operation is a valid NID.
Same here. Please let me know if we can get away with the short version.
@@ -121,8 +121,11 @@ public class Request extends HashMap<String, String> { * Must read the object from the server response and return it or throw a {@link RemoteException} if server sent an * error. */ - public Object handleResult(int status, String ctype, String disp, InputStream stream) + public Object handleResult(int status, Header[] headers, InputStream stream, HttpContext ctx) throws RemoteException, IOException { + // TODO kevin: check if it's enough regarding to entity content type + String ctype = getHeaderValue(headers, "Content-Type"); + // Specific http status handling if (status >= Response.Status.BAD_REQUEST.getStatusCode()) { handleException(status, ctype, stream);
[Request->[readBlobs->[InputStreamDataSource,MimeMultipart,FileInputStream,copyToTempFile,Blobs,getInputStream,getFileName,getContentType,getBodyPart,IOException,readBlob,add,getCount,delete],handleResult->[readEntity,readBlobs,RemoteException,Blobs,readStringBlob,getFileName,handleException,readRegistry,read,getStatusCode,readBlob,toLowerCase,startsWith],readStringBlob->[StringBlob],getFileName->[group,RuntimeException,find,decode,matcher],handleException->[RemoteException,equalsIgnoreCase,read,readException],readBlob->[setMimeType,setFileName,copyToTempFile,FileBlob],compile]]
Handle the result of a request.
Use constants for header names
@@ -76,3 +76,8 @@ class ShippingZone(CountableDjangoObjectType): @staticmethod def resolve_shipping_methods(root: models.ShippingZone, *_args): return root.shipping_methods.all() + + @staticmethod + @permission_required(ProductPermissions.MANAGE_PRODUCTS) + def resolve_warehouses(root: models.ShippingZone, *_args): + return root.warehouses.all()
[ShippingMethod->[TranslationField,ShippingMethodTypeEnum],ShippingZone->[resolve_shipping_methods->[all],resolve_countries->[CountryDisplay],List,field,Field]]
Resolve shipping methods.
I almost all ower simple resolvers we use arguments `(root: <TYPE>, _info):` instead of `(root: <TYPE>, *_args):` can you make resolvers in this file more coherent to other.
@@ -156,7 +156,9 @@ var $AnimateProvider = ['$provide', function($provide) { className = isString(className) ? className : isArray(className) ? className.join(' ') : ''; - element.addClass(className); + forEach(element, function (element) { + JQLiteAddClass(element, className); + }); done && $timeout(done, 0, false); },
[No CFG could be retrieved]
Adds the provided className CSS class value to the provided element. Remove the neccessary class from the element.
wouldn't forEach try to iterate over element properties if `element` is not an array? I suspect that we need an isArray check here
@@ -96,6 +96,9 @@ namespace System.Reflection.Emit return t == typeof(object); } + [MemberNotNull(nameof(m_con))] + [MemberNotNull(nameof(m_constructorArgs))] + [MemberNotNull(nameof(m_blob))] internal void InitCustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, PropertyInfo[] namedProperties, object?[] propertyValues, FieldInfo[] namedFields, object?[] fieldValues)
[CustomAttributeBuilder->[ValidateType->[ValidateType],CreateCustomAttribute->[CreateCustomAttribute],EmitValue->[EmitType,EmitString,EmitValue],EmitType->[EmitType],InitCustomAttributeBuilder->[ValidateType]]]
Checks if the given type is a valid type. This method is called by the constructor that is called by the constructor. It is called by Emit all the property sets that can be written. Emit a property type name and value.
Can this method be folded into constructor? We would not need the `MemberNotNull` annotations at all.
@@ -479,8 +479,10 @@ public abstract class AbstractAmqpOutboundEndpoint extends AbstractReplyProducin if (messageId == null) { messageId = NO_ID; } - correlationData = new CorrelationDataWrapper(messageId.toString(), - this.correlationDataGenerator.processMessage(requestMessage), requestMessage); + Object userData = this.correlationDataGenerator.processMessage(requestMessage); + if (userData != null) { + correlationData = new CorrelationDataWrapper(messageId.toString(), userData, requestMessage); + } } return correlationData; }
[AbstractAmqpOutboundEndpoint->[handleConfirm->[getConfirmNackChannel,getConfirmAckChannel],addDelayProperty->[setDelay]]]
This method generates the correlation data for the given request message.
Looks like previously we have had a `CorrelationDataWrapper` instance independently of the `userData`, but now you restrict it only if that one is not null. Also I see that you are protected against `null` in the `CorrelationDataWrapper` any way. So, what is the motivation to not do that any more?
@@ -0,0 +1,15 @@ +// MonoGame - Copyright (C) The MonoGame Team +// This file is subject to the terms and conditions defined in +// file 'LICENSE.txt', which is part of this source code package. + +namespace Microsoft.Xna.Framework +{ + internal static class FrameworkResources + { + #region Error strings + + internal const string ResourceCreationWhenDeviceIsNull = "The GraphicsDevice must not be null when creating new resources."; + + #endregion + } +}
[No CFG could be retrieved]
No Summary Found.
@KonajuGames - What do you think about starting to follow this technique for message strings?
@@ -88,8 +88,9 @@ func (c *DynamicCA) CheckCerts() error { return fmt.Errorf("unable to load client CA file %q: %v", c.caFile.Cert, err) } pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) + for i, crt := range certs { + pool.AddCert(crt) + glog.V(2).Infof("[%d] %q client-ca certificate: %s", i, c.caFile.Cert, getCertDetail(crt)) } verifyOpts := kubex509.DefaultVerifyOptions()
[Run->[CheckCerts,Until,HandleError,Infof],GetVerifier->[GetVerifier,Load],Equals->[DeepEqual],CheckCerts->[NewCertPool,Store,ReadFile,ParseCertsPEM,AddCert,Errorf,DefaultVerifyOptions,Equals]]
CheckCerts checks if the certificates in the caFile are valid. If not it will create.
how often does this happen?
@@ -226,9 +226,12 @@ public class ListAzureDataLakeStorage extends AbstractListProcessor<ADLSFileInfo options.setRecursive(recurseSubdirectories); Pattern baseDirectoryPattern = Pattern.compile("^" + baseDirectory + "/?"); + final String tempFilePrefix = defaultIfBlank(System.getProperty("tempFilePrefix"), TEMP_FILE_PREFIX); + final boolean includeTempFiles = context.getProperty(INCLUDE_TEMP_FILES).asBoolean(); List<ADLSFileInfo> listing = fileSystemClient.listPaths(options, null).stream() .filter(pathItem -> !pathItem.isDirectory()) + .filter(pathItem -> includeTempFiles || !isTempFile(pathItem.getName(), tempFilePrefix)) .map(pathItem -> new ADLSFileInfo.Builder() .fileSystem(fileSystem) .filePath(pathItem.getName())
[ListAzureDataLakeStorage->[getRecordSchema->[getRecordSchema]]]
Perform listing of files in the base directory.
Direct calls to `System.getProperty()` should be avoided. Can you explain the reason for getting the value of a system property?
@@ -1,9 +1,10 @@ class Api::UserProgramsController < ApplicationController before_action :authenticate_user! - def index(page: nil) @programs = current_user.programs.unchecked + .work_published + .episode_published .where('started_at < ?', Date.tomorrow + 1.day + 5.hours) .includes(:channel, :work, episode: [:work]) .order(started_at: :desc)
[index->[page],before_action]
index all the nagios.
Place the . on the previous line, together with the method call receiver.<br>Align the operands of an expression in an assignment spanning multiple lines.
@@ -360,8 +360,8 @@ function triggerResizeRequest(width, height) { nonSensitiveDataPostMessage('embed-size', {width, height}); } -function triggerRenderStart() { - nonSensitiveDataPostMessage('render-start'); +function triggerRenderStart(width, height) { + nonSensitiveDataPostMessage('render-start', {width, height}); } /**
[No CFG could be retrieved]
Registers a callback that will be called when an entity is rendered. VisibilityState - DOM visibility state.
I copy the format from other function that postMessages. `triggerRenderStart()` and `triggerRenderStart(width, height)` both work, how can we represent this in our code and let 3p ad know?
@@ -127,7 +127,7 @@ export class LocalizationService { /** * @param {string} languageCode The language code to associate with the * specified localized string bundle. - * @param {!../localized-strings.LocalizedStringBundleDef} localizedStringBundle + * @param {!Object} localizedStringBundle * The localized string bundle to register. * @return {!LocalizationService} For chaining. */
[No CFG could be retrieved]
Gets the language codes to use for the given element. Get the localized string for the given string ID.
Why is this using `!Object` instead of the more specific type?
@@ -200,7 +200,8 @@ func MoveIssueAcrossProjectBoards(issue *Issue, board *ProjectBoard) error { } pis.ProjectBoardID = board.ID - if _, err := sess.ID(pis.ID).Cols("project_board_id").Update(&pis); err != nil { + pis.Sorting = sorting + if _, err := sess.ID(pis.ID).Cols("project_board_id").Cols("sorting").Update(&pis); err != nil { return err }
[projectBoardID->[Get,Where],ProjectBoardID->[projectBoardID,GetEngine],removeIssues->[Exec],LoadProject->[loadProject,GetEngine],NumIssues->[GroupBy,Table,Count,Where,GetEngine,Cols],ProjectID->[GetEngine,projectID],NumClosedIssues->[Join,Table,Count,Where,GetEngine,Cols],projectID->[Get,Where],loadProject->[Get,Table,Where,Join],NumOpenIssues->[Join,Table,Count,Where,GetEngine],ID,Commit,Begin,RegisterModel,Close,Where,Delete,Insert,Errorf,projectID,Cols,Get,NewSession,loadRepo,Update]
removeIssues removes all issues from the project board.
Out of context question: Does this query has a bug? `ProjectBoard` is atm the name for column from what I know #13802. It uses the `project_board_id` which in my understanding is the column number, which can exists in multiple boards. Shouldn't it be: `UPDATE project_issue SET project_board_id = 0, sorting = 0 WHERE project_board_id = ? AND project_id = ?`
@@ -484,6 +484,7 @@ func testAccAWSALBTargetGroupConfig_basic(targetGroupName string) string { vpc_id = "${aws_vpc.test.id}" deregistration_delay = 200 + slow_start = 0 stickiness { type = "lb_cookie"
[Wrapf,Meta,Test,Sprintf,TestCheckResourceAttrSet,TestMatchResourceAttr,New,TestCheckResourceAttr,RootModule,DescribeTargetGroups,ComposeTestCheckFunc,String,ComposeAggregateTestCheckFunc,Fatalf,Errorf,MustCompile,RandStringFromCharSet]
testAccAWSALBTargetGroupDestroy checks if the given target group is not already in TestAccAWSALBTargetGroupConfig_updatedPort returns a string that can be used.
We should not include the default value in all the test configurations. It should only be specified in either configurations that actually require the setting or if the default is overridden.
@@ -42,9 +42,15 @@ import org.springframework.util.StringUtils; */ public class ObjectToJsonTransformer extends AbstractTransformer { + public static enum ResultType { + STRING, NODE + } + public static final String JSON_CONTENT_TYPE = "application/json"; - private final JsonObjectMapper<?> jsonObjectMapper; + private final JsonObjectMapper<?, ?> jsonObjectMapper; + + private volatile ResultType resultType = ResultType.STRING; private volatile String contentType = JSON_CONTENT_TYPE;
[ObjectToJsonTransformer->[doTransform->[getHeaders,copyHeaders,putAll,build,toJson,getClass,withPayload,hasLength,populateJavaTypes,getPayload,containsKey,put],setContentType->[trim,notNull],IllegalArgumentException,newInstance,forName,getDefaultClassLoader,notNull,JacksonJsonObjectMapper,getClass,isTrue,isAssignableFrom]]
Provides a transformer that converts a single object into a JSON string. - ObjectMapper.
I think we should make this final and set it in a constructor - this is not something that people will want to change dynamically. We can also remove the deprecated constructor.
@@ -227,7 +227,7 @@ export function installAd(win) { // it is visible. if (inViewport) { this.unlistenViewportChanges_ = - this.getViewport().onChanged(this.sendAdIntersection_.bind(this)); + this.getViewport().onScroll(this.sendAdIntersection_.bind(this)); } else if (this.unlistenViewportChanges_) { this.unlistenViewportChanges_(); this.unlistenViewportChanges_ = null;
[No CFG could be retrieved]
Load an AMPAD and send it to the child iframe. Measure the layout box of the iframe and send an intersection message to the ad with intersection change.
Please correct if I'm wrong, but this will send an intersection signal on every scroll event? Is this what you want?
@@ -32,7 +32,7 @@ namespace System.Text.Json.Nodes /// </summary> /// <param name="properties">The properties to be added.</param> /// <param name="options">Options to control the behavior.</param> - public JsonObject(IEnumerable<KeyValuePair<string, JsonNode?>> properties, JsonNodeOptions? options = null) + public JsonObject(IEnumerable<KeyValuePair<string, JsonNode?>> properties, JsonNodeOptions? options = null) : base(options) { foreach (KeyValuePair<string, JsonNode?> node in properties) {
[JsonObject->[DebugView->[GetPath],GetItem->[TryGetPropertyValue],WriteTo->[WriteTo],GetPath->[GetPath]]]
JsonObject creates a new object that represents a collection of objects that contain the specified properties. Get the object from the given element.
This should probably route through `this` instead of directly to `base`
@@ -41,6 +41,7 @@ var ( redis = "docker.io/library/redis:alpine" registry = "docker.io/library/registry:2" infra = "k8s.gcr.io/pause:3.1" + labels = "quay.io/baude/alpine_labels:latest" defaultWaitTimeout = 90 )
[GetContainerStatus->[Podman,WaitWithDefaultTimeout,OutputToString],NumberOfRunningContainers->[Podman,WaitWithDefaultTimeout,OutputToStringArray],Cleanup->[Podman],RunLsContainer->[Podman,WaitWithDefaultTimeout,OutputToString],RestoreAllArtifacts->[RestoreArtifact],BuildImage->[Podman],LineInOutputContainsTag->[OutputToStringArray],CreateArtifact->[Podman],RunLsContainerInPod->[Podman,WaitWithDefaultTimeout,OutputToString],NumberOfPods->[Podman,WaitWithDefaultTimeout,OutputToStringArray],LineInOutputContains->[OutputToStringArray],PodmanAsUser->[MakeOptions],Podman->[PodmanAsUser],RunTopContainer->[Podman],RunTopContainerInPod->[Podman],NumberOfContainersRunning->[Podman,WaitWithDefaultTimeout,OutputToStringArray],PullImage->[Podman],CleanupPod->[Podman],CreatePod->[Podman,WaitWithDefaultTimeout,OutputToString],LineInOuputStartsWith->[OutputToStringArray],RestoreArtifact->[Podman],WaitWithDefaultTimeout->[OutputToString],NumberOfContainers->[Podman,WaitWithDefaultTimeout,OutputToStringArray],Podman,WaitWithDefaultTimeout,OutputToString]
Imports a variable number of non - terminal variables. TestLibpod is a simple struct for the test os.
I know it's just a test, but should there be a "baude" reference? Can we do something more generic?
@@ -51,7 +51,11 @@ func WatchRepo(userID, repoID int64, watch bool) (err error) { func getWatchers(e Engine, repoID int64) ([]*Watch, error) { watches := make([]*Watch, 0, 10) - return watches, e.Find(&watches, &Watch{RepoID: repoID}) + return watches, e.Where("`watch`.repo_id=?", repoID). + And("`user`.is_active=?", true). + And("`user`.prohibit_login=?", false). + Join("LEFT", "user", "`user`.id = `watch`.user_id"). + Find(&watches) } // GetWatchers returns all watchers of given repository.
[GetWatchers->[Limit,Where,Join,Find],InsertOne,Exec,Find,Delete,Insert,Errorf,Get]
IsWatching checks if user has watched given repository. notifyWatchers is a helper function that notify the user that a user has changed.
I don't think user who is `prohibit_login` will be moved from watchers UI.
@@ -173,13 +173,17 @@ class KnowledgeGraphField(Field[Dict[str, torch.Tensor]]): desired_num_entities, default_value=lambda: []) padded_arrays = [] + unpadded_lengths = [] for padded_entity in padded_entities: padded_array = indexer.pad_token_sequence(padded_entity, desired_num_entity_tokens, padding_lengths) padded_arrays.append(padded_array) + unpadded_lengths.append(len(padded_entity)) tensor = Variable(torch.LongTensor(padded_arrays), volatile=not for_training) + # tensor2 = Variable(torch.LongTensor(unpadded_lengths), volatile=not for_training) tensors[indexer_name] = tensor if cuda_device == -1 else tensor.cuda(cuda_device) + # tensors['token_lengths'] = tensor2 if cuda_device == -1 else tensor.cuda(cuda_device) linking_features = self._compute_linking_features(desired_num_entities, desired_num_utterance_tokens, cuda_device,
[KnowledgeGraphField->[count_vocab_items->[count_vocab_items],get_padding_lengths->[get_padding_lengths],empty_field->[KnowledgeGraphField]]]
Returns a tensor of the entities linking features and padding lengths.
Looks like these changes were probably from an earlier version of your code. I think you can remove all the changes you made to this file. (Sorry if I caused this during my rebase!)
@@ -68,6 +68,7 @@ class PyMatplotlib(PythonPackage): depends_on('libpng@1.2:') depends_on('freetype@2.3:') + patch('freetype-include-path.patch', when='@2.2.2:') # Patch to pick up correct freetype headers depends_on('py-numpy@1.6:', type=('build', 'run')) depends_on('py-dateutil@1.1:', type=('build', 'run'))
[PyMatplotlib->[set_backend->[join_path,walk,find,filter,InstallError,FileFilter],set_cc->[satisfies],depends_on,extends,run_before,version,variant,run_after]]
Creates a list of variants that can be used to create a single object. requires_on - >.
Since this has been merged upstream, it should probably be `when='@2.2.2'`, unless you can get it to apply to older versions as well.
@@ -243,7 +243,7 @@ function LocationHashbangUrl(appBase, hashPrefix) { }; this.$$parseLinkUrl = function(url, relHref) { - if(stripHash(appBase) == stripHash(url)) { + if (stripHash(appBase) == stripHash(url)) { this.$$parse(url); return true; }
[No CFG could be retrieved]
Creates a new object with the hashbang url and absUrl properties. parse link url.
BTW, why the `==` ?
@@ -7,7 +7,7 @@ from KratosMultiphysics import * import KratosMultiphysics.KratosUnittest as KratosUnittest import Kratos_Execute_Contact_Structural_Test as Execute_Test -# This utiltiy will control the execution scope in case we need to acces files or we depend +# This utiltiy will control the execution scope in case we need to access files or we depend # on specific relative locations of the files. # TODO: Should we move this to KratosUnittest?
[StructuralMechanichsTestFactory->[test_execution->[controlledExecutionScope],setUp->[controlledExecutionScope]]]
Initialize a new object with the current path and scope.
We can correct the "utiltiy" word if we correct access...
@@ -992,6 +992,7 @@ class Spec(object): self._dependents = DependencyMap() self._dependencies = DependencyMap() self.namespace = None + self._explicit_providers = {} self._hash = None self._build_hash = None
[colorize_spec->[insert_color],save_dependency_spec_yamls->[to_yaml,format,from_yaml,traverse,write],DependencySpec->[copy->[DependencySpec]],ConflictsInSpecError->[__init__->[format,tree]],SpecParser->[check_identifier->[format],spec->[_add_default_platform,_dup,_set_compiler,_add_versions,satisfies,Spec,_add_flag,spec_by_hash,dag_hash],version_list->[version],spec_from_file->[from_yaml,format],compiler->[version_list,_add_versions],do_parse->[copy]],parse->[SpecParser],AmbiguousHashError->[__init__->[format]],SpecBuildInterface->[ForwardQueryToPackage],FlagMap->[copy->[FlagMap]],InvalidDependencyError->[__init__->[format]],ArchSpec->[copy->[_dup],os->[platform],from_dict->[ArchSpec],satisfies->[_autospec],__init__->[_string_or_none],constrain->[_autospec,satisfies],target->[platform,target_or_none,target],_autospec->[ArchSpec]],Spec->[dependents_dict->[_find_deps],dep_difference->[traverse],from_node_dict->[valid_compiler_flags,Spec,from_dict,from_node_dict],dependencies_dict->[_find_deps],_evaluate_dependency_conditions->[Spec,satisfies],satisfies_dependencies->[_autospec,traverse,common_dependencies,satisfies],constrain->[_autospec,constrain,satisfies],_merge_dependency->[_replace_with,_find_provider,_add_dependency],_normalize_helper->[_evaluate_dependency_conditions,_merge_dependency],dep_string->[sorted_deps,format],eq_node->[_cmp_node],_add_dependency->[DependencySpec],to_record_dict->[to_node_dict,dag_hash],common_dependencies->[traverse],to_json->[to_dict],normalized->[copy,normalize],copy->[_dup],_set_architecture->[ArchSpec],from_dict->[read_yaml_dep_specs,from_node_dict],__contains__->[_autospec,traverse,satisfies],cformat->[copy,format],_dup->[DependencyMap,copy],_cached_hash->[_spec_hash],_find_provider->[satisfies],virtual_dependencies->[traverse],_replace_with->[_add_dependency],_cmp_key->[_cmp_node],_constrain_dependencies->[_autospec,copy,get_dependency,_add_dependency],ne_node->[_cmp_node],ne_dag->[eq_dag],old_format->[write,format,dag_hash],to_d
ict->[traverse,build_hash,to_node_dict,dag_hash],traverse_edges->[return_val->[DependencySpec],validate,return_val],dag_hash_bit_prefix->[dag_hash],__str__->[dep_string,format],_expand_virtual_packages->[copy,traverse,_dup,DependencyMap,_replace_with,feq],_add_default_platform->[platform,_set_architecture],sorted_deps->[flat_dependencies],format->[write_attribute->[write,dag_hash],write_attribute,write],eq_dag->[_eq_dag],colorized->[colorize_spec],tree->[prefix,traverse_edges,format,dependents_dict,dag_hash],__init__->[DependencyMap,_dup,FlagMap],from_json->[from_dict],constrained->[copy,constrain],__getitem__->[traverse,SpecBuildInterface],build_hash->[_cached_hash],satisfies->[_autospec,satisfies,dag_hash],_autospec->[Spec],normalize->[flat_dependencies,_normalize_helper,_mark_concrete],from_literal->[spec_builder->[spec_builder,spec_and_dependency_types,Spec,_add_dependency,name_and_dependency_types],spec_builder],dag_hash->[_cached_hash],to_yaml->[to_dict],_dup_deps->[copy,traverse_edges],_mark_concrete->[traverse],flat_dependencies->[copy,traverse],from_yaml->[from_dict],validate_or_raise->[traverse],dependencies->[_find_deps],concretize->[copy,traverse_edges,_expand_virtual_packages,traverse,dag_hash,satisfies,_concretize_helper],dependents->[_find_deps],_concretize_helper->[constrain],concretized->[copy,concretize],to_node_dict->[_cached_hash,to_dict,dependencies_dict],_eq_dag->[_eq_dag],index->[DependencyMap,traverse],_add_flag->[valid_compiler_flags],full_hash->[_cached_hash],from_detection->[Spec]],CompilerSpec->[copy->[copy],from_dict->[from_dict,CompilerSpec],__init__->[copy],constrain->[_autospec,satisfies],satisfies->[_autospec,satisfies],_autospec->[CompilerSpec],to_dict->[to_dict]],SpecLexer]
Initialize a new object with all the basic fields that are required by the Spec object. Check if there is a single spec in the string.
I don't think this should be a spec-level attribute. It should be an *edge* attribute in the DAG -- and I think it should also be parsed as one.
@@ -48,6 +48,13 @@ func (s *CfgMgmtServer) GetRuns( return runs, status.Errorf(codes.InvalidArgument, "Invalid start/end time. (format: YYYY-MM-DD)") } + projectFilters, err := filterByProjects(ctx, map[string][]string{}) + if err != nil { + return runs, status.Errorf(codes.Internal, err.Error()) + } + + nodeExistsChan := s.nodeExistsAsync(request.GetNodeId(), projectFilters) + bRuns, err := s.client.GetRuns( request.GetNodeId(), int(page),
[GetRuns->[GetRuns],GetInventoryNodes->[GetInventoryNodes],GetNodes->[GetNodes]]
GetRuns returns a list of runs that match the given filter Get a list of runs that have a node with the given uuid.
Running the node exists check concurrently with retrieving the runs.
@@ -19,7 +19,16 @@ import static org.mule.runtime.core.util.ExceptionUtils.createErrorEvent; import static org.mule.runtime.core.util.ExceptionUtils.putContext; import static org.slf4j.LoggerFactory.getLogger; import static reactor.core.publisher.Flux.from; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; + import org.mule.runtime.api.exception.MuleException; +import org.mule.runtime.api.interception.ProcessorInterceptor; import org.mule.runtime.api.lifecycle.InitialisationException; import org.mule.runtime.api.lifecycle.Startable; import org.mule.runtime.core.AbstractAnnotatedObject;
[AbstractMessageProcessorChain->[fireNotification->[fireNotification],setFlowConstruct->[setFlowConstruct,getMessageProcessorsForLifecycle],addMessageProcessorPathElements->[getMessageProcessors,addMessageProcessorPathElements],setMessagingExceptionHandler->[setMessagingExceptionHandler],setMuleContext->[getMessageProcessorsForLifecycle,setMuleContext],stop->[getMessageProcessorsForLifecycle],initialise->[getMessageProcessorsForLifecycle],start->[start,getMessageProcessorsForLifecycle],dispose->[getMessageProcessorsForLifecycle],toString->[toString]]]
Imports all methods of the Event interface. Imports all the components of the Mule package.
Mule imports go first.
@@ -1838,7 +1838,7 @@ namespace Kratos for (int i = 0; i < 4; i++) { //eqn 5.2.29 - const Vector vec1 = MathUtils<double>::CrossProduct(data.r_cartesian[i], s_xi); + const Vector vec1 = MathUtils<double>::CrossProduct(data.r_cartesian[i], s_xi); d_xi_i[i] = std::sqrt(inner_prod(vec1, vec1)); chi_xi_i[i] = d_xi_i[i] / l_xi;
[No CFG could be retrieved]
Matrix H = Matrix H_theta H_tv H_xi = Matrix H_xi - - - - - - - - - - - - - - - - - -.
this code should be (to my understanding) Vector vec1(3); MathUtils<double>::CrossProduct(vec1,s_xi,data.r_cartesian[i]); //changed the order to maintain the sign
@@ -64,7 +64,7 @@ module Engine min_price = stock_market.par_prices.map(&:price).min self.class::CORPORATIONS.map do |corporation| - Engine::G1824::Corporation.new( + Engine::Corporation.new( min_price: min_price, capitalization: self.class::CAPITALIZATION, **corporation.merge(corporation_opts),
[G1824->[operating_round->[new],init_corporations->[new,min,merge,class,map],ipo_name->[include?],setup->[coordinates,buy_train,end_with?,corporation_by_id,shift,place_token,select,each,next_token,hex_by_id,spend,map],init_round->[new],register_colors,freeze,load_from_json],require_relative]
Initialize corporations with the lowest price.
is this even necessary anymore
@@ -3,6 +3,10 @@ class AnnotationText < ApplicationRecord belongs_to :creator, class_name: 'User', foreign_key: :creator_id belongs_to :last_editor, class_name: 'User', foreign_key: :last_editor_id, optional: true + after_update :update_mark_deductions + before_update :check_if_released + before_destroy :check_if_released + # An AnnotationText has many Annotations that are destroyed when an # AnnotationText is destroyed. has_many :annotations, dependent: :destroy
[AnnotationText->[escape_content->[gsub],validates_associated,belongs_to,has_many]]
Annotations that are destroyed when an AnnotationText is destroyed.
This callback can prevent an annotation text from being destroyed or edited even if the annotation text's deduction is 0 because this callback is also used in the case the deduction is not zero, and should not allow update of an annotation with deduction 0 to some other value. Perhaps this callback should be augmented to allow deletion of an annotation text if the deduction value is currently 0 and updating of an annotation text _content_ (but not deduction) if the deduction value is zero and marks are released.
@@ -1356,7 +1356,14 @@ function show_actions_done($conf, $langs, $db, $filterobj, $objcon='', $noprint= elseif (is_object($filterobj) && get_class($filterobj) == 'Ticket') $sql.= ", ".MAIN_DB_PREFIX."ticket as o"; $sql.= " WHERE a.entity IN (".getEntity('agenda').")"; - if (is_object($filterobj) && in_array(get_class($filterobj), array('Societe', 'Client', 'Fournisseur')) && $filterobj->id) $sql.= " AND a.fk_soc = ".$filterobj->id; + + // Work with new table actioncomm_resources and multiple contact affectation. + if (is_object($objcon) && $objcon->id) + { + $sql.= " AND r.element_type = '" . $objcon->table_element . "'" . + " AND r.fk_element = " . $objcon->id; + } + elseif (is_object($filterobj) && in_array(get_class($filterobj), array('Societe', 'Client', 'Fournisseur')) && $filterobj->id) $sql.= " AND a.fk_soc = ".$filterobj->id; elseif (is_object($filterobj) && get_class($filterobj) == 'Project' && $filterobj->id) $sql.= " AND a.fk_project = ".$filterobj->id; elseif (is_object($filterobj) && get_class($filterobj) == 'Adherent') {
[show_actions_done->[fetch,free,query,getNomUrl,select_type_actions,liste_array,fetchResources,trans,num_rows,fetch_object,jdate,LibStatut,escape,showFilterAndCheckAddButtons,idate,order,load],getState->[transnoentitiesnoconv,query,trans,fetch_object,load],societe_admin_prepare_head->[trans],getFormeJuridiqueLabel->[fetch_object,query,trans,num_rows],show_subsidiaries->[query,getNomUrl,trans,num_rows,fetch_object],show_contacts->[fetch_object,setGenderFromCivility,getNomUrl,getBannerAddress,executeHooks,LibStatut,escape,fetch_clicktodial,fetch_optionals,multiSelectArrayWithCheckbox,query,fetch_name_optionals_label,getLibStatut,trans,num_rows,getOptionalsFromPost,selectarray,showFilterButtons,showphoto],getCountry->[free,query,transnoentitiesnoconv,trans,fetch_object,escape,load],societe_prepare_head->[query,trans,num_rows,getCustomerAccount,fetch_object,liste_contact,load],show_projects->[fetch,free,query,restrictedProjectArea,getLibStatut,trans,num_rows,fetch_object,jdate,load],show_addresses->[fetch_lines,getNomUrl,trans],currency_name->[query,trans,num_rows,fetch_object,load],societe_prepare_head2->[trans]]
shows all actions done related to actioncomm and actioncomm_resources related to agenda Condition on actioncomm_resources and multiple contact affectation. search for action in action list Get all user events in order to add them to a user list.
If you introduce a if here before the elseif, all conditions after, on $filterobj (for example filter on project), will not be added. Does this commit fix the trouble ? 7c1ed5b913edbe03fc687029030d08c0889ec0b6
@@ -126,11 +126,6 @@ module Discourse config.assets.paths += %W(#{config.root}/config/locales #{config.root}/public/javascripts) - if Rails.env == "development" || Rails.env == "test" - config.assets.paths << "#{config.root}/app/assets/javascripts/discourse/tests" - config.assets.paths << "#{config.root}/node_modules" - end - # Allows us to skip minifincation on some files config.assets.skip_minification = []
[Application->[config->[production?,database_config],pbkdf2_algorithm,new,register_mime_type,ember_location,register_postprocessor,paths,activate_plugins!,ignore,skip_multisite_middleware,atomic_ln_s,after_initialize,autoloader,load_path,lambda,skip_message_bus_middleware,skip_rails_failover_active_record_middleware,pbkdf2_iterations,insert_after,join,path_for_fonts,env,register_transformer,root,time_zone,plugin_initialization_guard,generators,autoload_paths,require,templates_root,initializer,clear_active_connections!,to_s,enabled,start_with?,relative_url_root,skip_minification,include?,new_redis_store,schema_format,swap,precompiled_asset_checker,each,encoding,match,present?,development?,try,delete,cache_store,image_optim,rack_cache,test_framework,public_path,on_load,variant,filter_parameters,exceptions_app,store,handlebars_location,version,precompile,extname,raw_template_namespace,require_dependency],new,configure!,concat,groups,skip_db,require,skip_redis,cdn_url,skip_redis?,test?,require_relative,production?,present?,puts,development?,load_defaults,skip_db?,exit,expand_path]
This is the main entry point for the package. It loads the autoloader paths and plugins. Adds JS files for all N-version assets.
This is a nice side effect, Sprockets might be snappier without watching `node_modules`.
@@ -424,8 +424,8 @@ public class TestBackgroundHiveSplitLoader HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); List<String> splits = drain(hiveSplitSource); - assertTrue(splits.stream().anyMatch(p -> p.contains(filePaths.get(0))), format("%s not found in splits %s", filePaths.get(0), splits)); - assertTrue(splits.stream().anyMatch(p -> p.contains(filePaths.get(2))), format("%s not found in splits %s", filePaths.get(2), splits)); + assertTrue(splits.stream().anyMatch(p -> p.contains(filePaths.get(1))), format("%s not found in splits %s", filePaths.get(0), splits)); + assertTrue(splits.stream().anyMatch(p -> p.contains(filePaths.get(5))), format("%s not found in splits %s", filePaths.get(2), splits)); deleteRecursively(tablePath, ALLOW_INSECURE); }
[TestBackgroundHiveSplitLoader->[TestingHdfsFileSystem->[listLocatedStatus->[next->[next],hasNext->[hasNext]]],locatedFileStatus->[locatedFileStatus],backgroundHiveSplitLoader->[backgroundHiveSplitLoader],table->[table]]]
Test splits generation with aborted transactions. This test fails if split generation for the table with original files fails.
apparently both `filePaths.get` calls should have the same index here
@@ -254,7 +254,7 @@ class StubConv(StubWeightBiasLayer): keras_layer.set_weights((self.weights[0].T, self.weights[1])) def size(self): - return self.filters * self.kernel_size * self.kernel_size + self.filters + return (self.input_channel * self.kernel_size * self.kernel_size + 1) * self.filters @abstractmethod def to_real_layer(self):
[StubConv->[export_weights_keras->[set_weights],import_weights_keras->[get_weights,set_weights]],StubGlobalPooling3d->[to_real_layer->[GlobalAvgPool3d]],StubDense->[export_weights_keras->[set_weights],import_weights_keras->[get_weights,set_weights]],StubGlobalPooling1d->[to_real_layer->[GlobalAvgPool1d]],to_real_keras_layer->[keras_dropout],set_stub_weight_to_torch->[export_weights],StubWeightBiasLayer->[export_weights_keras->[set_weights],import_weights->[set_weights],import_weights_keras->[get_weights,set_weights]],set_stub_weight_to_keras->[export_weights_keras],layer_width->[is_layer],StubGlobalPooling2d->[to_real_layer->[GlobalAvgPool2d]],TorchFlatten->[forward->[size]],set_torch_weight_to_stub->[import_weights],set_keras_weight_to_stub->[import_weights_keras],StubBatchNormalization->[import_weights->[set_weights]]]
Returns the size of the kernel and filters.
Why did we change this?
@@ -57,7 +57,14 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri public HoodieCreateHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable, String partitionPath, String fileId, SparkTaskContextSupplier sparkTaskContextSupplier) { - super(config, instantTime, partitionPath, fileId, hoodieTable, sparkTaskContextSupplier); + this(config, instantTime, hoodieTable, partitionPath, fileId, generateOriginalAndHoodieWriteSchema(config), + sparkTaskContextSupplier); + } + + public HoodieCreateHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T> hoodieTable, + String partitionPath, String fileId, Pair<Schema, Schema> originalAndHoodieSchema, + SparkTaskContextSupplier sparkTaskContextSupplier) { + super(config, instantTime, partitionPath, fileId, hoodieTable, originalAndHoodieSchema, sparkTaskContextSupplier); writeStatus.setFileId(fileId); writeStatus.setPartitionPath(partitionPath);
[HoodieCreateHandle->[close->[close],write->[write],canWrite->[canWrite]]]
Creates a HoodieCreateHandle object. Creates a HoodieCreateHandle for the given configuration.
if possible, can we rename `original` to be something more meaningful.. it does not really tell me what that schema is.. is it the schema the prev version of the file was written with
@@ -272,12 +272,11 @@ def renames(old, new): def is_local(path): """ - Return True if path is within sys.prefix, if we're running in a virtualenv. - - If we're not in a virtualenv, all paths are considered "local." + If we're not in a virtualenv, or path is in site.USER_BASE, then considered "local." + Else, return True if path is within sys.prefix """ - if not running_under_virtualenv(): + if not running_under_virtualenv() or path_in_userbase(path): return True return normalize_path(path).startswith(normalize_path(sys.prefix))
[has_leading_dir->[split_leading_dir],cache_download->[display_path],unzip_file->[has_leading_dir,split_leading_dir],splitext->[splitext],is_local->[normalize_path],dist_is_local->[is_local],get_terminal_size->[ioctl_GWINSZ],untar_file->[has_leading_dir,split_leading_dir],unpack_file->[unzip_file,splitext,untar_file,file_contents,is_svn_page],rmtree->[rmtree],dist_location->[egg_link_path],_Inf]
Checks if path is local or not.
user-site should definitely not be considered local in a no-site-packages virtualenv, but it seems that with this code it would.
@@ -39,7 +39,7 @@ import java.util.Objects; /** */ -public class LongMaxAggregatorFactory extends AggregatorFactory +public class LongMaxAggregatorFactory extends NullableAggregatorFactory { private final String name; private final String fieldName;
[LongMaxAggregatorFactory->[getMergingFactory->[getCombiningFactory],equals->[equals],getCombiningFactory->[LongMaxAggregatorFactory],hashCode->[hashCode],getRequiredColumns->[LongMaxAggregatorFactory]]]
Creates an AggregatorFactory that creates a LongMax object from a given object. Creates a LongMaxAggregatorFactory.
Could you please create SimpleLongAggregatorFactory to align with double and float? Maybe as a separate PR, which is merged quickly before this PR.
@@ -31,8 +31,10 @@ module Dependabot encoding = Dependabot::DependencyFile::ContentEncoding::BASE64 encoded_content = Base64.encode64(encoded_content) unless deleted end + + clean_base_directory = Pathname.new(base_directory).basename Dependabot::DependencyFile.new( - name: path, + name: path.sub("#{clean_base_directory}/", ""), content: encoded_content, directory: base_directory, deleted: deleted,
[VendorUpdater->[binary_file?->[strip,include?,exist?],updated_vendor_cache_files->[split,new,run_shell_command,sub,encode64,chdir,read,binary_file?,map],attr_reader,freeze],require]
Updates vendor cache files if there is a lease in the current vendor cache directory.
Is `basename` the right thing here? Looks like it just grabs the last component: `Pathname.new("dir/nested").basename` > `nested`. Should it instead be something like? ``` project_root = Pathname.new(File.expand_path(File.join(Dir.pwd, base_directory))) filename = Pathname.new(path).expand_path.relative_path_from(project_root) ```
@@ -34,7 +34,7 @@ class RXml(Package): url = "https://cran.r-project.org/src/contrib/XML_3.98-1.4.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/XML" - version('3.98-1', '1a7f3ce6f264eeb109bfa57bedb26c14') + version('3.98-1', 'aa373d6d301934e166e2a26a5ffe2a2e') extends('R')
[RXml->[install->[R,format],depends_on,version,extends]]
Provides two approaches for both reading and creating XML documents, both local and accessible via HTTP or FTP.
This should really be version `3.98-1.4`. The current latest version is `3.98-1.5`.
@@ -298,7 +298,7 @@ func (r *Registrar) writeRegistry() error { logp.Debug("registrar", "Write registry file: %s", r.registryFile) tempfile := r.registryFile + ".new" - f, err := os.Create(tempfile) + f, err := os.OpenFile(tempfile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err) return err
[Start->[loadStates],writeRegistry->[GetStates],GetStates->[GetStates]]
writeRegistry writes the current state of the registrar to a temporary file.
We could put these flags into a constant as we reuse them in several places or have a function in libbeat to open files which automatically applies flags and correct access rights.
@@ -236,9 +236,6 @@ class JSONRPCClient: """ Return the most recent block. """ return self.web3.eth.blockNumber - def inject_stop_event(self, event): - self.stop_event = event - def balance(self, account: Address): """ Return the balance of the account of given address. """ return self.web3.eth.getBalance(to_checksum_address(account), 'pending')
[JSONRPCClient->[poll->[block_number],send_transaction->[check_startgas,gasprice],__init__->[monkey_patch_web3],deploy_solidity_contract->[new_contract_proxy,get_transaction_receipt,dependencies_order_of_build,deploy_dependencies_symbols],check_startgas->[gaslimit]],monkey_patch_web3->[make_connection_test_middleware]]
Returns the block number of the current network.
Why was this removed?
@@ -308,7 +308,7 @@ func (consensus *Consensus) checkViewID(msg *FBFTMessage) error { consensus.current.SetViewID(msg.ViewID) consensus.LeaderPubKey = msg.SenderPubkey consensus.ignoreViewIDCheck = false - consensus.consensusTimeout[timeoutConsensus].Start() + consensus.timeouts.consensus.Start() utils.Logger().Debug(). Uint64("viewID", consensus.viewID). Str("leaderKey", consensus.LeaderPubKey.SerializeToHexStr()[:20]).
[verifyViewChangeSenderKey->[IsValidatorInCommittee],SetMode->[SetMode],verifySenderKey->[IsValidatorInCommittee],checkViewID->[SetViewID,SetMode],Mode->[Mode],getLogger->[Mode,String],UpdateConsensusInformation->[getLogger,UpdatePublicKeys,SetEpochNum,getLeaderPubKeyFromCoinbase],signConsensusMessage->[signMessage]]
checkViewID checks if the given message is valid for the current viewID.
Better add a func for updating the leaderPubKey with the logic to update IsLeader. There are many places LeaderPubKey is updated but without updating the IsLeader.
@@ -1587,9 +1587,6 @@ func (c *Container) cleanupStorage() error { if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown || errors.Cause(err) == storage.ErrLayerNotMounted { logrus.Errorf("Storage for container %s has been removed", c.ID()) } else { - if cleanupErr != nil { - logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), cleanupErr) - } cleanupErr = err } }
[chownVolume->[save],pause->[save],handleExitFile->[bundlePath],cleanup->[cleanupStorage,cleanupRuntime],restartWithTimeout->[ensureState,init,stop,start,reinit],CheckpointPath->[bundlePath],prepareCheckpointExport->[bundlePath,writeJSONFile],AttachSocketPath->[AttachSocketPath],syncContainer->[ensureState],setupStorage->[setupStorageMapping],cleanupRuntime->[save,ensureState,removeConmonFiles],getAllDependencies->[getAllDependencies],initAndStart->[init,ensureState,reinit],saveSpec->[bundlePath],mountNamedVolume->[save],writeJSONFile->[bundlePath],cleanupStorage->[save],checkReadyForRemoval->[ensureState],removeConmonFiles->[bundlePath,exitFilePath],start->[save],reinit->[init,cleanupRuntime],unpause->[save],init->[completeNetworkSetup,save,removeConmonFiles],stop->[waitForExitFileAndSync,save],isStopped->[syncContainer,ensureState],teardownStorage->[ensureState],ControlSocketPath->[bundlePath],prepareToStart->[ensureState],postDeleteHooks->[bundlePath],completeNetworkSetup->[syncContainer],waitForExitFileAndSync->[exitFilePath],checkExitFile->[handleExitFile,exitFilePath,ensureState]]
cleanupStorage unmounts all the storage volumes and mounts in the container. This function unmounts the volume and saves the container state.
It seems strange to log when we ignore the error and ignore the error here.
@@ -1136,6 +1136,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { n, events, logs, err := bc.insertChain(chain) bc.PostChainEvents(events, logs) if err == nil { + // TODO: incorporate these into insertChain for idx, block := range chain { header := block.Header() header.Logger(utils.Logger()).Info().
[updateCXReceiptsCheckpoints->[NextCXReceiptsCheckpoint],IsSameLeaderAsPreviousBlock->[GetHeaderByNumber],ReadTxLookupEntry->[ReadTxLookupEntry],GetBlockByNumber->[GetBlock],GetHeaderByNumber->[GetHeaderByNumber],ReadEpochVrfBlockNums->[ReadEpochVrfBlockNums],WriteCXReceiptsProofSpent->[WriteCXReceiptsProofSpent],GetBlockByHash->[GetBlock],WriteShardState->[WriteShardState],HasBlockAndState->[HasState],GetUnclesInChain->[GetBlock],Stop->[CurrentBlock],InsertReceiptChain->[Reset,CurrentFastBlock,HasBlock],GasLimit->[GasLimit],GetTdByHash->[GetTdByHash],update->[Stop,procFutureBlocks],GetVrfByNumber->[GetHeaderByNumber],ReadShardLastCrossLink->[ReadShardLastCrossLink],State->[CurrentBlock],reorg->[insert,GetBlock],GetTd->[GetTd],SetHead->[loadLastState,SetHead],insertChain->[HasState,GetBlock,CurrentBlock,insertChain,Validator,WriteBlockWithState],CurrentHeader->[CurrentHeader],ReadShardState->[ReadShardState],reportBlock->[addBadBlock],ReadCXReceipts->[ReadCXReceipts],WriteBlockWithState->[insert,CurrentBlock,ShardID],ShardID->[ShardID],DeleteCrossLinks->[ShardID],ResetWithGenesisBlock->[SetHead],ReadLastCommits->[ReadLastCommits],WriteCXReceipts->[WriteCXReceipts],CXMerkleProof->[ShardID,ReadCXReceipts],ReadEpochVdfBlockNum->[ReadEpochVdfBlockNum],GetHeaderByHash->[GetHeaderByHash],GetBlockHashesFromHash->[GetBlockHashesFromHash],WriteShardStateBytes->[WriteShardStateBytes],IsSpent->[LatestCXReceiptsCheckpoint],UpdateCXReceiptsCheckpointsByBlock->[updateCXReceiptsCheckpoints],WriteLastCommits->[WriteLastCommits],WriteCrossLinks->[ShardID],InsertChain->[ShardID],HasHeader->[HasHeader],WriteEpochVrfBlockNums->[WriteEpochVrfBlockNums],InsertHeaderChain->[InsertHeaderChain],GetHeader->[GetHeader],GetAncestor->[GetAncestor],GetBlocksFromHash->[GetBlock],WriteShardLastCrossLink->[ShardID,WriteShardLastCrossLink],WriteEpochVdfBlockNum->[WriteEpochVdfBlockNum],GetVdfByNumber->[GetHeaderByNumber],Export->[CurrentBlock],GetShardState->[ReadShardState,WriteShardState],Rollback
->[CurrentBlock,GetBlock,CurrentFastBlock]]
InsertChain inserts a new chain into the BlockChain. Deletes all the crossLinks in the shard.
any impact? can you open a ticket on this?
@@ -410,9 +410,7 @@ const VideoLayout = { // Enable the currently set dominant speaker. if (currentDominantSpeaker) { - if (smallVideo && smallVideo.hasVideo()) { - this.updateLargeVideo(currentDominantSpeaker); - } + this.updateLargeVideo(currentDominantSpeaker); } else { // if there is no currentDominantSpeaker, it can also be // that local participant is the dominant speaker
[No CFG could be retrieved]
Handles the click on a participant. Adds a participant container for a given user and small video instance.
In updateLargeVideo there are some smallVideo operations which are not protected, which were by this if is that ok? Not sure also whether if not smallVideo.hasVideo() then is smallVideo.videoStream defined.
@@ -36,7 +36,7 @@ class SubversionRepositoryTest < Test::Unit::TestCase should "be able to open an existing Subversion repository" do SubversionRepository.create(TEST_REPO) repo = SubversionRepository.open(TEST_REPO) - assert_not_nil(repo, "Cannot open supversion repository") + assert_not_nil(repo, "Cannot open subversion repository") assert_instance_of(Repository::SubversionRepository, repo, "Repository is of wrong type")
[SubversionRepositoryTest->[add_file_helper->[add,commit,read,get_transaction],add_some_files_helper->[read,get_transaction,add,each,commit],create,closed?,new,set_bulk_permissions,shift,last_modified_date,assert_raises,cp,join,remove,remove_dir,mkdir_p,assert_raise,should,expand_path,add_file_helper,stringify_files,nil?,add_some_files_helper,rm,exists?,get_permissions,teardown,assert_equal,add,download_as_string,replace,get_revision,remove_file,revision_number,add_path,get_revision_by_timestamp,get_users,exist?,assert_not_nil,commit,assert_nil,add_user,sort,dirname,export,intern,assert_not_equal,rjust,read,get_transaction,now,close,open,each,push,delete,delete_bulk_permissions,context,get_latest_revision,path_exists?,remove_user,files_at_path,set_permissions,get_class,assert,setup,repository_exists?,assert_instance_of,access],include,dirname,join,require,expand_path]
Test suite for testing proper functioning of a Subversion repository. End of SubversionRepository tests.
Prefer single-quoted strings when you don't need string interpolation or special symbols.
@@ -0,0 +1,8 @@ +package statistics + +// HotRegionsStat records all hot regions statistics +type HotRegionsStat struct { + TotalFlowBytes uint64 `json:"total_flow_bytes"` + RegionsCount int `json:"regions_count"` + RegionsStat []HotPeerStat `json:"statistics"` +}
[No CFG could be retrieved]
No Summary Found.
need a license for new files
@@ -41,10 +41,12 @@ public final class UdtfUtil { final List<Expression> functionArgs = functionCall.getArguments(); - final List<SqlType> argTypes = functionArgs.isEmpty() - ? ImmutableList.of(FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA) - : functionArgs.stream().map(expressionTypeManager::getExpressionSqlType) - .collect(Collectors.toList()); + final List<SqlArgument> argTypes = functionArgs.isEmpty() + ? ImmutableList.of(SqlArgument.of(FunctionRegistry.DEFAULT_FUNCTION_ARG_SCHEMA)) + : new ArrayList<>(); + for (final Expression e : functionArgs) { + argTypes.add(SqlArgument.of(expressionTypeManager.getExpressionSqlType(e))); + } return functionRegistry.getTableFunction( functionCall.getName(),
[UdtfUtil->[resolveTableFunction->[getArguments,of,getTableFunction,getName,toList,ExpressionTypeManager,collect,isEmpty]]]
Resolve a table function given a sequence of arguments.
Same as above, for a minute I couldn't figure out how to get the additional `map(arg -> sqlArgument.of(arg.sqlType())` to work, but we should be able to add that to the original implementation and avoid the for loop
@@ -10,7 +10,6 @@ namespace System.DirectoryServices.Protocols private static void PALCertFreeCRLContext(IntPtr certPtr) { /* No op */ } private bool _secureSocketLayer; - public bool SecureSocketLayer { get
[LdapSessionOptions->[SetBoolValueHelper->[_ldapHandle,CheckAndSetLdapError,Name,SetBoolOption,_disposed],GetBoolValueHelper->[_ldapHandle,GetBoolOption,CheckAndSetLdapError,Name,_disposed],SetPtrValueHelper,nameof,ToInt32,None,ReferralChasingOptionsNotSupported,Name,All,LDAP_OPT_VERSION,LDAP_OPT_REFERRALS,GetBoolValueHelper,SetBoolValueHelper,_disposed]]
Creates an object that represents the LdapSessionOptions object that can be used to configure the LDAP session. Sets a boolean value for the given option.
NIT: do not remove the extra line here.
@@ -43,8 +43,8 @@ * warning in sanity(...) with some newer compilers */ static char *mount_dir = "/tmp"; -static uint64_t max_read_size; -static uint64_t max_iov_read_size; +static uint64_t max_read_size = 4096; +static uint64_t max_iov_read_size = 4096; #define BUF_SIZE 4096
[bool->[CU_ASSERT_EQUAL,write,close,open,CU_ASSERT_GOTO],main->[CU_get_number_of_failures,CU_add_test,CU_initialize_registry,CU_get_error,printf,CU_cleanup_registry,CU_basic_run_tests,CU_add_suite,CU_basic_set_mode],void->[lseek,writev,do_large_read,printf,CU_ASSERT_STRING_EQUAL,CU_ASSERT_NOT_EQUAL,dup,CU_ASSERT_GOTO,readv,dfuse_get_bypass_status,memcmp,fsync,CU_ASSERT_PTR_NOT_EQUAL,malloc,calloc,CU_ASSERT_NOT_EQUAL_FATAL,CU_ASSERT_PTR_NOT_EQUAL_FATAL,CU_ASSERT,fread,stat,CU_ASSERT_EQUAL_FATAL,fcntl,mmap,fdatasync,do_large_write,read,close,open,pwrite,CU_ASSERT_PTR_NOT_NULL,free,CU_ASSERT_EQUAL,CU_ASSERT_NSTRING_EQUAL,munmap,write,memset,fclose,pread,dup2,fdopen],sanity->[free,do_large_io_test,unlink,do_misc_tests,fflush,asprintf,CU_ASSERT_NOT_EQUAL_FATAL,do_read_tests,open,do_write_tests,CU_ASSERT_PTR_NOT_NULL]]
This function is exported to the user. Reads and writes len bytes into the file.
Was this intentional? I think they were supposed to be set to zero, however this is also test code that we don't execute currently so I'm not sure.
@@ -451,12 +451,6 @@ public class TripleAPlayer extends AbstractHumanPlayer<TripleAFrame> implements } final BattleListing battles = battleDel.getBattles(); if (battles.isEmpty()) { - final IBattle battle = battleDel.getCurrentBattle(); - if (battle != null) { - // this should never happen, but it happened once.... - System.err.println("Current battle exists but is not on pending list: " + battle.toString()); - battleDel.fightCurrentBattle(); - } return; } if (!soundPlayedAlreadyBattle) {
[TripleAPlayer->[move->[move],pickTerritoryAndUnits->[pickTerritoryAndUnits],tech->[tech],userActions->[userActions],selectTerritoryForAirToLand->[selectTerritoryForAirToLand],selectKamikazeSuicideAttacks->[selectKamikazeSuicideAttacks],selectUnitsQuery->[selectUnitsQuery],selectFixedDice->[selectFixedDice],politics->[politics],start->[start],scrambleUnitsQuery->[scrambleUnitsQuery],place->[canAirLand],purchase->[purchase],acceptAction->[acceptAction]]]
Checks if a battle is currently in the battle list. Returns early if the list is empty.
This looks like it was an attempt to fix this issue in the past but now this should never occur anymore and this only fixed a small portion of the cases anyways.
@@ -267,6 +267,10 @@ func (t TeamSigChainState) AssertWasRoleOrAboveAt(uv keybase1.UserVersion, return mkErr("%v role point not found", role) } +func (t TeamSigChainState) AssertWasBotAt(uv keybase1.UserVersion, scl keybase1.SigChainLocation) (err error) { + return t.AssertWasRoleOrAboveAt(uv, keybase1.TeamRole_BOT, scl) +} + func (t TeamSigChainState) AssertWasReaderAt(uv keybase1.UserVersion, scl keybase1.SigChainLocation) (err error) { return t.AssertWasRoleOrAboveAt(uv, keybase1.TeamRole_READER, scl) }
[GetAllUVs->[getUserRole],GetLatestUVWithUID->[getUserRole,DeepCopy],DeepCopyToPtr->[DeepCopy],GetLibkbLinkIDBySeqno->[GetLinkIDBySeqno],GetAdminUserLogPoint->[GetUserLogPoint],GetUsersWithRole->[getUserRole],obsoleteInvites->[findAndObsoleteInviteForUser],roleUpdatesDemoteOwners->[GetUserRole],updateInvites->[informNewInvite,informCanceledInvite],completeInvites->[informCompletedInvite],assertBecameAdminAt->[GetID],GetLatestLibkbLinkID->[GetLatestLinkID],informSubteam->[getLastSubteamPoint],AssertWasReaderAt->[AssertWasRoleOrAboveAt],AssertWasAdminAt->[AssertWasRoleOrAboveAt],roleUpdateChangedHighSet->[GetUserRole],addInnerLink->[GetParentID,FindActiveInviteByID,IsPublic,GetLatestUVWithUID,IsImplicit,informSubteamDelete,getUserRole,GetLatestPerTeamKey,informSubteam,inform,IsSubteam,GetUserRoleAtSeqno,FindActiveKeybaseInvite],AssertWasRoleOrAboveAt->[GetID],GetUserLogPoint->[DeepCopy],informSubteamDelete->[getLastSubteamPoint],checkSeqnoToAdd->[IsLinkFilled,GetLatestSeqno],DeepCopy->[DeepCopy],AssertWasWriterAt->[AssertWasRoleOrAboveAt],GetUsersWithRoleOrAbove->[getUserRole],appendChainLinkHelper->[DeepCopy],inform->[getUserRole],updateMembership->[inform],GetAllUVsWithUID->[getUserRole],GetLibkbLinkIDBySeqno,GetLatestSeqno]
AssertWasRoleOrAboveAt asserts that the given user version held at least the given role at the given sigchain location. AssertWasWriterAt is like AssertWasRoleOrAboveAt, but asserts that the user was at least a writer.
this and AssertWasReaderAt are unused
@@ -57,10 +57,17 @@ from raiden.transfer.state_change import ( ContractReceiveChannelSettled, ContractReceiveChannelWithdraw, ReceiveTransferDirect, + ReceiveUnlock, ) from raiden.utils import publickey_to_address, typing from raiden.settings import DEFAULT_NUMBER_OF_CONFIRMATIONS_BLOCK +# This should be changed to `Union[str, MerkleTreeState]` +MerkletreeOrError = typing.Tuple[bool, typing.Optional[str], typing.Any] +EventsOrError = typing.Tuple[bool, typing.List[Event], typing.Any] +BalanceProofData = typing.Tuple[typing.Locksroot, typing.Nonce, typing.TokenAmount] +SendUnlockAndMerkleTree = typing.Tuple[SendBalanceProof, MerkleTreeState] + TransactionOrder = namedtuple( 'TransactionOrder',
[register_secret_endstate->[is_lock_locked],handle_block->[is_deposit_confirmed,get_status],send_refundtransfer->[create_sendlockedtransfer],handle_send_directtransfer->[get_status,send_directtransfer,get_distributable],send_directtransfer->[create_senddirecttransfer],handle_channel_withdraw->[_del_lock,compute_proof_for_lock,is_lock_pending,get_lock,register_secret,get_status],handle_receive_refundtransfer->[handle_receive_lockedtransfer],get_distributable->[get_amount_locked,get_balance],create_sendlockedtransfer->[get_next_nonce,compute_merkletree_with,get_amount_locked,get_distributable,get_status],handle_channel_closed->[get_known_unlocks,get_status,set_closed],handle_channel_newbalance->[is_transaction_confirmed],create_senddirecttransfer->[get_next_nonce,get_status,get_amount_locked,get_distributable],send_unlock->[create_unlock,get_lock,_del_lock],is_valid_lockedtransfer->[is_valid_signature],create_unlock->[get_next_nonce,get_amount_locked,compute_merkletree_without,is_lock_pending,get_status],send_lockedtransfer->[create_sendlockedtransfer],is_valid_directtransfer->[is_valid_signature],handle_receive_secretreveal->[register_secret],handle_channel_settled->[set_settled],handle_receive_directtransfer->[get_current_balanceproof,is_valid_directtransfer],events_for_close->[get_status],_del_lock->[is_lock_pending],state_transition->[handle_channel_withdraw,handle_action_close,handle_block,handle_channel_closed,handle_channel_newbalance,handle_receive_directtransfer,handle_send_directtransfer,handle_channel_settled],handle_action_close->[events_for_close],handle_unlock->[_del_lock,is_valid_unlock],register_secret->[register_secret_endstate],is_valid_unlock->[is_valid_signature],handle_receive_lockedtransfer->[is_valid_lockedtransfer]]
This function is used to provide a common interface between raiden and raiden-core. Returns whether a lock is currently locked or not.
When should that be done? Maybe add a `TODO` as well.
@@ -1271,6 +1271,8 @@ namespace System case CorElementType.ELEMENT_TYPE_R8: result = GenericLastIndexOf<double>(array, value, adjustedIndex, count); break; + + result = GenericBinarySearch<IntPtr>(array, value, adjustedIndex, count); default: Debug.Fail("All primitive types should be handled above"); break;
[Array->[IndexOf->[IndexOf,GetValue,Equals,Add],Clear->[Clear],GetValue->[GetValue],LastIndexOf->[GetValue,Equals,LastIndexOf,Add],GetHashCode->[GetValue,Add,GetHashCode],CopyTo->[Copy,CopyTo],Reverse->[SetValue,Reverse,GetValue,Add],BinarySearch->[GetMedian,GetValue,BinarySearch],Copy->[Copy],FindLastIndex->[FindLastIndex],Sort->[Add,Sort],Equals->[Equals,GetValue],SorterObjectArray->[IntroSort->[IntroSort,SwapIfGreater],Heapsort->[Swap],PickPivotAndPartition->[Swap,SwapIfGreater]],FindIndex->[FindIndex],SetValue->[SetValue],FindAll->[Add],SorterGenericArray->[IntroSort->[IntroSort,PickPivotAndPartition,InsertionSort,Heapsort,SwapIfGreater],PickPivotAndPartition->[Swap,GetValue,SwapIfGreater],DownHeap->[SetValue,GetValue],InsertionSort->[SetValue,GetValue],IntrospectiveSort->[IntroSort],Sort->[IntrospectiveSort],Swap->[SetValue,GetValue],Heapsort->[Swap,DownHeap],SwapIfGreater->[SetValue,GetValue]],CompareTo->[GetValue],SetValue,GetValue]]
This method returns the last index of a value in the array that is greater than or equal to the given index. This method returns the index of the last occurrence of an element in the array.
There is some dead code here, likely just an issue in undoing the previous change
@@ -244,6 +244,18 @@ public class RedisQueueMessageDrivenEndpoint extends MessageProducerSupport { RedisQueueMessageDrivenEndpoint.this.popMessageAndSend(); } } + catch (NonTransientDataAccessException e) { + if (RedisQueueMessageDrivenEndpoint.this.active) { + if (e instanceof RedisConnectionFailureException || e instanceof RedisSystemException) { + RedisQueueMessageDrivenEndpoint.this.listening = false; + RedisQueueMessageDrivenEndpoint.this.sleepBeforeRecoveryAttempt(); + } + throw e; + } + else { + logger.error(e); + } + } finally { if (RedisQueueMessageDrivenEndpoint.this.active) { RedisQueueMessageDrivenEndpoint.this.restart();
[RedisQueueMessageDrivenEndpoint->[onInit->[onInit],setErrorChannel->[setErrorChannel],ListenerTask->[run->[popMessageAndSend,restart]]]]
This method is called by the application. It will run the thread.
I don't think this test is necessary. Thinking about it further, I think we should always apply the delay - for **any** `Exception`. For example, if the connection factory throws an `NPE` for some reason, we would go into a tight CPU loop. We should also log all exceptions.
@@ -537,6 +537,7 @@ public class Read { while (tracker.tryClaim(out) && out[0] != null) { receiver.outputWithTimestamp( new ValueWithRecordId<>(out[0].getValue(), out[0].getId()), out[0].getTimestamp()); + watermarkEstimator.setWatermark(tracker.currentRestriction().getWatermark()); } UnboundedSourceRestriction<OutputT, CheckpointT> currentRestriction =
[Read->[Unbounded->[populateDisplayData->[populateDisplayData]],UnboundedSourceAsSDFWrapperFn->[processElement->[tryClaim,getSource,currentRestriction],setUp->[restrictionCoder],UnboundedSourceAsSDFRestrictionTracker->[trySplit->[currentRestriction,create,getSource,getCheckpoint,createReader,createCacheKey],getProgress->[from,createReader,getSource,getCheckpoint],currentRestriction->[getCurrentSource,create,createReader,getCheckpointMark,getWatermark,ensureTimestampWithinBounds,close],tryClaim->[getCurrentTimestamp,create,close,start,getSource,getCheckpoint,getCurrent,createReader,advance,createCacheKey],createCacheKey->[create]],UnboundedSourceRestrictionCoder->[decode->[create,decode],verifyDeterministic->[verifyDeterministic],encode->[getSource,getCheckpoint,getWatermark,encode]],EmptyUnboundedSource->[EmptyUnboundedSource],newWatermarkEstimator->[ensureTimestampWithinBounds],splitRestriction->[getSource]],OutputSingleSource->[populateDisplayData->[populateDisplayData]],Bounded->[populateDisplayData->[populateDisplayData]]]]
Process an element of the unbounded source.
This feels a little expensive. Underneath it calls UnboundedReader#getCheckpointMark, which is supposed to be called (mostly) before checkpoint, not for each element.
@@ -142,7 +142,10 @@ public class DefaultFullHttpRequest extends DefaultHttpRequest implements FullHt @Override public FullHttpRequest replace(ByteBuf content) { - return new DefaultFullHttpRequest(protocolVersion(), method(), uri(), content, headers(), trailingHeaders()); + FullHttpRequest request = new DefaultFullHttpRequest(protocolVersion(), method(), uri(), content, + HttpHeaders.copy(headers()), HttpHeaders.copy(trailingHeaders())); + request.setDecoderResult(decoderResult()); + return request; } @Override
[DefaultFullHttpRequest->[copy->[copy],duplicate->[duplicate],toString->[toString],setUri->[setUri],release->[release],equals->[content,trailingHeaders,equals],refCnt->[refCnt],retain->[retain],hashCode->[hashCode,refCnt],touch->[touch],retainedDuplicate->[retainedDuplicate],setProtocolVersion->[setProtocolVersion],replace->[DefaultFullHttpRequest,trailingHeaders],setMethod->[setMethod]]]
This method replaces the content of this request with a new one.
Also we missed this before :(
@@ -30,12 +30,7 @@ The following args are useful to tweak to fit your specific needs; """ -defaults = [ - {"mephisto/blueprint": BLUEPRINT_TYPE}, - {"mephisto/architect": "local"}, - {"mephisto/provider": "mock"}, - {"conf": "example"}, -] +defaults = ["_self_", {"conf": "example"}] @dataclass
[main->[validate_and_run_config,Operator,print,wait_for_runs_then_shutdown,load_db_and_process_config],ScriptConfig->[field],main,register_script_config]
The main function for the crowd - conn - n - n - t The main function for the script.
(the same lint error as above appears here)
@@ -230,6 +230,7 @@ func loadBranches(ctx *context.Context) []*Branch { CommitsAhead: divergence.Ahead, CommitsBehind: divergence.Behind, LatestPullRequest: pr, + MergeMovedOn: mergeMovedOn, } }
[CanWrite,Warn,CreateNewBranchFromCommit,PushUpdate,PathEscapeSegments,GetCommit,Redirect,AllowsPulls,DeleteBranch,GetBranchCommit,GetBranches,GetProtectedBranches,HTML,Error,GetErrMsg,NotFound,CanCreateBranch,LoadUser,LoadIssue,QueryInt64,GetDeletedBranchByID,JSON,HasError,Tr,ServerError,Contains,GetDeletedBranches,CreateNewBranch,GetLatestPullRequestByHeadInfo,IsBranchExist,IsErrBranchAlreadyExists,RemoveDeletedBranch,BranchNameSubURL,Query,AddDeletedBranch,CountDivergingCommits,IsErrBranchNameConflict,IsProtectedBranch,String,IsErrTagAlreadyExists,CreateBranch,Success]
get all branch objects that are not protected and have no commit in it. CreateBranch creates new branch in repository.
Maybe this is an edge case and is covered elsewhere but, perhaps if we've got `ErrReferenceNotFound` we should hide the `new PR` button as well?
@@ -32,7 +32,7 @@ define([ * * @exception {DeveloperError} Element with id "container" does not exist in the document. */ - function InfoBox(container) { + function InfoBox(container, allowScripts) { //>>includeStart('debug', pragmas.debug); Check.defined('container', container); //>>includeEnd('debug')
[No CFG could be retrieved]
A class that displays a single check object. Adds the camera icon to the infoBox.
We would normally take an options object here instead for future proofing, but I would just remove this option, since we're not using it anywhere internally and it doesn't add much.
@@ -115,7 +115,7 @@ func userVersionsToDetails(ctx context.Context, g *libkb.GlobalContext, uvs []ke for i, uv := range uvs { uids[i] = uv.Uid } - packages, err := g.UIDMapper.MapUIDsToUsernamePackages(ctx, g, uids, 10*time.Minute, 0, true) + packages, err := g.UIDMapper.MapUIDsToUsernamePackages(ctx, g, uids, 10*time.Minute, 0, forceRepoll) if err != nil { return nil, err }
[MapUIDsToUsernamePackages,GetProofSet,UploadImage,SeitanIKeyV2,RunEngine2,CTimeTracer,myRole,WithUID,IsOpen,ChangeMembership,Post,New,InviteMember,deleteSubteam,AsTeam,Time,NewMetaContext,ImplicitAdmins,OpenTeamJoinAs,IsSubteam,TeamInviteTypeFromString,Stage,Generation,Split,WithPublicKeyOptional,FindActiveInvite,Finish,UserVersionByUID,notify,CanSkipKeyRotation,KeybaseUserVersion,FindKID,GenerateSignature,IsMember,Exists,Leave,GetBool,Now,PostTeamSettings,GetLastUserLogPointWithPredicate,GetError,Add,FindNextMerkleRootAfterTeamRemoval,GenerateAcceptanceKey,MemberRole,GetNormalizedName,ImplicitTeamDisplayName,CTrace,GetUPAKLoader,KBFSTLFID,InviteSeitan,IsWriterOrAbove,AddMemberByUsername,ToLower,InviteSeitanV2,NewLoadUserArg,IsTeamName,HasActiveInvite,WithTimeout,ResolveNameToIDUntrusted,chain,ToTeamName,Rotate,ChangeMembershipWithOptions,ToTime,ReAddMemberToImplicitTeam,Members,WithForcePoll,ResolveFullExpressionNeedUsername,FieldsFunc,UsersWithRole,Eq,GenerateTeamInviteID,GetUsername,NewLoadUserArgWithMetaContext,CDebugf,CTraceTimed,TeamID,WithNetContext,currentUserUV,FindActiveKeybaseInvite,TrimSpace,IsPublic,TeamInviteName,NewNormalizedUsername,Equal,ParseAddress,TeamInviteIDFromString,Name,ToTeamID,InviteEmailMember,postTeamInvites,AtKey,GetDecode,Sprintf,LoadV2,GenerateSIKey,String,AssociateWithTLFID,ToUserVersion,GetRunMode,GetUID,RootAncestorName,WithName,CWarningf,Delete,NormalizedUsername,NewHTTPArgs,ExportToTeamPlusApplicationKeys,CompleteSocialInvitesFor,IsRootTeam,deleteRoot,IsNil,SeitanIKey,Ctx,Errorf,NewResolveThenIdentify2,ResolveWithBody,IsOrAbove,GetTeamLoader,Unix,IsTeamID,NewAPIArgWithNetContext,AsUserOrTeam,TeamNameFromString,F,parseSocial,GetNormalizedUsername,G,GetActiveAndObsoleteInvites,Load]
ImplicitAdmins returns a list of admins of the specified team. SetRoleOwner sets the owner and admin role for the given teamname.
or was this intentional?
@@ -923,7 +923,7 @@ def _get_inverse_affine_matrix( # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1 rot = math.radians(angle) - sx, sy = [math.radians(s) for s in shear] + sx, sy = (math.radians(s) for s in shear) cx, cy = center tx, ty = translate
[resized_crop->[resize,crop],autocontrast->[autocontrast],adjust_brightness->[adjust_brightness],vflip->[vflip],five_crop->[get_image_size,center_crop,crop],pad->[pad],rgb_to_grayscale->[to_grayscale,rgb_to_grayscale],crop->[crop],convert_image_dtype->[convert_image_dtype],adjust_gamma->[adjust_gamma],resize->[resize,_interpolation_modes_from_int],center_crop->[get_image_size,pad,crop],adjust_saturation->[adjust_saturation],adjust_sharpness->[adjust_sharpness],adjust_hue->[adjust_hue],scale->[resize],gaussian_blur->[to_pil_image,to_tensor,gaussian_blur],invert->[invert],solarize->[solarize],get_image_num_channels->[get_image_num_channels],posterize->[posterize],to_tensor->[_is_numpy,_is_numpy_image],perspective->[perspective,_interpolation_modes_from_int,_get_perspective_coeffs],get_image_size->[get_image_size],hflip->[hflip],affine->[get_image_size,_get_inverse_affine_matrix,_interpolation_modes_from_int,affine],rotate->[rotate,_get_inverse_affine_matrix,_interpolation_modes_from_int,get_image_size],to_grayscale->[to_grayscale],ten_crop->[five_crop,hflip,vflip],equalize->[equalize],adjust_contrast->[adjust_contrast]]
Compute the affine matrix for a given . Returns a matrix that applies the inverse of translation and center translation of the system with a negative.
Looks like torchscript in unhappy here, I think we should revert to a list
@@ -31,14 +31,11 @@ class Frame(Dict[Key, Type]): """ def __init__(self) -> None: + self.types = {} # type: Dict[Key, Type] self.unreachable = False - -class DeclarationsFrame(Dict[Key, Optional[Type]]): - """Same as above, but allowed to have None values.""" - - def __init__(self) -> None: - self.unreachable = False + def __getitem__(self, k: Key) -> Type: + return self.types[k] if MYPY:
[ConditionalTypeBinder->[invalidate_dependencies->[get,_cleanse_key],_add_dependencies->[_add_dependencies],allow_jump->[Frame],__init__->[Frame,DeclarationsFrame],pop_frame->[update_from_options],handle_continue->[unreachable,allow_jump],get->[_get],frame_context->[allow_jump,push_frame,pop_frame],top_frame_context->[push_frame,pop_frame],handle_break->[unreachable,allow_jump],put->[_put,_add_dependencies],update_from_options->[_get,get,_put],push_frame->[Frame],assign_type->[put]]]
Initialize a object.
Could you remove the remaining uses of `frame[key]` into `frame.types[key]`? I notice that you did convert *some* operations on frames into operations into the `.types` attribute -- why not all? Or are there too many?
@@ -68,10 +68,6 @@ import org.kohsuke.stapler.export.Flavor; * @see SearchableModelObject */ public class Search implements StaplerProxy { - @Restricted(NoExternalUse.class) // used from stapler views only - public static String encodeQuery(String query) throws UnsupportedEncodingException { - return URLEncoder.encode(query, "UTF-8"); - } public void doIndex(StaplerRequest req, StaplerResponse rsp) throws IOException, ServletException { List<Ancestor> l = req.getAncestors();
[Search->[TokenList->[toString->[toString]],find->[find,subSequence,length,findClosestSuggestedItem,TokenList],suggest->[find,Tag,suggest]]]
Checks if a query is found in the index.
Not sure whether it's safe to remove it
@@ -650,3 +650,17 @@ export function checkCorsUrl(url) { export function tryDecodeUriComponent(component, opt_fallback) { return tryDecodeUriComponent_(component, opt_fallback); } + +/** + * Adds the path to the given url. + * + * @param {!HTMLAnchorElement} a + * @param {string} url + * @param {string} path + * @return {string} + */ +export function appendPathToUrlWithA(a, url, path) { + a.href = url; + const pathname = a.pathname.replace(/\/?$/, '/') + path.replace(/^\//, ''); + return a.protocol + '//' + a.host + pathname + a.search + a.hash; +}
[No CFG could be retrieved]
Decode a URI component.
Why can't we use the URL service here instead of creating an anchor and passing it in? The point of the URL service is to avoid creating unnecessary `<a>` elements and have caching of URLs.
@@ -68,7 +68,12 @@ var ( `\[sig-network-edge\]\[Feature:Idling\] Unidling should work with TCP \(while idling\)`, }, // tests that may work, but we don't support them - "[Disabled:Unsupported]": {}, + "[Disabled:Unsupported]": { + // Skip vSphere-specific storage tests. The standard in-tree storage tests for vSphere + // (prefixed with `In-tree Volumes [Driver: vsphere]`) are enough for testing this plugin. + // https://bugzilla.redhat.com/show_bug.cgi?id=2019115 + `\[sig-storage\].*\[Feature:vsphere\]`, + }, // tests too slow to be part of conformance "[Slow]": {}, // tests that are known flaky
[No CFG could be retrieved]
A list of all possible tests that can be run on a container. The list of skipped tests for the cluster.
This doesn't seem to match to me. I see a lot of `[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern:****` failures.
@@ -19,7 +19,7 @@ class Lock: def release(self): if self.state != 1: - raise RuntimeError + raise RuntimeError("Lock is not acquired.") if self.waiting.peek(): # Task(s) waiting on lock, schedule next Task self.state = self.waiting.pop_head()
[Lock->[acquire->[release],__aexit__->[release],__aenter__->[acquire]]]
Release a from the queue.
I guess this change is for usability, instead of a blank `RuntimeError` it gives a proper message. But is it necessary to have the exact same message as CPython? Usually MicroPython's error messages are shorter versions of their CPython counterpart. So this here could be made `"Lock not acquired"`. Every byte counts!
@@ -50,7 +50,7 @@ class Migraphx(CMakePackage): depends_on('py-pybind11@2.6:', type='build', when='@4.1.0:') for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0', '4.0.0', '4.1.0', - '4.2.0', '4.3.0', '4.3.1']: + '4.2.0', '4.3.0', '4.3.1', '4.5.0']: depends_on('rocm-cmake@' + ver, type='build', when='@' + ver) depends_on('hip@' + ver, when='@' + ver) depends_on('llvm-amdgpu@' + ver, when='@' + ver)
[Migraphx->[url_for_version->[Version,format],cmake_python_hints->[join_path,define],cmake_args->[append,spec,format],variant,depends_on,version,patch]]
Returns a URL for a specific CMake version. Return the list of arguments for the CMake command.
Just above here there's a python requirement mismatch with the new patch. It has cmake looking only for 3.5+, the package is looking for 3+.
@@ -98,6 +98,7 @@ func addHook(ctx *context.APIContext, form *api.CreateHookOption, orgID, repoID URL: form.Config["url"], ContentType: models.ToHookContentType(form.Config["content_type"]), Secret: form.Config["secret"], + HTTPMethod: "POST", HookEvent: &models.HookEvent{ ChooseEvents: true, HookEvents: models.HookEvents{
[IsValidHookTaskType,IsSliceContainsStr,IsValidHookContentType,GetWebhookByOrgID,Error,CreateWebhook,IsValidSlackChannel,Marshal,NotFound,UpdateEvent,HomeLink,JSON,UpdateWebhook,TrimSpace,IsErrWebhookNotExist,ToHookContentType,GetWebhookByRepoID,ToHookTaskType,ToHook]
AddRepoHook add a hook to a repository. Get the object that represents the hook task.
Note: I've hardcoded this as `POST` for now. Currently there are several checks that essentially force POST to be the method, but I want to keep this PR as simple as possible. This of course will need to be changed in future.
@@ -8,9 +8,10 @@ namespace System.Net.Sockets.Tests public static class TestSettings { // Timeout values in milliseconds. - public const int PassingTestTimeout = 10000; + public const int PassingTestShortTimeout = 10_000; + public const int PassingTestLongTimeout = 30_000; public const int FailingTestTimeout = 100; - public static Task WhenAllOrAnyFailedWithTimeout(params Task[] tasks) => tasks.WhenAllOrAnyFailed(PassingTestTimeout); + public static Task WhenAllOrAnyFailedWithTimeout(params Task[] tasks) => tasks.WhenAllOrAnyFailed(PassingTestShortTimeout); } }
[TestSettings->[Task->[WhenAllOrAnyFailed]]]
When all or any of the given tasks fail with a timeout.
Can you keep this untouched to be consistent with other tests? e.g. `PassingTestTimeout` is normal/default timeout. If we need anything different for come case we would make it with specific name - like `PassingTestLongTimeout` (that part looks ok to me)
@@ -57,7 +57,7 @@ func main() { fmt.Println(err) return } - input = strings.Split(strings.TrimSpace(string(b[:])), " ") + input = strings.Split(strings.TrimSpace(string(b)), " ") } pdctl.MainStart(append(os.Args[1:], input...))
[Printf,MainStart,Stat,Println,Split,Notify,ReadAll,Getenv,Exit,TrimSpace,Mode]
- check if is found in command line.
need to change it?
@@ -30,3 +30,12 @@ function jetpack_tiled_gallery_configuration_load() { } jetpack_load_tiled_gallery(); + +// Tile-gallery block definition can be found in wp-calypso repo +jetpack_register_block( 'tiled-gallery', array( + 'render_callback' => 'jetpack_tiled_gallery_load_assets' +) ); +function jetpack_tiled_gallery_load_assets( $attr, $content ) { + Jetpack_Gutenberg::load_assets_as_required( 'tiled-gallery' ); + return $content; +}
[No CFG could be retrieved]
} } ) ;.
Do you think you could add a docblock here so we know what the parameters refer to?
@@ -913,6 +913,15 @@ class Block(object): ops_in_cpp_index += 1 ops_in_python_index += 1 + # sync ops inserted from c++ end + if len(self.ops) != len(ops_in_cpp) and start_index == 0 and len( + self.ops) == end_index: + self.ops.clear() + for index in range(len(ops_in_cpp)): + op_desc = ops_in_cpp[index] + op = Operator(self, op_desc) + self.ops.append(op) + assert len(self.ops) == len(ops_in_cpp) for index in range(len(self.ops)): assert self.ops[index].desc == ops_in_cpp[index]
[get_var->[default_main_program,global_block],get_all_op_protos->[get_all_op_protos],dtype_is_floating->[convert_np_dtype_to_dtype_],Operator->[type->[type],output_arg_names->[output_arg_names],input_arg_names->[input_arg_names],rename_input->[rename_input],has_attr->[has_attr],attr_type->[attr_type],rename_output->[rename_output],to_string->[_debug_string_],attr_names->[attr_names],__init__->[type,instance,find_name],output_names->[output_names],block_attr->[block_attr],all_attrs->[block_attr,attr],output->[output],attr->[attr],input_names->[input_names],input->[input],__str__->[to_string]],Block->[sync_with_cpp->[has_var,type,Operator,name,create_var],clone_variable->[create_var],to_string->[_debug_string_,to_string],rename_var->[has_var,var,type,rename_var,shape,dtype,Variable],set_forward_block_idx->[set_forward_block_idx],append_op->[Operator,append_op],copy_param_info_from->[iter_parameters],var_recursive->[var],create_var->[Variable],prepend_op->[Operator,prepend_op],__str__->[to_string]],Program->[sync_with_cpp->[sync_with_cpp,Block],parse_from_string->[sync_with_cpp,Program,Block],to_string->[_debug_string_,to_string],prune->[sync_with_cpp,Program,prune,Block],__init__->[Block],create_block->[block,Block,current_block],copy_param_info_from->[global_block],inference_optimize->[sync_with_cpp,Program,Block,inference_optimize],append_backward->[sync_with_cpp,to_string,append_backward],clone->[sync_with_cpp,Program,Block,copy_param_info_from],rollback->[current_block],__str__->[to_string]],program_guard->[switch_main_program,switch_startup_program],Parameter->[__str__->[to_string],to_string->[to_string],__init__->[__init__]],Variable->[type->[type],lod_level->[lod_level],persistable->[persistable],to_string->[_debug_string_],shape->[shape],__init__->[convert_np_dtype_to_dtype_],dtype->[dtype],name->[name]],OpProtoHolder->[__init__->[get_all_op_protos]],Program]
Sync from the desc on the c ++ end. This method is used to synchronize the c Sync ops removed from c ++ ops removed from Python ops removed from Python ops removed from C.
For simplicity, can we only keep this and remove all above sync operations? It's slower but very easy to understand.
@@ -140,6 +140,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { apiRouter.HandleFunc("/hotspot/regions/write", hotStatusHandler.GetHotWriteRegions).Methods("GET") apiRouter.HandleFunc("/hotspot/regions/read", hotStatusHandler.GetHotReadRegions).Methods("GET") apiRouter.HandleFunc("/hotspot/stores", hotStatusHandler.GetHotStores).Methods("GET") + apiRouter.HandleFunc("/hotspot/regions/history", hotStatusHandler.GetHistoryHotRegions).Methods("GET") regionHandler := newRegionHandler(svr, rd) clusterRouter.HandleFunc("/region/id/{id}", regionHandler.GetRegionByID).Methods("GET")
[Handle,Handler,Use,NewRouter,HandlerFunc,ServeHTTP,New,Subrouter,TrimPrefix,NewRoute,UseEncodedPath,GetHandler,Inject,HandleFunc,PathPrefix,Methods]
Router for all the methods of the nagios - cluster endpoint. Handle all regions.
prefer to exchange with line 156
@@ -27,6 +27,12 @@ from .base import ( class AccountRegisterInput(graphene.InputObjectType): email = graphene.String(description="The email address of the user.", required=True) password = graphene.String(description="Password.", required=True) + redirect_origin = graphene.String( + description=( + "Base of frontend URL that will be needed to create confirmation URL." + ), + required=True, + ) class AccountRegister(ModelMutation):
[AccountDelete->[perform_mutation->[clean_instance]],AccountRequestDeletion->[perform_mutation->[AccountRequestDeletion]],AccountRegister->[save->[save],Arguments->[AccountRegisterInput]],AccountUpdate->[Arguments->[AccountInput]]]
Create a new user object. Input for the customer s shipping address.
In other mutations where this pattern is used this argument is called `redirectUrl` - for consistency, I would use it here as well.
@@ -102,9 +102,8 @@ public class CodeGenMojo extends AbstractMojo { .setTargetDirectory(buildDir.toPath()) .build().bootstrap(); - Path generatedSourcesDir = test - ? buildDir.toPath().resolve("generated-test-sources") - : buildDir.toPath().resolve("generated-sources"); + Path generatedSourcesDir = buildDir.toPath().resolve(test ? "generated-test-sources" : "generated-sources") + .resolve("quarkus"); sourceRegistrar.accept(generatedSourcesDir);
[CodeGenMojo->[doExecute->[getProperties,getMessage,equals,MojoExecutionException,bootstrap,info,accept,createDeploymentClassLoader,stringPropertyNames,trigger,MojoFailureException,getProperty,init,setProperty,getAppModel,startsWith,resolve,build,getAppArtifact,Properties,singleton,toPath],getAppArtifact->[getClassifier,debug,getArtifact,MojoExecutionException,getOutputDirectory,get,getGroupId,AppArtifact,getArtifactId,isDebugEnabled,exists,setPath,getExtension,getVersion,createDirectories],execute->[get,doExecute,addCompileSourceRoot,toString,getAbsolutePath]]]
Execute the build phase of the quarkus build. Catch any exceptions thrown during the prepare phase of the quarkus - maven - plugin.
I think we may need both in case of testing.
@@ -881,8 +881,13 @@ func (b *cloudBackend) runEngineAction( }() // The backend.SnapshotManager and backend.SnapshotPersister will keep track of any changes to - // the Snapshot (checkpoint file) in the HTTP backend. - persister := b.newSnapshotPersister(ctx, u.update, u.tokenSource, op.SecretsManager) + // the Snapshot (checkpoint file) in the HTTP backend. We will reuse the snapshot's secrets manager when possible + // to ensure that secrets are not re-encrypted on each update. + sm := op.SecretsManager + if secrets.AreCompatible(sm, u.GetTarget().Snapshot.SecretsManager) { + sm = u.GetTarget().Snapshot.SecretsManager + } + persister := b.newSnapshotPersister(ctx, u.update, u.tokenSource, sm) snapshotManager := backend.NewSnapshotManager(persister, u.GetTarget().Snapshot) // Depending on the action, kick off the relevant engine activity. Note that we don't immediately check and
[GetStack->[GetStack],RenameStack->[RenameStack],GetLogs->[GetStack],CloudConsoleURL->[CloudURL],tryNextUpdate->[CloudURL],GetPolicyPack->[CloudURL,Name,parsePolicyPackReference],CancelCurrentUpdate->[GetStack],GetLatestConfiguration->[GetLatestConfiguration],GetStackTags->[GetStack],DoesProjectExist->[DoesProjectExist],GetStackResourceOutputs->[GetStackResourceOutputs],query->[Query],Query->[GetStack],GetStackOutputs->[GetStackOutputs],apply->[cloudConsoleStackPath,CloudConsoleURL,createAndStartUpdate],CreateStack->[CreateStack],runEngineAction->[Refresh,Update,Destroy],ListStacks->[ListStacks],Logout->[CloudURL],UpdateStackTags->[UpdateStackTags,getCloudStackIdentifier],GetStack]
runEngineAction runs an engine action on the given stackRef and stackRef with the given NewScope creates a new scope for the given state.
What are the cases where these might not be compatible? And what is the experience in those cases?
@@ -237,7 +237,7 @@ registries = ['{{.Host}}:{{.Port}}']` }) It("podman search attempts HTTP if registry is in registries.insecure and force secure is false", func() { - SkipIfRemote("FIXME This should work on podman-remote") + SkipIfRemote("--tls-verify is not supportedon podman-remote search") if podmanTest.Host.Arch == "ppc64le" { Skip("No registry image for ppc64le") }
[Address->[Sprintf],setRegistriesConfigEnv,Unlock,ErrorToString,Podman,Exit,ShouldNot,Atoi,WriteFile,WaitWithDefaultTimeout,Should,MatchString,New,To,SeedImages,Address,Bytes,Contents,OutputToStringArray,Cleanup,ExitCode,Execute,Must,ErrorGrepString,RestoreArtifact,PodmanNoCache,GrepString,Sprintf,OutputToString,Parse,LineInOutputContains,Setup]
This test tests if the image is in the registry and if it is in the registry it 20 - 1.
[not worth re-pushing; please fix only if something else merits it] : "supportedon" needs space. Also lines 281, 320 below.
@@ -1881,13 +1881,10 @@ btr_insert(struct btr_context *tcx, d_iov_t *key, d_iov_t *val) rc = btr_root_start(tcx, rec); if (rc != 0) { D_DEBUG(DB_TRACE, "Failed to start the tree: %d\n", rc); - goto failed; + return rc; } } return 0; - failed: - btr_rec_free(tcx, rec, NULL); - return rc; } static int
[dbtree_lookup->[dbtree_fetch],btr_probe_rc->[btr_probe_valid],dbtree_iterate->[dbtree_iter_next,dbtree_iter_prev,dbtree_iter_finish,dbtree_iter_probe,dbtree_iter_prepare,dbtree_iter_fetch],int->[btr_root_grow,btr_root_start],dbtree_iter_prepare->[btr_context_addref],dbtree_open_inplace->[dbtree_open_inplace_ex]]
inserts a new record into the tree This function is called by btr_add_or_delete_next. It is.
I think this kind of error cleanup is still useful when dbtree is on non-transactional vmem class, right?
@@ -750,6 +750,14 @@ class BaseModuleFileWriter(object): with open(self.layout.filename, 'w') as f: f.write(text) + # Set the file permissions of the module to match that of the package + # We have to check for file existence here in case the Writer has been + # connected to a StringIO + if os.path.exists(self.layout.filename): + llnl.util.filesystem.chmod_x(self.layout.filename, self.perms) + if self.group: + llnl.util.filesystem.chgrp(self.layout.filename, self.group) + def remove(self): """Deletes the module file.""" mod_file = self.layout.filename
[merge_config_rules->[update_dictionary_extending_lists,dependencies],BaseConfiguration->[env->[process_arglist],naming_scheme->[_check_tokens_are_valid],blacklisted->[debug_info],__init__->[merge_config_rules]],BaseContext->[environment_modifications->[dependencies,_check_tokens_are_valid]],dependencies->[dependencies],update_dictionary_extending_lists->[update_dictionary_extending_lists],read_module_indices->[read_module_index],BaseModuleFileWriter->[write->[dirname,write,_get_template],remove->[dirname,remove]],BaseFileLayout->[dirname->[root_path],filename->[dirname]],read_module_indices]
Writes the module file. Dictionary representation of a object.
Can we consolidate the permission setting logic in one place? i.e., both this class and the existing package existing permission-setting logic need to know how to set permissions on a file based on a spec. So if we had a `SpecPermissionSetter` class somewhere, or just a `set_permissions_by_spec(spec, path)` function, we could avoid having to `chmod`/`chgrp` everywhere.
@@ -2788,8 +2788,9 @@ namespace System.Text.RegularExpressions // RegexNode.M is used for the number of iterations; RegexNode.N is ignored. void EmitSingleCharFixedRepeater(RegexNode node, bool emitLengthChecksIfRequired = true) { - int iterations = node.M; + Debug.Assert(node.IsOneFamily || node.IsNotoneFamily || node.IsSetFamily, $"Unexpected type: {node.Type}"); + int iterations = node.M; if (iterations == 0) { // No iterations, nothing to do.
[RegexCompiler->[Switch->[Switch],Ldstr->[Ldstr],Ldloc->[Ldloc],Stfld->[Stfld],Mul->[Mul],Ldloca->[Ldloca],Stloc->[Stloc],And->[And],Call->[Call],Or->[Or],Ret->[Ret],Dup->[Dup],Add->[Add],EmitTimeoutCheck->[RemUn,MarkLabel,Ldloc,Ldc,Brtrue,Add,Ldthis,Stloc,Call],Unaligned->[Unaligned],Ldlen->[Ldlen],Ceq->[Ceq],Pop->[Pop],Callvirt->[Callvirt],MarkLabel->[MarkLabel],EmitGo->[Switch,Ldstr,Ldc,Ldloc,BltFar,Ldthis,Stfld,Mul,Stloc,Ldloca,Call,Ret,LdindU2,Bgt,LdindI4,Dup,Brtrue,Add,BneFar,Bne,Unaligned,Ldlen,Br,BgeUn,BrfalseFar,BgeUnFar,MarkLabel,Blt,Mvfldloc,BrFar,Ble,Sub,BgeFar,LdindI8,BltUnFar,Ldthisfld,LdelemI4,LdcI8,BeqFar,StelemI4,CallToLower,BrtrueFar],Shl->[Shl],Mvfldloc->[Ldthisfld,Stloc],InitLocalCultureInfo->[Call,Stloc,Callvirt],EmitMatchCharacterClass->[CgtUn,Ldstr,Ldc,Ldloc,Stloc,And,Call,Or,CltUn,Ceq,Pop,Br,Blt,MarkLabel,Shl,Bge,Sub,Shr,CallToLower],Sub->[Sub],Shr->[Shr],Ldthisfld->[Ldthis],InitializeCultureForGoIfNecessary->[InitLocalCultureInfo,DeclareTextInfo],EmitFindFirstChar->[Ldstr,DeclareTextInfo,Beq,Ldloc,Ldc,BltFar,BleFar,Ldthis,Stfld,Stloc,Ldloca,Call,Ret,LdindU2,Bgt,Add,Br,BrfalseFar,MarkLabel,Blt,Mvfldloc,InitLocalCultureInfo,BrFar,Ble,Bge,Sub,Ldthisfld],CallToLower->[Ldloc,Call,Stloc,Callvirt]]]
This method is called by the Go method that is responsible for emitting the code for a Checks if a Capture node is available in the tree. if we have a node in the tree that doesn t match any node in the tree we Emits a sum of a constant and a value from a local builder.
Yay for all these new asserts.
@@ -3082,9 +3082,7 @@ public class QueueImpl extends CriticalComponentImpl implements Queue { numNoMatch = 0; numAttempts = 0; - if (consumer != redistributor) { - ref = handleMessageGroup(ref, consumer, groupConsumer, groupID); - } + ref = handleMessageGroup(ref, consumer, groupConsumer, groupID); deliveriesInTransit.countUp();
[QueueImpl->[getDurableDeliveringCount->[getDurableMessageCount],scheduleDepage->[getName],configureExpiry->[getExpiryAddress],addConsumer->[debug],deleteAllReferences->[deleteAllReferences],configureSlowConsumerReaper->[equals,getName,cancel,debug],getMessageCount->[getMessageCount],getDeliveringCount->[getMessageCount],deleteReference->[acknowledge,removeReferenceWithID,iterator],ExpiryScanner->[run->[getName,run,expire,debug,close,iterator]],unproposed->[run->[debug],getName],addHead->[addHead],createDeadLetterResources->[toString],getScheduledCount->[getScheduledCount],checkExpired->[expire],expiryAddressFromMessageAddress->[getAddress],getDurableScheduledCount->[getDurableScheduledCount],expireReference->[expire,iterator],removeWithSuppliedID->[checkIDSupplier],DelayedAddRedistributor->[run->[clearRedistributorFuture,internalAddRedistributor]],retryMessages->[actMessage->[getID],iterQueue,retryMessages],getDurableDeliveringSize->[getDurablePersistentSize],postRollback->[addSorted,isNonDestructive,getConsumerCount],getPersistentSize->[getPersistentSize],rerouteMessages->[actMessage->[route,getAddress],iterQueue],debug->[debug],acknowledge->[isDurable,getID,acknowledge,debug],deliverScheduledMessages->[cancel,addHead],getDeliveringMessages->[getDeliveringMessages,iterator],internalAddRedistributor->[toString,deliverAsync],DepageRunner->[run->[depage]],refDown->[refDown],deliverNow->[deliverAsync],deleteQueue->[deleteAllReferences,destroyPaging,deleteQueue,isDurable,cancel,getID],getDurableMessageCount->[getDurableMessageCount,getMessageCount,isDurable],internalAddTail->[addTail],moveReferences->[actMessage->[acknowledge],iterQueue,moveReferences],purgeAfterRollback->[acknowledge],durableUp->[durableUp],enforceRing->[referenceHandled,refRemoved,getMessageCountForRing,enforceRing,acknowledge],postAcknowledge->[durableDown,postAcknowledge,refDown,isDurable],internalErrorProcessing->[removeConsumer,addHead],getPriority->[getPriority],doInternalPoll->[internalAddTail,
deliverAsync,skipDelivery,incrementMesssagesAdded],deliverAsync->[deliverAsync],getDurableScheduledSize->[getDurableScheduledSize],changeReferencePriority->[iterator,addTail],refUp->[refUp],getDurablePersistentSize->[getPersistentSize,getDurablePersistentSize,isDurable],createExpiryResources->[toString,getExpiryAddress],sendToDeadLetterAddress->[move,equals,sendToDeadLetterAddress,toString,acknowledge],iterator->[iterator],addTail->[getName,addTail],removeReferenceWithID->[removeReferenceWithID,iterator],handleMessageGroup->[extractGroupSequence],pause->[pause,flushDeliveriesInTransit,isDurable],SynchronizedIterator->[next->[next],remove->[remove],repeat->[repeat],close->[close],hasNext->[hasNext]],getRefsOperation->[getRefsOperation],hashCode->[hashCode],proceedDeliver->[proceedDeliver],move->[toString,acknowledge,route,makeCopy],createResources->[getAddress,equals],hasMatchingConsumer->[getFilter],sendMessagesToDeadLetterAddress->[iterator],handle->[removeConsumer,handle],resume->[deliverAsync],QueueBrowserIterator->[next->[getPagingIterator,next,hasNext],remove->[remove],getPagingIterator->[iterator],close->[close,getPagingIterator],hasNext->[getPagingIterator,hasNext],SynchronizedIterator,iterator],durableDown->[durableDown],expireReferences->[expire,run,iterator],addRedistributor->[deliverAsync],internalAddSorted->[addSorted],moveReference->[iterator],deleteMatchingReferences->[deleteMatchingReferences],DeliverRunner->[run->[deliver,checkDepage]],getScheduledSize->[getScheduledSize],SlowConsumerReaperRunnable->[run->[equals,getName,getMessageCount,getAddress,getRate,consumer,debug,toString,getID,getConsumerCount]],createDeleteMatchingAction->[actMessage->[acknowledge]],getRate->[getMessagesAdded],checkDeadLetterAddressAndExpiryAddress->[getExpiryAddress,equals],makeCopy->[toString,makeCopy],sendMessageToDeadLetterAddress->[iterator],removeConsumer->[close],scheduleSlowConsumerReaper->[getName,toString,debug],getExecutor->[getExecutor],AddressSettingsRepositoryL
istener->[onChange->[configureSlowConsumerReaper,toString,configureExpiry]],internalAddHead->[addHead],equals->[equals],expire->[move,getName,getExpiryAddress,expire,acknowledge],changeReferencesPriority->[iterator,addTail],getReference->[iterator],isPaused->[isPaused],cancelRedistributor->[clearRedistributorFuture],deliver->[isPaused,doInternalPoll,repeatNextDelivery,getName,deliverAsync,extractGroupID,iterator,canDispatch,incrementMesssagesAdded,nextDelivery,debug,getConsumerCount],flushOnIntermediate->[skipDelivery->[run]],depage->[needsDepage,isPaused,getName,getMessageCount,deliverAsync,expireReferences,getPersistentSize,debug,addTail],removeAddress->[getAddress],moveBetweenSnFQueues->[done->[deliverAsync],toString,acknowledge,route,debug],checkRedelivery->[getName,toString,isDurable],iterQueue->[actMessage,forceDelivery,cancel,iterator,addTail],cancel->[cancel],locateTargetBinding->[getRoutingName,equals,getAddress,debug,toString],reacknowledge->[isDurable],ConsumerHolder->[getPriority->[getPriority],resetIterator->[close],equals->[equals]],deliverDirect->[deliver],addSorted->[addSorted],moveReferencesBetweenSnFQueues->[iterQueue],toString->[toString],getDeliveringSize->[getPersistentSize]]]
This method is called when a message is delivered to the queue. This method is called when a message is delivered to a queue. It will check if the check if there is a race condition.
If the redistributor now handles groups and didnt before, does it need to do anything extra when it is cancelled that it wasnt doing before (since it didnt need to) that e.g other consumers currently do when removed?
@@ -215,6 +215,8 @@ class CheckerPluginInterface: msg = None # type: MessageBuilder options = None # type: Options path = None # type: str + # Type context for type inference + type_context = None # type: List[Optional[Type]] @abstractmethod def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
[ChainedPlugin->[get_type_analyze_hook->[get_type_analyze_hook],get_method_signature_hook->[get_method_signature_hook],set_modules->[set_modules],get_method_hook->[get_method_hook],get_function_hook->[get_function_hook],get_attribute_hook->[get_attribute_hook],get_base_class_hook->[get_base_class_hook],report_config_data->[report_config_data],get_metaclass_hook->[get_metaclass_hook],get_class_decorator_hook->[get_class_decorator_hook],get_dynamic_class_hook->[get_dynamic_class_hook],get_customize_class_mro_hook->[get_customize_class_mro_hook],get_additional_deps->[get_additional_deps]],WrapperPlugin->[get_type_analyze_hook->[get_type_analyze_hook],get_method_signature_hook->[get_method_signature_hook],set_modules->[set_modules],get_method_hook->[get_method_hook],get_function_hook->[get_function_hook],get_attribute_hook->[get_attribute_hook],get_base_class_hook->[get_base_class_hook],report_config_data->[report_config_data],get_metaclass_hook->[get_metaclass_hook],get_class_decorator_hook->[get_class_decorator_hook],get_dynamic_class_hook->[get_dynamic_class_hook],get_customize_class_mro_hook->[get_customize_class_mro_hook],get_additional_deps->[get_additional_deps],lookup_fully_qualified->[lookup_fully_qualified]],Plugin->[lookup_fully_qualified->[lookup_fully_qualified]]]
Emit an error at given location.
Here it should be an abstract property.
@@ -624,6 +624,7 @@ func RegisterRoutes(m *macaron.Macaron) { }, repo.MustBeNotBare, context.RepoRef(), context.CheckUnit(models.UnitTypeCode)) m.Group("/pulls/:index", func() { + m.Get(".diff", repo.DownloadPullDiff) m.Get("/commits", context.RepoRef(), repo.ViewPullCommits) m.Get("/files", context.RepoRef(), repo.SetEditorconfigIfExists, repo.SetDiffViewStyle, repo.ViewPullFiles) m.Post("/merge", reqRepoWriter, repo.MergePullRequest)
[Handle,Dir,GitHookService,Gziper,ServeData,CheckUnit,Close,Mailer,IncreaseDownloadCount,RepoRef,SetURLPrefix,MultipartForm,Static,Redirect,GetBranchCommit,Error,Post,New,NotFound,Captchaer,Route,CheckAnyUnit,Locale,CanCreateOrganization,Logger,AddBindingRules,I18n,Any,Join,Toggle,RequireRepoAdmin,ServeFileContent,Sessioner,Head,RequireRepoWriter,Recovery,RegisterRoutes,Custom,Get,RepoAssignment,RepoIDAssignment,GetAttachmentByUUID,InitMailRender,Csrfer,Renderer,Use,Cacher,IsErrAttachmentNotExist,LocalPath,OrgAssignment,Contexter,LoadRepoUnits,UnitTypes,Toolboxer,RepoRefByType,GetCommitsCount,Fatal,Combo,Open,Group,Params]
Add routes to the wiki. Commits routes are used to provide a list of commits.
I think maybe it's not work?
@@ -703,7 +703,14 @@ NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource, DnsServ // save the password in DB for (final VirtualRouter router : routers) { if (router.getState() == State.Running) { - return networkTopology.savePasswordToRouter(network, nic, uservm, router); + final boolean result = networkTopology.savePasswordToRouter(network, nic, uservm, router); + if (result) { + // Explicit password reset, while VM hasn't generated a password yet. + final UserVmVO userVmVO = _userVmDao.findById(vm.getId()); + userVmVO.setUpdateParameters(false); + _userVmDao.update(userVmVO.getId(), userVmVO); + } + return result; } } final String password = (String) uservm.getParameter(VirtualMachineProfile.Param.VmPassword);
[VirtualRouterElement->[applyPFRules->[canHandle],completeAggregatedExecution->[completeAggregatedExecution,getRouters],prepare->[canHandle],removeDnsSupportForSubnet->[getProvider,removeDhcpSupportForSubnet],validateLBRule->[canHandle],getRouters->[getProvider,getRouters],configDnsSupportForSubnet->[getProvider],removeDhcpSupportForSubnet->[canHandle,removeDhcpSupportForSubnet],applyIps->[canHandle],startVpn->[canHandle],shutdown->[getRouters],prepareAggregatedExecution->[prepareAggregatedExecution,getRouters],savePassword->[canHandle],applyDhcpEntries->[canHandle,getRouters],configureDhcpSupport->[canHandle,getRouters],applyLBRules->[canHandle],addPasswordAndUserdata->[canHandle,getRouters],saveSSHKey->[canHandle],addDnsEntry->[getProvider],applyFWRules->[canHandle],saveUserData->[canHandle],applyStaticNats->[canHandle,applyStaticNats],setCapabilities->[getHAProxyStickinessCapability],applyVpnUsers->[canHandle,applyVpnUsers],stopVpn->[canHandle]]]
Save the password of a user in a network.
What does this `updateParameters` means in the `userVmVo` object?
@@ -144,7 +144,15 @@ public class HoodieReadClient<T extends HoodieRecordPayload> implements Serializ // record locations might be same for multiple keys, so need a unique list Set<String> uniquePaths = new HashSet<>(paths); - Dataset<Row> originalDF = sqlContextOpt.get().read().parquet(uniquePaths.toArray(new String[uniquePaths.size()])); + Dataset<Row> originalDF = null; + if (uniquePaths.size() == 0) { + originalDF = sqlContextOpt.get().read().parquet(uniquePaths.toArray(new String[uniquePaths.size()])); + } + if (paths.get(0).endsWith(HoodieFileFormat.PARQUET.getFileExtension())) { + originalDF = sqlContextOpt.get().read().parquet(uniquePaths.toArray(new String[uniquePaths.size()])); + } else if (paths.get(0).endsWith(HoodieFileFormat.ORC.getFileExtension())) { + originalDF = sqlContextOpt.get().read().orc(uniquePaths.toArray(new String[uniquePaths.size()])); + } StructType schema = originalDF.schema(); JavaPairRDD<HoodieKey, Row> keyRowRDD = originalDF.javaRDD().mapToPair(row -> { HoodieKey key = new HoodieKey(row.getAs(HoodieRecord.RECORD_KEY_METADATA_FIELD),
[HoodieReadClient->[tagLocation->[tagLocation],readROView->[assertSqlContext,convertToDataFilePath]]]
Read RO view.
merge with `paths.get(0).endsWith(HoodieFileFormat.PARQUET.getFileExtension()`?
@@ -747,9 +747,6 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar if (_allocLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_SYNC)) { try { secStorageVm = startNew(dataCenterId, role); - for (UploadVO upload : _uploadDao.listAll()) { - _uploadDao.expunge(upload.getId()); - } } finally { _allocLock.unlock(); }
[SecondaryStorageManagerImpl->[stopSecStorageVm->[stop],isPoolReadyForScan->[isZoneReady],rebootSecStorageVm->[startSecStorageVm],createSecStorageVmInstance->[getDefaultNetworkForCreation],allocCapacity->[assignSecStorageVmFromStoppedPool,startNew,isSecondaryStorageVmRequired,startSecStorageVm],assignSecStorageVmFromRunningPool->[getCurrentAllocator],stop->[stop],startNew->[isSecondaryStorageVmRequired],pickSsvmHost->[listUpAndConnectingSecondaryStorageVmHost],onScanStart->[getZoneHostInfo],expandPool->[allocCapacity]]]
Allocate a secondary storage vm standby capacity for a given data center. try to acquire synchronization lock and if it fails start it.
@ravening would that cause a regression, to not expunge uploaded volumes?
@@ -44,7 +44,7 @@ class CheckoutQueryset(models.QuerySet): class Checkout(ModelWithMetadata): """A shopping checkout.""" - + id = "" created = models.DateTimeField(auto_now_add=True) last_change = models.DateTimeField(auto_now_add=True) user = models.ForeignKey(
[Checkout->[is_shipping_required->[is_shipping_required]],CheckoutLine->[is_shipping_required->[is_shipping_required]]]
A base class for all of the n + 1 models. Adds fields to checkouts.
IMO it shouldn't be here
@@ -1317,6 +1317,11 @@ vdev_metaslab_group_create(vdev_t *vd) vd->vdev_mg = metaslab_group_create(mc, vd, spa->spa_alloc_count); + if (!vd->vdev_islog) { + vd->vdev_log_mg = metaslab_group_create( + spa_embedded_log_class(spa), vd, 1); + } + /* * The spa ashift min/max only apply for the normal metaslab * class. Class destination is late binding so ashift boundry
[No CFG could be retrieved]
region vdev_metaslab_group_create Method region vdev_metaslab_init - This function is called by the vdev.
I may have misread the flow here (e.g. we call `vdev_metaslab_group_create` before `vdev_metaslab_init`), but wouldn't the above set `vdev_log_mg` for normal vdevs, regardless of our `zfs_embedded_slog_min_ms` tunable? This seems to be the case at least for `zdb.c`.
@@ -127,6 +127,13 @@ module Engine end def illegal_other_buy?(train, entity) + if @game.concession_pending?(entity) && + (@game.train_is_switcher?(train) || + @game.diesel?(train) || + (train.distance < 2 && entity == @game.nwe)) + return true + end + # can't ever buy machines across @game.train_is_machine?(train) || # only RRs can have a diesel - but only one
[BuyTrain->[allocate_machines!->[minor_distance],buyable_trains->[buyable_depot_trains],process_buy_train->[illegal_depot_buy?,illegal_other_buy?],illegal_depot_buy?->[minor_distance,public_mine_min_distance]]]
Checks if an entity can be able to buy a specific unknown node in the training set.
can we de-duplicate this code?
@@ -7,14 +7,12 @@ package org.mule.runtime.module.launcher.log4j2; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.*; import static org.mule.runtime.core.api.util.ClassUtils.withContextClassLoader; + import org.mule.runtime.module.artifact.api.classloader.RegionClassLoader; import org.mule.tck.junit4.AbstractMuleTestCase; import org.mule.tck.size.SmallTest; - import org.apache.logging.log4j.Level; import org.apache.logging.log4j.core.Logger; import org.apache.logging.log4j.core.LoggerContext;
[DispatchingLoggerTestCase->[currentClassLoader->[info],regionClassLoader->[info,getContext,mock,withContextClassLoader],before->[getContextClassLoader,DispatchingLogger,hashCode,thenReturn],anotherClassLoader->[info,withContextClassLoader],getName]]
Package that imports the given CPAL object into the Java code path. Provides a mocked version of the which is used to create a message.
import all the required classes in an explicit way.
@@ -166,8 +166,8 @@ class AssignmentsController < ApplicationController grouping = current_user.accepted_grouping_for(a) if grouping.has_submission? submission = grouping.current_submission_used - if submission.has_remark? && submission.get_remark_result.released_to_students - @a_id_results[a.id] = submission.get_remark_result + if submission.has_remark? && submission.remark_result.released_to_students + @a_id_results[a.id] = submission.remark_result elsif submission.has_result? && submission.get_original_result.released_to_students @a_id_results[a.id] = submission.get_original_result end
[AssignmentsController->[process_assignment_form->[new],create->[new],new->[new],update_assignment!->[new],decline_invitation->[decline_invitation]]]
This action shows all assignments and grades for the current user.
Line is too long. [86/80]
@@ -8,6 +8,9 @@ We assume that the non-stationary EOG artifacts have already been removed. The sources matching the ECG are automatically found and displayed. Subsequently, artefact detection and rejection quality are assessed. Finally, the impact on the evoked ERF is visualized. + +Note that this example does quite a bit of processing, so even on a very +fast machine it can take over a minute to complete. """ # Authors: Denis Engemann <denis.engemann@gmail.com>
[plot_overlay,create_ecg_epochs,filter,dict,epochs,plot_sources,crop,abs,print,find_events,data_path,plot_components,average,Raw,ICA,find_bads_ecg,Epochs,plot_scores,pick_types]
Compute ICA components on a series of n_components epochs using the FastICA algorithm fit the data from the raw data and plot the ecg scores.
really? this one?
@@ -411,7 +411,7 @@ public class GroupByQuery extends BaseQuery<Row> dimsInOrderBy.add(dimIndex); needsReverseList.add(needsReverse); final ValueType type = dimensions.get(dimIndex).getOutputType(); - isNumericField.add(type == ValueType.LONG || type == ValueType.FLOAT); + isNumericField.add(type == ValueType.LONG || type == ValueType.FLOAT || type == ValueType.DOUBLE); comparators.add(orderSpec.getDimensionComparator()); } }
[GroupByQuery->[Builder->[addOrderByColumn->[addOrderByColumn],copy->[Builder],addDimension->[addDimension],build->[GroupByQuery],getHavingSpec,getAggregatorSpecs,getVirtualColumns,getPostAggregatorSpecs,getDimensions,getDimFilter,getGranularity,getLimitSpec],compareDims->[compare],compareDimsForLimitPushDown->[compare],equals->[equals],hashCode->[hashCode],determineApplyLimitPushDown->[validateAndGetForceLimitPushDown,getDimensions],getRowOrderingForPushDown->[compare->[compare],getContextSortByDimsFirst],getTimeComparator->[compare],getRowOrdering->[getRowOrderingForPushDown,compare,getContextSortByDimsFirst],postProcess->[apply],makePostProcessingFn]]
Gets the row ordering for push down. Compares two rows.
Could add `ValueType.isNumeric()`?
@@ -3,12 +3,14 @@ package com.baeldung; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.TestPropertySource; import org.springframework.test.context.junit4.SpringRunner; import com.baeldung.boot.Application; @RunWith(SpringRunner.class) @SpringBootTest(classes = Application.class) +@TestPropertySource(properties = {"spring.jpa.show-sql=false "}) public class SpringContextTest { @Test
[No CFG could be retrieved]
When the Spring context is bootstrapped then no exceptions are thrown.
Maybe better to set this property in the application(-test).properties?
@@ -419,6 +419,9 @@ def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None, raise ValueError("predictions must not be None.") with ops.name_scope(scope, "huber_loss", (predictions, labels, weights)) as scope: + if delta == float('inf'): + return mean_squared_error(labels, predictions, weights, scope, + loss_collection, reduction) predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape())
[mean_pairwise_squared_error->[_num_present],compute_weighted_loss->[validate,_num_elements,_safe_mean,_num_present],huber_loss->[compute_weighted_loss],absolute_difference->[compute_weighted_loss],cosine_distance->[compute_weighted_loss],Reduction->[validate->[all]],sigmoid_cross_entropy->[compute_weighted_loss],softmax_cross_entropy->[compute_weighted_loss],sparse_softmax_cross_entropy->[compute_weighted_loss,_remove_squeezable_dimensions],log_loss->[compute_weighted_loss],hinge_loss->[compute_weighted_loss],mean_squared_error->[compute_weighted_loss]]
Adds a Huber Loss term to the training procedure. Compute weighted loss of .
This check only works if the delta is a python scalar infinity, not if it's a TF inf float.
@@ -80,6 +80,9 @@ namespace Dynamo.Scheduler Asynchronous } + /// <summary> + /// DynamoScheduler is run in the second thread, with Scheduler Dynamo runs tasks asynchronously. + /// </summary> public partial class DynamoScheduler : IScheduler { #region Class Events, Properties
[DynamoScheduler->[Shutdown->[Shutdown,Set,Clear],ProcessNextTask->[Reset,ReprioritizeTasksInQueue,ProcessTaskInternal,Count,NotifyTaskStateChanged,TaskAvailable,WaitAny,Discarded,CompactTaskQueue,RemoveAt],ScheduleForExecution->[Scheduled,ProcessTaskInternal,MarkTaskAsScheduled,NotifyTaskStateChanged,Set,Add,Synchronous],schedulerThread,ProcessMode,Next,Initialize]]
region Public API Methods Check if there are any items in the scheduler that are ready for access.
This class represents Dynamo scheduler. All the tasks are scheduled on the scheduler. Also, these tasks runs async.
@@ -20,4 +20,7 @@ class User < ApplicationRecord scope :by_source_site, ->(source_site) { where(source_site: source_site) } enum gender: { male: 0, female: 1 } + enum notification_frequency: { + disabled: 0, immediate: 1, hourly: 2, daily: 3, weekly: 4 + }, _suffix: :notifications end
[User->[include,belongs_to,enum,scope,validates,where,has_many,order]]
Select by source site.
What is this suffix for?
@@ -401,6 +401,16 @@ void WbStreamingServer::processTextMessage(QString message) { sendWorldStateToClient(client, state); } sendToClients("reset finished"); + } else if (message == "revert") + WbApplication::instance()->worldReload(); + else if (message.startsWith("load:")) { + WbWorld *world = WbWorld::instance(); + const QString worldName = message.mid(5); + const QString fullPath = QFileInfo(world->fileName()).dir().absolutePath() + '/' + worldName; + if (!QFile::exists(fullPath)) + WbLog::error(tr("Streaming server: world %1 doesn't exist.").arg(fullPath)); + else if (gMainWindow) + gMainWindow->loadDifferentWorld(fullPath); } else if (message.startsWith("get controller:")) { const QString controller = message.mid(15); if (!isControllerEditAllowed(controller))
[No CFG could be retrieved]
handles the case where the user has requested a controller and a main controller file to be sent All possible file extensions in the filter names are removed.
We should probably check that worldName contains a simple file name without any path component, e.g., to avoid things like `../../my_secret_project/worlds/my_secret_simulation.wbt`.
@@ -109,7 +109,10 @@ class TypeMeetVisitor(TypeVisitor[Type]): if isinstance(self.s, Void) or isinstance(self.s, ErrorType): return ErrorType() elif isinstance(self.s, NoneTyp): - return self.s + if experiments.STRICT_OPTIONAL: + return AnyType() + else: + return self.s else: return AnyType()
[is_overlapping_types->[is_overlapping_types],meet_simple->[meet_types],TypeMeetVisitor->[meet->[meet_types],visit_union_type->[meet_types]]]
Visit an UnboundType.
I suppose this should also have `elif isinstance(self.s, UninhabitedType): return self.s`, though I don't know specifically what this is for.