patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -592,7 +592,7 @@ aggregate_multi(struct io_test_args *arg, struct agg_tst_dataset *ds_sample) if (ds_sample->td_discard) rc = vos_discard(arg->ctx.tc_co_hdl, epr_a); else - rc = vos_aggregate(arg->ctx.tc_co_hdl, epr_a); + rc = vos_aggregate(arg->ctx.tc_co_hdl, epr_a, NULL); assert_int_equal(rc, 0); multi_view(arg, oids, dkeys, akeys, AT_OBJ_KEY_NR, ds_arr, true);
[run_discard_tests->[cmocka_run_group_tests_name],run_aggregate_tests->[cmocka_run_group_tests_name],int->[rand,D_ALLOC,D_FREE,d_iov_set,vos_hdl2cont,vos_iterate,vos_obj_cache_current,strlen,assert_true,test_args_reset,assert_non_null,update_value,vos_obj_release,vos_obj_hold,assert_int_equal],daos_size_t->[assert_true],void->[generate_akeys,daos_unit_oid_is_null,d_iov_set,daos_fail_loc_set,vos_pool_query,io_test_obj_update,assert_non_null,rand,dts_buf_render,vos_aggregate,io_test_obj_fetch,phy_recs_nr,aggregate_multi,VERBOSE_MSG,min,strlen,dts_key_gen,do_punch,agg_punches_test_helper,assert_int_equal,generate_view,vos_obj_punch,D_ALLOC,D_FREE,daos_sgl_init,generate_or_verify,fill_cont,sleep,daos_fail_value_set,generate_recx,get_ds_index,D_ALLOC_ARRAY,dts_unit_oid_gen,print_error,assert_memory_equal,fetch_value,verify_view,print_space_info,daos_sgl_fini,DP_UOID,memset,aggregate_basic,get_view_len,assert_true,vos_discard,lookup_object,multi_view,agg_punches_test,update_value]]
- - - - - - - - - - - - - - - - - - Randomly generate a random object from the array of objects. D - Link for D - Link - D - Link - D - Link - D - AT_OBJ_KEY_NR - Frees the array.
Why NULL is passed here?
@@ -57,6 +57,12 @@ func TestAPISearchRepo(t *testing.T) { user4 := models.AssertExistsAndLoadBean(t, &models.User{ID: 20}).(*models.User) orgUser := models.AssertExistsAndLoadBean(t, &models.User{ID: 17}).(*models.User) + oldAPIDefaultNum := setting.API.DefaultPagingNum + defer func() { + setting.API.DefaultPagingNum = oldAPIDefaultNum + }() + setting.API.DefaultPagingNum = 10 + // Map of expected results, where key is user for login type expectedResults map[*models.User]struct { count int
[RemoveAll,False,TempDir,AssertExistsAndLoadBean,GetCount,Len,EqualValues,Equal,Contains,GitPath,NoError,Cond,GetUserByName,DeleteRepository,HasAccess,Sprintf,NotEmpty,MakeRequest,Run,True]
TestAPISearchRepo tests that the API search results contain the given keyword. returns a list of the possible objects for the .
instead of setting it via variable, why not just add the query arguments?
@@ -534,13 +534,13 @@ class ChatThreadClient(object): @distributed_trace_async async def remove_participant( self, - user: CommunicationUserIdentifier, + user: CommunicationIdentifier, **kwargs ) -> None: """Remove a participant from a thread. :param user: Required. User identity of the thread participant to remove from the thread. - :type user: ~azure.communication.chat.CommunicationUserIdentifier + :type user: ~azure.communication.chat.CommunicationIdentifier :return: None :rtype: None :raises: ~azure.core.exceptions.HttpResponseError, ValueError
[ChatThreadClient->[__aexit__->[__aexit__],close->[close],send_typing_notification->[send_typing_notification],__aenter__->[__aenter__]]]
Remove a participant from a thread.
This argument also needs to be renamed to `identifier` @juancamilor @sarkar-rajarshi
@@ -40,7 +40,7 @@ import java.util.Queue; * is recommended to use {@link Unpooled#wrappedBuffer(ByteBuf...)} * instead of calling the constructor explicitly. */ -final class DefaultCompositeByteBuf extends AbstractByteBuf implements CompositeByteBuf, Unsafe { +public class DefaultCompositeByteBuf extends AbstractByteBuf implements CompositeByteBuf, Unsafe { private static final ByteBuffer[] EMPTY_NIOBUFFERS = new ByteBuffer[0];
[DefaultCompositeByteBuf->[toByteIndex->[checkComponentIndex],getLong->[getLong,getInt,order],resetWriterIndex->[resetWriterIndex],getBytes->[toComponentIndex,capacity,getBytes],removeComponents->[checkComponentIndex,updateComponentOffsets],setInt->[setShort,setInt,order],removeComponent->[checkComponentIndex,updateComponentOffsets],setIndex->[setIndex],resetReaderIndex->[resetReaderIndex],isDirect->[isDirect],toNioBuffer->[nioBuffer,hasNioBuffer],writerIndex->[writerIndex],writeByte->[writeByte],writeInt->[writeInt],writeZero->[writeZero],component->[checkComponentIndex],getInt->[getInt,order,getShort],hasArray->[hasArray],getByte->[getByte],copyTo->[capacity,getBytes],nioBuffer->[nioBuffer],writeBoolean->[writeBoolean],writeLong->[writeLong],hasNioBuffer->[hasNioBuffer],copy->[toComponentIndex,capacity],discardReadBytes->[toComponentIndex,capacity,updateComponentOffsets],internalNioBuffer->[internalNioBuffer],writeShort->[writeShort],free->[freeIfNecessary],capacity->[addComponent0,capacity,consolidateIfNeeded],ensureWritableBytes->[ensureWritableBytes],readBytes->[readBytes],addComponents->[addComponents0],getUnsignedMedium->[getByte,getShort,order,getUnsignedMedium],consolidate->[checkComponentIndex,numComponents,updateComponentOffsets],markReaderIndex->[markReaderIndex],discardReadComponents->[toComponentIndex,capacity,updateComponentOffsets],clear->[clear],setZero->[setZero],discardSomeReadBytes->[discardReadComponents],setShort->[setShort,setByte,order],setMedium->[setShort,setMedium,order,setByte],setBoolean->[setBoolean],writeChar->[writeChar],nioBuffers->[toComponentIndex,capacity,nioBuffers,readerIndex],skipBytes->[skipBytes],setFloat->[setFloat],readerIndex->[readerIndex],setByte->[setByte],copiedNioBuffer->[order],arrayOffset->[arrayOffset],markWriterIndex->[markWriterIndex],internalNioBuffers->[internalNioBuffer],setDouble->[setDouble],writeFloat->[writeFloat],writeBytes->[writeBytes],array->[array],findComponent->[capacity],setChar->[setChar],setBytes
->[toComponentIndex,capacity,setBytes],writeMedium->[writeMedium],addComponents0->[addComponent0,addComponents0],toComponentIndex->[capacity],resumeIntermediaryDeallocations->[free],setLong->[order,setInt,setLong],getShort->[getByte,order,getShort],writeDouble->[writeDouble],toString->[toString],iterator->[iterator]]]
Creates a new instance of a CompositeByteBuf which shows multiple buffers as a single merged buffer region IVectorizer Implementation.
Why not keep the final here ?
@@ -2097,10 +2097,15 @@ func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSett } } - return describeServiceAccount(serviceAccount, tokens, missingSecrets) + var events *api.EventList + if describerSettings.ShowEvents { + events, _ = d.Core().Events(namespace).Search(api.Scheme, serviceAccount) + } + + return describeServiceAccount(serviceAccount, tokens, missingSecrets, events) } -func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String) (string, error) { +func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret, missingSecrets sets.String, events *api.EventList) (string, error) { return tabbedString(func(out io.Writer) error { w := NewPrefixWriter(out) w.Write(LEVEL_0, "Name:\t%s\n", serviceAccount.Name)
[describeIngress->[describeBackend,Write],Describe->[Write],DescribeObject->[Describe],WriteLine,Write]
Describe returns a description of a service account.
@openshift/cli-review do the proposed changes to this file look reasonable?
@@ -38,6 +38,7 @@ func resourceAwsDefaultSubnet() *schema.Resource { // map_public_ip_on_launch is a computed value for Default Subnets dsubnet.Schema["map_public_ip_on_launch"] = &schema.Schema{ Type: schema.TypeBool, + Optional: true, Computed: true, } // assign_ipv6_address_on_creation is a computed value for Default Subnets
[Printf,StringValue,StringSlice,String,Errorf,DescribeSubnets,SetId,Get]
This function is exported for use by the resource code generation. Filters returns a list of Filters that can be applied to a resource s .
For backwards compatibility I don't set the` Default: false` flag here as is done for the equivalent attribute in the `aws_subnet` resource.
@@ -351,7 +351,8 @@ def diagnose_configurator_problem(cfg_type, requested, plugins): msg = "The requested {0} plugin does not appear to be installed".format(requested) else: msg = ("The {0} plugin is not working; there may be problems with " - "your existing configuration").format(requested) + "your existing configuration.\nThe error was: {1}" + .format(requested, `plugins[requested].problem`)) elif cfg_type == "installer": if os.path.exists("/etc/debian_version"): # Debian... installers are at least possible
[_plugins_parsing->[add,add_group,add_plugin_args],_auth_from_domains->[_report_new_cert,_treat_as_renewal],revoke->[revoke,_determine_account],auth->[_auth_from_domains,_find_domains,_report_new_cert,choose_configurator_plugins,_init_le_client],_create_subparsers->[add_subparser,add,add_group,flag_default],setup_logging->[setup_log_file_handler],_treat_as_renewal->[_find_duplicative_certs],SilentParser->[add_argument->[add_argument]],HelpfulArgumentParser->[add_plugin_args->[add_group],add->[add_argument],__init__->[SilentParser,flag_default]],install->[_init_le_client,_find_domains,choose_configurator_plugins],run->[_init_le_client,_auth_from_domains,_find_domains,choose_configurator_plugins],create_parser->[HelpfulArgumentParser,flag_default,add,config_help,add_group],main->[setup_logging,create_parser],choose_configurator_plugins->[set_configurator,diagnose_configurator_problem],_paths_parser->[config_help,add,add_group,flag_default],_init_le_client->[_determine_account],rollback->[rollback],main]
Diagnose a problem with a configuration that is not available in plugins.
Please do not use backticks! `{1!r}` should do it.
@@ -2,4 +2,9 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ -__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore +from ._access_control_client import KeyVaultAccessControlClient +from ._internal.client_base import ApiVersion +from ._models import KeyVaultPermission, RoleAssignment, RoleDefinition + + +__all__ = ["ApiVersion", "KeyVaultAccessControlClient", "KeyVaultPermission", "RoleAssignment", "RoleDefinition"]
[__import__]
Creates an instance of .
Regrading `ApiVersion`, is this the standard in Python? In JavaScript we are standardizing on `serviceVersion`.
@@ -648,10 +648,7 @@ class RealWinFixture { } else { // The anonymous class parameter allows us to detect native classes // vs transpiled classes. - installCustomElements( - win, - NATIVE_CUSTOM_ELEMENTS_V1 ? class {} : undefined - ); + installCustomElements(win, class {}); } // Intercept event listeners
[SandboxFixture->[defineProperty_->[constructor]],constructor]
Setup the test iframe Returns a sequence of bytes that can be used to create a new object.
We always transpile our testing code right? Did we try running our test suite against the native CE code path?
@@ -88,5 +88,14 @@ public interface Cache result = 31 * result + Arrays.hashCode(key); return result; } + + @Override + public String toString() + { + return "NamedKey{" + + "namespace='" + namespace + '\'' + + ", key=" + new String(key) + + '}'; + } } }
[NamedKey->[hashCode->[hashCode],equals->[equals]]]
Returns hashCode of .
you don't want to convert the key to string, they can contain arbitrary binary data. We would end up with gibberish in our logs. If you really need a toString() method, maybe just indicate the size of the byte array, but don't print out the data.
@@ -270,12 +270,6 @@ func newCmd() *cobra.Command { Args: cobra.ExactArgs(2), } - sqitchDeployCmd.Flags().StringVarP( - &opts.Sqitch.User, - "user", "i", "automate", - "User to connect with when running sqitch", - ) - fixPermsCmd := &cobra.Command{ Use: "fix-permissions DBNAME [ROLENAME]", Short: "Change owner of all tables in the public and sqitch schemas to the given role name",
[GetService,WithTLSKeyPath,Warn,Message,ExactArgs,GetKeyPath,Duration,SetConfigFile,Setenv,GetIp,Wrap,Info,RenameDB,Exit,ConfigFromEnvironment,AlterRole,WithField,GetTls,Error,MinimumNArgs,WithTLSCertPath,New,StringVarP,GetPort,CreateDB,NewClient,WithTLSRootCAPath,WithPort,Details,AddCommand,PGServiceUser,WithHost,GetCertPath,DeploySqitch,Flags,Execute,ExitCode,SetPublicSchemaRole,PersistentFlags,GetPgSidecar,Convert,DropTables,Println,WithTimeout,RangeArgs,ReadInConfig,WithError,Unmarshal,GetCfg,IntVarP,Background,MigrateTables,CreateExtension,Fatal,SetLevel,BoolVarP,GetRootCaPath,OnInitialize,BoolVar]
skip - db - creation - n - fail - if - src - missing - fail - Alter - roleHELP is a command to alter a role.
As far as I can tell, this feature would not have actually worked but we didn't notice it because we always call this with the default argument. To avoid confusion, I've removed this option and made it an error on the API side to pass anything but the default user "automate".
@@ -40,11 +40,6 @@ def groupuser_post_save(sender, instance, **kw): return amo.log(amo.LOG.GROUP_USER_ADDED, instance.group, instance.user) - - if (instance.user.user and - instance.user.groups.filter(rules='*:*').exists()): - instance.user.user.is_superuser = instance.user.user.is_staff = True - instance.user.user.save() log.info('Added %s to %s' % (instance.user, instance.group))
[GroupUser->[ForeignKey],Group->[CharField,TextField,ManyToManyField],groupuser_post_save->[get,log,filter,info,save],groupuser_post_delete->[get,log,filter,info,save],getLogger,receiver]
Add group user to group group.
this method is not doing anything anymore, is it?
@@ -1,11 +1,11 @@ # -*- encoding : utf-8 -*- # This file should contain all the record creation needed to seed the database with its default values. -# The data can then be loaded with the rake db:seed (or created alongside the db with db:setup). +# The data can then be loaded with the rails db:seed command (or created alongside the database with db:setup). # # Examples: # -# cities = City.create([{ :name => 'Chicago' }, { :name => 'Copenhagen' }]) -# Mayor.create(:name => 'Daley', :city => cities.first) +# movies = Movie.create([{ name: 'Star Wars' }, { name: 'Lord of the Rings' }]) +# Character.create(name: 'Luke', movie: movies.first) include AlaveteliFeatures::Helpers if Role.where(:name => 'admin').empty?
[create,include,empty?,feature_enabled?,each]
This file should contain all the records needed to seed the database with default values.
Line is too long. [81/80]
@@ -2459,10 +2459,8 @@ public class Queue extends ResourceController implements Saveable { return CauseOfBlockage.fromMessage(Messages._Queue_BlockedBy(r.getDisplayName())); } - for (QueueTaskDispatcher d : QueueTaskDispatcher.all()) { - CauseOfBlockage cause = d.canRun(this); - if (cause != null) - return cause; + if (causeOfBlockage != null) { + return causeOfBlockage; } return task.getCauseOfBlockage();
[Queue->[contains->[getItem],getApproximateItemsQuickly->[getItems],BuildableItem->[leave->[all],enter->[all,add],isStuck->[getEstimatedDuration,getAssignedLabel],getCauseOfBlockage->[isEmpty,getAssignedLabel,isBlockedByShutdown]],isBuildBlocked->[isBuildBlocked],getItems->[add],withLock->[call],BuildableRunnable->[run->[enter]],isEmpty->[isEmpty],LockedJUCCallable->[call->[withLock]],Item->[getCausesDescription->[getCauses,toString],getAssignedLabelFor->[getAssignedLabel],getWhy->[getCauseOfBlockage],doCancelQueue->[cancel],getCauses->[getCauses],getAssignedLabel->[getAssignedLabel],getParams->[toString],hasCancelPermission->[hasAbortPermission],toString->[getName],cancel->[enter,leave],authenticate->[getDefaultAuthentication,authenticate]],_getBuildableItems->[getNode,canTake,add],countBuildableItems->[countBuildableItemsFor],getUnblockedTasks->[getUnblockedItems,add],onStartExecuting->[updateSnapshot],LockedRunnable->[run->[withLock]],getEstimatedDuration->[getEstimatedDuration],getDefaultAuthentication->[getDefaultAuthentication],load->[set],schedule2->[schedule2],BlockedItem->[enter->[all,add],leave->[all],getCauseOfBlockage->[all,getCauseOfBlockage,getDisplayName]],WaitingItem->[enter->[all,add],compareTo->[getId,compareTo],leave->[all]],liveGetItems->[isEmpty,add],init->[load],makeFlyWeightTaskBuildable->[contains,canTake],save->[State],doCancelItem->[cancel],cancel->[cancel,updateSnapshot],JobOffer->[getNode->[getNode],getCauseOfBlockage->[canTake]],_withLock->[call],makePending->[add],schedule->[schedule],maintain->[isBuildBlocked,allowNewBuildableTask,peek,getCauseOfBlockage,add,clear,JobOffer,isEmpty,updateSnapshot],ItemList->[cancelAll->[cancel,clear],containsKey->[get],getAll->[add],remove->[remove],put->[add],cancel->[cancel,get]],StubTask->[getName],LockedHRCallable->[call->[withLock],checkRoles->[checkRoles]],scheduleInternal->[add],add->[schedule],checkPermissionsAndAddToList->[add],filterDiscoverableItemListBasedOnPermissions->[add],MaintainTask->[
doRun->[cancel,maintain]],LeftItem->[enter->[all,getId],getExecutable->[getExecutable]],getName]]
get cause of blocked task.
Fine for me. Probably it could be checked even before calling `getBlockingActivity()` to speedup it even more. Though it needs more feedback from @jenkinsci/code-reviewers
@@ -126,6 +126,11 @@ func (c *MasterConfig) RunScheduler() { glog.Infof("Started Kubernetes Scheduler") } +func (c *MasterConfig) RunResourceQuotaManager() { + resourceQuotaManager := resourcequota.NewResourceQuotaManager(c.KubeClient) + resourceQuotaManager.Run(10 * time.Second) +} + func (c *MasterConfig) RunMinionController() { nodeResources := &kapi.NodeResources{ Capacity: kapi.ResourceList{
[RunMinionController->[Infof,NewKubeletClient,NewQuantity,NewNodeController,Fatalf,NewMilliQuantity,Run],RunReplicationController->[Infof,Run,NewReplicationManager],InstallAPI->[Sprintf,New,NewKubeletClient,Fatalf],EnsurePortalFlags->[Fatal],RunEndpointController->[Forever,NewEndpointController,Infof,SyncServiceEndpoints],RunScheduler->[Infof,NewConfigFactory,New,Fatalf,CreateFromProvider,Run]]
RunScheduler starts the scheduler.
If this is slower, is that an issue?
@@ -146,14 +146,10 @@ func (c *ImageStreamController) getByKey(key string) (*imageapi.ImageStream, err } stream, err := c.lister.ImageStreams(namespace).Get(name) if apierrs.IsNotFound(err) { + // TODO: this is not normal and should be refactored return nil, nil } - if err != nil { - glog.Infof("Unable to retrieve image stream %q from store: %v", key, err) - return nil, err - } - - return stream, nil + return stream, err } // tagImportable is true if the given TagReference is importable by this controller
[addImageStream->[enqueueImageStream],worker->[processNextWorkItem],getByKey->[ImageStreams,Infof,SplitMetaNamespaceKey,IsNotFound,Get],enqueueImageStream->[HandleError,Errorf,Add,KeyFunc],updateImageStream->[enqueueImageStream],Run->[Until,Infof,HandleCrash,Errorf,WaitForCacheSync,ShutDown,HandleError],processNextWorkItem->[AddRateLimited,Infof,getByKey,V,Forget,Errorf,Get,HandleError,Done],ImageStreams,Infof,LatestObservedTagGeneration,New,V,Import,IsStatusErrorKind,IsNotFound,Importing]
getByKey returns an ImageStream from the store for the given key.
Well here's your problem. Issue to do a more thorough refactor please.
@@ -13,10 +13,12 @@ #endif #if (defined(BYTE_ORDER)&&(BYTE_ORDER == BIG_ENDIAN)) || \ + (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) || \ (defined(_BIG_ENDIAN)&&defined(__SVR4)&&defined(__sun)) #define _le32toh(x) __builtin_bswap32(x) #define _htole32(x) __builtin_bswap32(x) #elif (defined(BYTE_ORDER)&&(BYTE_ORDER == LITTLE_ENDIAN)) || \ + (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \ (defined(_LITTLE_ENDIAN)&&defined(__SVR4)&&defined(__sun)) #define _le32toh(x) (x) #define _htole32(x) (x)
[grow_size->[size_idx],fit_size->[size_idx],HashIndex->[fit_size,get_lower_limit,get_upper_limit],int->[grow_size,shrink_size],shrink_size->[size_idx]]
Create a new object of type HashHeader with the given name. This function is used to generate a hash function based on the hash function of the given values.
keep the tight style (no blanks around &&) and do the style change globally later?
@@ -2636,7 +2636,7 @@ evt_common_insert(struct evt_context *tcx, struct evt_node *nd, ne = evt_node_entry_at(tcx, nd, i); evt_rect_write(&df_tmp, &ent->ei_rect); - rc = cb(tcx, mbr, &ne->ne_rect, &df_tmp); + rc = cb(tcx, nd, &ne->ne_rect, &df_tmp); if (rc < 0) continue;
[No CFG could be retrieved]
Creates a new event tree inplace of the root. Debug output for a single node of a tree.
(style) 'nd' may be misspelled - perhaps 'and'?
@@ -54,9 +54,14 @@ public class UnaryClientStream extends AbstractClientStream implements Stream { public void doOnComplete(OperationHandler handler) { execute(() -> { try { - final Object resp = deserializeResponse(getData()); + AppResponse result; + if (!"void".equals(getMethodDescriptor().getReturnClass().getName())) { + final Object resp = deserializeResponse(getData()); + result = new AppResponse(resp); + } else { + result = new AppResponse(); + } Response response = new Response(getRequest().getId(), TripleConstant.TRI_VERSION); - final AppResponse result = new AppResponse(resp); result.setObjectAttachments(parseMetadataToAttachmentMap(getTrailers())); response.setResult(result); DefaultFuture2.received(getConnection(), response);
[UnaryClientStream->[UnaryClientTransportObserver->[doOnComplete->[parseMetadataToAttachmentMap,deserializeResponse,onError,AppResponse,getData,withDescription,setResult,getTrailers,getConnection,getId,received,Response,setObjectAttachments,execute],getThrowable->[TripleRpcException,getMessage,decodeASCIIByte,getStackEntriesList,warn,getMultipleSerialization,getSerializeType,getDetailsList,getUrl,switchContextLoader,unWrapException,getStackFrameString,unpack,format,getContextClassLoader,getCode,get,contains,getHeader,tranFromStatusDetails],onError->[getThrowable,parseMetadataToAttachmentMap,AppResponse,setResult,getTrailers,hasException,setException,getConnection,getId,toDubboStatus,received,setErrorMessage,setStatus,Response,setObjectAttachments]],createTransportObserver->[UnaryClientTransportObserver],createStreamObserver->[ClientStreamObserver]]]
Handle the response of the request.
i think string "void" is not good consider change to `Void.TYPE`
@@ -430,12 +430,10 @@ static void put_packet(unsigned char *buffer) } while (get_debug_char() != '+'); } - static void gdb_log_exception(char *message) { while (*message) put_exception_char(*message++); - } /* Convert the memory pointed to by mem into hex, placing result in buf.
[gdb_handle_exception->[gdb_parser,gdb_log_exception],gdb_init->[init_buffers],int->[get_hex],get_packet->[get_debug_char,get_hex,put_debug_char],inline->[arch_gdb_single_step,hex_to_int,get_packet,hex_to_mem,gdb_log_exception,gdb_debug_info,strcpy,arch_gdb_read_sr,mem_to_hex,arch_gdb_write_sr,put_packet],char->[dcache_writeback_region,arch_gdb_load_from_memory,get_hex,arch_gdb_memory_load_and_store],void->[put_debug_char,put_exception_char,get_debug_char]]
This function is used to put a packet into the buffer.
@IulianOlaru249 I think this is fine for now. Anyhow notice that beside adding spaces around the operators you cleanup some newlines. Ideally this should be a separate patch in the same Pull Request. Please keep that in mind for the next patches. One patch should handle just one problem at a time.
@@ -111,7 +111,11 @@ class CriteriaController < ApplicationController def upload_yml assignment = Assignment.find(params[:assignment_id]) - + if assignment.released_marks.any? + flash_message(:error, t('criteria.errors.messages.released_marks')) + redirect_to action: 'index', id: assignment.id + return + end # Check for errors in the request or in the file uploaded. unless request.post? redirect_to action: 'index', id: assignment.id
[CriteriaController->[destroy->[destroy],update_positions->[update],create->[new],update->[create,update]]]
Upload a single neccesary node in the DB and create a new node in the.
Style/StringLiterals: Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
@@ -147,6 +147,15 @@ func searchImageInRegistry(term string, registry string, options SearchOptions) // every types.SystemContext, and to compute the value just once in one // place. sc.SystemRegistriesConfPath = sysreg.SystemRegistriesConfPath() + if options.ListTags { + results, err := searchRepositoryTags(registry, term, sc, options) + if err != nil { + logrus.Errorf("error listing registry tags %q: %v", registry, err) + return []SearchResult{} + } + return results + } + results, err := docker.SearchRegistry(context.TODO(), sc, registry, term, limit) if err != nil { logrus.Errorf("error searching registry %q: %v", registry, err)
[matchesOfficialFilter,Acquire,Add,Done,Atoi,SearchRegistry,Errorf,Wait,SplitN,Release,Wrapf,Join,Contains,GetRegistries,SystemRegistriesConfPath,Split,matchesStarFilter,Background,NewWeighted,Replace,TODO,matchesAutomatedFilter]
getRegistries returns a list of registries to search based on a term and a list limit - Number of results to return.
Can we break this out into a new function?
@@ -43,8 +43,8 @@ const defaultCrossBuildTarget = "golangCrossBuild" // See NewPlatformList for details about platform filtering expressions. var Platforms = BuildPlatforms.Defaults() -// Types is the list of package types -var SelectedPackageTypes []PackageType +// SelectedPackageTypes is the list of package types +var SelectedPackageTypes []PackageType = []PackageType{TarGz} func init() { // Allow overriding via PLATFORMS.
[Build->[Printf,Getuid,Itoa,Join,ImageSelector,Sprintf,ToSlash,Wrap,Rel,Getgid,RunCmd,Verbose],Now,GOOS,Merge,Wrap,HasPrefix,UnmarshalText,RunWith,Atoi,Walk,Filter,UID,Download,New,CanCrossBuild,Errorf,Sub,Wrapf,Join,GID,Deps,Chown,Split,Printf,Build,Println,Defaults,Getenv]
Initialization function for the crossbuild package. ImageSelectorFunc returns a function that can be used to filter the image name by the given.
windows does not have targz. careful and test on windows vm for sure
@@ -118,6 +118,12 @@ class PotentialFlowSolver(FluidSolver): def AddDofs(self): KratosMultiphysics.VariableUtils().AddDof(KCPFApp.VELOCITY_POTENTIAL, self.main_model_part) KratosMultiphysics.VariableUtils().AddDof(KCPFApp.AUXILIARY_VELOCITY_POTENTIAL, self.main_model_part) + KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.PRESSURE, self.main_model_part) + KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_X, self.main_model_part) + KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_Y, self.main_model_part) + KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.VELOCITY_Z, self.main_model_part) + + KratosMultiphysics.Logger.PrintInfo("PotentialFlowSolver", "Fluid solver DOFs added correctly.") def Initialize(self): time_scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme()
[PotentialFlowSolver->[__init__->[PotentialFlowFormulation]]]
Adds DOFs to the model.
Do you need to define this Variables in the potential_flow_solver.py? In which cases are you using them? Maybe you can define them in your ale_potential_flow_solver.py instead?
@@ -2182,6 +2182,12 @@ describe('jqLite', function() { span.after('abc'); expect(root.html().toLowerCase()).toEqual('<span></span>abc'); }); + + + it('should not throw when the element has no parent', function() { + var span = jqLite('<span></span>'); + expect(function() { span.after('abc'); }).not.toThrow(); + }); });
[No CFG could be retrieved]
JS - Dynamics - like functions Checks that the element has a parent and has length of 0.
Can you add an expect that the span is still the span?
@@ -701,7 +701,12 @@ define([ var color = queryColorValue(node, 'color', namespaces.kml); var iconNode = queryFirstNode(node, 'Icon', namespaces.kml); - var icon = getIconHref(iconNode, dataSource, sourceUri, uriResolver, false, query); + var icon; + if (!defined(iconNode)) { + icon = dataSource._pinBuilder.fromColor(Color.YELLOW, 64); + } else { + icon = getIconHref(iconNode, dataSource, sourceUri, uriResolver, false, query); + } var x = queryNumericValue(iconNode, 'x', namespaces.gx); var y = queryNumericValue(iconNode, 'y', namespaces.gx); var w = queryNumericValue(iconNode, 'w', namespaces.gx);
[No CFG could be retrieved]
Get the icon id from the link. Link to a Billboard icon.
What situation does this address that isn't addressed below?
@@ -360,8 +360,8 @@ class ChallengeResource(Resource): @property def uri(self): # pylint: disable=missing-docstring,no-self-argument # bug? 'method already defined line None' - # pylint: disable=function-redefined - return self.body.uri # pylint: disable=no-member + # pylint: disable=function-redefined,protected-access + return self.body._url or self.body._uri # pylint: disable=no-member class Authorization(ResourceBody):
[ChallengeBody->[fields_from_json->[from_json],to_partial_json->[to_partial_json]],Authorization->[challenges->[from_json]],NewRegistration->[Resource],Directory->[from_json->[from_json],__getitem__->[_canon_key]],Registration->[phones->[_filter_contact],emails->[_filter_contact]],UpdateRegistration->[Resource],Revocation->[Resource],NewAuthorization->[Resource],CertificateRequest->[Resource],Status,IdentifierType]
Return the URI of the method.
Can't we just use `self.body.uri` here because of the property in `ChallengeBody` above?
@@ -173,14 +173,14 @@ class TFETensorTest(test_util.TensorFlowTestCase): self.assertIn("id=%d, shape=%s, dtype=%s, numpy=\n%r" % (t._id, t.shape, t.dtype.name, t.numpy()), tensor_repr) - def testTensorStrReprObeyNumpyPrintOptions(self): + def disabled_testTensorStrReprObeyNumpyPrintOptions(self): orig_threshold = np.get_printoptions()["threshold"] orig_edgeitems = np.get_printoptions()["edgeitems"] np.set_printoptions(threshold=2, edgeitems=1) t = _create_tensor(np.arange(10, dtype=np.int32)) - self.assertIn("[0 ..., 9]", str(t)) - self.assertIn("[0, ..., 9]", repr(t)) + self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t))) + self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t))) # Clean up: reset to previous printoptions. np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems)
[TFETensorTest->[testNumpyValue->[_create_tensor],testNumpyOrderHandling->[_create_tensor],testTensorCreationFailure->[_create_tensor],testTensorAndNumpyMatrix->[_create_tensor],testIntDowncast->[_create_tensor],testZeroDimTensorStr->[_create_tensor],testScalarTensor->[_create_tensor],testMultiLineTensorRepr->[_create_tensor],testTensorStrReprObeyNumpyPrintOptions->[_create_tensor],testZeroDimTensorRepr->[_create_tensor],testFloatDowncast->[_create_tensor],testBool->[_create_tensor],testZeroSizeTensorRepr->[_create_tensor],testStringTensor->[_create_tensor],testIterateOverTensor->[_create_tensor],testStringTensorOnGPU->[_create_tensor],testNumpyValueWithCast->[_create_tensor],testMultiLineTensorStr->[_create_tensor],testZeroSizeTensorStr->[_create_tensor]],TFETensorUtilTest->[testTensorListContainsNonTensors->[_create_tensor],testSliceDimOutOfRange->[_create_tensor],testNegativeSliceDim->[_create_tensor],testTensorListNotList->[_create_tensor],testListOfThree->[_create_tensor]]]
Test for the problem of the MultiLineTensorRepr class.
Is keeping this change the right move?
@@ -197,7 +197,7 @@ class ProjMixin(object): if ch_type is None: ch_type = [ch for ch in ['meg', 'eeg'] if ch in self] elif isinstance(ch_type, str): - ch_type = [ch_type] + ch_type = [ch_type] for ch in ch_type: if ch in self: layout.append(find_layout(self.info, ch, exclude=[]))
[ProjMixin->[plot_projs_topomap->[plot_projs_topomap]],_read_proj->[Projection],make_eeg_average_ref_proj->[Projection],setup_proj->[make_eeg_average_ref_proj,make_projector_info,_has_eeg_average_ref_proj,activate_proj],_uniquify_projs->[proj_equal],make_projector_info->[make_projector]]
Plots the SSP topomap of the sensor topography.
@wmvanvliet for python 3 compat it would be good to use six string_types here.
@@ -22,6 +22,7 @@ static DH *dh_param_init(const BIGNUM *p, int32_t nbits) dh->p = (BIGNUM *)p; dh->g = (BIGNUM *)&_bignum_const_2; dh->length = nbits; + dh->dirty_cnt++; return dh; }
[DH_get_nid->[BN_cmp,BN_get_word,BN_rshift1,BN_free,BN_dup],DH_new_by_nid->[DHerr,dh_param_init],DH->[DH_new]]
This function initializes DH parameters based on the parameter NID.
Strictly speaking this is not necessary since we only just created the DH object.
@@ -169,6 +169,9 @@ exports.rules = [ { filesMatching: '{src,extensions}/**/*.js', mustNotDependOn: '3p/**/*.js', + allowlist: [ + 'src/inabox/inabox-iframe-messaging-client.js->3p/iframe-messaging-client.js', + ], }, // Rules for extensions.
[No CFG could be retrieved]
Dependency rules restricting which JS files under src/ and extensions/ may depend on 3p/ code, with an allowlist exception for the iframe messaging client.
curious about the size of the intersection-observer-polyfill extension.
@@ -2052,8 +2052,12 @@ module Engine end end - def next_sr_position(entity) - player_order = @round.current_entity&.player? ? @round.pass_order : @players + def next_sr_position(entity, order) + player_order = if @round.current_entity&.player? + order == :first_to_pass ? @round.pass_order : [] + else + @players + end player_order.reject(&:bankrupt).index(entity) end
[Base->[end_game!->[format_currency],current_entity->[current_entity],log_cost_discount->[format_currency],float_corporation->[format_currency],after_par->[format_currency,all_companies_with_ability],remove_train->[remove_train],init_hexes->[abilities],token_string->[count_available_tokens],place_home_token->[home_token_locations],initialize_actions->[filtered_actions],rust_trains!->[rust,rust?],close_corporation->[current_entity],liquidity->[liquidity],round_description->[total_rounds],crowded_corps->[train_limit],total_emr_buying_power->[emergency_issuable_cash,liquidity],trains->[trains],initialize->[init_optional_rules],close_companies_on_train!->[abilities],rust->[remove_train],process_single_action->[setup,process_action],init_company_abilities->[shares,abilities],action_processed->[close_corporation],buy_train->[trains],payout_companies->[format_currency],train_limit->[train_limit],ability_right_time?->[current_entity],log_share_price->[format_currency],event_close_companies!->[abilities],active_step->[active_step],check_programmed_actions->[player_log]],load->[load]]
Returns the entity's position in the next stock round's player order.
The game class knows the order, so you don't need to pass it in. Call next_sr_player_order directly from here.
@@ -18,6 +18,8 @@ from .patch_sites import patch_contrib_sites patch_contrib_sites() +DEFAULT_LIMIT_QUANTITY_PER_CHECKOUT: Final[int] = 50 + def email_sender_name_validators(): return [
[SiteSettingsTranslation->[__repr__->[type],ForeignKey,CharField],email_sender_name_validators->[RegexValidator,MaxLengthValidator],SiteSettings->[default_from_email->[str,ImproperlyConfigured,Address,parseaddr],CharField,PositiveIntegerField,email_sender_name_validators,EmailField,TranslationProxy,ForeignKey,BooleanField,OneToOneField,IntegerField],patch_contrib_sites]
Email sender name validators.
Shouldn't that be in `settings.py` file?
@@ -60,8 +60,10 @@ export default class TableData extends Dataset { columnNames.push({name: col, index: j, aggr: 'sum'}); } else { let valueOfCol; - if (!isNaN(valueOfCol = parseFloat(col)) && isFinite(col)) { - col = valueOfCol; + if (!(col[0] === '0' || col.length >= 7)) { + if (!isNaN(valueOfCol = parseFloat(col)) && isFinite(col)) { + col = valueOfCol; + } } cols.push(col); cols2.push({key: (columnNames[i]) ? columnNames[i].name : undefined, value: col});
[No CFG could be retrieved]
Table header.
is there a way to avoid hardcoding 7?
@@ -26,11 +26,11 @@ class TileSource(PlotObject): the maximum zoom level for the tile layer. This is the most "zoomed-in" level. """) - x_origin_offset = Float(default=20037508.34, help=""" + x_origin_offset = Float(default=None, help=""" x offset in plot coordinates """) - y_origin_offset = Float(default=20037508.34, help=""" + y_origin_offset = Float(default=None, help=""" y offset in plot coordinates """)
[TileSource->[Any,Float,Int,Dict,String]]
Base class for all tile source models, declaring their common properties.
You are setting x_origin_offset to the 20037508.34 number in coffeescript ... why None in Python? similarly for some of the other properties.
@@ -226,7 +226,9 @@ func (rm *resmon) Invoke(ctx context.Context, req *lumirpc.InvokeRequest) (*lumi tok := tokens.ModuleMember(req.GetTok()) prov, err := rm.src.plugctx.Host.Provider(tok.Package()) if err != nil { - return nil, errors.Wrapf(err, "failed to load resource provider for %v", tok) + return nil, err + } else if prov == nil { + return nil, errors.Errorf("could not load resource provider for package '%v' from $PATH", tok.Package()) } // Now unpack all of the arguments and prepare to perform the invocation.
[Invoke->[Package,Wrapf,GetArgs,Invoke,UnmarshalProperties,Provider,GetTok,V,ModuleMember,MarshalProperties,Info],NewResource->[GetChildren,GetType,Infof,UnmarshalProperties,GetObject,NewGoal,GetName,V,QName,Synthesized,MarshalProperties,Type,GetCustom,URN],Next->[Assert,V,Resource,Infof],forkRun->[IgnoreClose,Wrapf,Errorf,LanguageRuntime,Address,Run],Close->[Cancel],Iterate->[Wrap,forkRun],Sprintf,RegisterResourceMonitorServer,Serve]
Invoke invokes the resource provider and returns the return value and any errors that occur.
Just wondering, why the removal of `Wrapf`? Not useful?
@@ -1303,6 +1303,8 @@ class VirtualMachine < VCenterDriver::Template :port => port) end + # grab the last unitNumber to ensure the nic to be added at the end + @unic = @unic || get_vcenter_nics.map{|d| d.unitNumber}.max rescue 0 card_spec = { :key => 0, :deviceInfo => {
[VmmImporter->[request_vnc->[extraconfig_vnc,one_item,info],import->[one_item,id,new_without_id,request_vnc,host]],VirtualMachine->[find_free_ide_controller->[device,key],Disk->[prefix->[exists?],ds->[exists?],type->[exists?],destroy->[is_cd?,exists?,path,ds],boot_dev->[is_cd?,key],config->[exists?],is_cd?->[exists?],node->[exists?],device->[exists?],path->[exists?],key->[exists?]],create_storagedrs_disks->[key],calculate_add_disk_spec->[key,get_effective_ds],disks_synced?->[disks_each],disk->[query_disk,one_disk],convert_to_template->[calculate_add_nic_spec_autogenerate_mac],attach_disks_specs->[id,storpod?,disks_each,one_item],set_boot_order->[exists?,boot_dev],new_from_clone->[clone_vm],attach_nic->[calculate_add_nic_spec],poweron->[wait_timeout],get_ds->[get,one_item],new_from_ref->[get_vm],resize_disk->[set_size,id,config,disk,get_size,exists?],detach_disks_specs->[type,config,device,path,disks_each,key],shutdown->[wait_timeout],resize_unmanaged_disks->[new_size,managed?,disks_each,config],clone_vm->[fetch!],calculate_add_nic_spec->[key],query_disk->[disk_real_path],sync->[set_boot_order],find_snapshot_in_list->[find_snapshot_in_list],one_item->[one_item],get_vm->[one_item],destroy_disk->[exists?,managed?,detach_disk,destroy],info_disks->[query_disk,vc_disk,one_disk],sync_disks->[create_storagedrs_disks],target_ds_ref->[one_item],storagepod_clonevm_task->[fetch!,key],host->[one_item],attach_disk->[key],find_free_controller->[device,key],add_new_scsi->[key],new_with_item->[set_item],calculate_add_nic_spec_autogenerate_mac->[key],detach_disk->[exists?,managed?,device,config],Resource->[detached?->[one?],unsynced?->[synced?],synced?->[one?,exists?],no_exists?->[exists?]],new_one->[one_item],migrate_routine->[migrate,info]]]
calculate_add_nic_spec builds the vCenter device specification for attaching a new NIC to the virtual machine.
maybe we need this to fail with an error message instead of 'rescue 0'?
@@ -158,7 +158,7 @@ public class JavaDeclarationDelegateTestCase extends AbstractJavaExtensionDeclar private static final String GET_MEDICAL_HISTORY = "getMedicalHistory"; private static final String APPROVE_INVESTMENT = "approve"; private static final String IGNORED_OPERATION = "ignoredOperation"; - private static final String EXTENSION_VERSION = "4.0-SNAPSHOT"; + private static final String EXTENSION_VERSION = getProperty("maven_project_version"); private static final String OTHER_HEISENBERG = "OtherHeisenberg"; private static final String PROCESS_WEAPON = "processWeapon"; private static final String PROCESS_WEAPON_LIST = "processWeaponList";
[JavaDeclarationDelegateTestCase->[listOfString->[build],assertTransactional->[isTransactional,assertThat,is],minMuleVersionIsDescribedCorrectly->[getDeclaration,declareExtension,loaderFor,MuleVersion,getMinMuleVersion,setLoader,is,assertThat],messageOperationWithoutGenerics->[getDeclaration,declare,assertThat,getType,DefaultExtensionLoadingContext,is,instanceOf,getClassLoader,getOperation],assertConnected->[isRequiresConnection,assertThat,is],assertModelProperties->[get,notNullValue,getType,is,assertThat,isAssignableFrom],categoryDefaultValueIsDescribedCorrectly->[getDeclaration,declareExtension,loaderFor,getCategory,setLoader,is,assertThat],interceptingOperationWithoutAttributes->[getDeclaration,declareExtension,assertOutputType,build,loaderFor,getOutputAttributes,getOutput,notNullValue,toMetadataType,setLoader,getConfiguration,is,assertThat,getOperation],heisenbergWithOperationsConfig->[declare,getClassLoader,DefaultExtensionLoadingContext],assertTestModuleOperations->[arrayOf,size,assertTransactional,getType,hasSize,nullValue,getOperations,assertConnected,getOperation,map,getAllParameters,toMetadataType,empty,isEmpty,load,isPresent,objectTypeBuilder,equalTo,notNullValue,assertMessageType,assertOperation,is,assertParameter,build,get,createHandler,instanceOf,assertThat],setUp->[loaderFor,setLoader],interceptingOperationWithAttributes->[getDeclaration,declareExtension,assertOutputType,loaderFor,getOutputAttributes,getOutput,notNullValue,toMetadataType,setLoader,getConfiguration,is,assertThat,getOperation],heisenbergPointerPlusExternalConfig->[getDeclaration,size,getName,loaderFor,equalTo,get,notNullValue,getAllParameters,toMetadataType,hasSize,setLoader,is,assertParameter,assertThat,assertExtensionProperties],assertExtensionProperties->[getName,getDescription,notNullValue,is,assertThat,getVersion],heisenbergWithParameterGroupAsOptional->[declare,getClassLoader,DefaultExtensionLoadingContext],heisenbergWithRecursiveParameterGroup->[declare,getClassLoader,DefaultExte
nsionLoadingContext],heisenbergWithOperationPointingToExtension->[declare,getClassLoader,DefaultExtensionLoadingContext],minMuleVersionDefaultValueIsDescribedCorrectly->[getDeclaration,declareExtension,loaderFor,MuleVersion,getMinMuleVersion,setLoader,is,assertThat],listOfResultsOperation->[getDeclaration,declare,assertThat,assertMessageType,getType,DefaultExtensionLoadingContext,is,instanceOf,getClassLoader,getOperation,load],heisenbergWithOperationPointingToExtensionAndDefaultConfig->[declare,getClassLoader,DefaultExtensionLoadingContext],heisenbergPointer->[getDeclaration,assertTestModuleMessageSource,declareExtension,loaderFor,setLoader,assertTestModuleConfiguration,assertTestModuleOperations,assertModelProperties,assertTestModuleConnectionProviders,assertExtensionProperties],findDeclarationByName->[NoSuchElementException,orElseThrow],assertOperation->[getDescription,equalTo,notNullValue,is,assertThat,getOperation],assertTestModuleConfiguration->[listOfString,arrayOf,getName,build,objectTypeBuilder,get,equalTo,getAllParameters,toMetadataType,hasSize,getConfigurations,assertParameter,assertThat,id],assertParameter->[getName,getDescription,equalTo,notNullValue,getType,findParameter,is,getDefaultValue,isRequired,assertThat,getExpressionSupport],assertTestModuleConnectionProviders->[getName,get,getAllParameters,equalTo,toMetadataType,getType,hasSize,is,assertParameter,assertThat,getConnectionProviders],describeTestModule->[getDeclaration,assertTestModuleMessageSource,declareExtension,assertTestModuleConfiguration,assertTestModuleOperations,assertModelProperties,assertTestModuleConnectionProviders,assertExtensionProperties],assertTestModuleMessageSource->[getName,get,equalTo,assertMessageType,getAllParameters,getType,toMetadataType,hasSize,is,getMessageSources,instanceOf,assertParameter,assertThat,load],heisenbergWithMoreThanOneConfigInOperation->[declare,getClassLoader,DefaultExtensionLoadingContext],assertOutputType->[equalTo,hasDynamicType,getType,is,assertThat],f
lyweight->[getDeclaration,declareExtension,getSimpleName,assertThat,loaderFor,setLoader,is,getMessageSources,getConfigurations,getOperations,findDeclarationByName,sameInstance],listOfResultsOperationWithoutGenerics->[getDeclaration,declare,assertThat,build,assertMessageType,getType,DefaultExtensionLoadingContext,is,instanceOf,getClassLoader,getOperation],categoryIsDescribedCorrectly->[getDeclaration,declareExtension,loaderFor,getCategory,setLoader,is,assertThat]]]
Test case verifying that Java extension declarations are described correctly.
util method for reuse
@@ -31,6 +31,7 @@ from .operations import WorkspaceOperations from .operations import SqlPoolsOperations from .operations import BigDataPoolsOperations from .operations import IntegrationRuntimesOperations +from .operations import LibraryOperations from .operations import WorkspaceGitRepoManagementOperations from .. import models
[ArtifactsClient->[__aexit__->[__aexit__],close->[close],__aenter__->[__aenter__]]]
Creates an instance of ArtifactsClient.
Is it possible to exclude library operations in this release? I remember according to discussion, we will not add new features in this release. @lmazuel , am I right?
@@ -168,6 +168,7 @@ func doEnterpriseMode(config interface{}) (retErr error) { return err } authclient.RegisterAPIServer(externalServer.Server, authAPIServer) + env.SetAuthServer(authAPIServer) return nil }); err != nil { return err
[Handle,NewConfiguration,StringVar,UnaryServerInterceptor,RegisterDebugServer,NewAPIServer,SetGCPercent,InitDexDB,BlockUntil,Ready,Warnf,GetDBClient,JoinHostPort,NewEnterpriseServer,RegisterHealthServer,Health,Is,Set,RegisterAPIServer,NewReporter,ListenAndServeTLS,Handler,NewIdentityServer,GetCertPaths,NewSidecarAPIServer,InitWithKube,New,InitPachOnlyEnv,ApplyMigrations,NewHealthServer,FormatterFunc,Errorf,NewCertLoader,Wait,Logger,Main,Wrapf,Join,LoadAndStart,SetFormatter,Lookup,NewAuthServer,GetPachClient,ChainStreamInterceptor,Server,NewInterceptor,ExternalIP,ListenAndServe,NewMasterDriver,Config,Printf,ChainUnaryInterceptor,RunGitHookServer,ListenTCP,NewServer,Sprintf,NewHTTPServer,StreamInterceptor,Background,WithError,WriteTo,Print,Initialize,InstallJaegerTracerFromEnv,StreamServerInterceptor,Parse,SetLevel,NewDebugServer,Getenv,BoolVar]
Sets up the external pachd gRPC server and registers the auth API server with it.
I find it slightly tidier to give these servers to the serviceenv as we create them, instead of having one big initialize call at the end, but ymmv.
@@ -142,15 +142,9 @@ class ClientBase(object): # pylint:disable=too-many-instance-attributes self._auth_uri = "sb://{}{}".format(self._address.hostname, self._address.path) self._config = Configuration(**kwargs) self._debug = self._config.network_tracing - self._conn_manager = get_connection_manager(**kwargs) + self._conn_manager = get_connection_manager(**kwargs) # type: Any self._idle_timeout = kwargs.get("idle_timeout", None) - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - @staticmethod def _from_connection_string(conn_str, **kwargs): # type: (str, Any) -> Dict[str, Any]
[EventHubSharedKeyCredential->[get_token->[_generate_sas_token]],ConsumerProducerMixin->[_do_retryable_operation->[_handle_exception,_backoff],_close_handler->[close],_handle_exception->[_handle_exception],_close_connection->[_close_handler],_open->[close,_create_auth],close->[_close_handler],__exit__->[close]],ClientBase->[_from_connection_string->[EventHubSharedKeyCredential,_parse_conn_str],get_eventhub_properties->[_management_request],_management_request->[_backoff,_create_auth],get_partition_ids->[get_eventhub_properties],get_partition_properties->[_management_request]]]
Initialize a SharedKey object.
Can this be narrowed?
@@ -173,7 +173,7 @@ public abstract class AbstractPersistentAcceptOnceFileListFilter<F> extends Abst try { this.flushableStore.flush(); } - catch (IOException e) { + catch (@SuppressWarnings("unused") IOException e) { // store's responsibility to log } }
[AbstractPersistentAcceptOnceFileListFilter->[close->[close],remove->[remove]]]
Flush the cache if necessary.
Can't we just re-throw as an `UncheckedIOException` ?
@@ -1,9 +1,6 @@ package org.ray.runtime; import com.google.common.base.Preconditions; -import java.util.HashMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import org.ray.api.id.UniqueId; import org.ray.runtime.config.WorkerMode; import org.ray.runtime.task.TaskSpec;
[WorkerContext->[nextCallIndex->[incrementAndGet],getCurrentThreadTaskId->[checkState,compareAndSet,randomId,warn,getId,isNil],setCurrentTask->[set],nextPutIndex->[incrementAndGet],createDummyTask->[randomId,TaskSpec],AtomicBoolean,AtomicInteger,getLogger,randomId,getId,createDummyTask]]
Creates a worker context. Returns the unique id for the current thread.
Should this be initialized with nil? It looks like it gets overwritten soon after in both branches if the if-block below.
@@ -152,6 +152,9 @@ func (table *Table) ToStringWithGap(columnGap string) string { allRows = append(allRows, table.Rows...) + // 7-bit C1 ANSI sequences + ansiEscape := regexp.MustCompile(`\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])`) + for rowIndex, row := range allRows { columns := row.Columns if len(columns) != len(preferredColumnWidths) {
[ToStringWithGap->[Sprint,Sprintf,Itoa],String->[ToStringWithGap],ToStringWithGap,EqualFold,TrimSuffix,IsCI,Fd,WriteByte,ToLower,Print,IsTerminal,String,Read,Getenv]
ToStringWithGap returns a string representation of the table using the requested column gap.
I would consider making `ansiEscape` a global constant. That way it only needs to be compiled once, and not on each call.
@@ -197,6 +197,11 @@ public abstract class BaseRollbackActionExecutor<T extends HoodieRecordPayload, protected void finishRollback(HoodieRollbackMetadata rollbackMetadata) throws HoodieIOException { try { + // TODO: Potential error here - rollbacks have already completed here so if the syncTableMetadata fails, + // metadata table will be left in an inconsistent state. This is because we do not use the inflight + // state for rollback. + syncTableMetadata(rollbackMetadata); + table.getActiveTimeline().createNewInstant( new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.ROLLBACK_ACTION, instantTime)); table.getActiveTimeline().saveAsComplete(
[BaseRollbackActionExecutor->[doRollbackAndGetStats->[rollBackIndex,validateSavepointRollbacks,executeRollback,validateRollbackCommitSequence]]]
Finishes the rollback.
here how can we handle the case, re-bootstrap?
@@ -308,12 +308,11 @@ namespace ProtoCore.Lang // create language blocks for true and false branch, // so directly return the value. if (runtimeCore.Options.GenerateSSA) - return svCondition.RawBooleanValue ? svTrue : svFalse; + return svCondition.BooleanValue ? svTrue : svFalse; Validity.Assert(svTrue.IsInteger); Validity.Assert(svFalse.IsInteger); - int blockId = (1 == (int)svCondition.opdata) ? (int)svTrue.opdata : (int)svFalse.opdata; - + int blockId = svCondition.BooleanValue ? (int)svTrue.IntegerValue : (int)svFalse.IntegerValue; int oldRunningBlockId = runtimeCore.RunningBlock; runtimeCore.RunningBlock = blockId;
[MapBuiltIns->[MapTo->[Map]],FileIOBuiltIns->[StackValue->[ConvertToString]],RangeExpressionUntils->[GenerateNumericRange->[GenerateRangeByStepNumber,GenerateRangeByAmount,GenerateRangeByStepSize]],ArrayUtilsForBuiltIns->[CountTrue->[CountTrue],IsUniformDepth->[Rank],Exists->[Exists],SomeFalse->[Exists],SomeNulls->[Exists],SomeTrue->[Exists],Count->[Count],ForAll->[ForAll],GetFlattenedArrayElements->[GetFlattenedArrayElements],IsHomogeneous->[IsHomogeneous],Contains->[Contains],AllTrue->[ForAll],IsRectangular->[Rank,Count],ContainsArray->[ContainsArray],IndexOf->[Count],ArrayIndexOfArray->[Count],CountFalse->[CountFalse],StackValue->[ConvertToString,Rank,Count,CountNumber],CountNumber->[Count],AllFalse->[ForAll]],StackValueComparerForDouble->[Compare->[Equals],Equals->[Equals]]]
Executes built-in language methods, including conditional evaluation and numeric range generation.
This is one such hard-casting area that we can avoid if `IntegerValue` is made `int` instead of `long`. Same for the one below.
@@ -94,6 +94,7 @@ def create_payment_lines_information( PaymentLineData( quantity=order_line.quantity, product_name=product_name, + product_sku=order_line.product_sku, gross=order_line.unit_price_gross_amount, ) )
[get_already_processed_transaction_or_create_new_transaction->[get_already_processed_transaction,create_transaction],create_payment_information->[create_payment_lines_information]]
Creates a list of PaymentLineData objects for the given payment's order lines.
FYI for 3.1 product_sku is an optional field
@@ -83,7 +83,6 @@ RSVG_CONVERT_BIN = 'rsvg-convert' PNGCRUSH_BIN = 'pngcrush' FLIGTAR = 'amo-admins+fligtar-rip@mozilla.org' -REVIEWERS_EMAIL = 'amo-editors@mozilla.org' THEMES_EMAIL = 'theme-reviews@mozilla.org' ABUSE_EMAIL = 'amo-admins+ivebeenabused@mozilla.org' NOBODY_EMAIL = 'nobody@mozilla.org'
[get_redis_settings->[,int,append,dict,parse_qsl,float,urlparse],path->[join],lazy_langs->[lower],read_only_mode->[list,insert,get,tuple,index,Exception],lazy,cors_endpoint_overrides,gethostname,join,db,env,dict,Env,r'^,bool,list,get_redis_settings,dirname,exists,float,read_env,Queue,path,basename,get,lower,reverse_lazy]
Django settings helpers, including Redis configuration and CORS endpoint overrides.
as much as I like my old jokes, we should probably change this email address (and constant) at some point.
@@ -61,9 +61,9 @@ namespace DotNetNuke.Entities.Users public const string USERPROFILE_Unit = "Unit"; public const string USERPROFILE_Street = "Street"; public const string USERPROFILE_City = "City"; - public const string USERPROFILE_Region = "Region"; public const string USERPROFILE_Country = "Country"; - public const string USERPROFILE_PostalCode = "PostalCode"; + public const string USERPROFILE_Region = "Region"; + public const string USERPROFILE_PostalCode = "PostalCode"; //Phone contact public const string USERPROFILE_Telephone = "Telephone";
[UserProfile->[InitialiseProfile->[InitialiseProfile],SetProfileProperty,GetPropertyValue]]
UserProfile defines the constants and accessors for user profile properties.
Can you just adjust the spacing here, other than that, I am fine with this. For other reviewers this does not change behaviour just the order of declarations so it matches in code what we have in the UI for most situations.
@@ -31,10 +31,10 @@ import ( "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/reference" + "github.com/vmware/vic/lib/apiservers/engine/backends/kv" "github.com/vmware/vic/lib/apiservers/portlayer/client" - "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" "github.com/vmware/vic/lib/metadata" - "github.com/vmware/vic/pkg/vsphere/sys" + "github.com/vmware/vic/pkg/trace" ) // ICache is an in-memory cache of image metadata. It is refreshed at startup
[RemoveImageByConfig->[Delete,Lock,Unlock,Debugf],IsImageID->[Get,RUnlock,RLock],GetImage->[ParseIDOrReference,Contains,NewRequestNotFoundError,RLock,Errorf,Get,RUnlock,getImageByNamed,getImageByDigest],AddImage->[Unlock,Error,WithTag,WithName,Lock,Errorf,Add],GetImages->[RUnlock,RLock],getImageByNamed->[Get],Update->[NewErrorWithStatusCode,AddExisting,ListImages,UUID,Unmarshal,AddImage,Hostname,Errorf,NewListImagesParamsWithContext,WithStoreName,Debugf],Digests,Tags,NewTruncIndex,HasPrefix,TODO]
Creates a new object that represents the given image ID and image configuration. Update initializes and updates the image cache.
If you expose access to the structure members you either need to make mutex public or extend mutex. Otherwise there is no way to lock ICache for non-package members to modify a structure.
@@ -48,9 +48,9 @@ func resourceGoogleProjectDefaultServiceAccounts() *schema.Resource { Type: schema.TypeString, Optional: true, Default: "REVERT", - ValidateFunc: validation.StringInSlice([]string{"NONE", "REVERT"}, false), + ValidateFunc: validation.StringInSlice([]string{"NONE", "REVERT", "REVERT_AND_IGNORE_FAILURE"}, false), Description: `The action to be performed in the default service accounts on the resource destroy. - Valid values are NONE and REVERT. If set to REVERT it will attempt to restore all default SAs but in the DEPRIVILEGE action.`, + Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.`, }, "service_accounts": { Type: schema.TypeMap,
[DefaultTimeout,Undelete,Disable,SetIamPolicy,Sprintf,Do,Get,GetIamPolicy,ToLower,List,Delete,StringInSlice,Errorf,SetId,Enable,NewResourceManagerClient,Set,NewIamClient]
resourceGoogleProjectDefaultServiceAccounts defines the schema and the action performed on default service accounts when the resource is destroyed.
Can you add back the information about how what setting this to `REVERT` does? And add a description on what `REVERT_AND_IGNORE_FAILURE` will do
@@ -258,7 +258,7 @@ func (v *Validator) QueryDatastore(ctx context.Context, vch *config.VirtualConta sort.Sort(dataStores) if err != nil { log.Errorf("Error while accessing datastore: %s", err) - return + return err } for _, ds := range dataStores { log.Infof("Datastore %s Status: %s", ds.Name, ds.OverallStatus)
[QueryVCHStatus->[Title,HTML,Join,Infof,Begin,ReadFile,Sprintf,PIDFileDir,End,Errorf,Base,Split],QueryDatastore->[Reference,DatastoreOrDefault,HTML,DefaultCollector,Infof,Sprintf,Sort,Errorf,Retrieve],LinkByName,LinkByAlias,Warnf,Now,Close,Hostname,HTML,Error,Format,Dial,End,CheckLicense,IsNil,Errorf,QueryVCHStatus,ClearIssues,QueryDatastore,Infof,CreateFromVCHConfig,AddrList,FirewallCheckOutput,GetIssues,Begin,Sprintf,CheckFirewallForTether,String]
QueryDatastore queries all the datastores on the vSphere host and logs their status.
curious for what are you gonna do with this returned error?
@@ -637,6 +637,10 @@ class BuildNumberMatch(BaseSpec): # lgtm [py/missing-equals] % (self.raw_value, other.raw_value)) return self.raw_value + def union(self, other): + options = set((self.raw_value, other.raw_value)) + return '|'.join(options) + @property def exact_value(self): return excepts(ValueError, int(self.raw_value))
[VersionSpec->[get_matcher->[treeify,VersionSpec,VersionOrder,untreeify]],VersionOrder->[startswith->[_eq,startswith],__eq__->[_eq]],BuildNumberMatch->[get_matcher->[startswith,VersionOrder],__init__->[get_matcher]],treeify->[apply_ops],untreeify->[untreeify],compatible_release_operator->[startswith,__ge__,VersionOrder],BaseSpec->[exact_value->[is_exact],operator_match->[VersionOrder],__ne__->[__eq__]],startswith]
Returns the union of two build number specs as an OR ('|') expression.
Looks like this union function is defined a couple times. Can if be extracted into a utils module or something and shared?
@@ -179,6 +179,18 @@ func SetReadKeys(v uint64) RegionCreateOption { } } +// SetReadQueryNum sets the read query num for the region. +func SetReadQueryNum(v uint64) RegionCreateOption { + return func(region *RegionInfo) { + if region.queryStats == nil { + region.queryStats = &pdpb.QueryStats{} + } + region.queryStats.Coprocessor = v / 3 + region.queryStats.Get = v / 3 + region.queryStats.Scan = v - region.queryStats.Get - region.queryStats.Coprocessor + } +} + // SetApproximateSize sets the approximate size for the region. func SetApproximateSize(v int64) RegionCreateOption { return func(region *RegionInfo) {
[GetPeers,GetStoreId,Sort,GetRegionEpoch,GetId]
SetReadQueryNum sets the read query statistics for the region, splitting the count across Coprocessor, Get, and Scan.
I think this function is hard to be used in another test. It is too specific.
@@ -1320,8 +1320,8 @@ class TestAPIKeyInSubmission(TestCase): upload.refresh_from_db() # https://github.com/mozilla/addons-server/issues/8208 - # causes this to be 2 (and invalid) instead of 0 (and valid). + # causes this to be 1 (and invalid) instead of 0 (and valid). # The invalid filename error is caught and raised outside of this # validation task. - assert upload.processed_validation['errors'] == 2 + assert upload.processed_validation['errors'] == 1 assert not upload.valid
[TestWebextensionIncompatibilities->[test_webextension_downgrade_only_warning_unlisted->[update_files],test_no_upgrade_annotation_no_version->[update_files],test_webextension_cannot_be_downgraded->[update_files],test_webextension_cannot_be_downgraded_ignore_deleted_version->[update_files]],TestRunAddonsLinter->[test_run_linter_fail->[get_upload],test_calls_run_linter->[get_upload]],TestSubmitFile->[test_file_not_passed_all_validations->[create_upload],test_file_passed_all_validations->[create_upload]],TestCreateVersionForUpload->[test_file_passed_all_validations_beta_string->[create_upload],test_file_passed_all_validations_not_most_recent->[create_upload],test_file_passed_all_validations_most_recent_failed->[create_upload],test_file_passed_all_validations_most_recent->[create_upload],test_file_passed_all_validations_no_version->[create_upload],test_file_passed_all_validations_version_exists->[create_upload]],TestValidator->[test_validation_signing_warning->[get_upload],test_pass_validation->[get_upload],test_fail_validation->[get_upload]],TestTrackValidatorStats->[test_count_unlisted_results->[result],test_count_listed_results->[result],test_count_all_successes->[result],test_count_all_errors->[result]],TestMeasureValidationTime->[test_measure_large_files_in_separate_bucket->[approximate_upload_time,statsd_timing_mock,assert_milleseconds_are_close,handle_upload_validation_result],handle_upload_validation_result->[handle_upload_validation_result],test_track_upload_validation_results_with_file_size->[approximate_upload_time,statsd_timing_mock,assert_milleseconds_are_close,handle_upload_validation_result],test_ignore_missing_upload_paths_for_now->[statsd_timing_mock,handle_upload_validation_result],test_do_not_calculate_scaled_time_for_empty_files->[statsd_timing_mock,handle_upload_validation_result],test_measure_small_files_in_separate_bucket->[approximate_upload_time,statsd_timing_mock,assert_milleseconds_are_close,handle_upload_validation_result],test_scale_large_xp
i_times_per_megabyte->[approximate_upload_time,statsd_timing_mock,assert_milleseconds_are_close,handle_upload_validation_result],test_track_upload_validation_results_time->[approximate_upload_time,statsd_timing_mock,assert_milleseconds_are_close,handle_upload_validation_result]]]
Test validation finishes if invalid filename is found.
? I don't see any validation changes in this patch?
@@ -83,8 +83,10 @@ def deprecated(reason, replacement, gone_in, issue=None): if issue is not None: url = "https://github.com/pypa/pip/issues/" + str(issue) message += " You can find discussion regarding this at {}.".format(url) + if gone_in is not None: + message += " pip {} will remove this functionality.".format(gone_in) + # Raise as an error if it has to be removed. + if parse(current_version) >= parse(gone_in): + raise PipDeprecationWarning(message) - # Raise as an error if it has to be removed. - if gone_in is not None and parse(current_version) >= parse(gone_in): - raise PipDeprecationWarning(message) warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
[_showwarning->[warning,getLogger,issubclass,_original_showwarning],install_warning_logger->[simplefilter],deprecated->[parse,str,format,warn,PipDeprecationWarning]]
Deprecate existing functionality.
I think the most important information should be listed first. So that would be (1) the version in which it was / will be removed, (2) then the replacement, and (3) ending with where to go for more info.
@@ -387,6 +387,11 @@ class SetChannelsMixin(object): else: coil_type = FIFF.FIFFV_COIL_NONE self.info['chs'][c_ind]['coil_type'] = coil_type + if len(list(unit_changes.keys())) > 0: + msg = "The unit for channel(s) {0} has changed from {1} to {2}." + for this_change in unit_changes: + names = ", ".join(sorted(unit_changes[this_change])) + warn(msg.format(names, *this_change)) def rename_channels(self, mapping): """Rename channels.
[ContainsMixin->[__contains__->[_contains_ch_type]],_get_T1T2_mag_inds->[pick_types],UpdateChannelsMixin->[pick_types->[pick_types],_pick_drop_channels->[inst_has]],read_ch_connectivity->[_recursive_flatten],fix_mag_coil_types->[pick_types],SetChannelsMixin->[set_channel_types->[_check_set],rename_channels->[rename_channels],plot_sensors->[plot_sensors],set_eeg_reference->[set_eeg_reference]]]
Define the sensor type of channels. This function renames the channels.
I would get rid of the if and just do `for key, value in unit_changes.iteritems():`.
@@ -734,6 +734,11 @@ export const adConfig = { ], consentHandlingOverride: true, }, + + 'performancenative': { + prefetch: 'https://api.performancenative.com/assets/pn.js', + renderStartImplemented: true, + }, 'pixels': { prefetch: 'https://cdn.adsfactor.net/amp/pixels-amp.min.js',
[No CFG could be retrieved]
Provides a list of all possible URLs for a specific tag. A list of all AMP urls that are implemented by the AMP library.
I don't see this api being called when I run locally. Are you calling this method in your js? If not, you should remove this as it will delay rendering.
@@ -980,8 +980,15 @@ func NewContext() { log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err) } } + + PasswordComplexity = sec.Key("PASSWORD_COMPLEXITY").MustString("[a-z]+[A-Z]+[0-9_]+[^A-Za-z0-9_]+") + if len(PasswordComplexity) == 0 { + PasswordComplexity = "[a-z]+[A-Z]+[0-9_]+[^A-Za-z0-9_]+" + } IterateBufferSize = Cfg.Section("database").Key("ITERATE_BUFFER_SIZE").MustInt(50) LogSQL = Cfg.Section("database").Key("LOG_SQL").MustBool(true) + DBConnectRetries = Cfg.Section("database").Key("DB_RETRIES").MustInt(10) + DBConnectBackoff = Cfg.Section("database").Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second) sec = Cfg.Section("attachment") AttachmentPath = sec.Key("PATH").MustString(path.Join(AppDataPath, "attachments"))
[DelLogger,KeysHash,MustCompilePOSIX,Warn,TempDir,Info,MapTo,New,NewLfsJwtSecret,MustCompile,ParseIP,Split,MustString,SetUseHTTPS,NewSection,NewXORMLogger,Getenv,Trim,IsFile,Dir,TrimRight,Close,Append,MatchString,LookPath,Create,Section,GetSection,ToLower,MkdirAll,BinVersion,MustDuration,Minutes,Decode,Keys,TrimPrefix,Fatal,HomeDir,Count,MustBool,Clean,Format,MustInt64,Trace,TrimSpace,IsAbs,Key,ParseAddress,Name,ParseUint,Sprintf,SetSecureFallbackHost,ChildSections,String,Parse,SetLevel,Abs,DiscardXORMLogger,Getpid,Title,LastIndex,SetValue,MustInt,In,Strings,Error,Compare,Empty,SaveTo,SplitN,HasKey,Join,NewLogger,Contains,CurrentUsername,TrimSuffix,FormatInt,Replace,SetFallbackHost,NewInternalToken,WriteString]
MustString returns a token that can be used to log in to the user. Initialize the attachment configuration.
You don't need this again as you have passed the default value to `MustString`
@@ -91,7 +91,8 @@ import static org.apache.nifi.util.StringUtils.isEmpty; }) public class ConsumeAzureEventHub extends AbstractSessionFactoryProcessor { - private static final String FORMAT_STORAGE_CONNECTION_STRING = "DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s"; + private static final String FORMAT_STORAGE_CONNECTION_STRING_FOR_KEY = "DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s"; + private static final String FORMAT_STORAGE_CONNECTION_STRING_FOR_TOKEN = "BlobEndpoint=https://%s.blob.core.windows.net/;SharedAccessSignature=%s"; static final PropertyDescriptor NAMESPACE = new PropertyDescriptor.Builder() .name("event-hub-namespace")
[ConsumeAzureEventHub->[customValidate->[customValidate],EventProcessor->[writeRecords->[putEventHubAttributes,transferTo],writeFlowFiles->[putEventHubAttributes]],unregisterEventProcessor->[unregisterEventProcessor]]]
Consumes messages from Azure Event Hubs.
To make it more straightforward, I would suggest using `FOR_ACCOUNT_KEY` and `FOR_SAS_TOKEN` suffixes.
@@ -105,7 +105,7 @@ async def setup_pytest_for_target( Addresses((adaptor.address,)) ) all_targets = transitive_hydrated_targets.closure - + plugin_file_digest = await Get[Digest](InputFilesContent, get_coverage_plugin_input()) resolved_requirements_pex = await Get[Pex]( CreatePexFromTargetClosure( addresses=Addresses((adaptor.address,)),
[setup_pytest_for_target->[get_coveragerc_input,get_packages_to_cover,calculate_timeout_seconds,TestTargetSetup]]
Setup a test target for a single n - node test. A generator for the test - target setup for the missing coverage options.
I recommend only doing this when coverage is used as a slight performance improvement. Generally, you're using `test_options.values.run_coverage` in multiple places so will want to extract it into a variable like `use_coverage = test_options.values.run_coverage`.
@@ -302,6 +302,8 @@ class Term_Command extends WP_CLI_Command { $current_parent = 0; $current_depth = 1; + $max_id = (int) $wpdb->get_var( "SELECT term_taxonomy_id FROM $wpdb->term_taxonomy ORDER BY term_taxonomy_id DESC LIMIT 1" ); + for ( $i = 0; $i < $count; $i++ ) { if ( $hierarchical ) {
[Term_Command->[create->[get_error_message],list_->[display_items,get_formatter],generate->[tick,finish,maybe_make_child,maybe_reset_depth],get->[get_formatter,display_item],update->[get_error_message]]]
Generates a term tree. Delete all the options that are not associated with a taxonomy.
Why not set `$i` based on `$max_id` ?
@@ -176,11 +176,11 @@ public class ContentProviderTest { // Delete all notes List<Long> remnantNotes = col.findNotes("tag:" + TEST_TAG); if (remnantNotes.size() > 0) { - long[] nids = new long[remnantNotes.size()]; + long[] noteIds = new long[remnantNotes.size()]; for (int i = 0; i < remnantNotes.size(); i++) { - nids[i] = remnantNotes.get(i); + noteIds[i] = remnantNotes.get(i); } - col.remNotes(nids); + col.remNotes(noteIds); col.save(); assertEquals("Check that remnant notes have been deleted", 0, col.findNotes("tag:" + TEST_TAG).size()); }
[ContentProviderTest->[testSuspendCard->[getCol,getFirstCardFromScheduler],testInsertField->[getCol],getCol->[getCol],testQueryNextCard->[getCol],testQueryCardFromCertainDeck->[getCol],testUpdateTags->[getCol,getFirstCardFromScheduler],setUp->[getCol],tearDown->[getCol],reopenCol->[getCol],testQueryCertainDeck->[getCol],testBuryCard->[getCol,getFirstCardFromScheduler],testAnswerCard->[getCol,getFirstCardFromScheduler],testProviderProvidesDefaultForEmptyModelDeck->[getCol],testQueryAllDecks->[getCol],testInsertTemplate->[getCol]]]
Delete all notes and decks with the same tag.
Can we use `Utils.arrayList2array` here (and in a few other places)? Note: actually accepts `List<T>`
@@ -153,7 +153,8 @@ func getIPAMConfig(clusterNetworks []common.ClusterNetwork, localSubnet string) Name: "openshift-sdn", Type: "openshift-sdn", IPAM: &hostLocalIPAM{ - Type: "host-local", + Type: "host-local", + DataDir: hostLocalDataDir, Subnet: cnitypes.IPNet{ IP: nodeNet.IP, Mask: nodeNet.Mask,
[teardown->[shouldSyncHostports,ipamDel],UpdateLocalMulticastRules->[updateLocalMulticastRulesWithLock],setup->[ipamAdd,shouldSyncHostports,ipamDel],Start->[Start],handleCNIRequest->[waitRequest,addRequest],processRequest->[updateLocalMulticastRulesWithLock]]
Start returns a struct that represents a CNI server that is able to serve the given network getPodKey returns the key for the pod that has a podPortMapping in the running.
Where is hostLocalIPAM.DataDir used?
@@ -36,6 +36,16 @@ export function isDisplayNameRequired(state: Object): boolean { || state['features/base/config'].requireDisplayName; } +/** + * Selector for determining if the display name from prejoin page is read only. + * + * @param {Object} state - The state of the app. + * @returns {boolean} + */ +export function isPrejoinNameReadOnly(state: Object): boolean { + return Boolean(state['features/base/jwt']?.user?.name); +} + /** * Selector for determining if the user has chosen to skip prejoin page. *
[No CFG could be retrieved]
Get the visibility of the app. Returns the conferenceUrl used for dialing out a specific .
`jwt?.user?.name? || jwt?.user?.email` ? think it sets either name or email right? also both are under 'user' not directly in jwt I think
@@ -169,7 +169,7 @@ type StepEventStateMetadata struct { func makeEventEmitter(events chan<- Event, update UpdateInfo) (eventEmitter, error) { target := update.GetTarget() var secrets []string - if target.Config.HasSecureValue() { + if target != nil && target.Config.HasSecureValue() { for k, v := range target.Config { if !v.Secure() { continue
[resourcePreEvent->[Requiref,Op],preludeEvent->[AssertNoError,Value,NewBlindingDecrypter,Requiref,String],policyViolationEvent->[WriteRune,FilterString,Failf,Requiref,String,WriteString],resourceOperationFailedEvent->[Requiref,Op],updateSummaryEvent->[Requiref],resourceOutputsEvent->[Requiref],previewSummaryEvent->[Requiref],Value,Index,Old,Mappable,ValueOf,Requiref,Secure,MapKeys,HasSecureValue,NewPropertyValue,Provider,New,IsNil,Assert,Len,Type,MapIndex,Logical,Failf,CreateFilter,Res,GetTarget,AddGlobalFilter,Diffs,FilterString,DetailedDiff,NewPropertyMapFromMapRepl,Kind,Op,Keys,Elem,Interface,IsUserProgramCode,URN,MassageIfUserProgramCodeAsset]
Invite a payload of a resource type that is related to a given step.
Is this change necessary?
@@ -489,8 +489,16 @@ public class AgentInstaller { @Override public Iterable<Iterable<Class<?>>> resolve(Instrumentation instrumentation) { // filter out our agent classes and injected helper classes - return Iterables.transform( - delegate.resolve(instrumentation), i -> Iterables.filter(i, c -> !isIgnored(c))); + return () -> + StreamSupport.stream(delegate.resolve(instrumentation).spliterator(), false) + .map( + classes -> + (Iterable<Class<?>>) + () -> + StreamSupport.stream(classes.spliterator(), false) + .filter(c -> !isIgnored(c)) + .iterator()) + .iterator(); } private static boolean isIgnored(Class<?> c) {
[AgentInstaller->[RedefinitionDiscoveryStrategy->[resolve->[resolve]],installBytebuddyAgent->[installBytebuddyAgent],InstallComponentAfterByteBuddyCallback->[getName->[getName]],ClassLoadCallBack->[run->[getName]],ClassLoadListener->[onComplete->[run]],logVersionInfo->[getName]]]
Resolves the given class.
Hope you do understand this and tested it :) Looks like black magic to me
@@ -40,9 +40,12 @@ }, render: function() { return( - <input id={this.props.grade_id} type="text" size="4" - className="grade-input" value={this.props.value} - onClick={this.handleChange} readonly/> + <p + id={this.props.grade_id} + size="4" + className="grade-input"> + {this.props.value} + </p> ); } });
[No CFG could be retrieved]
Updates the total marks cell. The grade input field.
@wkwan Can you change this to a `span`? The `p` has vertical padding that I don't like the look of.
@@ -1637,9 +1637,9 @@ class Exchange: def fill_leverage_brackets(self): """ - # TODO-lev: Should maybe be renamed, leverage_brackets might not be accurate for kraken Assigns property _leverage_brackets to a dictionary of information about the leverage allowed on each pair + Not used by most exchanges, only used by Binance at time of writing """ return
[available_exchanges->[ccxt_exchanges],validate_exchanges->[available_exchanges,validate_exchange,ccxt_exchanges],Exchange->[fetch_ticker->[fetch_ticker,markets],validate_pairs->[markets,get_pair_quote_currency],fetch_order_or_stoploss_order->[fetch_order],_async_get_trade_history->[_async_get_trade_history_time,_async_get_trade_history_id],close->[close],get_trades_for_order->[exchange_has,_log_exchange_response],check_dry_limit_order_filled->[_is_dry_limit_order_filled,add_dry_order_fee],validate_stakecurrency->[get_quote_currencies],fetch_order->[fetch_dry_run_order,fetch_order,_log_exchange_response],add_dry_order_fee->[get_pair_quote_currency],_load_markets->[_load_async_markets],_async_get_trade_history_time->[_async_fetch_trades],create_dry_run_order->[amount_to_precision],fetch_dry_run_order->[check_dry_limit_order_filled],_async_get_trade_history_id->[_async_fetch_trades],fetch_l2_order_book->[get_next_limit_in_list,fetch_l2_order_book],get_valid_pair_combination->[markets],calculate_fee_rate->[get_valid_pair_combination,fetch_ticker,get_pair_base_currency,get_pair_quote_currency],_async_get_historic_ohlcv->[ohlcv_candle_limit],get_historic_ohlcv_as_df->[get_historic_ohlcv],get_historic_trades->[exchange_has,_async_get_trade_history],get_dry_market_fill_price->[exchange_has,price_to_precision],get_funding_fees_from_exchange->[exchange_has],_set_leverage->[exchange_has],set_margin_mode->[exchange_has,set_margin_mode],cancel_order->[fetch_dry_run_order,cancel_order,_log_exchange_response],_is_dry_limit_order_filled->[exchange_has],create_order->[_get_params,amount_to_precision,create_order,_lev_prep,price_to_precision,_log_exchange_response,create_dry_run_order],get_rate->[fetch_ticker,fetch_l2_order_book],cancel_order_with_result->[fetch_order,cancel_order,is_cancel_order_result_suitable],refresh_latest_ohlcv->[klines],cancel_stoploss_order_with_result->[is_cancel_order_result_suitable],reload_markets->[_load_async_markets],validate_required_startup_candles-
>[ohlcv_candle_limit],extract_cost_curr_rate->[calculate_fee_rate],_async_get_candle_history->[ohlcv_candle_limit]]]
Fill the _leverage_brackets property with the values of the leverage that a.
I think we should be decisive here - either this is binance-specific - so we also remove this method in the parent exchange class (here) - or we say it's "exchange opt in" - so we still call it from the main exchange (which makes the custom constructor in `binance.py` unnecessary).
@@ -16,6 +16,16 @@ import ( "github.com/pachyderm/s2" ) +func newLocalContents(path string, fileInfo os.FileInfo) *s2.Contents { + return &s2.Contents{ + Key: path, + LastModified: fileInfo.ModTime(), + Size: uint64(fileInfo.Size()), + StorageClass: globalStorageClass, + Owner: defaultUser, + } +} + func newContents(fileInfo *pfsClient.FileInfo) (s2.Contents, error) { t, err := types.TimestampFromProto(fileInfo.Committed) if err != nil {
[GetBucketVersioning->[bucketCapabilities,bucket,requestClient,Debugf],CreateBucket->[IsInvalidNameError,InspectBranch,NotImplementedError,IsBranchNotFoundErr,ScrubGRPC,InvalidBucketNameError,BucketAlreadyOwnedByYouError,requestClient,CreateBranch,Ctx,bucket,IsAlreadyExistError,InternalError,CreateRepo,canModifyBuckets,Debugf],ListObjectVersions->[NotImplementedError,Debugf],SetBucketVersioning->[NotImplementedError,requestClient,bucket,Debugf,bucketCapabilities],ListObjects->[Sprintf,requestClient,TrimPrefix,bucket,Debugf,GlobFile,HasPrefix,QuoteMeta,bucketCapabilities],GetLocation->[bucketCapabilities,bucket,requestClient,Debugf],DeleteBucket->[InspectBranch,NotImplementedError,WalkFile,requestClient,Ctx,bucket,InspectRepo,DeleteRepo,BucketNotEmptyError,ScrubGRPC,InternalError,DeleteBranch,canModifyBuckets,Debugf],Sprintf,TimestampFromProto]
GetLocation - get location of a file in a bucket. ListObjects - list all objects in a bucket.
for a file to be accessed locally, does it mean that it must be committed? I'm comparing the semantics here to those below in `newContents()`
@@ -1026,7 +1026,12 @@ describe Assignment do result = s.get_latest_result result.total_mark = total_mark result.marking_state = Result::MARKING_STATES[:complete] - result.save + @assignment.rubric_criteria.each do |cri| + result.marks.create!(markable_id: cri.id, + markable_type: RubricCriterion, + mark: (total_mark * 4.0 / 20).round) + end + result.save! end end
[minute,create,let,is_greater_than_or_equal_to,accepted_grouping_for,have_many,through,it,to,get_current_assignment,update_results_stats,each,marking_state,context,reload,and_return,be,to_not,match_array,to_csv,where,map,today,drop,groupings,get_latest_result,due_date,id,not_to,save,new,add_csv_group,ago,get_total_extra_points,add_group,dependent,accept_nested_attributes_for,before,total_mark,mark,destroy_all,section_due_date,equal,tap,day,from_now,to_s,instance,times,receive,now,get_total_extra_percentage,push,build,has_submission?,eq,update_attribute,size,max,describe,update_attributes,find_by_markable_id_and_markable_type,allow_destroy,first,is_greater_than,order,name,validate_uniqueness_of,push_groupings_to_queue,pending,latest_due_date,validate_presence_of,require,same_time_within_ms,user_name,be_a,weight]
Creates a list of sections and assignments that are past due dates. where both are past due dates.
The rounding invalidates the meaning of the `total_marks` array. On the other hand, the array could only be consistent with real marks if weights were assigned to the criteria. @aclar48, if you could clean up this test and create another one for flexible criteria, that would be much appreciated.
@@ -72,6 +72,18 @@ export function Accordion({ }); }, [expandSingleSection]); + useImperativeHandle( + ref, + () => { + return { + registerSection, + toggleExpanded, + isExpanded, + }; + }, + [registerSection, toggleExpanded, isExpanded] + ); + const registerSection = useCallback( (id, defaultExpanded) => { setExpandedMap((expandedMap) => {
[No CFG could be retrieved]
Creates a new accordion object. Adds a new child to the hierarchy of the hierarchy.
Just one note that, `toggleExpanded` and `isExpanded` could be right here. But `registerSection` is definitely out of place. Think about it this way: the imperative API (along with component props) is part of the component's public API. The `registerSection` is an internal detail.
@@ -3808,7 +3808,7 @@ public class Jenkins extends AbstractCIBase implements DirectlyModifiableTopLeve public synchronized void doConfigSubmit( StaplerRequest req, StaplerResponse rsp ) throws IOException, ServletException, FormException { BulkChange bc = new BulkChange(this); try { - checkPermission(ADMINISTER); + checkPermission(MANAGE); JSONObject json = req.getSubmittedForm();
[Jenkins->[getUser->[get],_cleanUpShutdownTcpSlaveAgent->[add],setNumExecutors->[updateComputerList],getPlugin->[getPlugin],_cleanUpCloseDNSMulticast->[add],getViewActions->[getActions],getJDK->[getJDKs,get],setViews->[addView],getCloud->[getByName],getStaplerFallback->[getPrimaryView],getStoredVersion->[get],getViews->[getViews],doDoFingerprintCheck->[isUseCrumbs],deleteView->[deleteView],getLabel->[get],_cleanUpInterruptReloadThread->[add],doConfigSubmit->[save,updateComputerList],CloudList->[onModified->[onModified]],doCheckDisplayName->[isNameUnique,isDisplayNameUnique],_cleanUpPersistQueue->[save,add],getLabelAtom->[get],setBuildsAndWorkspacesDir->[isDefaultWorkspaceDir,isDefaultBuildDir],reload->[loadTasks,save,reload,executeReactor],doConfigExecutorsSubmit->[all,updateComputerList],DescriptorImpl->[getDynamic->[getDescriptor],DescriptorImpl],checkRawBuildsDir->[expandVariablesForDirectory],_cleanUpShutdownThreadPoolForLoad->[add],isDisplayNameUnique->[getDisplayName],_cleanUpRunTerminators->[onTaskFailed->[getDisplayName],execute->[run],onTaskCompleted->[getDisplayName],onTaskStarted->[getDisplayName],add],getJobNames->[getFullName,add],doChildrenContextMenu->[add,getViews,getDisplayName],doLogout->[doLogout],getActiveInstance->[get],getNode->[getNode],copy->[copy],updateNode->[updateNode],doSubmitDescription->[doSubmitDescription],doCheckURIEncoding->[doCheckURIEncoding],getItem->[getItem,get],doViewExistsCheck->[getView],getUnprotectedRootActions->[getActions,add],setAgentProtocols->[add],disableSecurity->[setSecurityRealm],onViewRenamed->[onViewRenamed],getDescriptorByName->[getDescriptor],loadConfig->[getConfigFile],getRootUrl->[get],refreshExtensions->[getInstance,add,getExtensionList],getRootPath->[getRootDir],getView->[getView],putItem->[get],_cleanUpShutdownTimer->[add],_cleanUpDisconnectComputers->[run->[add]],getAllThreadDumps->[get,getComputers],createProject->[createProject,getDescriptor],MasterComputer->[doConfigSubmit->[doConfigExecutorsSubmit],
hasPermission->[hasPermission],get],createProjectFromXML->[createProjectFromXML],getAgentProtocols->[add],doScript->[getView,getACL],_cleanUpReleaseAllLoggers->[add],isRootUrlSecure->[getRootUrl],EnforceSlaveAgentPortAdministrativeMonitor->[doAct->[forceSetSlaveAgentPort,getExpectedPort],isActivated->[get,getSlaveAgentPortInitialValue],getExpectedPort->[getSlaveAgentPortInitialValue]],setSecurityRealm->[get],getItems->[getItems,add],doCheckViewName->[getView,checkGoodName],removeNode->[removeNode],getSelfLabel->[getLabelAtom],fireBeforeShutdown->[all,add],doSimulateOutOfMemory->[add],restartableLifecycle->[get],expandVariablesForDirectory->[expandVariablesForDirectory,getFullName],_getFingerprint->[get],getManagementLinks->[all],addView->[addView],getPlugins->[getPlugin,getPlugins,add],save->[getConfigFile],getPrimaryView->[getPrimaryView],makeSearchIndex->[get->[getView],makeSearchIndex,add],getNodes->[getNodes],lookup->[get,getInstanceOrNull],getLegacyInstanceId->[getSecretKey],_cleanUpShutdownUDPBroadcast->[add],saveQuietly->[save],getLifecycle->[get],getInstanceOrNull->[getInstance],executeReactor->[containsLinkageError->[containsLinkageError],runTask->[runTask]],setNodes->[setNodes],loadTasks->[run->[setSecurityRealm,getExtensionList,getNodes,setNodes,remove,add,loadConfig],add],remove->[remove],getDescriptorOrDie->[getDescriptor],getLabelAtoms->[add],getItemByFullName->[getItemByFullName,getItem],doCreateView->[addView],getExtensionList->[get,getExtensionList],getLabels->[add],restart->[restartableLifecycle],isNameUnique->[getItem],getWorkspaceFor->[all],_cleanUpShutdownPluginManager->[add],getRootDirFor->[getRootDirFor,getRootDir],canDelete->[canDelete],getInstance->[getInstanceOrNull],getFingerprint->[get],getAuthentication->[getAuthentication],doScriptText->[getView,getACL],getDynamic->[getActions],_cleanUpPluginServletFilters->[cleanUp,add],_cleanUpShutdownTriggers->[add],addNode->[addNode],getTopLevelItemNames->[add],MasterRestartNotifyier->[onRestart->[a
ll]],doQuietDown->[doQuietDown],safeRestart->[restartableLifecycle],updateComputerList->[updateComputerList],rebuildDependencyGraphAsync->[call->[get,rebuildDependencyGraph]],getConfiguredRootUrl->[get],_cleanUpAwaitDisconnects->[get,add],readResolve->[getSlaveAgentPortInitialValue],getName,get]]
Configures the configuration of all the descriptors in the system.
Taking how the code below is implemented, it is likely to expose some Admin-only configuration sections to users with `Manage` permissions. Hiding controls from frontend is fine, but IIUC the manually prepared form submission would pass here.
@@ -39,6 +39,16 @@ public class LdapPasswordManagementService extends BasePasswordManagementService final PasswordHistoryService passwordHistoryService) { super(passwordManagementProperties, cipherExecutor, issuer, passwordHistoryService); this.ldapProperties = passwordManagementProperties.getLdap(); + this.ldapProperties.forEach(ldap -> { + this.connectionFactoryMap.put(ldap, LdapUtils.newLdaptiveConnectionFactory(ldap)); + }); + } + + @Override + public void destroy() { + this.connectionFactoryMap.forEach((ldap, connectionFactory) -> { + connectionFactory.close(); + }); } @Override
[LdapPasswordManagementService->[findEmail->[isValid,debug,of,getAttributeName,warn,findAttribute],changeInternal->[getMessage,error,toList,collect,allMatch],findUsername->[toList,collect,findAttribute],getSecurityQuestions->[getAttribute,getPageSize,debug,getEntry,isNotBlank,newLdaptiveConnectionFactory,getSearchFilter,getBaseDn,wrap,executeSearchOperation,forEach,getSecurityQuestionsAttributes,getStringValue,consumer,put,containsResultEntry,newLdaptiveSearchFilter],findAttribute->[getMessage,error,orElse],findPhone->[getAttributeName,of,findAttribute],getLdap]]
Find email.
Pass the connection factory map as a ctor argument; build the map inside the bean
@@ -98,15 +98,14 @@ var _ = Describe("validator", func() { Expect(err).To(BeForbiddenError()) }) - It("should disallow seed deletion because it is still referenced by a backupbucket", func() { + It("should allow seed deletion even though it is still referenced by a backupbucket (will be cleaned up during Seed reconciliation)", func() { backupBucket.Spec.SeedName = &seedName Expect(coreInformerFactory.Core().InternalVersion().BackupBuckets().Informer().GetStore().Add(&backupBucket)).To(Succeed()) attrs := admission.NewAttributesRecord(&seed, nil, core.Kind("Seed").WithVersion("version"), "", seed.Name, core.Resource("seeds").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil) err := admissionHandler.Validate(context.TODO(), attrs, nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(BeForbiddenError()) + Expect(err).ToNot(HaveOccurred()) }) It("should disallow seed deletion because shoot migration is yet not finished", func() {
[DeepCopy,Resource,StringPtr,AssignReadyFunc,Shoots,Add,ValidateInitialization,Validate,BackupBuckets,To,Informer,Handles,GetStore,NewAttributesRecord,NewSharedInformerFactory,NotTo,Registered,NewPlugins,SetInternalCoreInformerFactory,Kind,ToNot,Core,InternalVersion,TODO,WithVersion]
BeforeEach sets up the necessary fields for the Hugo API. Admit - checks for missing seed name.
Can you keep this test case and rather invert it? E.g.: `should allow seed deletion if it is referenced by BackupBucket (will get cleaned up after Seed deletion)` Though, it wouldn't really test concrete part of the implementation now, I think it still makes sense to have such tests, that will pop up when someone in the future might want to change the logic. Then they at least have to change the test, but it wouldn't go unnoticed, that there was a concrete reason for doing it the way it is currently implemented.
@@ -284,7 +284,8 @@ public class KsqlResource implements KsqlConfigurable { configProperties, localHost, localUrl, - requestConfig.getBoolean(KsqlRequestConfig.KSQL_REQUEST_INTERNAL_REQUEST) + requestConfig.getBoolean(KsqlRequestConfig.KSQL_REQUEST_INTERNAL_REQUEST), + request.getSessionVariables() ), request.getKsql() );
[KsqlResource->[addCommandRunnerWarning->[singletonList,equals,get,updateWarnings,KsqlWarning],shouldSynchronize->[containsKey,contains],terminateCluster->[getStreamsProperties,ok,serverErrorForStatement,ensureValidPatterns,getDeleteTopicList,KsqlEntityList,SessionProperties,info,throwIfNotConfigured,validateAll,execute],ensureValidPatterns->[forEach,KsqlRestException,badRequest,compile],isValidProperty->[badRequest,ok,put,info,generateResponse,validateAll],handleKsqlStatements->[getSqlStatement,create,serverErrorForStatement,getServiceContext,updateLastRequestTime,SessionProperties,info,throwIfNotConfigured,validateAll,addCommandRunnerWarning,httpWaitForCommandSequenceNumber,getSessionVariables,getRequestProperties,KsqlRequestConfig,badRequest,getConfigOverrides,ok,parse,getKsql,generateResponse,execute,getCommandQueue,validate,getBoolean,getRawMessage,badStatement],configure->[IllegalArgumentException,port,URL,IllegalStateException,host,getCommandQueue,get,DefaultCommandQueueSync,RequestHandler,KsqlHostInfo,ValidatedCommandFactory,RequestValidator,DistributingExecutor,containsKey,parseHostInfo],throwIfNotConfigured->[KsqlRestException,notReady],parse,getLogger,build,requireNonNull]]
Handles Ksql statements. server error for statement.
Yikes, thanks for finding and fixing this bug! Unless SessionProperties (or any of its params) are mutable, it'd be good to pull out the creation of the SessionProperties to be shared between the validate() and execute() calls in this method, in order to guard against similar bugs in the future.
@@ -227,6 +227,12 @@ class BeamSearch: gather(1, expanded_backpointer).\ reshape(batch_size * self.beam_size, *last_dims) + if not torch.isfinite(last_log_probabilities).all(): + warnings.warn("Infinite log probabilities encountered. Some final sequences may not make sense. " + "This can happen when the beam size is larger than the number of valid (non-zero " + "probability) transitions that the step function produces.", + RuntimeWarning) + # Reconstruct the sequences. # shape: [(batch_size, beam_size, 1)] reconstructed_predictions = [predictions[-1].unsqueeze(2)]
[BeamSearch->[search->[,step,size,topk,where,reversed,gather,view,ConfigurationError,cat,range,predictions,list,append,len,items,float,reshape,new_full,unsqueeze]]]
This function applies a beam search to find the most likely target sequences. Stores the index n for the parent prediction and the log probabilities for the last time step. Find all classes in the last beam that have predicted the end token in and then replace Topk method for the base class.
Hmm, this is a bit unsatisfying, but I guess filtering out the impossible results thereby changing the shape of the returned tensor might be a worse fix. Could you add to the docstring that it's the responsibility of the caller to check the returned probabilities for invalid results?
@@ -337,7 +337,7 @@ func main() { coordinatorAddress := trans.String("coordinator-address", "", "coordinator address") subID := trans.Int64("subID", 0, "subID") to := trans.String("to", "", "to") - panicErr(trans.Parse(os.Args[2:])) + parseArgs(trans, os.Args[2:], []string{"coordinator-address", "to"}) coordinator, err := vrf_coordinator_v2.NewVRFCoordinatorV2(common.HexToAddress(*coordinatorAddress), ec) panicErr(err) tx, err := coordinator.RequestSubscriptionOwnerTransfer(owner, uint64(*subID), common.HexToAddress(*to))
[CreateSubscription,SRandomWords,Hash,SetConfig,NewVRFSingleConsumerExample,NewKeyedTransactorWithChainID,DeployVRFExternalSubOwnerExample,DeployVRFCoordinatorV2,ScalarBaseMult,LookupEnv,TransferAndCall,BalanceOf,Exit,UnmarshalPubkey,RequestRandomWords,TopUpSubscription,Dial,HexToHash,CancelSubscription,ParseInt,Bytes,Address,NewLinkToken,SRequestId,OwnerCancelSubscription,NewVRFCoordinatorV2,SRequestConfig,Uint64,DeployVRFSingleConsumerExample,RegisterProvingKey,RequestSubscriptionOwnerTransfer,DecodeString,Int64,NewFlagSet,Subscribe,FundAndRequestRandomWords,WatchSubscriptionFunded,Printf,HexToAddress,Println,GetSubscription,Sprintf,Unsubscribe,GenericEncode,Background,SuggestGasPrice,AcceptSubscriptionOwnerTransfer,NewVRFExternalSubOwnerExample,S256,NewInt,SetBytes,String,Parse,WatchSubscriptionCreated,Uint,SetString,AddConsumer]
This function is used to request random words from a specific key hash. Accepts an incoming subscription owner transfer.
Should subID be required here too?
@@ -488,7 +488,7 @@ class Installer $links = $this->package->getRequires(); } - foreach ($links as $link) { + foreach ($links as $link) { $request->install($link->getTarget(), $link->getConstraint()); } }
[Installer->[whitelistUpdateDependencies->[packageNameToRegexp],disablePlugins->[disablePlugins]]]
Installs the package. Checks if packages that are not installed or installed are not fixed to a version in lock. Checks if the current package is not installed in the lock file and if so installs it. Finds a node - level package that can be installed and install it if it can.
you should remove these trailing whitespaces
@@ -1012,9 +1012,10 @@ func (f5 *f5LTM) CreatePool(poolname string) error { // From @Miciah: In the future, we should allow the administrator // to specify a different monitor to use. payload := f5Pool{ - Mode: "round-robin", - Monitor: "min 1 of /Common/http /Common/https", - Name: poolname, + Mode: "round-robin", + Monitor: "min 1 of /Common/http /Common/https", + Partition: f5.partitionPath, + Name: poolname, } err := f5.post(url, payload, nil)
[restRequestPayload->[restRequest],ensureVserverHasIRule->[get,patch],InsecureRouteExists->[routeExists],DeletePassthroughRoute->[getPassthroughRoutes,updatePassthroughRoutes],deleteRoute->[delete],associateClientSslProfileWithVserver->[post],createClientSslProfile->[post],get->[restRequest],uploadCert->[post,buildSshArgs],uploadKey->[post,buildSshArgs],ensurePolicyExists->[post,get],getRoutes->[get],AddPassthroughRoute->[getPassthroughRoutes,updatePassthroughRoutes],addPartitionPath->[post],patch->[restRequestPayload],AddVtep->[ensurePartitionPathExists,post],GetPoolMembers->[get],DeletePoolMember->[delete,PoolHasMember],routeExists->[getRoutes],ensureVxLANTunnel->[post],checkPartitionPathExists->[get],DeleteSecureRoute->[deleteRoute],ensureIRuleExists->[post,get],delete->[restRequest],PoolHasMember->[GetPoolMembers],RemoveVtep->[ensurePartitionPathExists,delete],ensureDatagroupExists->[post,get],PoolExists->[GetPoolMembers],SecureRouteExists->[routeExists],addRoute->[post,getRoutes],deleteCertParts->[delete],associateServerSslProfileWithVserver->[post],ensureVserverHasPolicy->[post,get],getPassthroughRoutes->[get],AddSecureRoute->[addRoute],ensurePartitionPathExists->[checkPartitionPathExists,addPartitionPath],createServerSslProfile->[post],Initialize->[ensureVserverHasIRule,ensureVserverHasPolicy,ensurePartitionPathExists,ensurePolicyExists,ensureVxLANTunnel,ensureIRuleExists,ensureDatagroupExists],updatePassthroughRoutes->[getPassthroughRoutes,patch],CreatePool->[post],DeleteInsecureRoute->[deleteRoute],AddPoolMember->[GetPoolMembers,post,PoolHasMember],post->[restRequestPayload],AddInsecureRoute->[addRoute],DeletePool->[delete]]
CreatePool creates a new pool with the given name.
fmt.Sprintf("min 1 of /%s/http /%s/https", f5.partionPath, f5.partionPath)?
@@ -87,7 +87,7 @@ class User < ActiveRecord::Base has_many :created_tags, class_name: 'Tag', foreign_key: 'created_by_id' has_many :modified_tags, class_name: 'Tag', foreign_key: 'last_modified_by_id' has_many :assigned_user_my_modules, class_name: 'UserMyModule', foreign_key: 'assigned_by_id' - has_many :assigned_user_organizations, class_name: 'UserOrganization', foreign_key: 'assigned_by_id' + has_many :assigned_user_teams, class_name: 'UserTeam', foreign_key: 'assigned_by_id' has_many :assigned_user_projects, class_name: 'UserProject', foreign_key: 'assigned_by_id' has_many :added_protocols, class_name: 'Protocol', foreign_key: 'added_by_id', inverse_of: :added_by has_many :archived_protocols, class_name: 'Protocol', foreign_key: 'archived_by_id', inverse_of: :archived_by
[User->[search->[all,distinct,where,present?,not,id],confirmation_required?->[enable_email_confirmations],projects_by_orgs->[organization,group_by,joins],organizations_ids->[pluck],statistics->[find_each,count],time_zone_check->[add,nil?],active?->[present?],filter_paperclip_errors->[key?,set,join,each,clear,delete],destroy_notifications->[in_groups_of,pluck,destroy_all],last_activities->[uniq],empty_avatar->[avatar_file_size,to_i,avatar_content_type,avatar_file_name,last,mime_type],active_status_str->[t,active?],megabytes,auto_strip_attributes,include,enum,after_validation,validate,before_destroy,validates,has_many,validates_attachment,has_attached_file,devise]]
Has many fields for the same name as the class name. The base class for all the related protocol objects.
Line is too long. [86/80]
@@ -31,8 +31,9 @@ requests = [ workspace= os.environ['LOG_WORKSPACE_ID'] ), LogsQueryRequest( - query= "AppRequests | take 2", - workspace= os.environ['LOG_WORKSPACE_ID'] + query= "AppRequests", + workspace= os.environ['LOG_WORKSPACE_ID'], + include_statistics=True ), ] response = client.batch_query(requests)
[ClientSecretCredential,LogsQueryRequest,datetime,print,LogsQueryClient,batch_query,DataFrame]
send_batch_query - Sends a batch query to the Azure Logs API to retrieve.
should sample also have `include_render`, `server_timeout` or nah?
@@ -66,7 +66,11 @@ Rails.application.routes.draw do end resources :reactions, only: [:update] resources :response_templates, only: %i[index new edit create update destroy] - resources :chat_channels, only: %i[index create update] + resources :chat_channels, only: %i[index create update destroy] do + member do + delete :remove_user + end + end resources :reports, only: %i[index show], controller: "feedback_messages" do collection do post "send_email"
[new,authenticate,authenticated,redirect,devise_scope,mount,put,draw,freeze,has_role?,resources,member,root,use,scope,post,set,controllers,require,secrets,class_eval,use_doorkeeper,resource,patch,devise_for,each,production?,delete,namespace,collection,get,session_options,tech_admin?,app]
View of the user s category. send_email - Sends email to all organization members.
I would remove the `destroy` action from the array since you're using the `remove_user` action now. :)
@@ -273,6 +273,12 @@ define([ node = new Node(ExpressionNodeType.CONDITIONAL, '?', left, right, test); } else if (ast.type === 'MemberExpression') { node = parseMemberExpression(expression, ast); + } else if (ast.type === 'ArrayExpression') { + var val = []; + for (var i=0; i < ast.elements.length; i++) { + val[i] = createRuntimeAst(expression, ast.elements[i]); + } + node = new Node(ExpressionNodeType.ARRAY, val); } //>>includeStart('debug', pragmas.debug); else if (ast.type === 'CompoundExpression') {
[No CFG could be retrieved]
Creates a node which represents the next node in the tree. finds the correct evaluation function for a node.
Minor whitespace. Throughout this file and in general, use something like: `for (var i = 0;`
@@ -378,6 +378,14 @@ uint8_t calculate_clut_haldclut(char *filepath, float **clut) return 0; } level *= level; // to be equivalent to cube level + if(level > 256) + { + fprintf(stderr, "[lut3d] error - LUT 3D size %d > 256\n", level); + dt_control_log(_("error - LUT 3D size %d > 256"), level); + fclose(png.f); + png_destroy_read_struct(&png.png_ptr, &png.info_ptr, NULL); + return 0; + } const size_t buf_size = (size_t)png.height * png_get_rowbytes(png.png_ptr, png.info_ptr); dt_print(DT_DEBUG_DEV, "[lut3d] allocating %zu bytes for png file\n", buf_size); uint8_t *buf = NULL;
[No CFG could be retrieved]
read png header and return 0 if found finds the next non - zero value in the image and allocates the buffer for the.
This breaks translation... Maybe we can wait for 3.0.1. Also as this is a user's message I would just wrote: "Lut 3D size exceed the maximum supported" The fprintf will tell what is the issue to developers.
@@ -88,6 +88,10 @@ def all_inputs( """ if duration_only(state, inputs, parameters) is False: return False + elif getattr(state, "hashed_inputs", None) == { + key: tokenize(val) for key, val in inputs.items() + }: + return True elif {key: res.value for key, res in state.cached_inputs.items()} == inputs: return True else:
[partial_inputs_only->[_partial_inputs_only->[duration_only]],all_inputs->[duration_only],partial_parameters_only->[_partial_parameters_only->[duration_only]],all_parameters->[duration_only]]
Checks if the cache based on cache expiration _and_ all inputs that were provided on the.
You probably only want to call `tokenize` (which has a cost) if the `hashed_inputs` attribute is present and contains values. Right now `tokenize` is always called, even if unneeded.
@@ -46,6 +46,9 @@ var ( AtomicTransport = "atomic" // DefaultTransport is a prefix that we apply to an image name DefaultTransport = DockerTransport + // DefaultLocalRepo is the default local repository for local image operations + // Remote pulls will still use defined registries + DefaultLocalRepo = "localhost" ) type pullStruct struct {
[getPullListFromRef->[getPullStruct],pullImage->[getPullListFromRef]]
getPullStruct returns a pullStruct that pulls the image from the given image reference to getPullListFromRef returns a list of pullStruct that is the same as the pull.
"registry" (a server providing an API, roughly ~ a hostname) is not the same thing as a "repository" (a namespace within a single registry, one which can contain images identified by tags and digests only, e.g. `example.com/namespace/rhel`) are not the same thing. An initial `localhost/` is typically interpreted as a registry.
@@ -338,7 +338,7 @@ describe GeneralController, 'when using xapian search' do it "should filter results based on end of URL being 'requests'" do get :search, params: { :combined => "bob/requests" } - expect(assigns[:xapian_requests].results.map{|x|x[:model]}).to match_array([ + expect(assigns[:xapian_requests].results.map { |x|x[:model] }).to match_array([ info_request_events(:useless_outgoing_message_event), info_request_events(:silly_outgoing_message_event), info_request_events(:useful_incoming_message_event),
[create,describe,public_bodies,match_array,load_default,first,it,name,map,env,to,of,save!,before,info_request_events,post,double,symbolize_keys,require,to_s,include,raise_error,have_http_status,dirname,mock_model,receive,hexdigest,slice!,now,hours,match,id,words_to_highlight,have_css,load_file_fixture,redirect_to,context,with_default_locale,email_confirmed,get,not_to,eq,users,to_return,render_template,find_each,and_return,expand_path]
expects that xapian_users and xapian_requests are set to results should filter results based on end of URL being bodies and should prioritise direct matches of public.
Line is too long. [83/80]
@@ -35,6 +35,8 @@ class Gatk(Package): homepage = "https://software.broadinstitute.org/gatk/" url = "https://github.com/broadinstitute/gatk/releases/download/4.0.4.0/gatk-4.0.4.0.zip" + version('4.0.8.1', sha256='6d47463dfd8c16ffae82fd29e4e73503e5b7cd0fcc6fea2ed50ee3760dd9acd9', + url='https://github.com/broadinstitute/gatk/archive/4.0.8.1.tar.gz') version('4.0.4.0', '083d655883fb251e837eb2458141fc2b', url="https://github.com/broadinstitute/gatk/releases/download/4.0.4.0/gatk-4.0.4.0.zip") version('3.8-0', '0581308d2a25f10d11d3dfd0d6e4d28e', extension='tar.gz',
[Gatk->[install->[install,join_path,set_executable,mkdirp,satisfies,dirname,glob,filter_file,match],setup_environment->[prepend_path,join_path],depends_on,version]]
Creates an object that can be used to provide a high throughput sequence of variants in Package - level functions for installing the n - tuple of objects.
technically you don't need the `sha256=` spack infers the type from the hash length
@@ -0,0 +1,14 @@ +class RepositoryRow < ActiveRecord::Base + belongs_to :repository + belongs_to :team + belongs_to :created_by, foreign_key: :created_by_id, class_name: 'User' + has_many :repository_cells, dependent: :destroy + has_many :repository_columns, through: :repository_cells + + auto_strip_attributes :name, nullify: false + validates :name, + presence: true, + length: { maximum: Constants::NAME_MAX_LENGTH } + validates :team, presence: true + validates :created_by, presence: true +end
[No CFG could be retrieved]
No Summary Found.
Do we need `belongs_to :team`? I mean, you can get this from the repository? Is there any optimization benefit from this?
@@ -346,10 +346,9 @@ static bool xshm_server_changed(obs_properties_t *props, obs_property_t *p, bool randr = randr_is_active(xcb); bool xinerama = xinerama_is_active(xcb); int_fast32_t count = - (randr) ? randr_screen_count(xcb) - : (xinerama) - ? xinerama_screen_count(xcb) - : xcb_setup_roots_length(xcb_get_setup(xcb)); + randr ? randr_screen_count(xcb) + : (xinerama ? xinerama_screen_count(xcb) + : xcb_setup_roots_length(xcb_get_setup(xcb))); for (int_fast32_t i = 0; i < count; ++i) { char *name;
[bool->[xcb_get_setup,UNUSED_PARAMETER,xcb_connect,dstr_free,dstr_printf,obs_property_list_item_disable,obs_properties_get,obs_property_modified,obs_property_set_visible,obs_data_get_int,obs_data_get_bool,dstr_init,randr_is_active,xcb_get_extension_data,xinerama_screen_geo,blog,obs_data_get_string,randr_screen_count,xcb_setup_roots_length,xcb_disconnect,sprintf,obs_property_list_clear,xinerama_screen_count,xcb_connection_has_error,obs_property_list_add_int,free,x11_screen_geo,xinerama_is_active,randr_screen_geo,obs_property_set_enabled],obs_properties_t->[obs_property_modified,obs_properties_create,obs_module_text,obs_data_release,obs_properties_add_list,obs_source_get_settings,obs_properties_add_bool,obs_property_set_modified_callback,XSHM_DATA,obs_properties_add_text,obs_properties_add_int],uint32_t->[XSHM_DATA],const->[UNUSED_PARAMETER,obs_module_text],inline->[gs_texture_destroy,gs_texture_create],int_fast32_t->[x11_screen_geo,blog,xinerama_screen_geo,randr_screen_geo,xcb_get_screen],void->[gs_texture_set_image,UNUSED_PARAMETER,obs_get_base_effect,xcb_connect,bstrdup,gs_effect_get_param_by_name,xcb_shm_get_image_reply,xshm_xcb_detach,xcb_xcursor_init,XSHM_DATA,xcb_xfixes_get_cursor_image_reply,gs_enable_framebuffer_srgb,obs_data_set_default_bool,obs_data_get_int,xcb_xcursor_offset,xshm_xcb_attach,xshm_update_geometry,obs_data_get_bool,xcb_xfixes_get_cursor_image_unchecked,xcb_shm_get_image_unchecked,xcb_xcursor_update,obs_leave_graphics,randr_is_active,xcb_xcursor_render,blog,xshm_capture_start,obs_data_get_string,bzalloc,obs_enter_graphics,xshm_update,gs_get_linear_srgb,gs_texture_destroy,xcb_disconnect,gs_effect_loop,gs_draw_sprite,xinerama_is_active,xcb_connection_has_error,gs_effect_set_texture_srgb,free,xshm_resize_texture,obs_data_set_default_int,gs_framebuffer_srgb_enabled,xcb_xcursor_destroy,bfree,obs_source_showing,xshm_capture_stop,gs_effect_set_texture,xshm_check_extensions]]
called when an xshm server changes This function handles the missing screen feature.
This change seems unrelated. Was there some sort of compiler warning? Keep in mind that CI uses clang-format 10 at the moment.
@@ -619,12 +619,17 @@ public class ServerModel extends Observable implements IConnectionChangeListener if (gameToLobbyConnection != null && lobbyWatcherThread != null) { lobbyWatcherThread .getGameId() - .ifPresent(gameId -> connectionAndGameIdAction.accept(gameToLobbyConnection, gameId)); + .ifPresent( + gameId -> + new Thread(() -> connectionAndGameIdAction.accept(gameToLobbyConnection, gameId)) + .start()); } } @Override public void connectionRemoved(final INode node) { + notifyLobby( + (lobbyConnection, gameId) -> lobbyConnection.playerLeft(gameId, node.getPlayerName())); if (removeConnectionsLatch != null) { Interruptibles.await(() -> removeConnectionsLatch.await(6, TimeUnit.SECONDS)); }
[ServerModel->[connectionRemoved->[takePlayerInternal,notifyLobby],setPlayerEnabled->[takePlayerInternal],disablePlayer->[setPlayerEnabled],getServerProps->[cancel],createServerMessenger->[cancel,gameDataChanged],getAvailableGames->[getAvailableGames],enablePlayer->[setPlayerEnabled],getLauncher->[disallowRemoveConnections,getPlayerListingInternal],newGame->[notifyChannelPlayersChanged],releasePlayer->[takePlayerInternal],takePlayer->[takePlayerInternal]]]
Notify the lobby that a connection has been lost.
Made the lobby notification non-blocking and now runs on a background thread.
@@ -83,6 +83,12 @@ public class HivePageSourceProvider @Override public ConnectorPageSource createPageSource(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<ColumnHandle> columns) + { + return createPageSource(transaction, session, split, table, columns, TupleDomain.all()); + } + + @Override + public ConnectorPageSource createPageSource(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<ColumnHandle> columns, TupleDomain<ColumnHandle> dynamicFilter) { HiveTableHandle hiveTable = (HiveTableHandle) table;
[HivePageSourceProvider->[createHivePageSource->[createPageSource],ColumnMapping->[buildColumnMappings->[regular,prefilled,interim],regular->[ColumnMapping],prefilled->[ColumnMapping],interim->[ColumnMapping]]]]
Creates a page source for the given table and split.
I think `TestHiveDistributedJoinQueries` should be tested with dynamic filtering on and off
@@ -654,7 +654,7 @@ if (! empty($conf->expensereport->enabled)) if (! empty($date_start) && ! empty($date_end)) $sql.= " AND $column >= '".$db->idate($date_start)."' AND $column <= '".$db->idate($date_end)."'"; - $sql.= " GROUP BY p.rowid, p.ref, u.firstname, u.lastname, dm"; + $sql.= " GROUP BY u.rowid"; $sql.= " ORDER BY p.ref"; dol_syslog("get expense report outcome");
[free,query,executeHooks,trans,num_rows,fetch_object,initHooks,close,idate,load,select_date]
Get the list of all non - free non - duplicate non - duplicate non - duplicate non Get the list of payment salaries that are part of the total number of payments.
A lot of field are missing in the group by. This will make error with postgresql. We should find same fields than into select except sum()
@@ -420,6 +420,16 @@ func (c *Create) ProcessBridgeNetwork() error { if err != nil { return cli.NewExitError(fmt.Sprintf("Error parsing bridge network ip range: %s. Range must be in CIDR format, e.g., 172.16.0.0/12", err), 1) } + width, err := strconv.Atoi(c.BridgeNetworkWidth) + if err != nil || width > 30 { + return cli.NewExitError(fmt.Sprintf("Error parsing bridge network width: %s. Width must be an integer less than 31.", err), 1) + } + s, _ := c.Data.BridgeIPRange.Mask.Size() + if width < s { + return cli.NewExitError(fmt.Sprintf("Error specifying bridge network width: %d. Width must be larger than prefix of bridge network ip range.", width), 1) + } + w := net.CIDRMask(width, 32) + c.Data.BridgeNetworkWidth = &w return nil }
[Run->[ProcessParams,Run,logArguments],Flags->[Flags],logArguments->[SetFields]]
ProcessBridgeNetwork processes the bridge network.
Better to move validation codes into validator.network() api, so rest apis will share the same validation codes.
@@ -75,8 +75,8 @@ public class AvroUtils { static { // This works around a bug in the Avro library (AVRO-1891) around SpecificRecord's handling // of DateTime types. - SpecificData.get().addLogicalTypeConversion(new TimeConversions.TimestampConversion()); - GenericData.get().addLogicalTypeConversion(new TimeConversions.TimestampConversion()); + SpecificData.get().addLogicalTypeConversion(new JodaTimeConversions.TimestampConversion()); + GenericData.get().addLogicalTypeConversion(new JodaTimeConversions.TimestampConversion()); } // Unwrap an AVRO schema into the base type an whether it is nullable.
[AvroUtils->[getGenericRecordToRowFunction->[toBeamRowStrict],convertArrayStrict->[convertAvroFieldStrict],AvroSpecificRecordFieldValueTypeSupplier->[get->[get]],getRowToGenericRecordFunction->[toGenericRecord],convertMapStrict->[convertAvroFieldStrict,convertStringStrict],convertAvroFieldStrict->[TypeWithNullability],getToRowFunction->[toBeamSchema],genericFromBeamField->[fromBeamFieldType,genericFromBeamField,getSize,TypeWithNullability,toGenericRecord],getGetters->[AvroPojoFieldValueTypeSupplier,AvroSpecificRecordFieldValueTypeSupplier,getGetters],FixedBytesField->[withSize->[FixedBytesField],fromAvroType->[FixedBytesField],fromBeamFieldType->[FixedBytesField]],schemaCoder->[getToRowFunction,schemaCoder,getSchema,getFromRowFunction],getFieldTypes->[getFieldTypes,AvroPojoFieldValueTypeSupplier,AvroSpecificRecordFieldValueTypeSupplier],getSchema->[toBeamSchema],convertBytesStrict->[get],toFieldType->[toFieldType,TypeWithNullability,toBeamType,toBeamSchema],convertRecordStrict->[toBeamRowStrict],toAvroSchema->[toAvroField,toAvroSchema],getFieldSchema->[toAvroSchema,toAvroType,getFieldSchema,fromBeamFieldType],getCreator->[getCreator,AvroPojoFieldValueTypeSupplier],toBeamField->[TypeWithNullability],toBeamSchema->[toBeamField],toGenericRecord->[toAvroSchema],toBeamRowStrict->[toBeamSchema]]]
Imports a specific type from the Java schema. missing fields in AVRO are not supported in AVRO.
This is necessary because `TimeConversions` converts to `java.time` now? Maybe we need to consider adding support for `java.time` in `AvroSchemaProvider` now (not for this PR, of course)
@@ -81,10 +81,15 @@ public class ObjectToJsonTransformer extends AbstractTransformer { if (headers.containsKey(MessageHeaders.CONTENT_TYPE)) { if (this.contentTypeExplicitlySet){ // override - headers.put(MessageHeaders.CONTENT_TYPE, this.contentType); + if (StringUtils.hasLength(this.contentType)) { + headers.put(MessageHeaders.CONTENT_TYPE, this.contentType); + } + else { + headers.remove(MessageHeaders.CONTENT_TYPE); + } } } - else { + else if (StringUtils.hasLength(this.contentType)) { headers.put(MessageHeaders.CONTENT_TYPE, this.contentType); } messageBuilder.copyHeaders(headers);
[ObjectToJsonTransformer->[doTransform->[transformPayload]]]
This method is overridden to transform a message.
mix of concerns... It was `header-enricher` before. And now it is `header-filter` too ;-).
@@ -40,6 +40,10 @@ const ( // This file will be ignored when copying from the template cache to // a project directory. pulumiTemplateManifestFile = ".pulumi.template.yaml" + + // pulumiLocalTemplatePathEnvVar is a path to the folder where template are stored. + // It is used in sandboxed environments where the classic template folder may not be writable. + pulumiLocalTemplatePathEnvVar = "PULUMI_TEMPLATE_PATH" ) // Template represents a project template.
[CopyTemplateFiles->[ReadFile,IsDir,Mkdir,Base,IsExist],CopyTemplateFilesDryRun->[Base,Stat,IsDir],RemoveAll,FileMode,Wrap,IsNotExist,Copy,IgnoreClose,Stat,ReadFile,New,ReadDir,Errorf,Assert,Wrapf,Join,Current,Next,Name,Base,Require,MkdirAll,Write,IsDir,NewReader,Sprintf,Unmarshal,IsPackageName,Replace,OpenFile,IsExist]
LoadLocalTemplate loads a template from a local directory. returns the template with the name of the template in the templateDir.
Nit: template => templates (e.g. "... where template**s** are stored.")
@@ -37,7 +37,7 @@ namespace Dynamo.UI //public static readonly double Minimum // Generic Constants - public static readonly double PortHeightInPixels = 26.0; + public static readonly double PortHeightInPixels = 17.563333333333336; // Grid Settings public static readonly int GridSpacing = 100;
[FrozenResources->[FromRgb,Freeze,FromArgb],Configurations->[FromRgb,Light,Normal]]
A class that represents a single node in Dynamo. region Public API Read - only.
Hey @elayabharath, how do you like the new port height?
@@ -114,8 +114,7 @@ class AdminGeneralController < AdminController end # get all the models in the slice, eagerly loading the associations we use in the view public_body_versions = PublicBody.versioned_class. - includes(:public_body => :translations). - find(public_body_version_ids.keys) + find(public_body_version_ids.keys) info_request_events = InfoRequestEvent. includes(:info_request). find(info_request_event_ids.keys)
[AdminGeneralController->[timeline->[create,created_at,find,to_i,keys,size,updated_at,total_entries,replace,each_with_index,each,id],debug->[env],get_date_back_to_utc->[days,getutc,hour,month,week,years,now,day],get_timestamps->[select_rows,table_name,connection],stats->[count],index->[not_embargoed,can?,empty?,defunct?,incoming_messages,any?,select,open,is_searchable,embargoed]]]
Recent events .
Don't know what this is doing. If we need it, then: - tests - commit message explaining why we need the change.