patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -35,11 +35,11 @@ class InvoiceRequest(ModelMutation): @staticmethod def clean_order(order): - if order.status == OrderStatus.DRAFT: + if order.status in (OrderStatus.DRAFT, OrderStatus.UNCONFIRMED): raise ValidationError( { "orderId": ValidationError( - "Cannot request an invoice for draft order.", + "Cannot request an invoice for draft or unconfirmed order.", code=InvoiceErrorCode.INVALID_STATUS, ) }
[InvoiceRequestDelete->[perform_mutation->[InvoiceRequestDelete]],InvoiceRequest->[perform_mutation->[clean_order,InvoiceRequest]],InvoiceSendEmail->[perform_mutation->[InvoiceSendEmail,clean_instance]],InvoiceCreate->[Arguments->[InvoiceCreateInput],perform_mutation->[clean_order,InvoiceCreate,clean_input]],InvoiceUpdate->[Arguments->[UpdateInvoiceInput],perform_mutation->[clean_input,InvoiceUpdate]]]
Checks if order is not draft or billing address.
Maybe we should create some const for this status? For example `UNCONFIRMED_ORDER_STATUS= [ ...`
@@ -89,7 +89,7 @@ def handle_message_refundtransfer(raiden: 'RaidenService', message: RefundTransf ) else: state_change = ReceiveTransferRefund( - routes, + message.sender, from_transfer, )
[on_udp_message->[handle_message_secretrequest,handle_message_mediatedtransfer,handle_message_revealsecret,handle_message_directtransfer,handle_message_secret,handle_message_refundtransfer]]
Handles a refundtransfer message.
Something is wrong here. I just changed the argument to what the constructor expected, but the routes must be given to the state machine, otherwise it doesn't have were to re-route the transfer.
@@ -165,6 +165,7 @@ func (me *Metricset) Transform(tctx *transform.Context) []beat.Event { context := common.MapStr{} if me.Tags != nil { context["tags"] = me.Tags + fields["labels"] = context["tags"] } fields["context"] = tctx.Metadata.Merge(context)
[decodeSamples->[New,Float64,Errorf],Transform->[NewLogger,Warnf,Merge,IsZero,Inc,Put],Prune,MapStr,CreateSchema,TimeEpochMicro,New,TimeRFC3339,NewInt,NewRegistry,decodeSamples]
Transform transforms the metricset into a series of events.
Note that ECS has both the concept of `tags` and `labels`. `tags` are meant to be single words that are present or not (e.g. `truncated`), whereas `labels` is an object in Elasticsearch, where users can store simple key-value pairs like `environment:production`.
@@ -1236,10 +1236,15 @@ class Archiver: msg.append("This repository seems to have no manifest, so we can't tell anything about its " "contents.") else: - msg.append("You requested to completely DELETE the repository *including* all archives it " + if self.output_list: + msg.append("You requested to completely DELETE the repository *including* all archives it " "contains:") - for archive_info in manifest.archives.list(sort_by=['ts']): - msg.append(format_archive(archive_info)) + for archive_info in manifest.archives.list(sort_by=['ts']): + msg.append(format_archive(archive_info)) + else: + msg.append("You requested to completely DELETE the" + "repository *including* %d archives it" + "contains." % len(manifest.archives)) msg.append("Type 'YES' if you understand this and want to continue: ") msg = '\n'.join(msg) if not yes(msg, false_msg="Aborting.", invalid_msg='Invalid answer, aborting.', truish=('YES',),
[main->[get_args,run,Archiver],with_repository->[decorator->[wrapper->[argument]]],Archiver->[do_debug_search_repo_objs->[print_error,print_finding],_export_tar->[item_to_tarinfo->[print_warning,item_content_stream],build_filter,print_warning,build_matcher,item_to_tarinfo],do_prune->[print_error],do_mount->[print_error],do_check->[print_error],do_extract->[build_filter,print_warning,build_matcher],_list_archive->[_list_inner,build_matcher],do_delete->[print_error],run->[_setup_topic_debugging,prerun_checks,get_func,_setup_implied_logging],do_debug_dump_archive->[output->[do_indent],output],do_recreate->[print_error,build_matcher],do_key_export->[print_error],do_benchmark_crud->[measurement_run,test_files],_info_archives->[format_cmdline],_process->[print_file_status,_process,print_warning],do_config->[print_error,list_config],build_parser->[define_archive_filters_group->[add_argument],define_exclusion_group->[define_exclude_and_patterns],define_borg_mount->[define_archive_filters_group,add_argument,define_exclusion_group],add_common_group,define_archive_filters_group,CommonOptions,define_borg_mount,add_argument,process_epilog,define_exclusion_group],do_key_import->[print_error],CommonOptions->[add_common_group->[add_argument->[add_argument]]],do_diff->[build_matcher,print_output,print_warning],do_debug_dump_repo_objs->[decrypt_dump],parse_args->[get_func,resolve,preprocess_args,parse_args,build_parser],do_list->[print_error],do_create->[create_inner->[print_file_status,print_error,print_warning],create_inner],with_repository],main]
Delete a repository.
nitpick: we do not obey 80 chars/line, but (iirc) 120 (see flake8 configuration in setup.cfg or so). so, you could put this into 1 or 2 lines, not 3.
@@ -545,7 +545,7 @@ class NodeControllerFieldsTest extends SuluTestCase $path = $this->sessionManager->getContentPath('sulu_io') . '/' . $title; } if ($parent !== null) { - $path = $parent->getPath(); + $path = $parent->getPath() . '/' . $title; $document->setParent($parent); }
[NodeControllerFieldsTest->[createShadowPage->[createPage]]]
Creates a page.
Check why this was necessary.
@@ -244,4 +244,16 @@ class User extends BaseUser return null !== $this->getContact() ? $this->getContact()->getFullName() : $this->getUsername(); } + + /** + * @VirtualProperty + * @SerializedName("username") + * @Groups({"frontend", "fullUser"}) + * + * @return string + */ + public function getUsername() + { + return parent::getUsername(); + } }
[User->[getFullName->[getContact,getFullName],getRoleObjects->[getUserRoles],getRoles->[getUserRoles]]]
Get the full name of the node.
Why can't we add the annotations to the parent directly? Just calling the parent function and doing nothing else would be unnecessary otheriwse...
@@ -3,8 +3,10 @@ import gevent import rlp from ethereum import slogging from ethereum import _solidity +from ethereum.exceptions import InvalidTransaction from ethereum.transactions import Transaction from ethereum.utils import denoms, int_to_big_endian, encode_hex, normalize_address +<<<<<<< 4c31f51f14f0b23ec72f06c4605e9944fbe60a20 from pyethapp.jsonrpc import ( address_encoder, address_decoder,
[Registry->[assetadded_filter->[Filter,new_filter]],ChannelManager->[channelnew_filter->[Filter,new_filter]],BlockChainService->[next_block->[block_number],manager_by_asset->[asset],deploy_and_register_asset->[deploy_contract],__init__->[patch_send_transaction]],Filter->[changes->[decode_topic]],NettingChannel->[channelsecretrevealed_filter->[Filter,new_filter],channelnewbalance_filter->[Filter,new_filter],channelclosed_filter->[Filter,new_filter],channelsettled_filter->[Filter,new_filter],deposit->[Asset,balance_of,asset_address]]]
Get a single node object from the network. Check if the transaction threw or if it executed properly.
note to self: don't get distracted while rebasing
@@ -78,6 +78,8 @@ type ClientConfig struct { // ACKLastEvent reports the last ACKed event out of a batch of ACKed events only. // Only the events 'Private' field will be reported. ACKLastEvent func(interface{}) + + Tracer *apm.Tracer } // CloseRef allows users to close the client asynchronously.
[No CFG could be retrieved]
WaitClose is only effective if one of the following is configured.
Why do we need another local Tracer?
@@ -431,7 +431,9 @@ class MatrixTransport(Runnable): so that the token is used as a starting point from which messages are fetched from the matrix server. """ - state_change = ActionUpdateTransportSyncToken(f'{self._user_id}/{next_batch}') + state_change = ActionUpdateTransportSyncToken( + f'{self._user_id}/{self._client.api.token}/{next_batch}', + ) self._raiden_service.handle_state_change(state_change) def _spawn(self, func: Callable, *args, **kwargs) -> gevent.Greenlet:
[_RetryQueue->[enqueue->[_expiration_generator,_MessageData],_run->[_check_and_send],enqueue_global->[enqueue],_check_and_send->[message_is_in_queue]],MatrixTransport->[_send_with_retry->[enqueue,_get_retrier],_handle_presence_change->[_spawn,UserPresence],_get_retrier->[start,_RetryQueue],stop->[notify],start->[start],start_health_check->[whitelist],_update_address_presence->[_get_user_presence,notify],_leave_unused_rooms->[leave->[leave],_spawn],_get_user_presence->[UserPresence],_receive_message->[enqueue_global],_validate_userid_signature->[_recover]]]
This function gets called after every sync performed. It is called after every sync performed. It.
This isn't only the sync token anymore. Either we need to rename the state change or add a new one. Also doesn't really make sense IMO to store the access token on each sync. (The same goes for the user id, now that I think about it...).
@@ -198,8 +198,15 @@ def cancel_payment_intent( return payment_intent, None except StripeError as error: logger.warning( - "Unable to cancel a payment intent (%s), error", payment_intent_id + "Unable to cancel a payment intent", + extra={ + "error_message": error.user_message, + "http_status": error.http_status, + "code": error.code, + "payment_intent": payment_intent_id, + }, ) + return None, error
[is_secret_api_key_valid->[stripe_opentracing_trace],refund_payment_intent->[stripe_opentracing_trace],list_customer_payment_methods->[stripe_opentracing_trace],capture_payment_intent->[stripe_opentracing_trace],get_or_create_customer->[stripe_opentracing_trace],create_payment_intent->[stripe_opentracing_trace],subscribe_webhook->[stripe_opentracing_trace],retrieve_payment_intent->[stripe_opentracing_trace],cancel_payment_intent->[stripe_opentracing_trace],delete_webhook->[stripe_opentracing_trace],construct_stripe_event->[stripe_opentracing_trace]]
Cancel a payment intent.
As `extra` data are repeated few times, maybe we should create some function for preparing this?
@@ -3,6 +3,10 @@ require "digest/sha1" class Upload < ActiveRecord::Base + self.ignored_columns = [ + "verified" # TODO(2020-12-10): remove + ] + include ActionView::Helpers::NumberHelper include HasUrl
[Upload->[migrate_to_new_scheme->[migrate_to_new_scheme,generate_digest,rebake_posts_on_old_scheme],consider_for_reuse->[matching_access_control_post?,uploaded_before_secure_media_enabled?],fix_dimensions!->[local?],thumbnail_height->[get_dimension],width->[get_dimension],copied_from_other_post?->[matching_access_control_post?],fix_image_extension->[to_s],short_url_basename->[base62_sha1],sha1_from_base62_encoded->[to_s],thumbnail_width->[get_dimension],short_path->[short_path],base62_sha1->[base62_sha1],get_dimension->[fix_dimensions!],height->[get_dimension]]]
The base class for all the access control post models. The object has a unique ID that can be used to retrieve the image and create a thumbnail.
We should mark this column as readonly first to prevent any writes going to it.
@@ -40,7 +40,8 @@ bootRun { } <%_ if (!skipClient) { _%> -task webpackBuildDev(type: <%= clientPackageManager.charAt(0).toUpperCase() + clientPackageManager.slice(1) %>Task) { +task webpackBuildDev(type: <%= _.upperFirst(clientPackageManager) %>Task, dependsOn: '<%= clientPackageManager %>_install') { + onlyIf { shouldWebpackRun() == true } args = ["run", "webpack:build"] }
[No CFG could be retrieved]
Package - level functions find all bootstrap. yml files that match the bootstrap. yml and include the required resources.
@atomfrede is this good or is there a better way to do this?
@@ -435,6 +435,7 @@ def add_dynamic_theme_tag(ids, **kw): files = addon.current_version.all_files if any('theme' in file_.webext_permissions_list for file_ in files): Tag(tag_text='dynamic theme').save_tag(addon) + index_addons.delay([addon.id]) def extract_strict_compatibility_value_for_addon(addon):
[theme_checksum->[make_checksum],calc_checksum->[make_checksum],rereviewqueuetheme_checksum->[make_checksum],add_static_theme_from_lwt->[_get_lwt_default_author],save_theme_reupload->[rereviewqueuetheme_checksum,save_persona_image],save_theme->[create_persona_preview_images,theme_checksum,save_persona_image],migrate_lwts_to_static_themes->[add_static_theme_from_lwt],bump_appver_for_legacy_addons->[bump_appver_for_addon_if_necessary],delete_preview_files->[delete_preview_files],bump_appver_for_addon_if_necessary->[extract_strict_compatibility_value_for_addon]]
Add a dynamic theme tag to addons with the specified ids.
You can do that after the loop, with `index_addons.delay(ids)`. Fewer tasks triggered that way.
@@ -51,7 +51,7 @@ import static io.netty.util.AsciiString.EMPTY_STRING; import static io.netty.util.internal.ObjectUtil.checkPositive; import static io.netty.util.internal.ThrowableUtil.unknownStackTrace; -final class HpackDecoder { +public final class HpackDecoder { private static final Http2Exception DECODE_ULE_128_DECOMPRESSION_EXCEPTION = unknownStackTrace( Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - decompression failure", Http2Exception.ShutdownHint.HARD_SHUTDOWN), HpackDecoder.class,
[HpackDecoder->[size->[size],Http2HeadersSink->[appendToHeaderList->[validate]],length->[length],setMaxHeaderListSize->[setMaxHeaderListSize],getIndexedHeader->[length],readStringLiteral->[decode],readName->[length],decodeULE128->[decodeULE128],decode->[decode]]]
Imports the class that implements the HpackDecoder interface. - illegal index value - decode exception.
Why make this `public`?
@@ -279,7 +279,6 @@ func TestEmitEvent_Service(t *testing.T) { "host": "192.168.0.1", "id": uid, "provider": UUID, - "port": 8080, "kubernetes": common.MapStr{ "service": common.MapStr{ "name": "metricbeat",
[Equal,NewLogger,UID,After,emit,New,NewV4,NewConfigMapper,NewServiceMetadataGenerator,Fatal,NewConfig,Events,GenerateHints,Run,Subscribe]
TestEmitEvent_Service creates a service that emits events when a service has a specific returns a description of the last non - nil object in the cluster.
Why is the port removed from service events?
@@ -534,10 +534,11 @@ class AmpVideo extends AMP.BaseElement { /** * @param {string} src * @param {?string} type + * @param {?string} bitrate * @return {!Element} source element * @private */ - createSourceElement_(src, type) { + createSourceElement_(src, type, bitrate = null) { const {element} = this; this.getUrlService_().assertHttpsUrl(src, element); const source = element.ownerDocument.createElement('source');
[No CFG could be retrieved]
Create a source element for the given element if there are any cached sources. Gets the first cached source of the current element.
if this parameter is optional: `string=` But from your map it's a `number`?
@@ -1289,7 +1289,7 @@ public abstract class AbstractByteBuf extends ByteBuf { } } - private int forEachByteAsc0(int start, int end, ByteProcessor processor) throws Exception { + protected int forEachByteAsc0(int start, int end, ByteProcessor processor) throws Exception { for (; start < end; ++start) { if (!processor.process(_getByte(start))) { return start;
[AbstractByteBuf->[resetWriterIndex->[writerIndex],setIndex->[checkIndexBounds],resetReaderIndex->[readerIndex],writeShortLE->[ensureWritable0,_setShortLE],writeZero->[ensureWritable,_setLong,_setByte,_setInt],getDouble->[getLong],getUnsignedShortLE->[getShortLE],checkReadableBounds->[readableBytes],asReadOnly->[isReadOnly],readIntLE->[_getIntLE],setBoolean->[setByte],readerIndex->[checkIndexBounds],readLong->[_getLong],writeCharSequence->[setCharSequence0],readShort->[_getShort],writeBytes->[writeBytes,readableBytes,setBytes,readerIndex,ensureWritable,ensureWritable0,checkReadableBounds],maxWritableBytes->[maxCapacity],getBytes->[writableBytes,getBytes,writerIndex],slice->[readableBytes,slice],writeByte->[ensureWritable0,_setByte],checkDstIndex->[checkIndex,checkRangeBounds],readUnsignedInt->[readInt],nioBuffer->[nioBuffer,readableBytes],readLongLE->[_getLongLE],copy->[copy,readableBytes],getMedium->[getUnsignedMedium],readBoolean->[readByte],readBytes->[writableBytes,getBytes,writerIndex,readBytes],writeIntLE->[_setIntLE,ensureWritable0],writeChar->[writeShort],forEachByteDesc0->[_getByte],readUnsignedShort->[readShort],checkIndex0->[checkRangeBounds],readUnsignedShortLE->[readShortLE],setDouble->[setLong],checkIndex->[checkIndex],getUnsignedByte->[getByte],setChar->[setShort],ensureWritable0->[writableBytes],getBoolean->[getByte],writeMediumLE->[ensureWritable0,_setMediumLE],writeDouble->[writeLong],readInt->[_getInt],setZero->[_setLong,_setByte,_setInt],writeInt->[ensureWritable0,_setInt],hashCode->[hashCode],readByte->[_getByte],readChar->[readShort],getChar->[getShort],writeLong->[_setLong,ensureWritable0],writeLongLE->[_setLongLE,ensureWritable0],writeShort->[_setShort,ensureWritable0],order->[order],readFloat->[readInt],getUnsignedIntLE->[getIntLE],nioBuffers->[nioBuffers,readableBytes],readCharSequence->[getCharSequence],bytesBefore->[readableBytes,indexOf,bytesBefore,readerIndex],checkNewCapacity->[maxCapacity],setCharSequence0->[ensureWritable0,getBytes,s
etBytes],getFloat->[getInt],forEachByteAsc0->[_getByte],indexOf->[indexOf],getUnsignedInt->[getInt],equals->[equals],writerIndex->[checkIndexBounds],readUnsignedMedium->[_getUnsignedMedium],getUnsignedShort->[getShort],readShortLE->[_getShortLE],readUnsignedByte->[readByte],checkSrcIndex->[checkIndex,checkRangeBounds],setFloat->[setInt],readDouble->[readLong],writeFloat->[writeInt],setBytes->[readableBytes,checkReadableBounds,setBytes,readerIndex],writeMedium->[_setMedium,ensureWritable0],ensureWritable->[maxCapacity,writableBytes,writerIndex],readUnsignedIntLE->[readIntLE],getMediumLE->[getUnsignedMediumLE],toString->[toString,readableBytes],readUnsignedMediumLE->[_getUnsignedMediumLE]]]
This method loops through the entire ByteArray in ascending order calling the given ByteProcessor for each.
can we make these package-private for now ?
@@ -304,6 +304,8 @@ Template for BigQuery jobs created by BigQueryIO. This template is: NOTE: This job name template does not have backwards compatibility guarantees. """ BQ_JOB_NAME_TEMPLATE = "beam_bq_job_{job_type}_{job_id}_{step_id}{random}" +"""The number of shards per destination when writing via streaming inserts.""" +DEFAULT_SHARDS_PER_DESTINATION = 500 @deprecated(since='2.11.0', current="bigquery_tools.parse_table_reference")
[RowAsDictJsonCoder->[RowAsDictJsonCoder],BigQueryReader->[BigQueryReader],_JsonToDictCoder->[_decode_with_schema->[_decode_with_schema],_convert_to_tuple->[_convert_to_tuple],decode->[decode]],_StreamToBigQuery->[expand->[InsertIdPrefixFn,BigQueryWriteFn]],WriteToBigQuery->[expand->[_compute_method,_StreamToBigQuery],from_runner_api->[deserialize->[from_runner_api],WriteToBigQuery,deserialize],to_runner_api_parameter->[serialize],__init__->[validate_write,validate_create],display_data->[format]],BigQuerySink->[display_data->[format],schema_as_json->[schema_list_as_object->[schema_list_as_object],schema_list_as_object],writer->[BigQueryWriter],__init__->[RowAsDictJsonCoder,validate_write,split,validate_create]],default_encoder->[default_encoder],_to_bytes->[encode],parse_table_schema_from_json->[parse_table_schema_from_json],BigQueryWrapper->[BigQueryWrapper],_CustomBigQuerySource->[_execute_query->[_get_project],estimate_size->[BigQueryWrapper],split->[_create_source,_get_project,BigQueryWrapper],get_range_tracker->[CustomBigQuerySourceRangeTracker],_export_files->[_get_project],_setup_temporary_dataset->[_get_project]],BigQuerySource->[reader->[BigQueryReader],__init__->[RowAsDictJsonCoder]],BigQueryWriteFn->[_flush_batch->[format],process->[_create_table_if_needed],start_bundle->[_reset_rows_buffer,BigQueryWrapper],_create_table_if_needed->[get_table_schema],get_table_schema->[parse_table_schema_from_json]],BigQueryWriter->[BigQueryWriter],ReadFromBigQuery->[expand->[RemoveJsonFiles,_CustomBigQuerySource,_get_destination_uri,_PassThroughThenCleanup],_validate_gcs_location->[format],_get_destination_uri->[format]]]
Helper function to parse a bigquery table reference.
I believe Java uses 50 shards. Do we need a larger default for Python ?
@@ -569,6 +569,18 @@ def decode_base64_dict(data): array = array.reshape(data['shape']) return array +def encode_base64_string(str, value): + ''' Encode a ascii string using Base64. + + Args: + str : a string to encode + + Returns: + string + + ''' + b64_string = base64.b64encode(value.encode("ascii")) + return b64_string #----------------------------------------------------------------------------- # Dev API #-----------------------------------------------------------------------------
[transform_column_source_data->[transform_series,transform_array,traverse_data],transform_series->[transform_array],serialize_array->[transform_array_to_list,array_encoding_disabled],convert_datetime_type->[convert_date_to_datetime],traverse_data->[traverse_data,transform_array],transform_array->[convert_datetime_array],encode_binary_dict->[make_id]]
Decode a base64 encoded array into a NumPy array.
Why limit strings to ASCII? `utf-8` encoding seems to be a much better choice in my opinion.
@@ -19,9 +19,7 @@ import('classes.issue.Issue'); define('PLN_PLUGIN_NAME','plnplugin'); -define('PLN_PLUGIN_NETWORKS', serialize(array( - 'PKP' => 'pkp-pln.lib.sfu.ca' -))); +define('PLN_PLUGIN_STAGING_SERVER', 'pkp-pln.lib.sfu.ca'); define('PLN_PLUGIN_HTTP_STATUS_OK', 200); define('PLN_PLUGIN_HTTP_STATUS_CREATED', 201);
[PLNPlugin->[callbackJournalArchivingSetup->[getTemplatePath],callbackTemplateDisplay->[getStyleSheet],callbackLoadHandler->[getHandlerPath],termsAgreed->[getSetting],getServiceDocument->[getSetting]]]
Creates a new instance of a PLNPlugin given a list of all available node identifiers. 9. 1. 1 Syncing - > 0x08.
where did it go?
@@ -93,9 +93,11 @@ async function coverageMap() { puppeteer = require('puppeteer'); explore = require('source-map-explorer').explore; - await dist(); + if (!argv.nodist) { + await dist(); + } await startServer( - {host: 'localhost', port: 8000}, + {host: 'localhost', port: serverPort}, {quiet: true}, {compiled: true} );
[No CFG could be retrieved]
Generate a code coverage heat map based on code traversed during puppeteer test.
can we be consistent, i believe on the other commands it will be `nobuild`?
@@ -75,4 +75,10 @@ public class ExpressionColumnValueSelector implements ColumnValueSelector<ExprEv inspector.visit("expression", expression); inspector.visit("bindings", bindings); } + + @Override + public boolean isNull() + { + return getObject() == null; + } }
[ExpressionColumnValueSelector->[getDouble->[asDouble],inspectRuntimeShape->[visit],getLong->[asLong],getFloat->[asDouble],getObject->[eval],checkNotNull]]
Inspects the runtime shape of the managed object.
According to the current way how those methods are implemented, it should be `return false`
@@ -515,6 +515,14 @@ class Select extends React.Component<PropsT, SelectStateT> { } }; + handleHighlightChange = (option?: OptionT) => { + if (option) { + this.setState({transientInputValue: option[this.props.labelKey]}); + } else { + this.setState({transientInputValue: ''}); + } + }; + addValue = (item: OptionT) => { const valueArray = [...this.props.value]; this.setValue(valueArray.concat(item), item, STATE_CHANGE_TYPE.select);
[No CFG could be retrieved]
Select value for a given item Adds a value to the list of items that can be selected.
The selected value rendering can be defined by `getValueLabel` and the final displayed value can be different from the value of the `labelKey` prop. This will create inconsistency in the display value while navigating through options and when one of the options selected.
@@ -813,16 +813,9 @@ class RevealSecret(SignedRetrieableMessage): that must not update the internal channel state. """ - cmdid = messages.REVEALSECRET + cmdid: ClassVar[Optional[int]] = messages.REVEALSECRET - def __init__(self, *, message_identifier: MessageID, secret: Secret, **kwargs): - super().__init__(message_identifier=message_identifier, **kwargs) - self.secret = secret - - def __repr__(self): - return "<{} [msgid:{} secrethash:{} hash:{}]>".format( - self.__class__.__name__, self.message_identifier, pex(self.secrethash), pex(self.hash) - ) + secret: Secret = field(repr=False) @property # type: ignore @cached(_hashes_cache, key=attrgetter("secret"))
[from_dict->[from_dict],LockedTransferBase->[unpack->[Lock],__init__->[assert_transfer_values]],RefundTransfer->[from_event->[Lock],from_dict->[from_dict],to_dict->[to_dict],unpack->[Lock]],EnvelopeMessage->[message_hash->[packed],__init__->[assert_envelope_values]],Message->[__ne__->[__eq__]],Lock->[from_bytes->[Lock],as_bytes->[Lock],__ne__->[__eq__]],Pong->[unpack->[Pong]],RequestMonitoring->[from_dict->[from_dict],from_balance_proof_signed_state->[from_balance_proof_signed_state],packed->[pack],sign->[sign,_data_to_sign,_sign],unpack->[SignedBlindedBalanceProof],to_dict->[to_dict]],UpdatePFS->[packed->[pack],from_dict->[from_dict],to_dict->[to_dict]],SignedMessage->[sender->[_data_to_sign],sign->[sign,_data_to_sign],_data_to_sign->[packed],decode->[unpack]],RevealSecret->[unpack->[RevealSecret]],LockedTransfer->[from_event->[Lock],from_dict->[from_dict],to_dict->[to_dict],unpack->[Lock]],SignedBlindedBalanceProof->[_sign->[sign,_data_to_sign]],decode->[decode]]
Initialize a sequence identifier.
`Optional` type is not necessary
@@ -55,6 +55,7 @@ type CertFactory struct { NoTLSverify bool KeyPEM []byte CertPEM []byte + NoSaveToDisk bool } func (c *CertFactory) CertFlags() []cli.Flag {
[loadCertificates->[IsNotExist,LoadCertificate,Join,Error,Infof,ReadFile,Begin,Certificate,VerifyClientCert,New,Warnf,End,Errorf,NewKeyPair,Debugf],ProcessCertificates->[NewExitError,Error,Infof,Warn,Sprintf,generateCertificates,Warnf,New,loadCertificates,String,Errorf,ParseCIDR],generateCertificates->[Certificate,Warnf,CreateServerCertificate,Info,HasPrefix,SaveCertificate,CombinedOutput,Error,Clean,New,End,ReadDir,Errorf,NewKeyPair,Debugf,Debug,Join,Infof,Split,MkdirAll,Command,CreateClientCertificate,Begin,Sprintf,CreateSelfSigned,CreateRootCA]]
CertFlags returns a list of flags that can be passed to the CLI interface. - - - - - - - - - - - - - - - - - -.
We should consider calling this `SaveToDisk` since `if c.SaveToDisk`is easier to understand than `if !c.NoSaveToDisk` for all the checks below. Also, since booleans are false by default `c.Certs.NoSaveToDisk = true` wouldn't be needed either.
@@ -137,12 +137,15 @@ class DocumentTranslationTest(AzureTestCase): # model helpers - def _validate_doc_status(self, doc_details, target_language): + def _validate_doc_status(self, doc_details, target_language, **kwargs): + status = kwargs.pop("statuses", ["Succeeded"]) + ids = kwargs.pop("statuses", None) # specific assertions - self.assertEqual(doc_details.status, "Succeeded") + self.assertIn(doc_details.status, status) self.assertEqual(doc_details.has_completed, True) self.assertIsNotNone(doc_details.translate_to, target_language) # generic assertions + self.assertIn(doc_details.id, ids) if ids else self.assertIsNotNone(doc_details.id) self.assertIsNotNone(doc_details.id) self.assertIsNotNone(doc_details.source_document_url) self.assertIsNotNone(doc_details.translated_document_url)
[DocumentTranslationTest->[create_target_container->[upload_documents],__init__->[OperationLocationReplacer],_create_translation_job_with_dummy_docs->[_validate_translation_job,create_dummy_docs,create_target_container,create_source_container],_submit_and_validate_translation_job->[_validate_translation_job],create_source_container->[upload_documents],_create_and_submit_sample_translation_jobs->[create_target_container,Document,create_source_container]]]
Checks that the given doc_details object is valid.
This might break other tests where we were just passing the string status. You might have to check if the status is an instance of a list before calling self.assertIn
@@ -121,9 +121,9 @@ class BlobLeaseClient(object): **kwargs) except StorageErrorException as error: process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = kwargs.get('etag') # type: str + self.id = response.get('lease_id') + self.last_modified = response.get('last_modified') + self.etag = kwargs.get('etag') @distributed_trace def renew(self, **kwargs):
[get_access_conditions->[LeaseAccessConditions],BlobLeaseClient->[change->[change_lease,get,process_storage_error,pop,get_modify_conditions],acquire->[get,pop,process_storage_error,acquire_lease,get_modify_conditions],__exit__->[release],break_lease->[pop,get,process_storage_error,break_lease,get_modify_conditions],release->[get,pop,process_storage_error,release_lease,get_modify_conditions],__init__->[str,TypeError,uuid4,hasattr],renew->[pop,get,process_storage_error,renew_lease,get_modify_conditions]],TypeVar]
Requests a new lease on the container. A method to renew a lease. Get lease id and last modified from response.
would you like to make it `self.etag = response.get('etag')`, I feel it's a 'typo' in the existing code
@@ -499,8 +499,10 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running defer func() { if !success { m.ipamDel(req.SandboxID) - if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil { - glog.Warningf("failed syncing hostports: %v", err) + if m.hostportSyncer != nil { + if err := m.hostportSyncer.SyncHostports(Tun0, m.getRunningPods()); err != nil { + glog.Warningf("failed syncing hostports: %v", err) + } } } }()
[teardown->[ipamDel,getRunningPods],UpdateLocalMulticastRules->[updateLocalMulticastRulesWithLock],setup->[ipamAdd,ipamDel,getRunningPods],Start->[Start],handleCNIRequest->[waitRequest,addRequest],processRequest->[updateLocalMulticastRulesWithLock]]
setup creates a container and IPAM for the given pod This function is used to set the MAC address of the host interface and the IP address of This function is called by ipam to setup the pod and return a runningPod object.
chain of booleans. Can we check for enableHostports here?
@@ -175,6 +175,7 @@ public class UdfIndex<T extends IndexedFunction> { private KsqlException createNoMatchingFunctionException(final List<Schema> paramTypes) { LOG.debug("Current UdfIndex:\n{}", describe()); + System.out.println(describe()); final String sqlParamTypes = paramTypes.stream() .map(schema -> schema == null
[UdfIndex->[describe->[describe],Node->[describe->[describe],compare->[compare]],getCandidates->[getCandidates],values->[values],Parameter->[accepts->[equals],equals->[equals]]]]
Creates a KsqlException for a missing matching function exception.
I'm assuming you didn't mean to leave this in :D
@@ -257,14 +257,16 @@ namespace Dynamo.Engine if (updatedNodes == null) return null; + var tempSyncDataManager = syncDataManager.Clone(); var activeNodes = updatedNodes.Where(n => n.State != ElementState.Error); if (activeNodes.Any()) { - astBuilder.CompileToAstNodes(activeNodes, AstBuilder.CompilationContext.DeltaExecution, verboseLogging); + astBuilder.CompileToAstNodes(activeNodes, AstBuilder.CompilationContext.PreviewGraph, verboseLogging); } GraphSyncData graphSyncdata = syncDataManager.GetSyncData(); List<Guid> previewGraphData = this.liveRunnerServices.PreviewGraph(graphSyncdata, verboseLogging); + syncDataManager = tempSyncDataManager; lock (previewGraphQueue) {
[EngineController->[ReconcileTraceDataAndNotify->[OnTraceReconciliationComplete],ImportLibrary->[ImportLibrary],UpdateGraph->[UpdateGraph],ShowRuntimeWarnings->[GetRuntimeWarnings],ShowBuildWarnings->[GetBuildWarnings],GetRuntimeWarnings->[GetRuntimeWarnings],GetBuildWarnings->[GetBuildWarnings],Dispose->[Dispose]],CompilationServices->[PreCompileCodeBlock->[PreCompileCodeBlock]]]
PreviewGraphSyncData - Previewes the graph sync data for the given nodes.
So same Clone strategy applies to UI side?
@@ -105,7 +105,7 @@ public class NoProcessFilesBehind extends TestWatcher { java.lang.management.RuntimeMXBean runtime = java.lang.management.ManagementFactory.getRuntimeMXBean(); java.lang.reflect.Field jvmField = runtime.getClass().getDeclaredField("jvm"); jvmField.setAccessible(true); - sun.management.VMManagement jvm = (sun.management.VMManagement) jvmField.get(runtime); + Object jvm = jvmField.get(runtime); java.lang.reflect.Method getProcessIdMethod = jvm.getClass().getDeclaredMethod("getProcessId"); getProcessIdMethod.setAccessible(true); return (Integer) getProcessIdMethod.invoke(jvm);
[NoProcessFilesBehind->[starting->[getOpenFD],finished->[getOpenFD],getOpenList->[getOpenFiles]]]
getProcessId - Get the process id of the JVM.
What about the other internal class we use for get files open, that one is ok?
@@ -85,13 +85,13 @@ public class BeamSqlEnv implements Serializable{ } private static class BeamCalciteTable implements ScannableTable, Serializable { - private BeamSqlRecordType beamSqlRecordType; - public BeamCalciteTable(BeamSqlRecordType beamSqlRecordType) { - this.beamSqlRecordType = beamSqlRecordType; + private BeamSqlRowType beamSqlRowType; + public BeamCalciteTable(BeamSqlRowType beamSqlRowType) { + this.beamSqlRowType = beamSqlRowType; } @Override public RelDataType getRowType(RelDataTypeFactory typeFactory) { - return CalciteUtils.toCalciteRecordType(this.beamSqlRecordType) + return CalciteUtils.toCalciteRecordType(this.beamSqlRowType) .apply(BeamQueryPlanner.TYPE_FACTORY); }
[BeamSqlEnv->[findTable->[get],registerUdaf->[add,create],registerUdf->[add,create],BeamCalciteTable->[getRowType->[apply]],registerTable->[add,BeamCalciteTable,getRecordType,put],BeamQueryPlanner,createRootSchema]]
Get the row type of the next row.
to `return CalciteUtils.toCalciteRowType(this.beamSqlRowType)`
@@ -149,6 +149,9 @@ namespace ComWrappersTests.Common [DllImport(nameof(MockReferenceTrackerRuntime))] extern public static int Trigger_NotifyEndOfReferenceTrackingOnThread(); + + [DllImport(nameof(MockReferenceTrackerRuntime))] + extern public static int IsWrapperConnected(IntPtr instance); } [Guid("42951130-245C-485E-B60B-4ED4254256F8")]
[ITrackerObjectWrapper->[AddObjectRef->[AddObjectRef],DropObjectRef->[DropObjectRef]],ITestVtbl->[SetValueInternal->[SetValue]]]
Notifies the end of the reference tracking thread.
> int Are you just wanting to avoid the marshalling, but want `BOOL` on the native export? (Just stood out to me, since the value goes `bool` -> `BOOL` -> `int`).
@@ -217,6 +217,15 @@ func retrievePulumiTemplates(templateName string, offline bool) (TemplateReposit subDir := templateDir if templateName != "" { subDir = filepath.Join(subDir, templateName) + + // Provide a nicer error message when the template can't be found (dir doesn't exist). + _, err := os.Stat(subDir) + if err != nil { + if os.IsNotExist(err) { + return TemplateRepository{}, newTemplateNotFoundError(templateDir, templateName) + } + contract.IgnoreError(err) + } } return TemplateRepository{
[CopyTemplateFiles->[ReadFile,IsDir,Mkdir,Base,IsExist],Templates->[IsNotExist,Dir,Join,Stat,IsDir,Name,ReadDir],Delete->[RemoveAll],CopyTemplateFilesDryRun->[Base,Stat,IsDir],RemoveAll,GitCloneOrPull,TempDir,Wrap,HasPrefix,PlainOpen,IgnoreClose,Stat,New,ReadDir,Errorf,Assert,Join,Current,Name,GetGitReferenceNameOrHashAndSubDirectory,ToLower,Base,Require,ParseGitRepoURL,MkdirAll,Write,IsDir,Sprintf,GitCloneAndCheckoutCommit,FromSlash,IsPackageName,String,Replace,OpenFile,Getenv]
RetrievePulumiTemplates retrieves the template repository for the given template name. check if there is a clone and checkout the commit.
This does suffer from TOCTOU issues. I wonder, is it possible instead to check the error when we actually try to open the file?
@@ -187,6 +187,13 @@ class JvmCompile(NailgunTaskBase): register('--suggest-missing-deps', type=bool, help='Suggest missing dependencies on a best-effort basis from target\'s transitive' 'deps for compilation failures that are due to class not found.') + + register('--missing-deps-not-found-msg', advanced=True, type=str, + help='The string that should be printed when pants can\'t find any suggestions for ' + 'targets containing the classes not found during compilation. This should ' + 'likely include a link to documentation about dependency management.', + default='Please see https://www.pantsbuild.org/3rdparty_jvm.html for ' + 'more information.') register('--class-not-found-error-patterns', advanced=True, type=list, default=CLASS_NOT_FOUND_ERROR_PATTERNS,
[JvmCompile->[_find_failed_compile_logs->[name],_check_unused_deps->[joined_dep_msg],_register_vts->[compute_classes_by_source],_compile_context->[_analysis_for_target,strict_deps_enabled,_portable_analysis_for_target],_fingerprint_strategy->[ResolvedJarAwareFingerprintStrategy],strict_deps_enabled->[_compute_language_property],do_compile->[execute,name],__init__->[size_estimator_by_name,compiler_plugin_types,create_analysis_tools],_portable_analysis_for_target->[_analysis_for_target],execute->[_fingerprint_strategy,_compile_context,extra_compile_time_classpath_elements,classpath_for_context],_create_compile_jobs->[check_cache->[check_artifact_cache],work_for_vts->[_check_unused_deps,check_cache,_compute_language_property,write_extra_resources,_register_vts,dependencies,_upstream_analysis,should_compile_incrementally,_compile_vts],Counter,exec_graph_key_for_target],validate_analysis->[validate_analysis],_compute_extra_classpath->[extra_compile_classpath_iter],check_artifact_cache->[post_process->[_compile_context]],_record_target_stats->[record],_compute_sources_for_target->[resolve_target_sources],_compile_vts->[_record_compile_classpath,compile,name]],ResolvedJarAwareFingerprintStrategy->[dependencies->[direct]]]
Register options for jvm compile. Additional options for the class. Register the compile_classpath option.
small copy edit: s/`The string that should be printed`/`The message to print`/
@@ -499,12 +499,6 @@ public class DataflowRunner extends PipelineRunner<DataflowPipelineJob> { PTransformOverride.of( PTransformMatchers.writeWithRunnerDeterminedSharding(), new StreamingShardedWriteFactory(options))); - if (fnApiEnabled) { - overridesBuilder.add( - PTransformOverride.of( - PTransformMatchers.classEqualTo(Create.Values.class), - new StreamingFnApiCreateOverrideFactory())); - } overridesBuilder.add( PTransformOverride.of(
[DataflowRunner->[ImpulseTranslator->[translate->[getCoder]],replaceTransforms->[getOverrides],containsUnboundedPCollection->[BoundednessVisitor],StreamingPubsubSinkTranslators->[StreamingPubsubIOWriteTranslator->[translate->[translate,getOverriddenTransform]],translate->[IdentityMessageFn]],StreamingFnApiCreate->[expand->[apply],DecodeAndEmitDoFn->[getCoder->[getCoder],processElement->[getCoder]]],run->[stageArtifacts,maybeRegisterDebuggee],ReflectiveViewOverrideFactory->[findCreatePCollectionView->[enterCompositeTransform->[enterCompositeTransform]]],fromOptions->[DataflowRunner],StreamingUnboundedRead->[ReadWithIdsTranslator->[translate->[getSource]],expand->[apply]],StreamingBoundedRead->[getSource],StreamingPubsubIOWriteOverrideFactory->[getReplacementTransform->[StreamingPubsubIOWrite]],getContainerImageForJob->[toString],useStreamingEngine->[hasExperiment],StreamingPubsubIOReadTranslator->[translate->[translateOverriddenPubsubSourceStep,getOverriddenTransform]],maybeRegisterDebuggee->[debuggerMessage],PrimitiveCombineGroupedValuesOverrideFactory->[getReplacementTransform->[getCoder]],Deduplicate->[expand->[apply]],useUnifiedWorker->[hasExperiment],ImpulseTranslator,fromOptions]]
Returns a list of overrides that should be applied to the given input. Adds a missing override for any type of object that can be transformed. Adds a filter for a specific key - value mapping that can be used to filter the results Add a missing override for any type.
I just noticed that we only swap SDF wrapper out when in batch and we are using SDF wrapper in streaming on runner_v1 now. Though runner_v1 supports Splittable DoFn, should we swap the read for streaming as well?
@@ -218,7 +218,7 @@ class SemanticRoleLabeler(Model): default_lstm_params = { 'type': 'alternating_lstm', - 'input_size': 100, + 'input_size': 101, # Because of the verb_indicator feature. 'hidden_size': 300, 'num_layers': 8, 'recurrent_dropout_probability': 0.1,
[SemanticRoleLabeler->[tag->[forward],from_params->[from_params]]]
Constructs a new semantic role labeler from the given model and parameters.
Did you ever put in a check in the model that these sizes matches?
@@ -904,10 +904,10 @@ func WithNetNS(portMappings []ocicni.PortMapping, postConfigureNetNS bool, netmo } ctr.config.PostConfigureNetNS = postConfigureNetNS - ctr.config.CreateNetNS = true + ctr.config.NetMode = namespaces.NetworkMode(netmode) + ctr.config.CreateNetNS = !ctr.config.NetMode.IsUserDefined() ctr.config.PortMappings = portMappings ctr.config.Networks = networks - ctr.config.NetMode = namespaces.NetworkMode(netmode) return nil }
[WithPod->[ID],ID,Wrapf,Join,Stat,MatchString,InfraContainerID,Wrap,MustCompile,ParseIP,NetworkMode]
WithDependencyCtrs sets the dependencies of the given container. This function is called to initialize the container object.
We can nuke netmode now, not using it anymore
@@ -1580,14 +1580,14 @@ namespace System.Windows.Forms.Design /// </summary> protected override void WndProc(ref Message m) { - switch ((User32.WM)m.Msg) + switch (m._Msg) { case User32.WM.KILLFOCUS: base.WndProc(ref m); - IntPtr focussedWindow = m.WParam; - if (!IsParentWindow(focussedWindow)) + IntPtr focusedWindow = m._WParam; + if (!IsParentWindow(focusedWindow)) { - owner.Commit(false, false); + owner.Commit(enterKeyPressed: false, tabKeyPressed: false); } break;
[ToolStripTemplateNode->[TemplateTextBox->[IsInputKey->[IsInputKey,Commit],WndProc->[Commit,WndProc,IsParentWindow],ProcessDialogKey->[ProcessDialogKey]],TransparentToolStrip->[CommitAndSelectNext->[Commit],ProcessDialogKey->[ProcessDialogKey,ProcessTabKey],ProcessTabKey->[CommitAndSelectNext],WndProc->[WndProc],SetBoundsCore->[SetBoundsCore]],OnKeyUp->[Commit,CommitEditor],MiniToolStripRenderer->[OnRenderLabelBackground->[Size,OnRenderLabelBackground,DrawDropDown],OnRenderToolStripBorder->[Size],OnRenderSplitButtonBackground->[Size,DrawArrow],OnRenderItemText->[OnRenderItemText],DrawDropDown->[DrawArrow],OnRenderToolStripBackground->[OnRenderToolStripBackground],DrawArrow->[DrawArrow]],CommitAndSelect->[Commit],OnKeyDefaultAction->[CommitEditor],RollBack->[CommitEditor],OnLoaderFlushed->[Commit],CommitEditor->[CommitTextToDesigner],SetupNewEditNode->[SetUpMenuTemplateNode,SetUpToolTemplateNode],FocusEditor->[EnterInSituEdit]]]
Override WndProc to handle unknown key types.
Missing `IntPtr `replacement?
@@ -117,7 +117,12 @@ def pandas_input_fn(x, features = features[1:] features = dict(zip(list(x.columns), features)) if y is not None: - target = features.pop(target_column) + if isinstance(target_column, list): + keys = [k for k, _ in y_columns] + values = [features.pop(column) for column in target_column] + target = {k: v for k, v in zip(keys, values)} + else: + target = features.pop(target_column) return features, target return features return input_fn
[pandas_input_fn->[input_fn->[list,dict,dequeue_many,dequeue_up_to,pop,len,_enqueue_data,zip],TypeError,array_equal,max,len,copy,format,isinstance,ValueError],tf_export]
Returns input function that will feed pandas DataFrame into the model. Input function for picking a single missing node.
in order to make this check valid, it is critical to add a precheck above to ensure the target_column passed in must not be a list.
@@ -74,7 +74,7 @@ namespace Microsoft.Extensions.DependencyModel while (reader.Read() && reader.TokenType == JsonTokenType.String) { - items.Add(reader.GetString()); + items.Add(reader.GetString()!); } if (reader.TokenType != JsonTokenType.EndArray)
[Utf8JsonReaderExtensions->[TryReadStringProperty->[IsTokenTypeProperty],ReadAsNullableBoolean->[IsTokenTypeProperty],ReadAsBoolean->[IsTokenTypeProperty],ReadAsString->[IsTokenTypeProperty]]]
Read a string array from the given Utf8JsonReader.
Why is this guaranteed not to be null? I think we are lying about it here.
@@ -13,7 +13,8 @@ import ( func dataSourceAwsKmsSecret() *schema.Resource { return &schema.Resource{ - Read: dataSourceAwsKmsSecretRead, + DeprecationMessage: "This data source will change or be removed in Terraform AWS provider version 2.0. Please see migration information available in: https://www.terraform.io/docs/providers/aws/d/kms_secrets.html", + Read: dataSourceAwsKmsSecretRead, Schema: map[string]*schema.Schema{ "secret": {
[Printf,Decrypt,Now,UTC,UnsafeSetFieldRaw,List,String,Errorf,DecodeString,SetId,Get]
dataSourceAwsKmsSecretImports imports the specified secrets into a schema. - returns 64 - bit value for a given secret.
I don't know if we have precedent for provider-specific "upgrade guides" on the website, but I feel like it'd be better to gather that information on a version-specific upgrade page like we do for Core, since that way that information is obviously attached to a particular version of the provider and we won't get into a confusing situation where in future this page might no longer contain the migration information this message refers to but older builds of the provider are still out there in the wild. If there isn't already a precedent for this then I guess we'll need to come up with a convention. My first idea would be a doc URL like `/docs/providers/aws/upgrade-guides/1-28.html`, which mimics our core-oriented upgrade guide URL structure `/upgrade-guides/0-12.html`, but I'm not wedded to that.
@@ -1943,7 +1943,10 @@ class TestReviewViewSetFlag(TestCase): response = self.client.post( self.url, data={'flag': 'review_flag_reason_spam'}) assert response.status_code == 202 - assert response.content == '' + data = json.loads(response.content) + assert data == { + 'msg': 'Thanks; this review has been flagged for editor approval.' + } assert ReviewFlag.objects.count() == 1 flag = ReviewFlag.objects.latest('pk') assert flag.flag == 'review_flag_reason_spam'
[TestCreate->[test_new_reply->[login_dev],test_new_activity_log_on_reply_but_no_mail_if_one_already_exists->[login_dev],test_list_none_add_review_link_dev->[login_dev],test_mail_but_no_activity_log_on_reply->[login_dev],test_get_reply->[login_dev],test_add_link_dev->[login_dev],test_double_reply->[login_dev]],TestFlag->[test_new_flag_mine->[make_it_my_review],setUp->[login_admin]],TestViews->[test_list_item_actions->[make_it_my_review,login_admin],test_empty_reviews_in_list->[login_dev,login_admin,create_review]],TestEdit->[test_edit_reply->[login_dev],test_new_activity_log_but_no_mail_on_edit_by_admin->[login_admin],test_new_activity_log_but_no_mail_on_reply_edit->[login_dev]],TestDelete->[setUp->[login_admin],test_reviewer_cannot_delete_unmoderated_review->[login_dev],test_delete_own_review->[login_dev],test_reviewer_can_delete_moderated_review->[login_dev],test_editor_own_addon_cannot_delete->[login_dev]],TestReviewViewSetGet->[test_list_addon_without_grouped_ratings->[test_list_addon],test_list_addon_slug->[test_list_addon],test_list_user_grouped_ratings_not_present->[test_list_user],test_list_addon_grouped_ratings->[test_list_addon],test_list_with_empty_reviews->[create_review],test_list_addon_guid->[test_list_addon],test_list_addon_grouped_ratings_unknown_addon_not_present->[test_list_addon_unknown]]]
Test whether the user is logged in and whether the flag is logged in.
Ah, this reminds me that I should display a message in the UI explaining what happens next.
@@ -423,7 +423,7 @@ static ssl_trace_tbl ssl_ciphers_tbl[] = { {0xCCAC, "TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305"}, {0xCCAD, "TLS_DHE_PSK_WITH_CHACHA20_POLY1305"}, {0xCCAE, "TLS_RSA_PSK_WITH_CHACHA20_POLY1305"}, - {0x0D01, "TLS_AES_128_GCM_SHA256"}, + {0x1301, "TLS_AES_128_GCM_SHA256"}, {0xFEFE, "SSL_RSA_FIPS_WITH_DES_CBC_SHA"}, {0xFEFF, "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"}, };
[No CFG could be retrieved]
Returns the TLS descriptor for the specified key type if any. This function returns the list of TLS trace tables that are used by the SSL engine. Returns the name of the first non - null value in the list of known TLSEXT.
which leads to the obvious question as to why we're not using the TLS...CK... define's masked off? and the semi-obvious next question, why we're not using the TXT defines (which I hate and want to get rid of)
@@ -87,8 +87,8 @@ class AutoHSTSEnhancement(object): increase over time. The plugins implementing new style enhancements are responsible of handling - the saving of configuration checkpoints as well as calling possible restarts - of managed software themselves. + the saving of configuration checkpoints as well as the other calls to + interface methods of `interfaces.IInstaller` such as prepare() and restart() Methods: enable_autohsts is called when the header is initially installed using a
[are_requested->[enabled_enhancements],enable->[enabled_enhancements],are_supported->[enabled_enhancements]]
Creates a new object. This method is called when a certificate is successfully renewed.
nit: The plugin only has to call `prepare` for the `update_autohsts` method right? I'd undo this change and mention `prepare` in the docstring for `update_autohsts` and/or say here that they "may" have to call `prepare`.
@@ -50,7 +50,8 @@ namespace Microsoft.Extensions.Hosting /// <summary> /// Sets up the configuration for the remainder of the build process and application. This can be called multiple times and /// the results will be additive. The results will be available at <see cref="HostBuilderContext.Configuration"/> for - /// subsequent operations, as well as in <see cref="IHost.Services"/>. + /// subsequent operations, as well as in <see cref="IHost.Services"/>. Delegate execution does not affect current <see cref="HostBuilderContext.Configuration"/>. + /// The resulting <see cref="IConfiguration"/> will be created and replace current <see cref="HostBuilderContext.Configuration"/> only after all delegates are executed. /// </summary> /// <param name="configureDelegate">The delegate for configuring the <see cref="IConfigurationBuilder"/> that will be used /// to construct the <see cref="IConfiguration"/> for the host.</param>
[HostBuilder->[CreateServiceProvider->[CreateServiceProvider]]]
Register a configuration action that will be called when the application is configured.
For me, one of the problems with the current wording is this sentence: > `The results will be available at <see cref="HostBuilderContext.Configuration"/> for subsequent operations, as well as in <see cref="IHost.Services"/>.` It is a little unclear what "subsequent operations" means here. Does it mean for subsequent `ConfigureAppConfiguration` delegates? Or for operations after the whole AppConfiguration has been built?
@@ -923,9 +923,9 @@ function matchTerms(file, terms) { const {whitelist, checkInTestFolder} = terms[term]; // NOTE: we could do a glob test instead of exact check in the future // if needed but that might be too permissive. - if (Array.isArray(whitelist) && + if (isInBuildSystemFixtureFolder(relative) || (Array.isArray(whitelist) && (whitelist.indexOf(relative) != -1 || - (isInTestFolder(relative) && !checkInTestFolder))) { + (isInTestFolder(relative) && !checkInTestFolder)))) { return false; } // we can't optimize building the `RegExp` objects early unless we build
[No CFG could be retrieved]
Match the given file with the given terms and return the match string if any of the terms Checks if the contents string contains any forbidden term.
@rsimha do you mind re-reviewing this part. had to whitelist files in build-system/babel-plugins test/fixtures
@@ -202,6 +202,8 @@ class ResourceProvider: def __init__(self) -> None: pass +#https://github.com/python/mypy/issues/1102 +@no_type_check def serialize_provider(provider: ResourceProvider) -> str: # We need to customize our Pickler to ensure we sort dictionaries before serializing to try to # ensure we get a deterministic result. Without this we would see changes to our serialized
[Resource->[__init__->[serialize_provider]],ResourceProvider->[check->[CheckResult],read->[ReadResult],diff->[DiffResult],update->[UpdateResult]]]
Initialize the object with a pickle object.
We have a general TODO style across our codebase of `TODO[pulumi/pulumi#1102]: Some description of the issue`.
@@ -261,9 +261,9 @@ def with_exponential_backoff( try: try: sleep_interval = next(retry_intervals) - except StopIteration: + except StopIteration as stop_iteration: # Re-raise the original exception since we finished the retries. - raise exn.with_traceback(exn_traceback) + raise exn.with_traceback(exn_traceback) from stop_iteration logger( 'Retry with exponential backoff: waiting for %s seconds before '
[Clock->[sleep->[sleep]],retry_if_valid_input_but_server_error_and_timeout_filter->[retry_on_server_errors_and_timeout_filter],retry_on_server_errors_and_timeout_filter->[retry_on_server_errors_filter],retry_on_server_errors_and_notfound_filter->[retry_on_server_errors_filter],retry_on_server_errors_timeout_or_quota_issues_filter->[retry_on_server_errors_and_timeout_filter],with_exponential_backoff->[real_decorator->[wrapper->[FuzzedExponentialIntervals,sleep]],Clock]]
Decorator to retry on transient errors and retry on HTTP 404 responses. A decorator to retry the call with exponential backoff.
Here it looks intentional to not include `stop_iteration`. Could you disable the check instead?
@@ -270,7 +270,12 @@ func (provider *Docker) getIPAddress(container dockertypes.ContainerJSON) string } } } - for _, network := range container.NetworkSettings.Networks { + for networkName, network := range container.NetworkSettings.Networks { + // If net==host, quick n' dirty, we return 127.0.0.1 + // This will work locally, but will fail with swarm. + if "host" == networkName { + return "127.0.0.1" + } return network.IPAddress } return ""
[createClient->[NewClient,ParseHost,Client,ConfigureTransport],loadDockerConfig->[Filter,getFrontendName,getConfiguration,Error],ContainerFilter->[String,MatchConstraints,Split,Atoi,Debugf],getPort->[Port],getFrontendRule->[getSubDomain,Warnf],getEntryPoints->[Split],getSubDomain->[TrimPrefix,Replace],Provide->[Handle,createClient,loadDockerConfig,NewHandler,Background,WithCancel,Go,NewArgs,MonitorWithHandler,Errorf,RetryNotify,Fatalf,NewExponentialBackOff,Add,ServerVersion,Debugf],getFrontendName->[getFrontendRule],ContainerList,ContainerInspect,New,Warnf]
getIPAddress returns the IP address of the container.
I'm pretty sure it's more reliable to look at `container.HostConfig.NetworkMode` to detect the `host` network mode :angel:
@@ -4,9 +4,13 @@ module Engine module Step module HalfPay def half(entity, revenue) - withheld = (revenue / 2 / entity.total_shares).to_i * entity.total_shares + withheld = half_pay_withhold_amount(entity, revenue) { corporation: withheld, per_share: payout_per_share(entity, revenue - withheld) } end + + def half_pay_withhold_amount(entity, revenue) + (revenue / 2 / entity.total_shares).to_i * entity.total_shares + end end end end
[half->[payout_per_share,total_shares,to_i]]
Returns the score of a withheld entity.
I'm not sure if adding a non-publicly-exposed method in a module in Ruby is kosher.
@@ -76,10 +76,13 @@ def setup_experiment(installed=True): pypath = ':'.join([sdk_path, cmd_path]) os.environ['PYTHONPATH'] = pypath -def fetch_nni_log_path(experiment_url): +def get_experiment_id(experiment_url): + experiment_id = requests.get(experiment_url).json()['id'] + return experiment_id + +def get_nni_log_path(experiment_url): '''get nni's log path from nni's experiment url''' - experiment_profile = requests.get(experiment_url) - experiment_id = json.loads(experiment_profile.text)['id'] + experiment_id = get_experiment_id(experiment_url) experiment_path = os.path.join(os.path.expanduser('~'), 'nni', 'experiments', experiment_id) nnimanager_log_path = os.path.join(experiment_path, 'log', 'nnimanager.log')
[parse_max_duration_time->[int],get_experiment_status->[get],dump_yml_content->[open,dump,write],get_yml_content->[open,load],deep_update->[get,isinstance,items,deep_update],setup_experiment->[abspath,getcwd,get,join],remove_files->[remove,suppress],read_last_line->[open,strip],print_stderr->[trial_job,get,run],get_succeeded_trial_num->[print,get],is_experiment_done->[exists,join,run],fetch_nni_log_path->[expanduser,loads,get,join]]
setup the experiment if nni is not installed.
why refactor this place?
@@ -725,7 +725,7 @@ void MapblockMeshGenerator::drawGlasslikeFramedNode() v3s16 n2p = blockpos_nodes + p + g_26dirs[i]; MapNode n2 = data->m_vmanip.getNodeNoEx(n2p); content_t n2c = n2.getContent(); - if (n2c == current || n2c == CONTENT_IGNORE) + if (n2c == current) nb[i] = 1; } }
[blendLightColor->[blendLight,blendLightColor],drawRaillikeNode->[drawQuad,isSameRail,useTile],prepareLiquidNodeDrawing->[getSpecialTile],drawPlantlikeRootedNode->[drawPlantlike,getSmoothLightFrame,useTile],drawTorchlikeNode->[drawQuad,useTile],drawNode->[drawGlasslikeNode,drawNodeboxNode,drawAllfacesNode,getSmoothLightFrame,drawSignlikeNode,drawRaillikeNode,drawGlasslikeFramedNode,drawPlantlikeNode,drawMeshNode,errorUnknownDrawtype,drawFencelikeNode,drawFirelikeNode,drawPlantlikeRootedNode,drawLiquidNode,drawTorchlikeNode],drawSignlikeNode->[drawQuad,useTile],generate->[drawNode],drawFencelikeNode->[drawAutoLightedCuboid,useTile],drawPlantlike->[drawPlantlikeQuad],drawLiquidSides->[blendLightColor],drawMeshNode->[blendLightColor,useTile],drawFirelikeQuad->[drawQuad],drawAutoLightedCuboid->[blendLight,generateCuboidTextureCoords,drawCuboid],drawFirelikeNode->[drawFirelikeQuad,useTile],drawPlantlikeQuad->[drawQuad],drawLiquidNode->[getLiquidNeighborhood,prepareLiquidNodeDrawing,calculateCornerLevels,drawLiquidSides,drawLiquidTop],drawGlasslikeNode->[drawQuad,useTile],drawNodeboxNode->[drawAutoLightedCuboid,getTile],drawAllfacesNode->[drawAutoLightedCuboid,useTile],drawGlasslikeFramedNode->[drawAutoLightedCuboid,getTile,getSpecialTile],drawPlantlikeNode->[drawPlantlike,useTile],ndef->[ndef],drawLiquidTop->[blendLightColor]]
Draw a glass - like node on the mesh. - - - - - - - - - - - - - - - - - - check for all nodes in the tree that are not in the tree Internal liquid level is textured with 1 tile defined in node.
Otherwise `glasslike_framed` disappears from the inventory
@@ -114,10 +114,7 @@ function jetpack_og_tags() { $tags['og:description'] = $archive->description; } } - } elseif ( is_singular() ) { - global $post; - $data = $post; // so that we don't accidentally explode the global. - + } elseif ( is_singular() && is_a( $data, 'WP_Post' ) ) { $tags['og:type'] = 'article'; if ( empty( $data->post_title ) ) { $tags['og:title'] = ' ';
[No CFG could be retrieved]
This function is used to output the Open Graph tags Tag for Open Graph protocol Filters the post object and returns the post object. Add Open Graph tags to the post. This function will return any Open Graph Meta tags if we don t have any info about a Add Open Graph tags if the WP - FB data mapping exists.
Why `is_a( $data, 'WP_Post' )` rather than `$data instanceof WP_Post`?
@@ -57,8 +57,8 @@ class MenuCreate(ModelMutation): return user.has_perm('menu.manage_menus') @classmethod - def clean_input(cls, info, instance, input, errors): - cleaned_input = super().clean_input(info, instance, input, errors) + def clean_input(cls, info, instance, input): + cleaned_input = super().clean_input(info, instance, input) items = [] for item in cleaned_input.get('items', []): category = item.get('category')
[MenuItemCreate->[Arguments->[MenuItemCreateInput]],MenuUpdate->[Arguments->[MenuInput]],AssignNavigation->[mutate->[AssignNavigation]],MenuCreate->[Arguments->[MenuCreateInput]],MenuItemUpdate->[Arguments->[MenuItemInput]]]
Get the items from the input and replace the missing items with missing items.
`else` don't need anymore
@@ -189,7 +189,8 @@ frappe.views.TreeView = Class.extend({ label:__("Delete"), condition: function(node) { return !node.root && me.can_delete; }, click: function(node) { - frappe.model.delete_doc(me.doctype, node.label, function() { + var node_name = node.data.name || node.data.value; + frappe.model.delete_doc(me.doctype, node_name, function() { node.parent.remove(); }); },
[No CFG could be retrieved]
Displays the menu for the menu item Displays the menu for the group node.
This should be visible based on permission
@@ -704,6 +704,13 @@ var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", fu By("Ensuring that we have functional DNS resolution from a linux container") validateDNSLinuxName := "validate-dns-linux" validateDNSLinuxNamespace := "default" + // Create a kubectl version check to avoid api version mismatch which due to deployment failure in next step + cmd := exec.Command("k", "version") + out, err := cmd.CombinedOutput() + if out != nil { + log.Printf("kubectl version is :\n %s", out) + util.PrintCommand(cmd) + } j, err := job.CreateJobFromFileDeleteIfExists(filepath.Join(WorkloadDir, fmt.Sprintf("%s.yaml", validateDNSLinuxName)), validateDNSLinuxName, validateDNSLinuxNamespace, 3*time.Second, cfg.Timeout) Expect(err).NotTo(HaveOccurred()) ready, err := j.WaitOnSucceeded(sleepBetweenRetriesWhenWaitingForPodReady, validateDNSTimeout)
[ValidateOmsAgentLogs,Exec,ParseInput,CreatePVCFromFileDeleteIfExist,HasPrefix,Should,WaitOnReadyMin,GetAll,WaitForIngress,GetAllRunningByPrefixWithRetry,New,CreateServiceFromFileDeleteIfExist,IsWindows,IsLargeVMSKU,RunLinuxDeployDeleteIfExists,Bool,RequiresDocker,ValidateResources,CreateIfNotExist,RunDeploymentMultipleTimes,PrintCommand,ReadString,ScaleDeployment,ParseIP,NotTo,GetArg,Println,CreateLinuxDeployDeleteIfExists,WaitOnSuccesses,ExposeDeleteIfExist,PrintPodsLogs,GetAllByPrefixWithRetry,ReplaceContainerImageFromFile,DeleteNetworkPolicy,WaitOnReadyMax,Now,ValidateWithRetry,StderrPipe,HasZonesForAllAgentPools,RequiresCloudproviderConfig,IsSpotScaleSet,Atoi,CheckLinuxOutboundConnection,CombinedOutput,HasSubstring,IsRBACEnabled,MatchString,IsKubernetesVersionGe,IsLinux,HasWindowsAgents,CreateDeploymentHPADeleteIfExist,IsVHDDistroForAllNodes,IsUbuntu,UnixNano,CreateNetworkPolicyFromFile,HasDCSeriesSKU,Kill,GetByRegexWithRetry,StdoutPipe,Printf,Describe,ValidatePVC,NewReader,CopyTo,GetByAnnotations,AnyAgentIsLinux,IsLowPriorityScaleSet,Getwd,GetWindowsTestImages,AddToSSHKeyChain,Sleep,GetSSHKeyPath,ValidateCurlConnection,WaitForReplicas,ValidateAzureFile,WaitOnSucceeded,Compile,HasNetworkPolicy,GetReadyWithRetry,HasMultipleNodes,CreateLinuxDeployIfNotExist,RunLinuxWithRetry,CreateWindowsJobFromTemplateDeleteIfExists,Start,To,ExecuteRemoteWithRetry,IsUsingEphemeralDisks,PodsRunning,CreatePersistentVolumeClaimsFromFile,CreateLinuxDeploy,IsVirtualMachineScaleSets,WaitOnReady,To4,Execute,CheckOutboundConnection,GetWithRetry,GetEnvironmentVariable,Get,CheckWindowsOutboundConnection,NodeCount,GetServerName,NewSource,Sprintf,HasAddon,String,Intn,CreatePodFromFileIfNotExist,GetNodePort,CreateWindowsDeployWithHostportDeleteIfExist,CreatePodFromFile,Delete,ExposeIfNotExist,RunCommandMultipleTimes,CopyToRemote,ParseOutput,HasAvailabilityZones,HasNonRegularPriorityScaleset,Wait,CreateWindowsDeployDeleteIfExist,HasNSeriesSKU,Join,GetByLabel,Contains,Label,ParseConfig,NewConn
ectionWithRetry,Remove,CreateStorageClassFromFile,CreateJobFromFileDeleteIfExists,Command,GetAddressByType,Version,IsUsingManagedDisks,Print,ToNot,GetAgentPoolIndexByName]
Determines if the pod has a specific and prints it to the standard output. Check if functional DNS resolution is available on the current system.
FYI, this is fine as an informational check, but the `k` util is already designed to use the version of `kubectl` that the API returns in a call to the cluster. Is that not working in your experience?
@@ -163,6 +163,10 @@ func (e *PGPEncrypt) Run(m libkb.MetaContext) error { } } + for _, warning := range e.warnings.Strings() { + m.Warning(warning) + } + recipients := ks.Sorted() if err := libkb.PGPEncrypt(e.arg.Source, writer, signer, recipients); err != nil { return err
[Run->[verifyUsers,GetSecretKeyWithPrompt,G,New,UsersPlusKeys,LoadMeByMetaContextAndUID,Sorted,PGPEncrypt,Close,loadSelfKey,SecretKeyPromptArg,Encode,Add],loadSelfKey->[FilterActivePGPKeys,NewLoadUserArg,LoadMe,G],Add->[GetKID],verifyUsers->[Result,GetName,G,Error],NewContextified]
Run executes the command RunEngine2 encrypts a message using the given key chain.
Don't do this to communicate with a user, this a log message. The fact that it shows up in the CLI is incidental. You should store any warnings in the engine type (like `warnings` in the PGPSignEngine, but expose it by making it `Warnings`). After calling Run on the engine, check for warnings. Put them in a result type avdl so clients can get them.
@@ -390,6 +390,16 @@ class AmpYoutube extends AMP.BaseElement { if (eventType == 'initialDelivery') { this.info_ = info; + + if (info['duration'] !== undefined) { + // Video not available. + if (info['duration'] <= 0 && info['videoData'] && + info['videoData']['title'] == '') { + this.removeIframe_(); + this.toggleFallback(true); + } + } + return; }
[No CFG could be retrieved]
Private methods - This function is called from YouTube API. Creates an img placeholder for a videoid.
Occasionally (1 out of about 25 refreshes) this check is not triggered, leaving fallback never toggled when loading an invalid video. Actually, the fallback is almost never toggled when we scroll down the page slowly to the invalid videos
@@ -1,5 +1,16 @@ # frozen_string_literal: true +Given(/^the following users are registered:$/) do |table| + table.hashes.each do |hash| + team_name = hash.delete "team" + team_role = hash.delete "role" + user = FactoryBot.create(:user, hash) + team = FactoryBot.create(:team, {name: team_name, users: [user]}) + UserTeam.where(user: user, team: team).first.update role: team_role + User.find_by_email(hash.fetch('email')).confirm + end +end + When(/^I click "(.+)" button$/) do |button| click_button(button) end
[visit,fetch,create,find,have_xpath,manage,have_current_path,join,accept,should,to,have_content,click_button,click_link,set,save_screenshot,dismiss,click,find_by_id,user,sleep,update,execute_script,click_on,id,within,resize_to,within_frame,not_to,send_keys,seed_demo_data,find_by,attach_file]
JS function for handling missing node - tag names find user in root_path.
Rails/DynamicFindBy: Use find_by instead of dynamic find_by_email.
@@ -49,8 +49,6 @@ <body data-user-status="<%= user_logged_in_status %>" class="<%= Settings::UserExperience.default_font.tr("_", "-") %>-article-body default-header" - data-pusher-key="<%= ApplicationConfig["PUSHER_KEY"] %>" - data-app-domain="<%= ChatChannel.urlsafe_encoded_app_domain %>" data-honeybadger-key="<%= ApplicationConfig["HONEYBADGER_JS_API_KEY"] %>" data-deployed-at="<%= ForemInstance.deployed_at %>" data-latest-commit-id="<%= ForemInstance.latest_commit_id %>"
[No CFG could be retrieved]
Outputs a bunch of meta tags that can be used to render a Honeybadger Renders the .
Does anyone know what this was needed for? Is it still required even with Connect gone?
@@ -17,7 +17,9 @@ class WebhookQueries(graphene.ObjectType): description="Look up a webhook by ID.", ) webhook_events = graphene.List( - WebhookEvent, description="List of all available webhook events." + WebhookEvent, + description="List of all available webhook events.", + deprecation_reason=f"{DEPRECATED_IN_3X_FIELD}", ) webhook_sample_payload = graphene.Field( graphene.JSONString,
[WebhookQueries->[resolve_webhook_events->[resolve_webhook_events],resolve_webhook->[resolve_webhook]]]
Imports a single webhook event.
Missing information on which field the user should use.
@@ -879,7 +879,13 @@ _safe_re = re.compile(r'^((\.\.)?/+)+') def make_path_safe(path): """Make path safe by making it relative and local """ - return _safe_re.sub('', path) or '.' + if sys.platform != 'win32': + return _safe_re.sub('', path) or '.' + else: + tail = path + if path[0:2] == '//' or path[0:2] == '\\\\' or path[1] == ':': + drive, tail = os.path.splitdrive(path) + return _safe_re.sub('', tail) or '.' def daemonize():
[ChunkIteratorFileWrapper->[_read->[_refill],read->[_read]],ProgressIndicatorPercent->[show->[progress]],get_cache_dir->[write,get_home_dir],update_excludes->[load_excludes],Location->[preformat_text->[uid2user,format_line],to_key_filename->[get_keys_dir],parse->[preformat_text,match],_parse->[match]],open_item->[ChunkIteratorFileWrapper],ItemFormatter->[get_item_data->[remove_surrogates],format_item->[get_item_data],__init__->[partial_format,update],keys_help->[available_keys],hash_item->[update],time->[safe_timestamp],format_time->[format_time,safe_timestamp]],get_keys_dir->[get_home_dir],ShellPattern->[_match->[match]],sizeof_fmt_iec->[sizeof_fmt],PatternMatcher->[match->[match]],sizeof_fmt_decimal->[sizeof_fmt],format_archive->[format_time,to_localtime],location_validator->[validator->[Location]],ProgressIndicatorEndless->[show->[output,progress]],FnmatchPattern->[_match->[match]],dir_is_tagged->[dir_is_cachedir]]
Make path safe by making it relative and local .
you just throw away the drive?
@@ -10,4 +10,5 @@ $content = elgg_view('groups/edit'); echo elgg_view_page(elgg_echo('groups:add'), [ 'content' => $content, + 'filter_id' => 'groups/edit', ]);
[No CFG could be retrieved]
View the group add page.
i think groups/add
@@ -532,11 +532,11 @@ public class DoFnTester<InputT, OutputT> implements AutoCloseable { return mainOutputTag; } - private TestContext createContext(OldDoFn<InputT, OutputT> fn) { + private TestContext createContext(DoFn<InputT, OutputT> fn) { return new TestContext(); } - private class TestContext extends OldDoFn<InputT, OutputT>.Context { + private class TestContext extends DoFn<InputT, OutputT>.Context { TestContext() { fn.super(); }
[DoFnTester->[advanceInputWatermark->[advanceInputWatermark,collectInto],peekOutputElementsWithTimestamp->[apply->[of]],peekOutputElementsInWindow->[of,peekOutputElementsInWindow],startBundle->[startBundle],finishBundle->[unwrapUserCodeException,finishBundle],takeOutputElements->[peekOutputElements,clearOutputElements],takeOutputElementsWithTimestamp->[peekOutputElementsWithTimestamp,clearOutputElements],takeSideOutputElements->[peekSideOutputElements,clearSideOutputElements],advanceProcessingTime->[collectInto,advanceProcessingTime],createProcessContext->[of],TestContext->[sideOutput->[sideOutputWithTimestamp]],TestProcessContext->[windowingInternals->[outputWindowedValue->[of,noteOutput],sideOutputWindowedValue->[of,noteOutput]],sideInput->[apply],getPipelineOptions->[getPipelineOptions],sideOutputWithTimestamp->[of,noteOutput],outputWithTimestamp->[sideOutputWithTimestamp],output->[sideOutput],sideOutput->[sideOutputWithTimestamp],createContext],close->[finishBundle],processTimestampedElement->[startBundle,unwrapUserCodeException,processElement],processBundle->[processBundle]]]
Returns the main output tag.
Maybe just delete this method? It doesn't even use the parameter.
@@ -23,9 +23,9 @@ namespace System.Xml.Serialization { internal class CodeGenerator { - internal static BindingFlags InstancePublicBindingFlags = BindingFlags.Instance | BindingFlags.Public; - internal static BindingFlags InstanceBindingFlags = BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic; - internal static BindingFlags StaticBindingFlags = BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic; + internal const BindingFlags InstancePublicBindingFlags = BindingFlags.Instance | BindingFlags.Public; + internal const BindingFlags InstanceBindingFlags = BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic; + internal const BindingFlags StaticBindingFlags = BindingFlags.Static | BindingFlags.Public | BindingFlags.NonPublic; internal static MethodAttributes PublicMethodAttributes = MethodAttributes.Public | MethodAttributes.HideBySig; internal static MethodAttributes PublicOverrideMethodAttributes = MethodAttributes.Public | MethodAttributes.Virtual | MethodAttributes.HideBySig; internal static MethodAttributes ProtectedOverrideMethodAttributes = MethodAttributes.Family | MethodAttributes.Virtual | MethodAttributes.HideBySig;
[CodeGenerator->[Ldstr->[Ldstr],Beq->[Beq],Ldc->[New,Ldc,Call,Ldtoken],LoadAddress->[Load],Ldloc->[Ldloc],InitElseIf->[MarkLabel,Pop,Br,EndIf],WhileEndCondition->[Brtrue],Stelem->[Stelem],LocalBuilder->[TryDequeueLocal],If->[If],EndExceptionBlock->[EndExceptionBlock],Box->[Box],BeginCatchBlock->[BeginCatchBlock],Stloc->[Stloc],Ldloca->[Ldloca],Castclass->[Castclass],Ldarg->[Ldarg],GotoMethodEnd->[Br],Call->[Call],WhileEnd->[MarkLabel,Pop],InternalConvert->[Ldobj,Box,Nop,Castclass,Unbox],Ret->[Ret],Unbox->[Unbox],InternalIf->[Brfalse,Brtrue,EndIf],Cne->[Ceq],Dup->[Dup],Throw->[Throw],Ldtoken->[Ldtoken],Add->[Add],Ldelem->[Ldelem],EndIf->[EndIf],Brtrue->[Brtrue],Brfalse->[Brfalse],Ldlen->[Ldlen],Ceq->[Ceq],Br->[Br],Pop->[Pop],IfState->[Pop],AndIf->[If,Brfalse],WhileContinue->[Br],StoreArrayElement->[Call],BeginExceptionBlock->[BeginExceptionBlock],MarkLabel->[MarkLabel],Blt->[Blt],Type->[GetPropertyMethodFromBaseType,Call],Ble->[Ble],Ldelema->[Ldelema],Leave->[Leave],Clt->[Clt],WhileBeginCondition->[MarkLabel,Nop],Ldobj->[Ldobj],Stobj->[Stobj],Br_S->[Br_S],WhileBegin->[Ldc,MarkLabel,Brtrue],Ldarga->[Ldarga],Nop->[Nop],EndFor->[AssertHasInterface],StoreMember->[GetPropertyMethodFromBaseType,Call],Nop],LocalScope->[TryGetValue->[TryGetValue],ContainsKey->[ContainsKey],AddToFreeLocals->[TryGetValue,Add],TryGetValue]]
Creates a class which is the base class for all of the classes that are responsible for generating.
It would be general goodness to change all these to be const too.
@@ -627,7 +627,11 @@ public class BigQueryIOReadTest implements Serializable { .toSource(stepUuid, TableRowJsonCoder.of(), BigQueryIO.TableRowParser.INSTANCE); PipelineOptions options = PipelineOptionsFactory.create(); - assertEquals(108, bqSource.getEstimatedSizeBytes(options)); + + // Each row should have 24 bytes (See StringUtf8Coder in detail): + // first 1 byte indicating length and following 23 bytes: {"name":"a","number":1} + long expectedSize = 24L * data.size(); + assertEquals(expectedSize, bqSource.getEstimatedSizeBytes(options)); } @Test
[BigQueryIOReadTest->[testBuildTableBasedSourceWithDefaultProject->[checkReadTableObject],testBuildTableBasedSourceWithoutValidation->[checkReadTableObjectWithValidate],testValidateReadSetsDefaultProject->[apply],testPassThroughThenCleanupExecuted->[apply],testBuildSourceWithTableAndFlatten->[apply],setUp->[setUp],testReadFromTable->[apply],testBuildQueryBasedTypedReadSource->[checkTypedReadQueryObject],testBuildSourceWithTableAndFlattenWithoutValidation->[apply],testPassThroughThenCleanup->[apply],testBuildQueryBasedSourceWithoutValidation->[checkReadQueryObjectWithValidate],testReadTableWithSchema->[apply],testBuildSourceWithTableAndSqlDialect->[apply],testBuildQueryBasedSource->[checkReadQueryObject],apply->[evaluate->[evaluate],apply],testBuildSourceWithTableReference->[checkReadTableObject],testBuildTableBasedSource->[checkReadTableObject]]]
This test tests whether the estimated size of a table is not greater than the estimated size of This method is used to test if a sequence of records in a data set has a specific.
Old value `108` did not carry any explanation. Now `expectedSize` is easier to understand.
@@ -4,6 +4,10 @@ import os import re import warnings +IS_WIN32 = sys.platform.startswith('win32') +MACHINE_BITS = tuple.__itemsize__ * 8 +IS_32BITS = MACHINE_BITS == 32 + def _readenv(name, ctor, default): try:
[_readenv->[warn,ctor],_force_cc->[groups,match,ValueError,int],_readenv]
Read environment variables and return a .
Out of curiosity, is there a reason we use this odd-looking trick to detect the machine bits rather than using `platform.architecture()`?
@@ -59,4 +59,14 @@ class Profile < ApplicationRecord def clear! update(data: {}) end + + private + + def conditionally_validate_summary + return unless summary.present? && ProfileField.exists?(attribute_name: SUMMARY_ATTRIBUTE) + # Grandfather in people who had a too long summary before. + return if data_was[SUMMARY_ATTRIBUTE] && data_was[SUMMARY_ATTRIBUTE].size > MAX_SUMMARY_LENGTH + + errors.add(:summary, "is too long.") if summary.size > MAX_SUMMARY_LENGTH + end end
[Profile->[attributes!->[refresh_attributes!],refresh_attributes!]]
Clear the from the cache.
This was moved over from `User`. Since this is a profile field defined in `Profiles::AddBaseFields` that gets added to each new Forem via `bin/setup` I thought it's worth keeping. However, admins can delete this field, so we still need a guard clause below.
@@ -207,6 +207,14 @@ const strings = { description: 'Label for the fourth answer choice from a multiple choice quiz (e.g. D in A/B/C/D)', }, + [LocalizedStringId.AMP_STORY_PAUSE_BUTTON_LABEL]: { + string: 'Pause story', + description: 'Label for the pause button on the system layer', + }, + [LocalizedStringId.AMP_STORY_PLAY_BUTTON_LABEL]: { + string: 'Play story', + description: 'Label for the play button on the system layer', + }, [LocalizedStringId.AMP_STORY_SHARE_BUTTON_LABEL]: { string: 'Share story', description:
[No CFG could be retrieved]
Label for the button to play the video visible on the page. A mage - story - sharing - clipboard - failure - text - A m.
"Without knowing the context of a message, a translator can't do much better than machine translation. Writing good message descriptions will help the translator understand the purpose of the message and its relationship to the surrounding messages. [...] Before writing a description, you should realize that translators will not know the product as well as you do. They work on multiple products, and they may not be engineers, so you should write descriptions that are understandable by a general audience." Translators won't know what a Story or a system layer is. I'm pinging you a link to good/bad examples.
@@ -72,9 +72,9 @@ export class AmpAnalytics extends AMP.BaseElement { this.isSandbox_ = false; /** - * @private {Object<string, RequestHandler>} A map of request handler with requests + * @private {Object<!RequestHandler>} A map of request handler with requests */ - this.requests_ = {}; + this.requests_ = Object.create(null); /** * @private {JsonObject}
[No CFG could be retrieved]
Creates an AMP Analytics class. Private methods for the layout - priority property.
But `this.request_` is still object<string, RequestHandler>
@@ -286,6 +286,10 @@ class DoOperation(Operation): # provided directly. assert self.side_input_maps is None + # Get experiments active in the worker to check for side input metrics exp. + experiments = set(RuntimeValueProvider( + 'experiments', str, '').get().split(',')) + # We will read the side inputs in the order prescribed by the # tags_and_types argument because this is exactly the order needed to # replace the ArgumentPlaceholder objects in the args/kwargs of the DoFn
[SimpleMapTaskExecutor->[execute->[start,finish,create_operation]],_TaggedReceivers->[__missing__->[ConsumerSet]],ReadOperation->[start->[output]],Operation->[start->[ConsumerSet],output->[cast]],PGBKOperation->[flush->[output]],ConsumerSet->[receive->[cast]],PGBKCVOperation->[output_key->[output]],FlattenOperation->[process->[output]],create_operation->[ReadOperation,create_pgbk_op,InMemoryWriteOperation,CombineOperation,DoOperation,FlattenOperation],CombineOperation->[process->[output]],DoOperation->[start->[_read_side_inputs,start,_TaggedReceivers],finish->[finish],process->[receive]],DoFnRunnerReceiver->[receive->[process]],FakeCython]
Reads the side inputs of a node. A context manager for a missing node - id view.
Should we add RuntimeValueProvider.get(option_name) as a class method to avoid creating dummy instance like this?
@@ -98,7 +98,7 @@ public class AsyncQueryForwardingServlet extends HttpServlet ctx = req.startAsync(req, resp); final AsyncContext asyncContext = ctx; - if (req.getAttribute(DISPATCHED) != null) { + if (req.getDispatcherType() == DispatcherType.ASYNC) { return; }
[AsyncQueryForwardingServlet->[doGet->[done->[propagate,flushBuffer,getObj,finished,complete,close],handleChunk->[propagate,getContent,readableBytes,complete,readBytes],run->[get,makeUrl],handleResponse->[propagate,getContent,finished,readableBytes,setContentType,complete,readBytes,setStatus,getCode],exceptionCaught->[handleException],getAttribute,setAttribute,getOutputStream,start,Runnable,handleException,startAsync,getDefaultHost,dispatch],handleException->[propagate,getMessage,write,flushBuffer,getOutputStream,isCommitted,complete,resetBuffer,getBytes,setStatus],doPost->[done->[QueryStats,propagate,debug,of,getObj,build,flushBuffer,finished,emit,RequestLogLine,log,DateTime,complete,getRemoteAddr,currentTimeMillis,close],handleChunk->[propagate,getContent,readableBytes,complete,readBytes],run->[post,makeUrl],handleResponse->[propagate,getContent,finished,readableBytes,setContentType,complete,readBytes,setStatus,getCode],exceptionCaught->[handleException],equals,start,withId,startAsync,toString,isDebugEnabled,dispatch,setAttribute,of,emit,handleException,RequestLogLine,log,DateTime,getRemoteAddr,getAttribute,debug,error,getContentType,currentTimeMillis,getHost,QueryStats,getOutputStream,getInputStream,Runnable,getId,readValue],makeUrl->[getRequestURI,getQueryString,format],on,forName,EmittingLogger]]
This method is called to handle a GET request for a single node. if any exception occurs dispatch it.
can we make doGet and doPost share more code and avoid copy pasting?
@@ -437,6 +437,7 @@ module.exports = class extends Generator { { name: 'Dutch', value: 'nl' }, { name: 'English', value: 'en' }, { name: 'Estonian', value: 'et' }, + { name: 'Farsi', value: 'fa' }, { name: 'French', value: 'fr' }, { name: 'Galician', value: 'gl' }, { name: 'German', value: 'de' },
[No CFG could be retrieved]
get all the languages supported by JHipster Add new social configuration in the application. yml.
add a new attribute `{ name: 'Farsi', value: 'fa', rtl: true },` and use that in `isRTLSupportNecessary` instead of hardcoding the values
@@ -107,11 +107,17 @@ def analyze_member_access(name: str, elif isinstance(typ, UnionType): # The base object has dynamic type. msg.disable_type_names += 1 + old_num_messages = msg.num_messages() results = [analyze_member_access(name, subtype, node, is_lvalue, is_super, is_operator, builtin_type, not_ready_callback, msg, original_type=original_type, chk=chk) for subtype in typ.items] msg.disable_type_names -= 1 + if msg.num_messages() != old_num_messages and any(isinstance(t, AnyType) + for t in results): + # If there was an error, return AnyType to avoid generating multiple messages for the + # same error. + return AnyType() return UnionType.make_simplified_union(results) elif isinstance(typ, TupleType): # Actually look up from the fallback instance type.
[add_class_tvars->[add_class_tvars],analyze_class_attribute_access->[handle_partial_attribute_type],bind_self->[expand,bind_self],analyze_member_access->[analyze_member_access]]
Analyzes a member of a base object. Analyzes the member access of a node. Analyzes the member access of a node. Analyzes the member access of a type object.
Was this not a problem before?
@@ -90,6 +90,18 @@ public class KsqlGenericRowAvroSerializer implements Serializer<GenericRow> { } } + private Schema getNonNullSchema(Schema schema) { + if (schema.getType() == Schema.Type.UNION) { + List<Schema> schemaList = schema.getTypes(); + for (Schema innerSchema: schemaList) { + if (innerSchema.getType() != Schema.Type.NULL) { + return innerSchema; + } + } + } + throw new KsqlException("Field need to have at least one not null type." + schema); + } + @Override public void close() {
[KsqlGenericRowAvroSerializer->[serialize->[serialize]]]
Serializes a single row of generic data into an avro byte array.
This should never happen now that all fields are unioned with Null by default right? If should, this should be an `IllegalStateException` with an error message like `"Expecting non-null value or a Union type for " + schema.field()`.
@@ -35,6 +35,7 @@ class ExtractorResourceMetadataFactoryTest extends FileConfigurationMetadataFact $resourceMetadataFactory = new ExtractorResourceMetadataFactory(new XmlExtractor([$configPath])); $resourceMetadata = $resourceMetadataFactory->create(FileConfigDummy::class); + $resourceMetadataDummy = $resourceMetadataFactory->create(Dummy::class); $this->assertInstanceOf(ResourceMetadata::class, $resourceMetadata); $this->assertEquals($expectedResourceMetadata, $resourceMetadata);
[ExtractorResourceMetadataFactoryTest->[testYamlCreateResourceMetadata->[assertInstanceOf,create,assertEquals],testYamlDoesNotExistMetadataFactory->[create],testYamlOptionalResourceMetadata->[assertInstanceOf,create,assertEquals],testXmlCreateResourceMetadata->[assertInstanceOf,create,assertEquals],testCreateWithMalformedYaml->[create],testYamlSingleResourceMetadata->[assertInstanceOf,create,assertEquals],testXmlDoesNotExistMetadataFactory->[create],testXmlExistingParentResourceMetadataFactory->[create,prophesize,reveal,assertEquals,shouldBeCalled],testXmlOptionalResourceMetadata->[assertInstanceOf,create,assertEquals],testInvalidXmlResourceMetadataFactory->[create],testYamlParentResourceMetadataFactory->[create,assertEquals,withDescription,prophesize,reveal,shouldBeCalled],testXmlParentResourceMetadataFactory->[create,assertEquals,withDescription,prophesize,reveal,shouldBeCalled],testYamlExistingParentResourceMetadataFactory->[create,prophesize,reveal,assertEquals,shouldBeCalled]]]
Tests if an Xml file contains a ResourceMetadata object.
Maybe you should add some assertions.
@@ -27,6 +27,14 @@ class Seq2SeqEncoder(_EncoderBase, Registrable): """ raise NotImplementedError + def is_bidirectional(self) -> bool: + """ + Returns ``True`` if this encoder is bidirectional. If so, we assume the forward direction + of the encoder is the first half of the final dimension, and the backward direction is the + second half. + """ + raise NotImplementedError + @classmethod def from_params(cls, params: Params) -> 'Seq2SeqEncoder': choice = params.pop_choice('type', cls.list_available())
[Seq2SeqEncoder->[from_params->[by_name,list_available,pop_choice]]]
Returns the dimension of each vector in the sequence output by this Seq2SeqEncoder.
Maybe this should be `False`, given otherwise this will break any user-implemented `Seq2SeqEncoder`s.
@@ -77,7 +77,7 @@ class TestMultiprocessReaderException(unittest.TestCase): reader.decorate_sample_generator( decorated_reader, batch_size=batch_size, - places=fluid.cuda_places()) + places=fluid.cuda_places(0)) else: reader.decorate_sample_generator( decorated_reader,
[TestMultiprocessReaderException->[main_impl->[ReaderException,fake_reader],test_main->[main_impl,places]]]
Implementation of the main method of the sequence sequence sequence sequence sequence sequence sequence sequence sequence sequence sequence Reads a sequence of images from the image file and checks that the sequence is met.
only test single device here?
@@ -89,4 +89,13 @@ public class FastThreadLocalThread extends Thread { public boolean willCleanupFastThreadLocals() { return cleanupFastThreadLocals; } + + /** + * Returns {@code true} if {@link FastThreadLocal#removeAll()} will be called once {@link Thread#run()} completes. + */ + @UnstableApi + public static boolean willCleanupFastThreadLocals(Thread thread) { + return thread instanceof FastThreadLocalThread && + ((FastThreadLocalThread) thread).willCleanupFastThreadLocals(); + } }
[FastThreadLocalThread->[wrap]]
Returns true if the fast thread local variables should be cleaned up.
might be clearer to break on `&&`
@@ -11,10 +11,10 @@ describe User do it { is_expected.to have_many(:events) } end - it 'should only send one email during creation' do + it 'does not send an email when #create is called' do expect do User.create(email: 'nobody@nobody.com') - end.to change(ActionMailer::Base.deliveries, :count).by(1) + end.to change(ActionMailer::Base.deliveries, :count).by(0) end describe 'password validations' do
[email,minute,password,create,new,let,describe,build_stubbed,ago,have_many,it,identities,to,before,profiles,require,deactivate,confirm_within,receive,now,send_new_otp,match,context,be_a,build,uuid,hour,by,eq,confirmation_sent_at,save,raise_error,and_return]
A basic example of how to create a user object. when the user already has a uuid it returns the current uuid.
did we change this behavior intentionally? If so, seems potentially unrelated to moving SP info from the session into the DB
@@ -125,7 +125,7 @@ module SubmissionsHelper def construct_repo_browser_directory_table_row(directory_name, directory) table_row = {} table_row[:id] = directory.object_id - table_row[:filter_table_row_contents] = render_to_string :partial => 'submissions/repo_browser/directory_row', :locals => {:directory_name => directory_name, :directory => directory} + table_row[:filter_table_row_contents] = render_to_string partial: 'submissions/repo_browser/directory_row', locals: {directory_name: directory_name, directory: directory} table_row[:filename] = directory_name table_row[:last_modified_date] = directory.last_modified_date.strftime('%d %B, %l:%M%p') table_row[:last_modified_date_unconverted] = directory.last_modified_date.strftime('%b %d, %Y %H:%M')
[construct_file_manager_table_rows->[construct_file_manager_table_row]]
Construct a table row for the repo browser directory table.
Line is too long. [174/80]<br>Space inside { missing.<br>Space inside } missing.
@@ -150,12 +150,9 @@ class TestCase(unittest.TestCase): "python {} {} --accept").format(munged_id, subname_output, output, __main__.__file__, munged_id)) if ACCEPT: - equal = False try: - equal = self.assertEqual(output, expected, prec=prec) + self.assertEqual(output, expected, prec=prec) except Exception: - equal = False - if not equal: return accept_output("updated output") else: self.assertEqual(output, expected, prec=prec)
[map_nested_tensor_object->[MapNestedTensorObjectImpl],TestCase->[assertExportImportModule->[assertEqual,getExportImportCopy],assertEqual->[assertEqual,assertTensorsEqual,is_iterable],assertExpected->[accept_output,remove_prefix_suffix],checkModule->[assertEqual]]]
r Asserts that a python value matches the recorded contents of a file derived from the Check if the output of the nanoseconds is equal to the expected output. Check if two variables are equal. Checks if two test cases have a .
I have an impression that `return accept_output("updated output")` is always returned ?
@@ -125,6 +125,11 @@ public abstract class AbstractTestingPrestoClient<T> { ImmutableMap.Builder<String, String> properties = ImmutableMap.builder(); properties.putAll(session.getSystemProperties()); + for (Entry<CatalogName, Map<String, String>> catalogAndConnectorProperties : session.getConnectorProperties().entrySet()) { + for (Entry<String, String> connectorProperties : catalogAndConnectorProperties.getValue().entrySet()) { + properties.put(catalogAndConnectorProperties.getKey() + "." + connectorProperties.getKey(), connectorProperties.getValue()); + } + } for (Entry<String, Map<String, String>> connectorProperties : session.getUnprocessedCatalogProperties().entrySet()) { for (Entry<String, String> entry : connectorProperties.getValue().entrySet()) { properties.put(connectorProperties.getKey() + "." + entry.getKey(), entry.getValue());
[AbstractTestingPrestoClient->[tableExists->[execute,tableExists],execute->[getResultSession,execute],listTables->[execute,listTables]]]
Creates a new client session from a session.
I am having hard time understanding that. What are connectorProperties and what are unprocessedCatalogProperties.
@@ -1,6 +1,6 @@ <div class="comments-container" data-object-id = <%= object.id %>> <% per_page = Constants::COMMENTS_SEARCH_LIMIT %> - <div class="content-comments"> + <div class="content-comments inline_scroll_block"> <% if comments.size == per_page %> <div class="comment-more text-center"> <a class="btn btn-default btn-more-comments-new"
[No CFG could be retrieved]
Displays a hidden container of the object s n - th comment list.
where do you define this `inline_scroll_block` class name? btw, can we use `inline-scroll-block` name?
@@ -0,0 +1,8 @@ +package games.strategy.triplea.delegate.battle.steps.fire; + +import games.strategy.triplea.delegate.battle.BattleState; +import java.util.List; +import java.util.function.Function; + +/** Converts the BattleState into a list of FiringGroups */ +public interface FiringGroupFilter extends Function<BattleState, List<FiringGroup>> {}
[No CFG could be retrieved]
No Summary Found.
Couple questions: - Could you explain why an interface "alias" is helpful? - Is the output list ordered? A filter will typically exclude or include items. My presumption is that this is more of a sieve, that we are not losing any elements but instead are grouping them. Is that correct? If so, perhaps this would be better named as a `Splitter`, or as a `Sorter`. OTOH - If we do not need an interface alias, then the naming problem would drop away.
@@ -519,6 +519,7 @@ func (d *Dispatcher) createAppliance(conf *config.VirtualContainerHostConfigSpec } } + d.op.Info("Creating the VCH vm") info, err = tasks.WaitForResult(d.op, func(ctx context.Context) (tasks.Task, error) { return vchFolder.CreateVM(ctx, *spec, d.vchPool, d.session.Host) })
[createAppliance->[createApplianceSpec,setDockerPort],CheckServiceReady->[CheckDockerAPI,ensureApplianceInitializes],createApplianceSpec->[addIDEController,addNetworkDevices,addParaVirtualSCSIController],ensureApplianceInitializes->[waitForKey,applianceConfiguration],deleteVM->[getName],reconfigureApplianceSpec->[encodeConfig,configIso,configLogging],checkExistence->[isVCH]]
createAppliance creates a new appliance on the target VM. This function creates a virtual machine and stores it. This function is called to initialize the environment variables for the iptables service.
s/vm/VM for consistent casing for log messages.
@@ -741,12 +741,15 @@ class Montage(object): The type of montage (e.g. 'standard_1005'). selection : array of int The indices of the selected channels in the montage file. + fids : dict | None + A dictionary specifying the fiducials as keys: lpa, rpa, nasion """ - def __init__(self, pos, ch_names, kind, selection): + def __init__(self, pos, ch_names, kind, selection, fids): self.pos = pos self.ch_names = ch_names self.kind = kind self.selection = selection + self.fids = fids def __repr__(self): s = '<Montage | %s - %d Channels: %s ...>'
[_auto_topomap_coords->[_pol_to_cart,_cart_to_sph],make_grid_layout->[Layout],read_montage->[Montage],find_layout->[read_layout,make_eeg_layout],read_layout->[_read_lout,Layout,_read_lay],make_eeg_layout->[Layout],_pair_grad_sensors->[_find_topomap_coords]]
Initialize object.
can you give an example in the docstring? and put a . at the end of sentence?
@@ -216,13 +216,13 @@ define([ magFilter = TextureMagnificationFilter.NEAREST; } - sampler = { + sampler = new Sampler({ wrapS : TextureWrap.CLAMP_TO_EDGE, wrapT : TextureWrap.CLAMP_TO_EDGE, minificationFilter : minFilter, magnificationFilter : magFilter, maximumAnisotropy : 1.0 - }; + }); } if (this._pixelDatatype === PixelDatatype.FLOAT) {
[No CFG could be retrieved]
The texture s sampler and the negativeY functions are defined by the constructor. Initializes the parameters of the texture.
Any idea why just don't have `_sampler` defined? It would clean up some logic here.
@@ -224,6 +224,16 @@ class User < ApplicationRecord end end + # TODO: (Alex Smith) - Adjust TTL and limit query size post Codeland + def cached_subscription_source_article_ids + cache_key = "user-#{id}-#{subscribed_to_user_subscriptions_count}/subscription_source_article_ids" + Rails.cache.fetch(cache_key, expires_in: 12.hours) do + subscribed_to_user_subscriptions. + where(user_subscription_sourceable_type: "Article"). + pluck(:user_subscription_sourceable_id) + end + end + # handles both old (prefer_language_*) and new (Array of language codes) formats def preferred_languages_array return @preferred_languages_array if defined?(@preferred_languages_array)
[User->[check_for_username_change->[path],send_welcome_notification->[send_welcome_notification],blocked_by?->[blocking?],auditable?->[any_admin?],blocking?->[blocking?],resave_articles->[path]]]
Returns an array of all followers and podcasts that are following this user.
Very open to changing the TTL. Just stuck with the `12.hours` trend. It'll be ignored anyway with `subscribed_to_user_subscriptions_count` changing.
@@ -16,6 +16,7 @@ */ package org.apache.hadoop.hdds.scm.ha; + /** * Interface for background services in SCM, including ReplicationManager, * SCMBlockDeletingService and BackgroundPipelineCreator.
[No CFG could be retrieved]
This class implements the logic for handling background events in a background service. Node handler triggered.
undo this change.
@@ -89,8 +89,15 @@ public class RedisProtocol extends AbstractProtocol { config.setTimeBetweenEvictionRunsMillis(url.getParameter("time.between.eviction.runs.millis", 0)); if (url.getParameter("min.evictable.idle.time.millis", 0) > 0) config.setMinEvictableIdleTimeMillis(url.getParameter("min.evictable.idle.time.millis", 0)); - final JedisPool jedisPool = new JedisPool(config, url.getHost(), url.getPort(DEFAULT_PORT), - url.getParameter(Constants.TIMEOUT_KEY, Constants.DEFAULT_TIMEOUT)); + if (StringUtils.isBlank(url.getPassword())) { + jedisPool = new JedisPool(config, url.getHost(), url.getPort(DEFAULT_PORT), + url.getParameter(Constants.TIMEOUT_KEY, Constants.DEFAULT_TIMEOUT), null, + url.getParameter("db.index", 0)); + } else { + jedisPool = new JedisPool(config, url.getHost(), url.getPort(DEFAULT_PORT), + url.getParameter(Constants.TIMEOUT_KEY, Constants.DEFAULT_TIMEOUT), url.getPassword(), + url.getParameter("db.index", 0)); + } final int expiry = url.getParameter("expiry", 0); final String get = url.getParameter("get", "get"); final String set = url.getParameter("set", Map.class.equals(type) ? "put" : "set");
[RedisProtocol->[getSerialization->[getParameter,getExtension],export->[UnsupportedOperationException,getUrl],refer->[doInvoke->[getMessage,equals,expire,serialize,ByteArrayOutputStream,warn,RpcResult,del,setCode,IllegalArgumentException,getArguments,writeObject,set,deserialize,ByteArrayInputStream,getResource,RpcException,close,getBytes,UnsupportedOperationException,getName,getMethodName,toByteArray,get,readObject],destroy->[getMessage,warn,destroy],JedisPool,getMessage,setMaxTotal,equals,setMinEvictableIdleTimeMillis,GenericObjectPoolConfig,setTestOnReturn,setMinIdle,setTestWhileIdle,RpcException,setTestOnBorrow,setTimeBetweenEvictionRunsMillis,getHost,setMaxWaitMillis,getName,getPort,setNumTestsPerEvictionRun,getParameter,setMaxIdle]]]
Creates a new object pool with the specified properties. Invokes a method with the given arguments. This method is invoked by the Redis service when a delete method is called.
You can use the `StringUtils.isBlank(url.getPassword()) ? ... : ...` ternary style to replace the if/else and reduce the duplicated code.
@@ -53,6 +53,18 @@ func (r *Runtime) Log(containers []*Container, options *LogOptions, logChannel c // ReadLog reads a containers log based on the input options and returns loglines over a channel func (c *Container) ReadLog(options *LogOptions, logChannel chan *LogLine) error { + // TODO Skip sending logs until journald logs can be read + // TODO make this not a magic string + if c.LogDriver() == JournaldLogging { + if options.Follow { + return errors.Errorf("The follow option with journald logging is not currently supported") + } + return c.readFromJournal(options, logChannel) + } + return c.readFromLogFile(options, logChannel) +} + +func (c *Container) readFromLogFile(options *LogOptions, logChannel chan *LogLine) error { t, tailLog, err := getLogFile(c.LogPath(), options) if err != nil { // If the log file does not exist, this is not fatal.
[Since->[After],ReadLog->[LogPath,ID,IsNotExist,Wrapf,Done,Error,Cause,Partial,Since,Add],String->[Sprintf,Format],Log->[ReadLog],Wrapf,Join,ReadFile,Errorf,Parse,Partial,TailFile,Split]
ReadLog reads the log file for the container and sends it to the logChannel.
@baude We support follow with journald events, right? How bad would this be to add?
@@ -73,7 +73,9 @@ def download(url, target_full_path, md5sum): with open(target_full_path, 'wb') as fh: streamed_bytes = 0 for chunk in resp.iter_content(2 ** 14): - streamed_bytes += len(chunk) + # chunk could be the decompressed form of the real data + # but we want the exact number of bytes read till now + streamed_bytes = resp.raw.tell() try: fh.write(chunk) except IOError as e:
[download->[SingleThreadCondaSession,disable_ssl_verify_warning]]
Download a file from the given URL and store it in target_full_path. Raise an exception if the URL is not available.
Does this need to be inside the for loop, or can we just call it once after the loop exits and right before line 89/91 if content_length and streamed_bytes != content_length: ? Does my question make sense?
@@ -382,12 +382,10 @@ namespace DSCore /// <returns name="list">List with items removed.</returns> /// <search>remove,index,indices,cull</search> public static IList RemoveItemAtIndex( - [ArbitraryDimensionArrayImport] IList list, - [ArbitraryDimensionArrayImport] object indices) + IList list, + int[] indices) { - return indices is ICollection - ? list.Cast<object>().Where((_, i) => !((IList)indices).Contains(i)).ToList() - : list.Cast<object>().Where((_, i) => i != (int)indices).ToList(); + return list.Cast<object>().Where((_, i) => !indices.Contains(i)).ToList(); } /// <summary>
[List->[Count->[Count],GetCombinations->[Singleton,GetCombinations],GetPermutations->[Singleton,GetPermutations],FilterByMaskHelper->[FilterByMaskHelper],LastItem->[Count],IList->[Count]]]
Removes the item at the specified index from the list.
indices parameter of method List.RemoveItemAtIndex is changed to int[] from object so that only integers are passed. VM will automatically promote single object to a collection while calling this method.
@@ -326,7 +326,7 @@ class Openmpi(AutotoolsPackage): # knem support was added in 1.5 conflicts('fabrics=knem', when='@:1.4') - conflicts('schedulers=slurm ~pmi', when='@1.5.4:', + conflicts('schedulers=slurm ~pmi ~legacylaunchers', when='@1.5.4:', msg='+pmi is required for openmpi(>=1.5.5) to work with SLURM.') conflicts('schedulers=loadleveler', when='@3.0.0:', msg='The loadleveler scheduler is not supported with '
[Openmpi->[setup_dependent_build_environment->[setup_run_environment],filter_rpaths->[filter_lang_rpaths],test->[_test_examples,_test_bin_ops,_test_check_versions]]]
This function returns a list of all possible versions of the NICs that are not compatible. Determine the version of OpenMPI from its output.
I don't understand the restriction here or configuration change below. The `legacylaunchers` option is just to remove the installed mpiexec/mpirun , not change the config in any other way.
@@ -286,7 +286,7 @@ namespace System version & 0xFF); } - private static readonly Lazy<bool> _net5CompatFileStream = new Lazy<bool>(() => GetStaticNonPublicBooleanPropertyValue("System.IO.FileStreamHelpers", "UseNet5CompatStrategy")); + private static readonly Lazy<bool> _net5CompatFileStream = new Lazy<bool>(() => GetStaticNonPublicBooleanPropertyValue("System.IO.Strategies.FileStreamHelpers", "UseNet5CompatStrategy")); public static bool IsNet5CompatFileStreamEnabled => _net5CompatFileStream.Value;
[PlatformDetection->[GetTls11Support->[OpenSslGetTlsSupport],GetTls13Support->[AndroidGetSslProtocolSupport],GetTls10Support->[OpenSslGetTlsSupport],GetStaticNonPublicBooleanPropertyValue]]
Get the ICU version.
Should we make this throw if the type can't be found?
@@ -337,7 +337,12 @@ func (a *apiServer) InspectBranchInTransaction(txnCtx *txncontext.TransactionCon func (a *apiServer) ListBranch(request *pfs.ListBranchRequest, srv pfs.API_ListBranchServer) (retErr error) { func() { a.Log(request, nil, nil, 0) }() defer func(start time.Time) { a.Log(request, nil, retErr, time.Since(start)) }(time.Now()) - return a.driver.listBranch(srv.Context(), request.Repo, request.Reverse, srv.Send) + if request.Repo == nil { + return a.driver.listBranch(srv.Context(), request.Reverse, srv.Send) + } + return a.txnEnv.WithReadContext(srv.Context(), func(txnCtx *txncontext.TransactionContext) error { + return a.driver.listBranchInTransaction(txnCtx, request.Repo, request.Reverse, srv.Send) + }) } // DeleteBranchInTransaction is identical to DeleteBranch except that it can run
[FinishCommit->[FinishCommit],SquashCommitSet->[SquashCommitSet],CreateFileSet->[modifyFile],CreateBranch->[CreateBranch],RunLoadTestDefault->[RunLoadTest],DeleteRepo->[DeleteRepo],InspectRepo->[InspectRepoInTransaction],Write->[Write],DeleteBranch->[DeleteBranch],RunLoadTest->[CreateBranch,CreateRepo],CreateRepo->[CreateRepo],StartCommit->[StartCommit]]
ListBranch - list branch.
I separated these two explicitly based on transaction compatibility, but I am happier to call out that this is a different path with potentially different auth requirements
@@ -152,7 +152,7 @@ if (isset($device['device_id']) && $device['device_id'] > 0) { $param = array($device['device_id']); } -$query = " FROM alert_rules $sql ORDER BY id ASC"; +$query = " FROM alert_rules $sql"; $count_query = $count_query.$query; $count = dbFetchCell($count_query, $param); if (!isset($_POST['page_number']) && $_POST['page_number'] < 1) {
[No CFG could be retrieved]
Displays a table of alert rules that match the given criteria.
don't need ORDER BY for count query so moved to full query.
@@ -168,6 +168,8 @@ public abstract class KafkaSource<S, D> extends EventBasedSource<S, D> { private MetricContext metricContext; protected Optional<LineageInfo> lineageInfo; + @Setter + protected EventBus eventBus; private List<String> getLimiterExtractorReportKeys() { List<String> keyNames = new ArrayList<>();
[KafkaSource->[getWorkUnitForTopicPartition->[addDatasetUrnOptionally,getWorkUnitForTopicPartition],getFilteredTopics->[getFilteredTopics],getWorkunits->[getLimiterExtractorReportKeys,setLimiterReportKeyListToWorkUnits],addTopicSpecificPropsToWorkUnit->[addTopicSpecificPropsToWorkUnit],WorkUnitCreator->[run->[getWorkUnitsForTopic]],createEmptyWorkUnit->[getWorkUnitForTopicPartition]]]
This method returns the list of keys that can be used to report limiter extractor reports.
it sounds awkward to set this object from external.