patch stringlengths 18 160k | callgraph stringlengths 4 179k | summary stringlengths 4 947 | msg stringlengths 6 3.42k |
|---|---|---|---|
@@ -68,11 +68,15 @@ public class TryScope extends AbstractMessageProcessorOwner implements Scope {
return event;
}
final boolean txPrevoiuslyActive = isTransactionActive();
+ Transaction previousTx = TransactionCoordination.getInstance().getTransaction();
ExecutionTemplate<CoreEvent> executionTemplate =
createScopeTransactionalExecutionTemplate(muleContext, transactionConfig);
ExecutionCallback<CoreEvent> processingCallback = () -> {
- if (!txPrevoiuslyActive && isTransactionActive()) {
- TransactionAdapter transaction = (TransactionAdapter) TransactionCoordination.getInstance().getTransaction();
+ Transaction newTx = TransactionCoordination.getInstance().getTransaction();
+ // Whether there wasn't a tx and now there is, or if there is a newer one (if we have a nested tx, using xa)
+ // we must set the component location of this try scope
+ if ((!txPrevoiuslyActive && isTransactionActive()) || (txPrevoiuslyActive && previousTx != newTx)) {
+ TransactionAdapter transaction = (TransactionAdapter) newTx;
transaction.setComponentLocation(getLocation());
return processWithChildContextBlocking(event, nestedChain, ofNullable(getLocation()), messagingExceptionHandler);
} else {
| [TryScope->[stop->[stop],initialise->[initialise],dispose->[dispose],start->[start],apply->[apply]]] | Process the given event using the current context of the current transaction. | newTx -> currentTx |
@@ -52,6 +52,14 @@ env => {
serviceAdapter = new ServiceAdapter(subscriptionService);
});
+ describe('getEncryptedDocumentKey', () => {
+ it('should call getEncryptedDocumentKey of subscription service', () => {
+ const stub = sandbox.stub(subscriptionService, 'getEncryptedDocumentKey');
+ serviceAdapter.getEncryptedDocumentKey('serviceId');
+ expect(stub).to.be.calledOnce;
+ });
+ });
+
describe('getPageConfig', () => {
it('should call getPageConfig of subscription service', () => {
const stub = sandbox.stub(subscriptionService, 'getPageConfig')
| [No CFG could be retrieved] | The real win environment Delegate action to local service. | If we do go down this path, we should also check `calledWith('serviceId')` and that it returns the same value as the service returns. Otherwise, it's just a basic call propagation and doesn't have to be tested. |
@@ -152,7 +152,7 @@ class Task(Model):
finished_at = models.DateTimeField(null=True)
non_fatal_errors = JSONField(default=list)
- result = JSONField(null=True)
+ error = JSONField(null=True)
parent = models.ForeignKey("Task", null=True, related_name="spawned_tasks")
worker = models.ForeignKey("Worker", null=True, related_name="tasks")
| [ReservedResource->[ForeignKey,OneToOneField,TextField],TaskTag->[ForeignKey,TextField],WorkerManager->[get_unreserved_worker->[DoesNotExist,annotate,filter,Count]],TaskLock->[DateTimeField,TextField],Worker->[save_heartbeat->[save],DateTimeField,WorkerManager,TextField],Task->[set_failed->[save,exception_to_dict,now],set_running->[warning,save,now,_],set_completed->[warning,save,now,_],JSONField,DateTimeField,ForeignKey,UUIDField,TextField],getLogger] | set this task to the running state save it and log output in warning cases. | The Fields in the docstring need to be updated. |
@@ -1174,7 +1174,12 @@ namespace Microsoft.Xna.Framework
return matrix1;
}
-
+ /// <summary>
+ /// Divides the components of a <see cref="Matrix"/> by a scalar.
+ /// </summary>
+ /// <param name="matrix1">Source <see cref="Matrix"/>.</param>
+ /// <param name="divider">Divisor scalar.</param>
+ /// <param name="result">The result of dividing a matrix by a scalar as an output parameter.</param>
public static void Divide(ref Matrix matrix1, float divider, out Matrix result)
{
float num = 1f / divider;
| [Matrix->[GetHashCode->[GetHashCode],Equals->[Equals],CreateFromYawPitchRoll->[CreateFromQuaternion,CreateFromYawPitchRoll],CreateScale->[CreateScale],Add]] | Creates a new matrix that is the sum of all elements of the given matrix divided by the Creates a new matrix with all elements of the named order that are not missing. | Use `elements` instead of `components`. |
@@ -285,8 +285,15 @@ namespace System.Text.Json.Serialization
internal bool TryWriteDataExtensionProperty(Utf8JsonWriter writer, T value, JsonSerializerOptions options, ref WriteStack state)
{
+ if (!IsInternalConverter)
+ {
+ return TryWrite(writer, value, options, ref state);
+ }
+
Debug.Assert(this is JsonDictionaryConverter<T>);
+ state.Current.PolymorphicJsonPropertyInfo = state.Current.DeclaredJsonPropertyInfo!.RuntimeClassInfo.ElementClassInfo!.PropertyInfoForClassInfo;
+
if (writer.CurrentDepth >= options.EffectiveMaxDepth)
{
ThrowHelper.ThrowJsonException_SerializerCycleDetected(options.EffectiveMaxDepth);
| [JsonConverter->[TryWrite->[TryWriteAsObject,OnTryWrite],TryRead->[OnTryRead]]] | Try write data extension property. | Let's assert that `value` is not `null` at the start of this method. |
@@ -88,6 +88,7 @@ public class ContentProviderTest {
"pdsqoelhmemmmbwjunnu",
"scxipjiyozczaaczoawo",
"cmxieunwoogyxsctnjmv::abcdefgh::ZYXW",
+ "cmxieunwoogyxsctnjmv::INSBGDS",
};
private static final String TEST_MODEL_NAME = "com.ichi2.anki.provider.test.a1x6h9l";
private static final String[] TEST_MODEL_FIELDS = {"FRONTS","BACK"};
| [ContentProviderTest->[testSuspendCard->[getCol,getFirstCardFromScheduler],testInsertField->[getCol],getCol->[getCol],testQueryNextCard->[getCol],testQueryCardFromCertainDeck->[getCol],testUpdateTags->[getCol,getFirstCardFromScheduler],setUp->[getCol],tearDown->[getCol],reopenCol->[getCol],testQueryCertainDeck->[getCol],testBuryCard->[getCol,getFirstCardFromScheduler],testAnswerCard->[getCol,getFirstCardFromScheduler],testProviderProvidesDefaultForEmptyModelDeck->[getCol],testQueryAllDecks->[getCol],testInsertTemplate->[getCol]]] | A content provider test for the presence of a specific header field value. The initial capacity of the model. | Confirming: does this break the test when used in association with the previous pull request? |
@@ -385,5 +385,7 @@ public class NewSparkInterpreterTest {
}
})
);
+ context.setClient(mockRemoteEventClient);
+ return context;
}
}
| [NewSparkInterpreterTest->[testSparkInterpreter->[run->[assertTrue,interpret,code,printStackTrace,contains,assertEquals],assertTrue,getValue,size,getDelegation,flush,mock,Thread,start,getType,getDefaultValue,join,cancel,setInterpreterGroup,setProperty,code,getDisplayName,trim,sleep,getData,completion,isAlive,open,assertEquals,SparkInterpreter,interpret,getOptions,getName,getProgress,get,Properties,contains,getInterpreterContext],testDependencies->[assertTrue,getDelegation,mock,openStream,getAbsolutePath,URL,setInterpreterGroup,setProperty,code,File,transferFrom,open,assertEquals,SparkInterpreter,interpret,FileOutputStream,newChannel,Properties,getInterpreterContext],tearDown->[close],getInterpreterContext->[onAppend->[getData,printStackTrace],InterpreterContext,AngularObjectRegistry,GUI,AuthenticationInfo,InterpreterOutputListener,InterpreterOutput]]] | get interpreter context. | Is this a good idea to have a similar test for OldSparkInterpreter too? |
@@ -15,11 +15,7 @@
*/
package com.alibaba.dubbo.examples.annotation.api;
-/**
- * AsyncService
- *
- * @author william.liangf
- */
+
public interface AnnotationService {
String sayHello(String name);
| [No CFG could be retrieved] | Say hello. | only remove author info |
@@ -1715,7 +1715,7 @@ dss_srv_init()
dss_register_key(&daos_srv_modkey);
xstream_data.xd_init_step = XD_INIT_REG_KEY;
- rc = bio_nvme_init(dss_storage_path, dss_nvme_conf, dss_nvme_shm_id);
+ rc = bio_nvme_init(dss_storage_path, dss_nvme_conf, dss_nvme_shm_id, dss_nvme_mem_size);
if (rc != 0)
D_GOTO(failed, rc);
xstream_data.xd_init_step = XD_INIT_NVME;
| [No CFG could be retrieved] | function to initialize the xstream takes all the data from the DSS and creates the necessary state. | (style) line over 80 characters |
@@ -39,7 +39,7 @@ class GobiertoBudgets::BudgetLineIntegrationTest < ActionDispatch::IntegrationTe
end
def test_invalid_budget_line_url
- with_current_site(site) do
+ with_each_current_site(placed_site, organization_site) do |site|
visit gobierto_budgets_budget_line_path("1", last_year, GobiertoBudgets::EconomicArea.area_name, "foo")
assert_equal 400, status_code
| [test_invalid_budget_line_url->[visit,assert_equal,gobierto_budgets_budget_line_path,with_current_site,area_name],site->[sites],test_metric_boxes->[visit,all?,text,has_css?,assert,with_current_site],last_year->[last],setup->[gobierto_budgets_budget_line_path,area_name],test_budget_line_information->[visit,has_content?,with_current_site,assert],require] | Checks if there is a 404 invalid budget line url. | Unused block argument - site. You can omit the argument if you don't care about it. |
@@ -353,6 +353,7 @@ func (c *Config) Adjust(meta *toml.MetaData) error {
if meta == nil || !meta.IsDefined("enable-prevote") {
c.PreVote = true
}
+ c.PDServerCfg.EnableRegionStorage = true
return nil
}
| [Parse->[Parse],adjust->[validate],Adjust->[validate,Parse],Parse] | Adjust applies the default values to the configuration. adjusts the configuration parameters to match the default values. | Can we provide an option for this? |
@@ -298,12 +298,16 @@ func GetOwnedOrgsByUserIDDesc(userID int64, desc string) ([]*User, error) {
// GetOrgUsersByUserID returns all organization-user relations by user ID.
func GetOrgUsersByUserID(uid int64, all bool) ([]*OrgUser, error) {
ous := make([]*OrgUser, 0, 10)
- sess := x.Where("uid=?", uid)
+ sess := x.
+ Join("LEFT", "user", `"org_user".org_id="user".id`).
+ Where(`"org_user".uid=?`, uid)
if !all {
// Only show public organizations
sess.And("is_public=?", true)
}
- err := sess.Find(&ous)
+ err := sess.
+ Asc("`user`.name").
+ Find(&ous)
return ous, err
}
| [GetUserRepositories->[GetUserTeamIDs],GetUserMirrorRepositories->[GetUserTeamIDs],GetUserTeamIDs->[getUserTeams],GetTeams->[getTeams],GetUserTeams->[getUserTeams],GetTeam->[getTeam],getOwnerTeam->[getTeam],GetOwnerTeam->[getOwnerTeam],RemoveOrgRepo->[removeOrgRepo],GetOwnerTeam] | GetOrgUsersByUserIDDesc returns a list of organizations owned by given user ID ordered descending ChangeOrgUserStatus changes public or private membership status. | No JOIN should be needed here, as user id is already in the OrgUser table ? |
@@ -346,7 +346,7 @@ class Options(object):
for (_, kwargs) in sorted(parser.option_registrations_iter()):
if kwargs.get('recursive', False) and not kwargs.get('recursive_root', False):
continue # We only need to fprint recursive options once.
- if bool(invert) == bool(kwargs.get(fingerprint_key, False)):
+ if kwargs.get(fingerprint_key, fingerprint_default) is not True:
continue
# Note that we read the value from scope, even if the registration was on an enclosing
# scope, to get the right value for recursive options (and because this mirrors what
| [Options->[for_global_scope->[for_scope],drop_flag_values->[Options],create->[OptionTrackerRequiredError,complete_scopes],registration_function_for_optionable->[register->[register]],for_scope->[for_scope],__getitem__->[for_scope],get_fingerprintable_for_scope->[for_scope,passthru_args_for_scope]]] | Returns a list of fingerprintable options for the given scope. Returns a list of tuples of tuples where the first tuple is the key and the second is. | I think this is >10x more readable and will hopefully avoid future confusion about the `invert` argument. |
@@ -0,0 +1,16 @@
+from django.db import models
+
+from ..core.permissions import ChannelPermission
+
+
+class Channel(models.Model):
+ name = models.CharField(max_length=250)
+ slug = models.SlugField(max_length=255, unique=True)
+ currencyCode = models.CharField(max_length=128)
+
+ class Meta:
+ ordering = ("slug",)
+ app_label = "channel"
+ permissions = (
+ (ChannelPermission.MANAGE_CHANNELS.codename, "Manage channels.",),
+ )
| [No CFG could be retrieved] | No Summary Found. | It should be in snake case: `currency_code` |
@@ -175,7 +175,14 @@ async function confirmTransfer(transfer, user) {
* @returns {Promise<String>} Hash of the transaction
*/
async function executeTransfer(transfer, transferTaskId, token) {
- const user = await hasBalance(transfer.userId, transfer.amount, transfer.id)
+ const balance = await getBalance(transfer.userId)
+ // Subtract the current transfer amount from the available balance because
+ // it is what we are transferring
+ if (BigNumber(transfer.amount).gt(balance.minus(transfer.amount))) {
+ throw new RangeError(
+ `Amount of ${transfer.amount} OGN exceeds the ${balance} available for executing transfer for user ${transfer.userId}`
+ )
+ }
await transfer.update({
status: enums.TransferStatuses.Processing,
| [No CFG could be retrieved] | Executes a single token transfer Update the status of the crediting token and send a transaction to the user. | same question regarding locking user. |
@@ -284,6 +284,8 @@ public class DataSourceUtils {
props.getString(DataSourceWriteOptions.HIVE_PASS_OPT_KEY(), DataSourceWriteOptions.DEFAULT_HIVE_PASS_OPT_VAL());
hiveSyncConfig.jdbcUrl =
props.getString(DataSourceWriteOptions.HIVE_URL_OPT_KEY(), DataSourceWriteOptions.DEFAULT_HIVE_URL_OPT_VAL());
+ hiveSyncConfig.hiveMetaStoreUri =
+ props.getString(DataSourceWriteOptions.HIVE_URL_OPT_KEY(), DataSourceWriteOptions.DEFAULT_HIVE_METASTORE_URI_OPT_VAL());
hiveSyncConfig.partitionFields =
props.getStringList(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY(), ",", new ArrayList<>());
hiveSyncConfig.partitionValueExtractorClass =
| [DataSourceUtils->[createHoodieRecord->[createPayload],buildHiveSyncConfig->[checkRequiredProperties],getTablePath->[getTablePath],dropDuplicates->[dropDuplicates],createHoodieClient->[createHoodieConfig],doWriteOperation->[createUserDefinedBulkInsertPartitioner],getCommitActionType->[getCommitActionType]]] | Build HiveSyncConfig from typed properties. HiveSyncConfig. | is this not "HIVE_METASTORE_URI_OPT_VAL()" ? |
@@ -239,9 +239,8 @@ namespace System.Runtime.InteropServices.JavaScript
if (IDFromJSOwnedObject.TryGetValue(o, out result))
return result;
- result = NextJSOwnedObjectID++;
+ result = (int)(IntPtr)GCHandle.Alloc(o, GCHandleType.Normal);
IDFromJSOwnedObject[o] = result;
- JSOwnedObjectFromID[result] = o;
return result;
}
}
| [Runtime->[DateTime->[DateTime],InvokeJS->[InvokeJS],SetupJSContinuation->[FreeObject],IntPtr->[SafeHandleAddRef],DumpAotProfileData->[DumpAotProfileData],New->[New],SafeHandleReleaseByHandle->[SafeHandleRelease],CompileFunction->[CompileFunction],GetGlobalObject->[GetGlobalObject]]] | GetJSOwnedObjectHandle - gets the object handle. | I don't think this is valid, afaik there's no guarantee that a GCHandle's id will not be re-used |
@@ -22,12 +22,13 @@ class GCSResultHandler(ResultHandler):
def __init__(self, bucket: str = None) -> None:
self.client = storage.Client()
+ self._bucket = bucket # used for serialization
self.bucket = self.client.bucket(bucket)
super().__init__()
def write(self, result: Any) -> str:
"""
- Given a result, writes the result and writes to a location in GCS
+ Given a result, writes the result to a location in GCS
and returns the resulting URI.
Args:
| [GCSResultHandler->[read->[debug,error,format,blob,b64decode,loads],__init__->[bucket,super,Client],write->[debug,uuid4,now,b64encode,format,blob]]] | Initialize the object with a client and a bucket. | Suggestion: store the string as the full `self.bucket` attribute (which will automatically work with serialization and not require overriding `create_object`) and dynamically create the `bucket` EITHER as `self.gcs_bucket` in `__init__` OR dynamically in each of the instance methods, since I don't think GCS really does any work when the bucket object is created (it's not sent to GCP to confirm it exists, for example) so it's a very low-overhead call |
@@ -142,6 +142,8 @@ public interface ComponentMapper {
void resetBChangedForRootComponentUuid(@Param("projectUuid") String projectUuid);
+ void setPrivateForRootComponentUuid(@Param("projectUuid") String projectUuid, @Param("isPrivate") boolean isPrivate);
+
void delete(long componentId);
void updateTags(ComponentDto component);
| [No CFG could be retrieved] | This method resets b - changed flag for root component. | I would prefer `componentUuid`, if it is not only relevant for qualifier `TRK`. |
@@ -75,7 +75,7 @@ namespace System.Text.Json.Serialization
}
// Provide a default implementation for value converters.
- internal virtual bool OnTryRead(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options, ref ReadStack state, out T value)
+ internal virtual bool OnTryRead(ref Utf8JsonReader reader, Type typeToConvert, JsonSerializerOptions options, ref ReadStack state, [MaybeNullWhen(false)] out T value)
{
value = Read(ref reader, typeToConvert, options);
return true;
| [JsonConverter->[TryWrite->[TryWriteAsObject,OnTryWrite],TryRead->[OnTryRead]]] | OnTryRead is called when a try - read operation is successful. | I don't know if this is accurate. Why was this annotation necessary? - `T` could be null when true (imagine the payload contains `{"foo": null}`) - `T` could also be non-null when returning true (payload is `{"foo": "hello"}`) - `T` could be null when false **Questions:** - I don't know whether `T` can be non-null when returning false or if it is guaranteed to be null when returning false - I don't know when `OnTryRead` is intended to return false. @steveharter - what is the intent here of this design and how do components that override this virtual behave? |
@@ -7606,7 +7606,7 @@ void wallet2::get_outs(std::vector<std::vector<tools::wallet2::get_outs_entry>>
// check whether we're shortly after the fork
uint64_t height;
boost::optional<std::string> result = m_node_rpc_proxy.get_height(height);
- throw_on_rpc_response_error(result, "get_info");
+ THROW_WALLET_EXCEPTION_IF(result, error::wallet_internal_error, "Failed to get height");
bool is_shortly_after_segregation_fork = height >= segregation_fork_height && height < segregation_fork_height + SEGREGATION_FORK_VICINITY;
bool is_after_segregation_fork = height >= segregation_fork_height;
| [No CFG could be retrieved] | Find random output keys for the requested amount. missing public key. | Why ignore error cases? |
@@ -172,16 +172,13 @@ class TagManager implements TagManagerInterface
$tag = $this->tagRepository->createNew();
}
- $user = $this->userRepository->findUserById($userId);
-
// update data
$tag->setName($name);
- $tag->setChanger($user);
if (!$id) {
- $tag->setCreator($user);
$this->em->persist($tag);
}
+
$this->em->flush();
return $tag;
| [TagManager->[findOrCreateByName->[findByName],resolveTagIds->[findById],resolveTagNames->[findByName]]] | Save a tag. | Just a small reminder for me: Test if this still works. |
@@ -222,6 +222,18 @@ public class SegmentMetadataQueryQueryToolChest extends QueryToolChest<SegmentAn
}
};
}
+
+ @Override
+ public Function<SegmentAnalysis, SegmentAnalysis> prepareForResultLevelCache()
+ {
+ return prepareForCache();
+ }
+
+ @Override
+ public Function<SegmentAnalysis, SegmentAnalysis> pullFromResultLevelCache()
+ {
+ return pullFromCache();
+ }
};
}
| [SegmentMetadataQueryQueryToolChest->[makeMetrics->[makeMetrics],getCacheStrategy->[getCacheObjectClazz->[getResultTypeReference]]]] | Returns a cache strategy that caches the given segments. Checks if the target interval overlaps with the current interval. | can we make these default implementations in CacheStrategy interface itself , so that they are not repeated everywhere ? |
@@ -759,11 +759,12 @@ class ostatus {
$contact = q("SELECT `id`, `rel`, `network` FROM `contact` WHERE `uid` = %d AND `nurl` = '%s' AND `network` != '%s'",
$uid, normalise_link($actor), NETWORK_STATUSNET);
- if (!$contact)
+ if (!dbm::is_result($contact)) {
$contact = q("SELECT `id`, `rel`, `network` FROM `contact` WHERE `uid` = %d AND `alias` IN ('%s', '%s') AND `network` != '%s'",
$uid, $actor, normalise_link($actor), NETWORK_STATUSNET);
+ }
- if ($contact) {
+ if (dbm::is_resul($contact)) {
logger("Found contact for url ".$actor, LOGGER_DEBUG);
$details["contact_id"] = $contact[0]["id"];
$details["network"] = $contact[0]["network"];
| [ostatus->[add_author->[appendChild,createElement],follow_entry->[get_hostname,appendChild],entry_header->[setAttribute,createElementNS,createElement,appendChild],add_header->[setAttribute,createElementNS,appendChild],source_entry->[createElement],like_entry->[appendChild,createElement],fetchauthor->[item,query],salmon->[saveXML,appendChild],reshare_entry->[appendChild,createElement],feed->[saveXML,appendChild],import->[item,registerNamespace,query,loadXML],add_person_object->[appendChild,createElement],salmon_author->[registerNamespace,query,loadXML]]] | Get the details of an actor. | You forgot a "t". |
@@ -2186,13 +2186,17 @@ def _write_raw(fname, raw, info, picks, fmt, data_type, reset_range, start,
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
# previous file name and id
+ if split_naming == 'elekta':
+ data = part_idx - 1
+ else:
+ data = part_idx - 2
if part_idx > 0 and prev_fname is not None:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, info['meas_id'])
- write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
+ write_int(fid, FIFF.FIFF_REF_FILE_NUM, data)
end_block(fid, FIFF.FIFFB_REF)
pos_prev = fid.tell()
| [_start_writing_raw->[append],ToDataFrameMixin->[to_data_frame->[_get_check_picks]],_check_update_montage->[append],BaseRaw->[notch_filter->[notch_filter],apply_function->[_check_fun],_preload_data->[_read_segment],crop->[_update_times,set_annotations],__setitem__->[_parse_get_set_params],resample->[_update_times,resample],append->[_read_segment,_update_times,append,set_annotations],estimate_rank->[time_as_index],apply_hilbert->[apply_function],__getitem__->[_read_segment,_parse_get_set_params],save->[time_as_index]],_write_raw->[close,_write_raw]] | Internal function to write a raw file with a . Write out a in the file. Write a chunk of data from the file. End of sequence of sequence of sequence of file number and index of next sequence of file number. | data is not a great name for an "int" can it be a name based on part_idx ? part_idx_tag ? |
@@ -569,8 +569,10 @@ func (fs *FS) ensureParentDir(filename string) error {
return nil
}
-func (fs *FS) requireNonEmpty() error {
- if fs.empty {
+func (fs *FS) checkEmpty(notExistErrIfEmpty bool) error {
+ if fs.empty && notExistErrIfEmpty {
+ return os.ErrNotExist
+ } else if fs.empty {
return errors.New("Not supported for an empty TLF")
}
return nil
| [SyncAll->[requireNonEmpty,SyncAll],readDir->[makeFileInfo],Lchown->[PathForLogging],Chown->[PathForLogging],Chtimes->[requireNonEmpty,PathForLogging,lookupOrCreateEntry],Open->[OpenFile],Symlink->[requireNonEmpty,PathForLogging,lookupParent,ensureParentDir],Create->[OpenFile],Readlink->[requireNonEmpty,PathForLogging,lookupParent],ToHTTPFileSystem->[WithContext],Root->[Join],ChrootAsLibFS->[requireNonEmpty,PathForLogging,lookupOrCreateEntry,Join],OpenFile->[requireNonEmpty,PathForLogging,lookupOrCreateEntry,ensureParentDir],lookupOrCreateEntry->[PathForLogging,lookupParent,lookupOrCreateEntryNoFollow],Chroot->[ChrootAsLibFS],Rename->[requireNonEmpty,Rename,mkdirAll,PathForLogging,lookupParent],Join->[Join],mkdirAll->[lookupParentWithDepth],SubscribeToObsolete->[requireNonEmpty],ensureParentDir->[mkdirAll],Lstat->[requireNonEmpty,makeFileInfo,Stat,PathForLogging,lookupParent],MkdirAll->[requireNonEmpty,PathForLogging,mkdirAll],lookupOrCreateEntryNoFollow->[lookupOrCreateEntryNoFollow],Stat->[requireNonEmpty,PathForLogging,makeFileInfo,lookupOrCreateEntry],lookupParentWithDepth->[lookupParentWithDepth],TempFile->[requireNonEmpty,Join,OpenFile],Remove->[requireNonEmpty,PathForLogging,lookupParent],Chmod->[requireNonEmpty,PathForLogging,lookupOrCreateEntry],lookupParent->[lookupParentWithDepth],ReadDir->[requireNonEmpty,PathForLogging,lookupOrCreateEntry,readDir]] | ensureParentDir creates the parent directory for filename. | I'd just call this field `errIfEmpty`. |
@@ -47,6 +47,9 @@ public class JpaOutboundGateway extends AbstractReplyProducingMessageHandler {
private final JpaExecutor jpaExecutor;
private OutboundGatewayType gatewayType = OutboundGatewayType.UPDATING;
private boolean producesReply = true; //false for outbound-channel-adapter, true for outbound-gateway
+ private EvaluationContext evaluationContext;
+ private Expression firstRecordExpression;
+
/**
* Constructor taking an {@link JpaExecutor} that wraps all JPA Operations.
| [JpaOutboundGateway->[handleRequestMessage->[IllegalArgumentException,equals,poll,build,format,executeOutboundJpaOperation],onInit->[setBeanFactory,onInit,getBeanFactory],setGatewayType->[notNull],notNull]] | Provides a way to create a Jpa outbound gateway. Executes the JPA operation on the specified request message. | Why hasn't been this property moved to `JpaExecutor`? To be together with `maxNumberOfResults` Anyway it should be `volatile` and it would better to make his name more consistent - **firstResultExpression** |
@@ -65,12 +65,10 @@ func (a keystonePasswordAuthenticator) AuthenticatePassword(username, password s
}
client.HTTPClient = *a.client
- err = openstack.AuthenticateV3(client, opts)
+ err = openstack.AuthenticateV3(client, &opts, gophercloud.EndpointOpts{})
if err != nil {
- if responseErr, ok := err.(*gophercloud.UnexpectedResponseCodeError); ok {
- if responseErr.Actual == 401 {
- return nil, false, nil
- }
+ if _, ok := err.(gophercloud.ErrDefault401); ok {
+ return nil, false, nil
}
glog.Warningf("Failed: Calling openstack AuthenticateV3: %v", err)
return nil, false, err
| [AuthenticatePassword->[UserFor,AuthenticateV3,Infof,Warningf,V,Errorf,NewClient,HandleError,Stack,NewDefaultUserIdentityInfo]] | AuthenticatePassword authenticates a user using the keystone password authentication scheme. | Do any of the options need to make it into EndpointOpts, or is empty always appropriate? |
@@ -5,9 +5,12 @@ class ImageUploadsController < ApplicationController
def create
authorize :image_upload
+ raise "too many uploads" if RateLimitChecker.new(current_user).limit_by_situation("image_upload")
+
uploader = ArticleImageUploader.new
begin
uploader.store!(params[:image])
+ limit_uploads
rescue CarrierWave::IntegrityError => e # client error
respond_to do |format|
format.json { render json: { error: e.message }, status: :unprocessable_entity }
| [ImageUploadsController->[create->[new,respond_to,render,authorize,store!,message,json,url,cloud_cover_url],before_action,after_action]] | Creates a new unique identifier for the current user. | This error shows up in the logs but isn't passed over to the client. |
@@ -89,6 +89,18 @@ func (s *PublicPoolService) SendRawTransaction(
return tx.Hash(), nil
}
+func (s *PublicPoolService) verifyChainID(tx *types.Transaction) error {
+ nodeChainID := s.hmy.ChainConfig().ChainID
+
+ if tx.ChainID().Cmp(nodeconfig.GetDefaultConfig().GetNetworkType().ChainConfig().EthCompatibleChainID) == -1 && tx.ChainID().Cmp(nodeChainID) != 0 {
+ return errors.Wrapf(
+ ErrInvalidChainID, "blockchain chain id:%s, given %s", nodeChainID.String(), tx.ChainID().String(),
+ )
+ }
+
+ return nil
+}
+
// SendRawStakingTransaction will add the signed transaction to the transaction pool.
// The sender is responsible for signing the transaction and using the correct nonce.
func (s *PublicPoolService) SendRawStakingTransaction(
| [GetPoolStats->[GetPoolStats],GetCurrentTransactionErrorSink->[GetCurrentTransactionErrorSink],GetPendingCXReceipts->[GetPendingCXReceipts],GetCurrentStakingErrorSink->[GetCurrentStakingErrorSink]] | SendRawTransaction sends a raw transaction to a single recipient or a single recipient SendRawStakingTransaction sends a raw staking transaction to the chain. | same here, can check exactly chainID with != 0 |
@@ -39,6 +39,8 @@ import org.wildfly.security.x500.cert.X509CertificateBuilder;
* @since 10.0
**/
public abstract class AbstractInfinispanServerDriver implements InfinispanServerDriver {
+ public static final String DEFAULT_CLUSTERED_INFINISPAN_CONFIG_FILE_NAME = "infinispan.xml";
+
public static final String TEST_HOST_ADDRESS = "org.infinispan.test.host.address";
public static final String BASE_DN = "CN=%s,OU=Infinispan,O=JBoss,L=Red Hat";
public static final String KEY_PASSWORD = "secret";
| [AbstractInfinispanServerDriver->[createKeyStores->[getCertificateFile],stop->[stop],start->[start],createServerHierarchy->[createServerHierarchy],createSelfSignedCertificate->[getCertificateFile],createSignedCertificate->[getCertificateFile]]] | Creates an instance of the AbstractInfinispanServerDriver. This is a public method for the base class. | Is this still necessary? |
@@ -48,8 +48,11 @@ import org.apache.gobblin.metrics.event.TimingEvent;
* A FileSystem based implementation of {@link JobStatusRetriever}. This implementation stores the job statuses
* as {@link org.apache.gobblin.configuration.State} objects in a {@link FsStateStore}.
* The store name is set to flowGroup.flowName, while the table name is set to flowExecutionId.jobGroup.jobName.
+ *
+ * This retriever is deprecated in favour of {@link MysqlJobStatusRetriever}.
*/
@Slf4j
+@Deprecated
public class FsJobStatusRetriever extends JobStatusRetriever {
public static final String CONF_PREFIX = "fsJobStatusRetriever";
| [FsJobStatusRetriever->[getLatestExecutionIdsForFlow->[limit,checkArgument,copyOf,getTableNames,jobStatusStoreName,descendingSet],getJobStatusesForFlowExecution->[jobStatusTableName,singletonIterator,error,checkArgument,getAll,getJobStatus,get,getTableNames,iterator,isEmpty,shouldFilterJobStatus,add,startsWith,jobStatusStoreName,valueOf,emptyIterator],shouldFilterJobStatus->[get,size,equals],createStateStore,getConfig]] | This class is used to retrieve the job statuses for a given flow execution. Get all the job statuses for the given flow. | I don't this should be deprecated. We should support both JobStatusRetriever implementations. |
@@ -229,14 +229,13 @@ def _delay_auto_approval_indefinitely(version):
defaults={'auto_approval_delayed_until': datetime.max})
-def run_action(version_id):
+def run_action(version):
"""This function tries to find an action to execute for a given version,
based on the scanner results and associated rules.
It is not run as a Celery task but as a simple function, in the
auto_approve CRON."""
- log.info('Checking rules and actions for version %s.', version_id)
- version = Version.objects.get(pk=version_id)
+ log.info('Checking rules and actions for version %s.', version.pk)
rule = (
ScannerRule.objects.filter(
| [_delay_auto_approval->[_flag_for_human_review],run_customs->[run_scanner],_delay_auto_approval_indefinitely->[_flag_for_human_review],run_wat->[run_scanner]] | This function tries to find an action to execute for a given version. It will also check. | Do we really need to re-fetch the object then? |
@@ -57,6 +57,14 @@ namespace Dynamo.Wpf.Extensions
get { return dynamoViewModel.PackageManagerClientViewModel; }
}
+ /// <summary>
+ /// A reference to list of Local loaded packages
+ /// </summary>
+ public System.Collections.Generic.IEnumerable<Package> LocalPackages
+ {
+ get { return dynamoViewModel.Model.GetPackageManagerExtension().PackageLoader.LocalPackages; }
+ }
+
/// <summary>
/// A reference to the Dynamo Window object. Useful for correctly setting the parent of a
/// newly created window.
| [ViewLoadedParams->[AddMenuItem->[AddItemToMenu],AddItemToMenu->[Insert,Count,Add,SearchForMenuItem],OnSelectionCollectionChanged->[SelectionCollectionChanged],AddSeparator->[AddItemToMenu],MenuItem->[ToDisplayString,ToString,Items,First],AddToExtensionsSideBar->[ExtensionAlreadyPresent,AddTabItem,ExtensionAdded,Log],BackgroundPreviewViewModel,Factory,CollectionChanged,titleBar,PackageManagerClientViewModel,Model]] | Creates a new view object that can be used to create a new view object. Extension object that is being added to the extensions side bar. | I find this super weird - Can you avoid adding anything here for now and lets just get the ExtensionsManager and then the PackageManagerExtension to avoid polluting this class until we have a plan? |
@@ -206,7 +206,9 @@ texinfo_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
+ 'torch': ('https://pytorch.org/docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
+ 'PIL': ('https://pillow.readthedocs.io/en/stable/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
| [setup->[connect],inject_minigalleries->[append,split],patched_make_field->[handle_item->[astext,make_xrefs,pop,extend,len,paragraph,literal_strong,replace,isinstance,join,Text],list_item,handle_item,list_type,field_body,len,field,field_name],get_html_theme_path] | One entry per manual page output --------------------------------------- The type of the field is the type of the field. | Could we also add matplotlib here? |
@@ -37,6 +37,8 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+WORKUNIT_TY = Dict[str, Any]
+
@dataclass(frozen=True)
class ExecutionRequest:
| [Scheduler->[_to_type->[_to_id],rule_graph_visualization->[visualize_rule_graph_to_file],graph_len->[graph_len],visualize_graph_to_file->[_raise_or_return],poll_workunits->[_from_value],visualize_rule_graph_to_file->[_raise_or_return],visualize_rule_subgraph_to_file->[_to_ids_buf,_to_id,_raise_or_return],add_root_selection->[_to_vals_buf,_to_type,_to_params_list,_raise_or_return],_metrics->[_from_value],lease_files_in_graph->[lease_files_in_graph],new_session->[new_session],_run_and_return_roots->[ExecutionError,_from_value],rule_subgraph_visualization->[visualize_rule_subgraph_to_file],check_invalidation_watcher_liveness->[check_invalidation_watcher_liveness],garbage_collect_store->[garbage_collect_store],graph_trace->[graph_trace],_register_task->[add_get_edge->[_to_type],_to_key,add_get_edge,_to_type]],SchedulerSession->[lease_files_in_graph->[lease_files_in_graph],execute->[_run_and_return_roots,_maybe_visualize,graph_len],garbage_collect_store->[garbage_collect_store],merge_directories->[_to_value,_DirectoryDigests,merge_directories,_raise_or_return],_trace_on_error->[trace,ExecutionError],node_count->[graph_len],metrics->[_metrics],run_goal_rule->[_trace_on_error,execution_request,visualize_rule_subgraph_to_file,execute,_to_params_list],run_local_interactive_process->[run_local_interactive_process,_to_value,_raise_or_return],trace->[graph_trace],invalidate_files->[invalidate_files],graph_len->[graph_len],visualize_graph_to_file->[visualize_graph_to_file],poll_workunits->[poll_workunits],materialize_directories->[materialize_directories,_DirectoriesToMaterialize,_to_value,_raise_or_return],_maybe_visualize->[visualize_graph_to_file],execution_request_literal->[ExecutionRequest,add_root_selection],product_request->[execute,execution_request,_trace_on_error,ExecutionError],execution_request->[execution_request_literal],visualize_rule_graph_to_file->[visualize_rule_graph_to_file],capture_snapshots->[_PathGlobsAndRootCollection,capture_snapshots,_to_value,_raise_
or_return],invalidate_all_files->[invalidate_all_files]]] | Creates a new object of type type id. Initialize a new instance of the RuleSet class. | What does TY mean? Also, normally with MyPy type aliases, you use PascalCase. |
@@ -905,6 +905,7 @@ class WPSEO_Metabox extends WPSEO_Meta {
'worker' => $worker_script_data,
'estimatedReadingTimeEnabled' => $this->estimated_reading_time_conditional->is_met(),
],
+ 'dismissedAlerts' => $dismissed_alerts,
];
if ( post_type_supports( get_post_type(), 'thumbnail' ) ) {
| [WPSEO_Metabox->[enqueue->[current_post_type_has_taxonomies,display_metabox,determine_scope,get_metabox_script_data],get_recommended_replace_vars->[get_metabox_post],get_replace_vars->[get_metabox_post]]] | Enqueue metaboxes for the current page. Adds the script and styles to the administration panel. This function is used to build the script data for the shortcodes. - - - - - - - - - - - - - - - - - -. | `wpseoScriptData` is also loaded in other contexts (Elementor, taxonomies and config). Should this data be added there too? (your reducer would now produce an error already in Elementor without that safety check ) |
@@ -216,7 +216,16 @@ public class NameUtils
return false;
}
- public static String getGlobalPojoTypeName(DataType type)
+ /**
+ * Returns the name of the give top level {@code type}. If the
+ * {@code type}'s {@link DataType#getRawType()} contains the {@link Alias}
+ * annotation, then the {@link Alias#value()} is returned. Otherwise, the raw
+ * type's {@link Class#getName()} is returned
+ *
+ * @param type the {@link DataType} which name you want
+ * @return the name for the given {@code type}
+ */
+ public static String getTopLevelTypeName(DataType type)
{
Alias alias = type.getRawType().getAnnotation(Alias.class);
String name = alias != null ? alias.value() : type.getRawType().getSimpleName();
| [NameUtils->[irregular->[singular,plural],getGlobalPojoTypeName->[hyphenize]]] | Checks if a word is uncountable. | Add "<p\>" before "If the..." |
@@ -255,6 +255,10 @@ func PGPKeyRawToArmored(raw []byte, priv bool) (ret string, err error) {
return
}
+func (k *PGPKeyBundle) SerializePrivate(w io.Writer) error {
+ return k.Entity.SerializePrivate(w, &packet.Config{ReuseSignaturesOnSerialize: !k.Generated})
+}
+
func (k *PGPKeyBundle) EncodeToStream(wc io.WriteCloser, private bool) error {
// See Issue #32
which := "PUBLIC"
| [VerifyString->[VerifyStringAndExtract],CheckFingerprint->[Eq,GetFingerprint],ToIDString->[String],KeysByIdUsage->[toList,KeysByIdUsage],GetFingerprintP->[GetFingerprint],Match->[String],EncodeToStream->[Encode],KeysById->[toList,KeysById],DecryptionKeys->[toList,DecryptionKeys],HumanDescription->[ToKeyID,GetFingerprint],Unlock->[unlockAllPrivateKeys,VerboseDescription],ToKeyValuePair->[ToIDString],ToDisplayString->[ToKeyID,String],ToQuads->[String],StoreToLocalDb->[String],Encode,String] | EncodeToStream encodes the PGP key bundle to the given writer. | Out of curiosity, why does the generated case make more signatures during serialization? |
@@ -4,6 +4,17 @@ class ServiceProviderSessionDecorator
DEFAULT_LOGO = 'generic.svg'.freeze
+ SP_ALERTS = {
+ 'CBP Trusted Traveler Programs' => {
+ i18n_name: 'trusted_traveler',
+ learn_more: 'https://login.gov/help/trusted-traveler-programs/sign-in-doesnt-work/',
+ },
+ 'USAJOBS' => {
+ i18n_name: 'usa_jobs',
+ learn_more: 'https://login.gov/help/',
+ },
+ }.freeze
+
def initialize(sp:, view_context:, sp_session:, service_provider_request:)
@sp = sp
@view_context = view_context
| [ServiceProviderSessionDecorator->[request_url->[url],sp_name->[friendly_name,agency],cancel_link_url->[sign_up_start_url],verification_method_choice->[t],return_to_service_provider_partial->[present?],sp_logo->[logo],sp_agency->[friendly_name,agency],sp_return_url->[present?,return_to_sp_url,decline_redirect_uri,valid?],openid_connect_redirector->[from_request_url],return_to_sp_from_start_page_partial->[present?],new_session_heading->[t],attr_reader,freeze,include,url_helpers]] | Initializes the object with the values specified. | Are we planning on creating a new help page for USA Jobs? Is that why there is no URL configured here? If so, we should wait until that page is ready before merging this, right? |
@@ -111,7 +111,12 @@ class PytestRun(PartitionedTestRunnerTaskMixin, Task):
"it's best to use an absolute path to make it easy to find the subprocess "
"profiles later.")
- register('--options', type=list, fingerprint=True, help='Pass these options to pytest.')
+ register('--options', type=list, fingerprint=True,
+ removal_version='1.7.0.dev0',
+ removal_hint='You can supply py.test options using the generic pass through the args '
+ 'facility. At the end of the pants command line, add `-- <py.test pass'
+ 'through args>`.',
+ help='Pass these options to pytest.')
register('--coverage', fingerprint=True,
help='Emit coverage information for specified packages or directories (absolute or '
| [_Workdirs->[junitxml_path->[target_set_id],files->[files_iter]],PytestRun->[_run_pytest->[junitxml_path,get_pytest_rootdir,_get_failed_targets_from_junitxml,_get_target_from_test,_test_runner,_do_run_tests_with_args],_maybe_emit_coverage_data->[compute_coverage_pkgs->[packages->[package],packages],_cov_setup,compute_coverage_pkgs,ensure_trailing_sep,coverage_run],fingerprint_strategy->[NeverCacheFingerprintStrategy],collect_files->[files],_expose_results->[junitxml_path,target_set_id],_add_plugin_config->[_ensure_section],partitions->[iter_partitions_with_args->[for_partition,iter_partitions]],_get_failed_targets_from_junitxml->[_map_relsrc_to_targets],_generate_coverage_config->[_format_string_list,_add_plugin_config,_ensure_section],_conftest->[_get_conftest_content],_scrub_cov_env_vars->[_is_coverage_env_var],_get_shard_conftest_content->[InvalidShardSpecification],_get_target_from_test->[_map_relsrc_to_targets],_test_runner->[_conftest,_maybe_emit_coverage_data],_get_conftest_content->[_get_shard_conftest_content],_cov_setup->[_generate_coverage_config,_scrub_cov_env_vars]]] | Register options for the command line tool. | s/pass through the args/pass through args/ |
@@ -106,7 +106,7 @@ GCP_REQUIREMENTS = [
'proto-google-cloud-datastore-v1==0.90.0',
'googledatastore==7.0.0',
# GCP packages required by tests
- 'google-cloud-bigquery>=0.22.1,<0.23',
+ 'google-cloud-bigquery>=0.23.0,<1.0.0',
]
| [get_version->[open,exec],find_packages,get_distribution,system,get_version,format,warn,cythonize,setup,StrictVersion] | Return a tuple of int64_t if the package is present in the system. Package hierarchy for all packages. | Please put the upper bound to `0.24.0`. With major version `0` it is possible to introduce breaking changes with minor version changes. |
@@ -213,7 +213,6 @@ class Trainer:
self._last_log = time.time()
batch_num = 0
- logger.info("Training")
for batch in train_generator_tqdm:
batch_num += 1
self._optimizer.zero_grad()
| [Trainer->[train->[_validation_loss,_update_learning_rate,_should_stop_early,_metrics_to_tensorboard,_enable_gradient_clipping,_metrics_to_console,_train_epoch,_get_metrics],_validation_loss->[_get_metrics,_batch_loss],__init__->[TensorboardWriter],_metrics_to_tensorboard->[add_validation_scalar,add_train_scalar],_train_epoch->[_rescale_gradients,add_train_scalar,_get_metrics,_batch_loss],from_params->[Trainer,from_params]]] | Trains one epoch and returns metrics. Adds the train loss and metrics to the tensorboard. | I would keep this here, as we have progress bars for training and validation separately, and this makes it more obvious which one is which. |
@@ -79,7 +79,7 @@ func main() {
}
}
- encData, err := asset.EncodeData(string(data))
+ encData, err := asset.EncodeData(strings.Replace(string(data), "\r", "", -1))
if err != nil {
fmt.Fprintf(os.Stderr, "Error encoding the data: %s\n", err)
os.Exit(1)
| [Write,StringVar,Fprintf,Fprintln,Execute,NewReader,Args,ReadFile,EncodeData,Find,Source,Parse,Bytes,ReadAll,Exit,WriteFile] | Reads the data from the specified file and writes it to the specified file or stdout. | Do you want to add a note here on why it's here, to make sure that if someone touches the code in the future they know why?
@@ -28,6 +28,10 @@ public final class FormatOptions {
return new FormatOptions(word -> true);
}
+ public static FormatOptions noEscape() {
+ return new FormatOptions(word -> false);
+ }
+
/**
* Construct instance.
*
| [FormatOptions->[none->[FormatOptions],of->[FormatOptions],escape->[isReservedWord]]] | Returns a FormatOptions object that is a no - op if the format is not supported. | Probably worth a java doc that this is dangerous to use in anything other than in feedback to the user, e.g. error messages |
@@ -33,6 +33,11 @@ class ShowCommand(Command):
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
+ self.cmd_opts.add_option(
+ '--json',
+ action='store_true',
+ default=False,
+ help='Show full output in JSON format.')
self.parser.insert_option_group(0, self.cmd_opts)
| [ShowCommand->[__init__->[add_option,insert_option_group,super],run->[warning,print_results,search_packages_info]],print_results->[get,strip,join,requires,info,enumerate],search_packages_info->[requires,join,startswith,get_metadata,append,len,sorted,isinstance,feed,close,canonicalize_name,relpath,split,FeedParser,get,strip,has_metadata,get_metadata_lines,splitlines],getLogger] | Initialize the command line interface. | I'm thinking me might want to stick to `--format json` like for the `pip list` command. cc @pypa/pip-committers |
@@ -916,8 +916,11 @@ namespace rpc
if (matched_handler == std::end(handlers) || matched_handler->method_name != request_type)
return BAD_REQUEST(request_type, req_full.getID());
- std::string response = matched_handler->call(*this, req_full.getID(), req_full.getMessage());
- MDEBUG("Returning RPC response: " << response);
+ epee::byte_slice response = matched_handler->call(*this, req_full.getID(), req_full.getMessage());
+
+ const boost::string_ref response_view{reinterpret_cast<const char*>(response.data()), response.size()};
+ MDEBUG("Returning RPC response: " << response_view);
+
return response;
}
catch (const std::exception& e)
| [operator<->[c_str],getBlockHeaderByHash->[front,,size,get_block_by_hash,get_blockchain_storage,get_current_blockchain_height,typeid],handleTxBlob->[AUTO_VAL_INIT,handle_incoming_tx,empty,MERROR,get_protocol,push_back,get_payload_object],handle_message->[fromJson,handle],handle->[,get_public_gray_peers_count,get_is_background_mining_enabled,are_key_images_spent_in_pool,getMessage,size,mlog_set_log_level,get_alternative_blocks_count,what,get_account_address_from_str,get_nettype,get_total_transactions,get_difficulty_for_next_block,get_current_hard_fork_version,get_public_outgoing_connections_count,LOG_PRINT_L0,BAD_JSON,parse_and_validate_block_from_blob,get_block_id_by_height,reserve,handleTxBlob,end,begin,empty,erase,get_current_blockchain_height,push_back,get_target_blockchain_height,get_public_connections_count,MDEBUG,emplace,get_tail_id,get_output_distribution,get_transactions,get_mining_address,get_blockchain_storage,back,get_pool_transactions,get_speed,MERROR,get_pool_for_rpc,get_ideal_hard_fork_version,emplace_back,get_transaction_hash,getID,parse_and_validate_tx_from_blob,call,get_miner,get_hard_fork_voting_info,get_start_time,get_threads_count,resize,are_key_images_spent,get_difficulty_target,get_public_white_peers_count,is_mining,get_tx_outputs_gindexs,get_pool_transactions_count,getBlockHeaderByHash,get_account_address_as_str,BAD_REQUEST,get_hard_fork_state,find_blockchain_supplement,clear,getRequestType]] | Handle a single RPC request. | Could be moved inside `MDEBUG(...)`, so it doesn't get evaluated of debug logs are disabled. |
@@ -1512,11 +1512,7 @@ namespace ProtoCore
}
StackValue ret = core.Heap.AllocateArray(retSVs, null);
-#if GC_MARK_AND_SWEEP
- core.AddCallSiteGCRoot(callsiteID, ret);
-#else
GCUtils.GCRetain(ret, core);
-#endif
return ret;
}
else
| [CallSite->[FunctionEndPoint->[GetCandidateFunctions],WillCallReplicate->[GetCandidateFunctions],StackValue->[UpdateCallsiteExecutionState,ComputeFeps,ArgumentSanityCheck,IsFunctionGroupAccessible],TraceSerialiserHelper->[GetObjectData->[GetObjectData]],SingleRunTraceData->[GetObjectData->[GetObjectData],RecursiveGetNestedData->[RecursiveGetNestedData],Contains->[Contains]]]] | This method is a slow path to execute a single - run function with a single - run This method is called to build the list of call - parameters and the list of formal parameters This method is called when a new object is created. | Because the callsite still handles the objects the same way, is my assumption correct that when Marksweep is on, refcount is also working along with it? At least for this specific case
@@ -162,8 +162,8 @@ func (b *Botanist) DeploySeedMonitoring(ctx context.Context) error {
alertManagerValues, err := b.InjectSeedShootImages(map[string]interface{}{
"ingress": map[string]interface{}{
- "basicAuthSecret": basicAuth,
- "host": b.Seed.GetIngressFQDN("a", b.Shoot.Info.Name, b.Garden.Project.Name),
+ "basicAuthSecret": basicAuthUsers,
+ "host": b.Seed.GetIngressFQDN("a-users", b.Shoot.Info.Name, b.Garden.Project.Name),
},
"replicas": b.Shoot.GetReplicas(1),
"storage": b.Seed.GetValidVolumeSize("1Gi"),
| [deployGrafanaCharts->[Join,InjectSeedShootImages,Sprintf,GetReplicas,ApplyChartSeed,GetIngressFQDN],DeleteSeedMonitoring->[IgnoreNotFound,Delete,Client],DeploySeedMonitoring->[CreateSHA1Secret,ComputePrometheusHost,GetNodeNetwork,InjectSeedShootImages,GetServiceNetwork,GetIngressFQDN,GetAPIServerDomain,GetPodNetwork,deployGrafanaCharts,Enabled,RESTConfig,GetReplicas,ComputeClusterIP,GetSecretKeysOfRole,Join,GetNodeCount,Sprintf,DeleteAlertmanager,ApplyChartSeed,Client,GetValidVolumeSize]] | DeploySeedMonitoring deploys seed monitoring InjectSeedShootConfig injects seed images into seed. Info. Spec. Cloud. Deploy the kube - state - metrics image into the shoot namespace Injects alert manager images and applies chart seed alertmanager. | What was changed here? I can't see that the variable has changed in this PR. Can we choose a shorter ingress name? I would suggest `au` instead of `a-users`, or why not simply stay with `a`?
@@ -554,6 +554,8 @@ class _JsonToDictCoder(coders.Coder):
for x in table_field_schemas]
def decode(self, value):
+ if isinstance(value, bytes):
+ value = value.decode('utf-8')
value = json.loads(value)
return self._decode_with_schema(value, self.fields)
| [RowAsDictJsonCoder->[RowAsDictJsonCoder],BigQueryReader->[BigQueryReader],_JsonToDictCoder->[_decode_with_schema->[_decode_with_schema],_convert_to_tuple->[_convert_to_tuple]],_StreamToBigQuery->[expand->[InsertIdPrefixFn,BigQueryWriteFn]],WriteToBigQuery->[expand->[_compute_method,_StreamToBigQuery],table_schema_to_dict->[get_table_field->[get_table_field],get_table_field],__init__->[validate_write,validate_create],get_dict_table_schema->[table_schema_to_dict,get_table_schema_from_string],display_data->[format],get_table_schema_from_string->[split]],BigQuerySink->[display_data->[format],schema_as_json->[schema_list_as_object->[schema_list_as_object],schema_list_as_object],writer->[BigQueryWriter],__init__->[RowAsDictJsonCoder,validate_write,split,validate_create]],default_encoder->[default_encoder],_to_bytes->[encode],parse_table_schema_from_json->[parse_table_schema_from_json],BigQueryWrapper->[BigQueryWrapper],_CustomBigQuerySource->[split->[BigQueryWrapper],estimate_size->[BigQueryWrapper]],BigQuerySource->[reader->[BigQueryReader],__init__->[RowAsDictJsonCoder]],BigQueryWriteFn->[start_bundle->[_reset_rows_buffer,BigQueryWrapper],_create_table_if_needed->[get_table_schema],get_table_schema->[parse_table_schema_from_json],process->[_create_table_if_needed]],BigQueryWriter->[BigQueryWriter],_ReadFromBigQuery->[expand->[RemoveJsonFiles,_CustomBigQuerySource,_get_destination_uri,_PassThroughThenCleanup],_validate_gcs_location->[format],_get_destination_uri->[format]]] | Decodes the value of a . | Is the value here always `bytes`? If so, do we need the if? In general it is strongly discouraged to have apis that take both bytes and strings in Py3 sense. |
@@ -84,7 +84,10 @@ class BooleanAccuracy(Metric):
-------
The accumulated accuracy.
"""
- accuracy = float(self._correct_count) / float(self._total_count)
+ if self._total_count > 1e-12:
+ accuracy = float(self._correct_count) / float(self._total_count)
+ else:
+ accuracy = 0.0
if reset:
self.reset()
return accuracy
| [BooleanAccuracy->[__call__->[,unwrap_to_tensors,size,sum,eq,ValueError,ones,view],get_metric->[float,reset]],register] | Returns the accumulated accuracy. | Can't you just test this against 0? |
@@ -381,10 +381,10 @@ function getSentinel_(iframe, opt_is3P) {
*/
function parseIfNeeded(data) {
const shouldBeParsed = typeof data === 'string'
- && data.charAt(0) === '{';
+ && data.indexOf('amp-') == 0;
if (shouldBeParsed) {
try {
- data = JSON.parse(data);
+ data = deserializeMessage(data);
} catch (e) {
dev().warn('IFRAME-HELPER', 'Postmessage could not be parsed. ' +
'Is it in a valid JSON format?', e);
| [No CFG could be retrieved] | Provides a postMessage API for a message sent to the target. The constructor for a window object. | Is this change needed right now? No code except IframeMessagingClient is sending messages in this format yet. I would expect a deprecation of this whole method in your refactoring CL.
@@ -160,11 +160,11 @@ public class ColumnSelectorBitmapIndexSelector implements BitmapIndexSelector
public boolean hasMultipleValues(final String dimension)
{
if (isVirtualColumn(dimension)) {
- return virtualColumns.getVirtualColumn(dimension).capabilities(dimension).hasMultipleValues();
+ return virtualColumns.getVirtualColumn(dimension).capabilities(dimension).hasMultipleValues().isMaybeTrue();
}
final ColumnHolder columnHolder = index.getColumnHolder(dimension);
- return columnHolder != null && columnHolder.getCapabilities().hasMultipleValues();
+ return columnHolder != null && columnHolder.getCapabilities().hasMultipleValues().isMaybeTrue();
}
@Override
| [ColumnSelectorBitmapIndexSelector->[getDimensionValues->[close->[close],iterator->[iterator]],hasMultipleValues->[hasMultipleValues],getBitmapIndex->[getBitmap->[getNumRows],getNumRows,getBitmapIndex,getIndex,getBitmap]]] | Checks if the dimension has multiple values. | IMO, better if this returns a `Capable`. Then the caller can decide what to do with unknowns. |
@@ -27,7 +27,7 @@ type Reader interface {
}
const (
- secondsInYear = int64(31_557_600)
+ secondsInYear = int64(31557600)
)
var (
| [Mul,Info,Add,CurrentHeader,Cmp,New,Sub,Logger,Time,Sign,Uint64,ParentHash,Config,Msg,ZeroDec,NewInt,Div,Epoch,GetHeader,Number] | apr import imports a block header from the database by hash and number. pastTwoEpochHeaders returns the current header and the next header in the block. | Why change this line?
@@ -385,6 +385,8 @@ func (f *StoreStateFilter) anyConditionMatch(typ int, opt *config.PersistOptions
case regionTarget:
funcs = []conditionFunc{f.isTombstone, f.isOffline, f.isDown, f.isDisconnected, f.isBusy,
f.exceedAddLimit, f.tooManySnapshots, f.tooManyPendingPeers}
+ case scatterRegionTarget:
+ funcs = []conditionFunc{f.isTombstone, f.isOffline, f.isDown, f.isDisconnected}
}
for _, cf := range funcs {
if cf(opt, store) {
| [Source->[anyConditionMatch],Target->[anyConditionMatch]] | anyConditionMatch returns true if any condition matches the given type. | If the target store is chosen by `scatter`, we can ignore some limitation like `exceedAddLimit` |
@@ -18,6 +18,11 @@ Examples:
------------------------------------------
$ prefect create project 'default'
$ prefect register --project default -m prefect.hello_world
+
+
+ Run this flow with the Prefect backend and agent
+ ------------------------------------------
+ $ prefect run --name "hello-world" --watch
"""
from prefect import task, Flow, Parameter
| [capitalize->[capitalize],say_hello->[print],task,say_hello,Flow,capitalize,Parameter] | Provide a function to print a message if the is not present in the system. | Is there a way to make it clearer that an agent must be running for this to work?
@@ -0,0 +1,13 @@
+require 'spec_helper'
+
+describe AssignmentFile do
+ context 'a good AssignmentFile model' do
+ it { is_expected.to belong_to(:assignment) }
+ it { is_expected.to validate_presence_of(:filename ) }
+ it { is_expected.to validate_uniqueness_of(:filename)
+ .scoped_to(:assignment_id) }
+ it { is_expected.to allow_value('est.java').for(:filename) }
+ it { is_expected.not_to allow_value('est?java').for(:filename) }
+ it { is_expected.not_to allow_value('&*%word^!').for(:filename) }
+ end
+end
\ No newline at end of file
| [No CFG could be retrieved] | No Summary Found. | Expression at 8, 52 should be on its own line. |
@@ -138,6 +138,12 @@ class Amp3QPlayer extends AMP.BaseElement {
}
}
+ /**
+ *
+ * @param {!Event} event
+ * @private
+ * @return {void}
+ */
sdnBridge_(event) {
if (event.source) {
if (event.source != this.iframe_.contentWindow) {
| [No CFG could be retrieved] | Callback for video layout. Post a message to the video. | `return {void}` not needed right? |
@@ -615,6 +615,9 @@ module.exports = class bittrex extends Exchange {
}
throw e;
}
+ if (!response['result']) {
+ throw new OrderNotFound (this.id + ' order ' + id + ' not found');
+ }
return this.parseOrder (response['result']);
}
| [No CFG could be retrieved] | Get a single order by id or symbol Get deposit address by ID. | Hi! What do they reply instead, when `result` is missing? Can you please post their verbose response for that case or tell how to reproduce it? |
@@ -147,7 +147,8 @@ abstract class CommandWithTranslation extends \WP_CLI_Command {
\WP_CLI::success( "Language installed." );
if ( \WP_CLI\Utils\get_flag_value( $assoc_args, 'activate' ) ) {
- $this->activate( array( $language_code ), array() );
+ $set_date_time = \WP_CLI\Utils\get_flag_value( $assoc_args, 'set-date-time' );
+ $this->activate( array( $language_code ), array( 'set-date-time' => $set_date_time ) );
}
} else {
\WP_CLI::error( $response );
| [CommandWithTranslation->[uninstall->[get_installed_languages]]] | Installs a language pack. | Let's only pass `array( 'set-date-time' => $set_date_time )` when it's a truthy value. |
@@ -73,6 +73,15 @@ class PrinterOutputModel(QObject):
self._type = type
self.typeChanged.emit()
+ @pyqtProperty(str, notify = buildplateChanged)
+ def buildplate(self):
+ return self._buildplate_name
+
+ def updateBuildplate(self, buildplate_name):
+ if self._buildplate_name != buildplate_name:
+ self._buildplate_name = buildplate_name
+ self.buildplateChanged.emit()
+
@pyqtProperty(str, notify=keyChanged)
def key(self):
return self._key
| [PrinterOutputModel->[cancelPreheatBed->[cancelPreheatBed],setHeadX->[updateHeadPosition,setHeadPosition],homeBed->[homeBed],homeHead->[homeHead],preheatBed->[preheatBed],setHeadZ->[updateHeadPosition,setHeadPosition],moveHead->[moveHead],setTargetBedTemperature->[updateTargetBedTemperature,setTargetBedTemperature],setHeadY->[updateHeadPosition,setHeadPosition],setHeadPosition->[updateHeadPosition,setHeadPosition]]] | Update the type and key of the object. | Naming is a bit vague, maybe something like setBuildplateName? |
@@ -684,14 +684,14 @@ export class AmpAnalytics extends AMP.BaseElement {
}
/**
- * @param {!Object<string, Object<string, string|Array<string>>>} source1
- * @param {!Object<string, Object<string, string|Array<string>>>} source2
+ * @param {!JsonObject|!./events.AnalyticsEvent} source1
+ * @param {!JsonObject} source2
* @param {number=} opt_iterations
* @param {boolean=} opt_noEncode
* @return {!ExpansionOptions}
*/
expansionOptions_(source1, source2, opt_iterations, opt_noEncode) {
- const vars = map();
+ const vars = dict();
mergeObjects(this.config_['vars'], vars);
mergeObjects(source2['vars'], vars);
mergeObjects(source1['vars'], vars);
| [No CFG could be retrieved] | Expands the template with url parameters and returns a promise that resolves to the identifier of the. | mergeObject requires an `Object`. Does `JsonObject` and `Object` equals in type check? @erwinmombay do we want to avoid using object in the future? |
@@ -129,7 +129,6 @@ class MetaTwigExtension extends \Twig_Extension
// build meta tags
$result = array();
- $result[] = $this->getMeta('title', $seo['title']);
$result[] = $this->getMeta('description', $seo['description']);
$result[] = $this->getMeta('keywords', $seo['keywords']);
$result[] = $this->getMeta('robots', strtoupper(implode(', ', $robots)));
| [MetaTwigExtension->[getSeoMetaTags->[getMeta],getAlternateLinks->[getLocalization,getKey,getDefaultLocalization,getPortal,getAlternate],getAlternate->[getContentPath]]] | Get seo meta tags. | Not a valid W3C meta tag. Search engines use the title tag.
@@ -332,8 +332,15 @@ class ProductForm(forms.ModelForm, AttributesMixin):
def save(self, commit=True):
attributes = self.get_saved_attributes()
self.instance.attributes = attributes
+
+ price = self.cleaned_data["price"]
+ if not price.currency:
+ price.currency = settings.DEFAULT_CURRENCY
+ self.instance.price = price
+
instance = super().save()
instance.collections.clear()
+
for collection in self.cleaned_data["collections"]:
instance.collections.add(collection)
update_product_minimal_variant_price_task.delay(instance.pk)
| [ProductForm->[save->[get_saved_attributes],__init__->[prepare_fields_for_attributes],RichTextField],ProductVariantForm->[save->[get_saved_attributes],__init__->[prepare_fields_for_attributes]],ReorderProductImagesForm->[save->[save]],AttributesMixin->[get_saved_attributes->[save]],ReorderAttributeValuesForm->[save->[save]],VariantBulkDeleteForm->[delete->[delete]],CachingModelChoiceField->[_get_choices->[CachingModelChoiceIterator]]] | Save the object and all of the collections in the object. | How could it happen that the price has `None` as currency?
@@ -57,16 +57,15 @@ if ($full_view) {
$params = [
'collection' => $collection,
- 'metadata' => $menu,
+ 'metadata' => false,
'title' => $title,
'subtitle' => $subtitle,
'content' => $content,
];
-echo elgg_view('object/elements/summary/metadata', $params);
echo elgg_view('object/elements/summary/title', $params);
echo elgg_view('object/elements/summary/subtitle', $params);
echo elgg_format_element('div', [
'class' => 'elgg-body clearfix',
- ], $content);
+], $content);
| [getSection,getMenu,getName,getURL,getMembers,addLinkClass] | Renders a summary element. | This is not the correct way to fix this issue. `$menu` is still used in the listing view of a collection and still needs to be displayed. In the `if` statement which checks `$full_view` `$metadata` needs to be set to `false` (around line 41) |
@@ -198,3 +198,15 @@ func genCompleter(cmd *cobra.Command) []readline.PrefixCompleterInterface {
}
return pc
}
+
+// ReadStdin convert stdin to string array
+func ReadStdin(r io.Reader) (input []string, err error) {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ if s := strings.TrimSpace(string(b)); len(s) > 0 {
+ input = strings.Split(s, " ")
+ }
+ return input, nil
+}
| [SetArgs,NewMemberCommand,ParseFlags,GetBool,StringP,NewServiceGCSafepointCommand,NewConfigCommand,Close,MarkHidden,NewPluginCommand,Exit,Set,NewEx,NewExitCommand,NewHotSpotCommand,LocalFlags,PrintPDInfo,NewLabelCommand,AddCommand,BoolP,FlagUsages,NewPrefixCompleter,Execute,Readline,NewOperatorCommand,NewUnsafeCommand,InitHTTPSClient,Split,NewTSOCommand,Printf,NewPingCommand,NewCompletionCommand,Println,Commands,NewSchedulerCommand,NewClusterCommand,NewHealthCommand,HasFlags,NewStoreCommand,GetString,SetOutput,String,Parse,NewRegionCommand,NewLogCommand,VisitAll,NewStoresCommand,PcItem,PersistentFlags,Flags,Trim] | pc }. | Since the name of it is `ReadStdin`, I think there is no need to pass the `os.Stdin` through a parameter? |
@@ -390,6 +390,10 @@ public class HttpServerInventoryView implements ServerInventoryView, FilteredSer
synchronized (servers) {
DruidServerHolder holder = servers.get(server.getName());
if (holder == null) {
+ if (!finalPredicate.apply(Pair.of(server.getMetadata(), null))) {
+ log.debug("Server[%s] is not added due to not match filter.", server.getName());
+ return;
+ }
log.info("Server[%s] appeared.", server.getName());
holder = new DruidServerHolder(server);
servers.put(server.getName(), holder);
| [HttpServerInventoryView->[scheduleSyncMonitoring->[serverAdded,serverRemoved],getDebugInfo->[getDebugInfo],DruidServerHolder->[start->[start],addSegment->[apply,runSegmentCallbacks],stop->[stop],removeSegment->[runSegmentCallbacks]],serverRemoved->[runServerCallbacks,stop],serverInventoryInitialized->[runSegmentCallbacks],serverAdded->[start]]] | Adds a server to the list of servers. | This makes `rhs` of the predicate argument nullable now, but I believe nullable argument can make things complicated. How about adding a new interface like `registerSegmentCallback(Executor exec, SegmentCallback callback, Predicate<DruidServerMetadata> predicate)` to `FilteredServerInventoryView`? |
@@ -76,15 +76,14 @@ func (e *defaultExecutor) ExecCommandInContainerWithFullOutput(ctx context.Conte
// execWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
-func execWithOptions(ctx context.Context, options execOptions) (stdout, stderr string, err error) {
+func execWithOptions(options execOptions) (stdout, stderr string, err error) {
const tty = false
req := options.client.Kubernetes().CoreV1().RESTClient().Post().
Resource("pods").
Name(options.podName).
Namespace(options.namespace).
SubResource("exec").
- Param("container", options.containerName).
- Context(ctx)
+ Param("container", options.containerName)
req.VersionedParams(&corev1.PodExecOptions{
Container: options.containerName,
Command: options.command,
| [Context,RESTClient,Stream,NewSPDYExecutor,Kubernetes,Resource,CoreV1,Post,Name,Param,VersionedParams,String,Namespace,SubResource,URL,RESTConfig,TrimSpace] | NewExecutor returns a new Executor that runs a command in the specified container. Stream streams the given streams to the remote host. | Why is that removed? |
@@ -150,7 +150,7 @@ public class IntegrationComponentScanRegistrar implements ImportBeanDefinitionRe
}
protected Collection<String> getBasePackages(AnnotationMetadata importingClassMetadata,
- @SuppressWarnings("unused") BeanDefinitionRegistry registry) {
+ BeanDefinitionRegistry registry) {
Map<String, Object> componentScan =
importingClassMetadata.getAnnotationAttributes(IntegrationComponentScan.class.getName());
| [IntegrationComponentScanRegistrar->[invokeAwareMethods->[setResourceLoader,setEnvironment],registerBeanDefinitions->[setResourceLoader,registerBeanDefinitions]]] | Get the base packages from the annotation. | ? It is still unused (this was added to suppress a Sonar issue). |
@@ -161,4 +161,17 @@ public class InterpreterGroup {
public int hashCode() {
return id != null ? id.hashCode() : 0;
}
+
+ public void close() {
+ for (List<Interpreter> session : sessions.values()) {
+ for (Interpreter interpreter : session) {
+ try {
+ interpreter.close();
+ } catch (InterpreterException e) {
+ LOGGER.warn("Fail to close interpreter: " + interpreter.getClassName(), e);
+ }
+ }
+ }
+ sessions.clear();
+ }
}
| [InterpreterGroup->[addInterpreterToSession->[put,get],equals->[equals],values->[values],hashCode->[hashCode],get->[get],put->[put],isEmpty->[isEmpty]]] | Returns the hashCode of this node. | would the exception stack be useful? just to LOGGER.warn( .... , e);? |
@@ -159,7 +159,7 @@ namespace TwoMGFX
{
private SamplerState _state;
- private bool _dirty;
+ private bool _dirty = true;
private TextureFilterType _minFilter;
private TextureFilterType _magFilter;
| [PassInfo->[ValidateShaderModels->[ParseShaderModel]],ShaderInfo->[],SamplerStateInfo->[Parse->[Parse],UpdateSamplerState]] | Initialize a sampler state object. Name - Gets the name of the filter. | This is most likely wrong. It would cause 2MGFX to always write sampler state to the effect and always overwrite the sampler set from code. This is not the normal behavior of XNA Effect. |
@@ -29,8 +29,6 @@ const (
)
type RegistryComponentOptions struct {
- ClusterAdminKubeConfig *rest.Config
-
OCImage string
MasterConfigDir string
Images string
| [Install->[DiscardContainer,AddSCCToServiceAccount,NewForConfig,LogContainer,Bind,New,HostPid,Errorf,Services,NewHelper,NewError,Join,Infof,Output,Privileged,Name,Get,Entrypoint,Command,HostNetwork,Image,Sprintf,Core,IsNotFound,NewRunHelper,WithCause]] | Imports a component from the registry Check if the registry service is not found. | Can we call this OriginImage ? |
@@ -38,7 +38,7 @@ class Sqlite(AutotoolsPackage):
'(unsafe for <3.26.0.0 due to Magellan).')
variant('rtree', default=False, description='Build with Rtree module')
- variant('column_metadata', default=False, description="Build with COLUMN_METADATA")
+ variant('column_metadata', default=True, description="Build with COLUMN_METADATA")
# See https://blade.tencent.com/magellan/index_en.html
conflicts('+fts', when='@:3.25.99.99')
| [Sqlite->[url_for_version->[list,Version,len,str,ValueError,format,join],libs->[find_libraries],get_arch->[str,platform,target,Arch],build_libsqlitefunctions->[install,Executable,cc],configure_args->[append,get_arch,extend],depends_on,resource,conflicts,version,patch,variant,run_after]] | Get all the versions of a single object. Creates an instance of the extension - functions. c resource. | Is this change necessary? |
@@ -83,9 +83,13 @@ public class IngestSegmentFirehoseFactory implements FirehoseFactory<InputRowPar
public IngestSegmentFirehoseFactory(
@JsonProperty("dataSource") final String dataSource,
@JsonProperty("interval") Interval interval,
+ // Specifying "segments" is intended only for when this FirehoseFactory has split itself,
+ // not for direct end user use.
+ @JsonProperty("segments") List<WindowedSegmentId> segmentIds,
@JsonProperty("filter") DimFilter dimFilter,
@JsonProperty("dimensions") List<String> dimensions,
@JsonProperty("metrics") List<String> metrics,
+ @JsonProperty("maxInputSegmentBytesPerTask") Long maxInputSegmentBytesPerTask,
@JacksonInject IndexIO indexIO,
@JacksonInject CoordinatorClient coordinatorClient,
@JacksonInject SegmentLoaderFactory segmentLoaderFactory,
| [IngestSegmentFirehoseFactory->[getUniqueMetrics->[getMetrics],connect->[apply->[]],getUniqueDimensions->[getDimensions]]] | Creates an instance of FirehoseFactory which creates a Firehose instance for the given data source Get the data source for a given node id. | Would you please add `@Nullable` for `interval` and `segmentIds`? |
@@ -1219,8 +1219,9 @@ module.exports = class extends Generator {
const typeImports = new Map();
relationships.forEach(relationship => {
const relationshipType = relationship.relationshipType;
+ const otherEntityIsEmbedded = relationship.otherEntityIsEmbedded;
let toBeImported = false;
- if (relationshipType === 'one-to-many' || relationshipType === 'many-to-many') {
+ if (relationshipType === 'one-to-many' || relationshipType === 'many-to-many' || otherEntityIsEmbedded) {
toBeImported = true;
} else if (dto === 'no') {
toBeImported = true;
| [No CFG could be retrieved] | Generate Entity Client Imports Generate Entity Client Enum Imports. | otherEntityIsEmbedded is used only once, use relationship.otherEntityIsEmbedded instead |
@@ -653,10 +653,10 @@ void GenericCAO::initialize(const std::string &data)
pos_translator.init(m_position);
updateNodePos();
- if(m_is_player)
- {
- LocalPlayer *player = m_env->getPlayer(m_name.c_str());
- if (player && player->isLocal()) {
+ if (m_is_player) {
+ // Check if it's the current player
+ LocalPlayer *player = m_env->getLocalPlayer();
+ if (player && strcmp(player->getName(), m_name.c_str()) == 0) {
m_is_local_player = true;
m_is_visible = false;
LocalPlayer* localplayer = player;
| [sharpen->[init],step->[getSceneNode,update,removeFromScene,translate,setAttachments,addToScene,updateNodePos,getParent,getPosition],updateLight->[getParent], ClientActiveObject->[getType],processMessage->[updateTexture,update,init,updateTexturePos,updateTextures,updateAnimation,initialize,updateNodePos,updateAttachments,getParent,updateBonePosition,updateInfoText],initialize->[updateNodePos,updateInfoText,processMessage,init],addToScene->[updateNodePos,getSceneNode],updateNodePos->[getSceneNode,getParent],updateAttachments->[getSceneNode,getParent], ClientActiveObject->[getType],directReportPunch->[updateTextures]] | Initialize a node by reading the init data. add player to CAO. | how is this code cleaned up? |
@@ -163,6 +163,15 @@ func NewValidator(ctx context.Context, input *data.Data) (*Validator, error) {
op.Debugf("new validator Session.Populate: %s", err)
}
+ if v.Session.VMFolder == nil {
+ op.Debugf("Failed to set validator session VM folder")
+ // it's possible that VMFolder is not set, but session.Populate doesn't return any error
+ if err == nil {
+ err = errors.New("validator Session.Populate: no datacenter folder (nil) is found")
+ }
+ return nil, err
+ }
+
if strings.Contains(sessionconfig.DatacenterPath, "/") {
detail := "--target should only specify datacenter in the path (e.g. https://addr/datacenter) - specify cluster, resource pool, or folder with --compute-resource"
op.Error(detail)
| [basics->[NoteIssue],getDatastore->[NoteIssue],registries->[NoteIssue],certificate->[NoteIssue],Validate->[ListIssues,datacenter],sessionValid->[checkSessionSet,NoteIssue],suggestDatacenter->[ListDatacenters],managedbyVC->[NoteIssue],credentials->[NoteIssue],certificateAuthorities->[NoteIssue],ValidateTarget->[ListIssues,datacenter],reachableRegistries->[NoteIssue],compatibility->[sessionValid,NoteIssue],getAllDatastores->[getDatastore],syslog->[NoteIssue],ConfigureVCenter->[sessionValid,IsVC],checkDatastoresAreWriteable->[getAllDatastores,NoteIssue]] | Accept a host and return a valid object. ParseURL parses a URL string into a URL object. | I think this may cause issues when doing a `vic-machine ls` with multiple datacenters. I suggest delaying any further work on this until @matthewavery has delivered the inventory folder support (#773) and then revisting it. |
@@ -51,12 +51,3 @@ class ServerInfoAPI(PulpAPI):
"""
path = self.base_path + 'distributors/'
return self.server.GET(path)
-
- def ping(self):
- """
- Retrieves basic status information from the server.
-
- @return: Response
- """
- path = '/v2/services/status/'
- return self.server.GET(path)
\ No newline at end of file
| [ServerInfoAPI->[ping->[GET],get_types->[GET],get_importers->[GET],__init__->[super],get_distributors->[GET]]] | Returns the list and descriptions of all installed distributors types installed on the server. | Since the bindings could be considered part of our API, we shouldn't remove this until 3.0.0. However, we could insert a DeprecationWarning. |
@@ -146,6 +146,14 @@ namespace Content.Server.GameObjects.Components.Power.ApcNetComponents
HasApcPower = false;
}
+ public bool TryGetWireNet([NotNullWhen(true)] out INodeGroup nodeGroup)
+ {
+ var wireNet = Provider.GetWireNet();
+
+ nodeGroup = wireNet;
+ return wireNet != default;
+ }
+
private void SetProvider(IPowerProvider newProvider)
{
_provider.RemoveReceiver(this);
| [PowerReceiverComponent->[AnchorUpdate->[ClearProvider,TryFindAndSetProvider],OnRemove->[OnRemove],ExposeData->[ExposeData],SetPowerReceptionRange->[TryFindAndSetProvider,ClearProvider],Startup->[Startup]]] | ClearProvider - Clear the provider. | This method doesn't use any non-public information, and is only used by one other class, so I don't think this method should be on this class. Also, you may want to check NeedsProvider, otherwise you may return an instance of PowerProviderComponent.NullProvider which may cause problems with whatever uses the returned value. |
@@ -376,4 +376,15 @@ public class ProgramState {
});
return fieldValues;
}
+
+ public ProgramState addRelation(SymbolicValueRelation constraint) {
+ List<SymbolicValueRelation> newRelations = new ArrayList<>(symbolicValueRelations);
+ newRelations.add(constraint);
+ return new ProgramState(this, newRelations);
+ }
+
+ @CheckForNull
+ public Boolean isFulfilled(SymbolicValueRelation relationToFulfill) {
+ return relationToFulfill.impliedBy(symbolicValueRelations);
+ }
}
| [ProgramState->[addConstraint->[ProgramState],inStack->[equals],cleanupConstraints->[ProgramState,inStack,isDisposable],equals->[peekValue,equals],isDisposable->[isDisposable],visitedPoint->[put,ProgramState],canReach->[isReachable],getFieldValues->[accept->[isField]],unstackValue->[ProgramState,Pop],cleanupDeadSymbols->[ProgramState,inStack,isDisposable,isLocalVariable,decreaseReference],stackValue->[ProgramState],getValuesWithConstraints->[accept->[put]],hashCode->[peekValue],increaseReference->[put],put->[put,ProgramState],toString->[toString],resetFieldValues->[put,increaseReference,ProgramState,decreaseReference],decreaseReference->[put],ProgramState]] | Get the field values. | missing @CheckForNull annotation. |
@@ -482,3 +482,16 @@ func queryElasticSearchComplianceResourceRunReport(client *elastic.Client, start
}
fmt.Println("The details of the runs can be found in : ", filename)
}
+
+func errorMessage(message string, err error) {
+ if err != nil {
+ fmt.Println(message, err)
+ os.Exit(1)
+ }
+}
+
+func endTimeBeforeStartTime(t time.Time, st time.Time) {
+ if t.Before(st) {
+ t = st
+ }
+}
| [Before,Size,Index,Lte,Close,Gte,Exit,Add,Search,Flush,Field,NewFetchSourceContext,Format,Error,Sprint,Marshal,FetchSourceContext,Itoa,DefaultCSVWriter,NewClient,Aggregation,MarshalWithoutHeaders,SetURL,Do,NewCardinalityAggregation,NewRangeQuery,SetSniff,Query,Write,Println,Sprintf,Background,Unmarshal,ValueCount,Include,OpenFile] | Print details of the runs. | Now with this, you are Exiting the App for all Errors, earlier you were not. |
@@ -21,5 +21,4 @@ if ((!$loader = includeIfExists(__DIR__.'/../vendor/autoload.php')) && (!$loader
'php composer.phar install'.PHP_EOL;
exit(1);
}
-
return $loader;
| [No CFG could be retrieved] | Requires a composer. phar install. | should be reverted |
@@ -78,6 +78,14 @@ public class SearchableCacheConfiguration extends SearchConfigurationBase implem
}
}
+ private static Map<Class<? extends ServiceProvider<?>>, Object> initializeProvidedServices(EmbeddedCacheManager uninitializedCacheManager, ComponentRegistry cr) {
+ //Register the SelfLoopedCacheManagerServiceProvider to allow custom IndexManagers to access the CacheManager
+ ConcurrentHashMap map = new ConcurrentHashMap(2);
+ map.put(CacheManagerServiceProvider.class, uninitializedCacheManager);
+ map.put(ComponentRegistryServiceProvider.class, cr);
+ return Collections.unmodifiableMap(map);
+ }
+
@Override
public Iterator<Class<?>> getClassMappings() {
return classes.values().iterator();
| [SearchableCacheConfiguration->[getProperty->[getProperty]]] | Returns an iterator over the classes that have been mapped to a . | Why create a concurrent hash map here? |
@@ -214,6 +214,7 @@ class SyncReport(object):
def __init__(self, success_flag, added_count, updated_count, removed_count, summary, details):
self.success_flag = success_flag
+ self.canceled_flag = False
self.added_count = added_count
self.updated_count = updated_count
self.removed_count = removed_count
| [RelatedRepository->[__init__->[__init__]],AssociatedUnit->[__init__->[__init__]]] | Initialize a ClusterNode object. | In hindsight, I wish I had gone with a state variable instead of the success flag. I go back and forth on how to handle those situations. It's a bit wonky to have two separate flags, but for API compatibility I think this is the right call. |
@@ -769,6 +769,10 @@ def _concretize_specs_together_original(*abstract_specs, **kwargs):
return spack.repo.Repo(repo_path)
+ if kwargs.get('multi_root', False):
+ # This feature cannot be implemented in the old concretizer
+ raise Exception
+
abstract_specs = [spack.spec.Spec(s) for s in abstract_specs]
concretization_repository = make_concretization_repository(abstract_specs)
| [_concretize_specs_together_original->[make_concretization_repository],Concretizer->[_adjust_target->[target_from_package_preferences],adjust_target->[_make_only_one_call],concretize_compiler->[_proper_compiler_style,concretize_version],choose_virtual_or_external->[_valid_virtuals_and_externals]]] | Concretizes the given abstract specs together. Returns a list of concrete specs that have a flag. | Minor, but marking this line since we should raise an object and be a bit more specific. |
@@ -555,6 +555,16 @@ func UpdateValidatorFromEditMsg(validator *Validator, edit *EditValidator) error
return nil
}
+// IsEligibleForEPoSAuction ..
+func IsEligibleForEPoSAuction(validator *ValidatorWrapper) bool {
+ switch validator.EPOSStatus {
+ case effective.FirstTimeCandidate, effective.Candidate:
+ return true
+ default:
+ return false
+ }
+}
+
// String returns a human readable string representation of a validator.
func (v Validator) String() string {
s, _ := json.Marshal(v)
| [MarshalJSON->[String],SanityCheck->[TotalDelegation,String,SanityCheck],EnsureLength] | String returns a string representation of the validator. | For example, I don't see where the validator turn from other status e.g. InCommitteeAndSigning to Candidate? Seems elected validators won't get elected in new epochs. That's the complexity of state transition I am talking about after adding too many different status. |
@@ -1462,12 +1462,14 @@ func putFileHelper(c *client.APIClient, pfc client.PutFileClient,
return nil
}
childDest := filepath.Join(path, strings.TrimPrefix(filePath, source))
+ limiter.Acquire()
eg.Go(func() error {
+ defer limiter.Release()
// don't do a second recursive 'put file', just put the one file at
// filePath into childDest, and then this walk loop will go on to the
// next one
return putFileHelper(c, pfc, repo, commit, childDest, filePath, false,
- overwrite, limiter, split, targetFileDatums, targetFileBytes,
+ overwrite, limit.New(0), split, targetFileDatums, targetFileBytes,
headerRecords, filesPut)
})
return nil
| [StringVar,Pull,PrintDetailedCommitInfo,TempFile,Fd,TempDir,SubscribeCommit,Acquire,HasPrefix,CreateRepo,DeleteCommit,UintVar,Walk,RunFixedArgs,Flush,New,CreateDocsAlias,NewPutFileClient,PrintDetailedBranchInfo,StartCommit,NewWriter,GetFile,MarkFlagCustom,Split,PutFile,InteractiveConfirm,Println,Disable,AddFlagSet,Stdin,Finish,IntVarP,SameFlag,PushFile,BoolVar,Dir,FinishCommit,Close,ParseBranch,Wrap,Is,PrintDetailedFileInfo,RunBoundedArgs,Marshal,ParseBool,Page,WithGZIPCompression,LookPath,NewInWorker,Release,FlushCommit,CopyFile,Create,Next,StringSliceVarP,NewPuller,PrintDiffFileInfo,MkdirAll,ListBranch,Printf,ParseBranches,IsDir,PrintFileInfo,PrintDetailedRepoInfo,CreateAlias,TrimPrefix,FilesystemCompletion,GetTag,CreateBranch,PutFileSplit,PutFileURL,InspectCommit,WithMaxConcurrentStreams,LookupEnv,GlobFile,FileCompletion,Clean,DiffFile,Stat,NewScanner,Text,Wrapf,InspectBranch,IsCygwinTerminal,PutFileOverwrite,PrintCommitInfo,Name,ToSlash,ParseHistory,Get,NewRepo,Scan,ParseFile,PrintBranch,PrintRepoInfo,InspectRepo,String,Parse,Open,RegisterCompletionFunc,Run,BoolVarP,Flags,DeleteFile,RemoveAll,GetObject,NewCommit,Fields,ParseCommit,DeleteBranch,NewFile,Mode,ListRepo,Go,Ctx,StringVarP,VarP,Errorf,DeleteRepo,LoadOrStore,Wait,InspectFile,Join,ExitCode,ListCommitF,InspectPipeline,ListFileF,NewFlagSet,Command,Int64Var,Fprintf,NewOnUserMachine,WithActiveTransaction,NewCommitProvenance,Fsck,IsTerminal,CreateBranchTrigger,ScrubGRPC,ParseCommits,AndCacheFunc] | putFile splits the file into two parts. joinPaths joins paths to prefix and filePath. | So for this to work it looks like it depends on the fact that this callsite will never use the limiter because `recursive` is false. That feels a little bit fragile to me. It's definitely worth a note that it shouldn't be changed but maybe it also makes sense to pass a `nil` limiter? 
I think a panic is probably preferable to a deadlock, especially in a command like `put file` which can block for while so the user might not figure out they're deadlocked and just assume it's taking a really long time to upload. |
@@ -476,7 +476,7 @@ class Jetpack_Search {
$posts_query = new WP_Query( $args );
// WP Core doesn't call the set_found_posts and its filters when filtering posts_pre_query like we do, so need to do these manually.
- $query->found_posts = $this->found_posts;
+ $query->found_posts = $posts_query->post_count;
$query->max_num_pages = ceil( $this->found_posts / $query->get( 'posts_per_page' ) );
return $posts_query->posts;
| [Jetpack_Search->[get_active_filter_buckets->[get_filters],get_search_aggregations_results->[get_search_result],get_search_facets->[get_search_aggregations_results],do_search->[search],get_filters->[get_search_aggregations_results],get_search_facet_data->[get_filters],get_current_filters->[get_active_filter_buckets],update_search_results_aggregations->[search],set_facets->[set_filters]]] | This method is called before a post is fetched from the database. It will return an array. | Ok this won't work, my mistake this will always be the per page count. However, the underlying issue does still exist (count being off if search index is out of sync). So any opinion on how to solve would be good! |
@@ -18,7 +18,6 @@ class PyApacheBeam(PythonPackage):
depends_on('py-setuptools', type='build')
depends_on('py-pip@7.0.0:', type=('build', 'run'))
depends_on('py-cython@0.28.1:', type=('build', 'run'))
- depends_on('py-avro@1.8.1:1.10.8', type=('build', 'run'), when='^python@:2.9')
depends_on('py-avro-python3@1.8.1:1.10.0', type=('build', 'run'), when='^python@3.0:')
depends_on('py-crcmod@1.7:', type=('build', 'run'))
depends_on('py-dill@0.3.1:0.3.2', type=('build', 'run'))
| [PyApacheBeam->[conflicts,depends_on,version]] | A unified programming model for Batch and Streaming. Find all build - specific dependencies. | why is this removed? |
@@ -53,6 +53,16 @@ class Posts extends Module {
*/
private $import_end = false;
+ /**
+ * Max bytes allowed for post_content => length.
+ * Current Setting : 5MB.
+ *
+ * @access public
+ *
+ * @var int
+ */
+ const MAX_POST_CONTENT_LENGTH = 2500000;
+
/**
* Default previous post state.
* Used for default previous post status.
| [Posts->[get_min_max_object_ids_for_batches->[get_where_sql],filter_post_content_and_add_links->[add_embed,remove_embed],wp_insert_post->[is_gutenberg_meta_box_update]]] | A post module that handles sync for posts. Get a post object by its ID. | Current setting looks like it's 2.5 MB (using the 1000-based definition), not 5 MB. Or else the value isn't actually bytes. |
@@ -224,6 +224,9 @@ class LongitudinalMpc:
def set_weights(self):
if self.e2e:
self.set_weights_for_xva_policy()
+ self.params[:,0] = -10.
+ self.params[:,1] = 10.
+ self.params[:,2] = 1e5*np.ones((N+1))
else:
self.set_weights_for_lead_policy()
| [gen_long_mpc_solver->[gen_long_model,get_safe_obstacle_distance],desired_follow_distance->[get_stopped_equivalence_factor,get_safe_obstacle_distance],LongitudinalMpc->[run->[reset],process_lead->[extrapolate_lead],update->[get_stopped_equivalence_factor,get_safe_obstacle_distance,process_lead]],gen_long_mpc_solver] | Sets the weights for the N - th node. | I assume this should happen regardless of `self.e2e`? |
@@ -193,7 +193,14 @@ func (c *CmdTeamListMemberships) runUser(cli keybase1.TeamsClient) error {
role += strings.ToLower(t.Role.String())
}
if c.showAll {
- fmt.Fprintf(c.tabw, "%s\t%s\t%s\t%s\n", t.FqName, role, t.Username, t.FullName)
+ var reset string
+ if !t.Active {
+ reset = "(inactive due to account reset)"
+ if len(t.FullName) > 0 {
+ reset = " " + reset
+ }
+ }
+ fmt.Fprintf(c.tabw, "%s\t%s\t%s\t%s%s\n", t.FqName, role, t.Username, t.FullName, reset)
} else {
fmt.Fprintf(c.tabw, "%s\t%s\t%d\n", t.FqName, role, t.MemberCount)
}
| [ParseArgv->[Args,String,New,Bool],outputJSON->[MarshalIndent,G,Printf,GetDumbOutputUI],outputInvites->[Fprintf,C,Sprintf,formatInviteName,ToLower,String],runUser->[GetTerminalUI,Printf,Fprintf,Flush,outputInvites,Init,Background,G,GetDumbOutputUI,TeamList,MarshalIndent,ToLower,String,Slice,OutputWriter],output->[outputTerminal,outputJSON],formatInviteName->[Sbs,Sprintf,C],outputTerminal->[GetTerminalUI,Printf,Flush,outputInvites,Init,G,OutputWriter,outputRole],runGet->[TeamGet,output,Background],Run->[runUser,G,runGet],outputRole->[Fprintf],NewContextified,ChooseCommand] | runUser lists all the teams in the system. This function prints out all the members of a given type. | We want to show this information? Does it matter? |
@@ -346,8 +346,12 @@ class SubdirData(object):
return _pickled_state
def _process_raw_repodata_str(self, raw_repodata_str):
- json_obj = json.loads(raw_repodata_str or '{}')
-
+ try:
+ json_obj = json.loads(raw_repodata_str or '{}')
+ except json.decoder.JSONDecodeError:
+ log.debug("Conda repository may be experiencing issues, "
+ + "please try again later or use another mirror")
+ raise
subdir = json_obj.get('info', {}).get('subdir') or self.channel.subdir
assert subdir == self.channel.subdir
add_pip = context.add_pip_as_python_dependency
| [fetch_repodata_remote_request->[maybe_decompress,Response304ContentUnchanged],SubdirData->[_read_local_repdata->[_pickle_me],query_all->[SubdirData],_load->[_load],_read_pickled->[_check_pickled_valid,load],iter_records->[load]]] | Process a repodata string. Add missing packages to the index. Check if the node has a node id. | We need to handle this decode error, but I don't think `log.debug()` is sufficient. Most users won't even see that. We need to throw the whole downloaded file away and start again. |
@@ -559,12 +559,7 @@ exports.singlePassCompile = async function(entryModule, options) {
.then(wrapMainBinaries)
.then(intermediateBundleConcat)
.then(eliminateIntermediateBundles)
- .then(thirdPartyConcat)
- .then(removeInvalidSourcemaps)
- .catch(err => {
- err.showStack = false; // Useless node_modules stack
- return Promise.reject(err);
- });
+ .then(thirdPartyConcat);
};
/**
| [No CFG could be retrieved] | Adds a new to the graph. Get the extension bundle config for the given filename or null if not found. | Why were these lines removed? |
@@ -18,15 +18,9 @@ class GradeEntryStudentTa < ActiveRecord::Base
# Create non-existing association between grade entry students and TAs.
columns = [:grade_entry_student_id, :ta_id]
# Get all existing associations to avoid violating the unique constraint.
- # TODO replace `select ... map` with pluck when migrated to Rails 4.
- existing_values = select(columns)
+ existing_values = GradeEntryStudentTa
.where(grade_entry_student_id: grade_entry_student_ids, ta_id: ta_ids)
- .map do |grade_entry_student_ta|
- [
- grade_entry_student_ta.grade_entry_student_id,
- grade_entry_student_ta.ta_id
- ]
- end
+ .pluck(:grade_entry_student_id, :ta_id)
# Delegate the generation of records to the caller-specified block and
# remove values that already exist in the database.
values = yield(grade_entry_student_ids, ta_ids) - existing_values
| [GradeEntryStudentTa->[merge_non_existing->[grade_entry_student_id,pluck,import,ta_id,map],table_name,belongs_to]] | Merges non - existing records from the database. | Align the operands of an expression in an assignment spanning multiple lines. |
@@ -1359,8 +1359,12 @@ def set_displayer(config):
"""
if config.quiet:
config.noninteractive_mode = True
+
+ devnull = open(os.devnull, "w")
+ atexit.register(devnull.close)
+
displayer: Union[None, display_util.NoninteractiveDisplay, display_util.FileDisplay] =\
- display_util.NoninteractiveDisplay(open(os.devnull, "w"))
+ display_util.NoninteractiveDisplay(devnull)
elif config.noninteractive_mode:
displayer = display_util.NoninteractiveDisplay(sys.stdout)
else:
| [update_account->[_determine_account],renew_cert->[_init_le_client,_get_and_save_cert],install->[_init_le_client,_install_cert,_find_domains_or_certname],certificates->[certificates],register->[_determine_account],revoke->[revoke,_delete_if_appropriate,_determine_account],run->[_suggest_donation_if_appropriate,_find_domains_or_certname,_get_and_save_cert,_report_new_cert,_install_cert,_init_le_client,_find_cert],main->[set_displayer,make_or_verify_needed_dirs],_handle_identical_cert_request->[_handle_unexpected_key_type_migration],_init_le_client->[_determine_account],_ask_user_to_confirm_new_names->[_format_list,_get_added_removed],rollback->[rollback],enhance->[_init_le_client],unregister->[_determine_account],_report_new_cert->[_report_successful_dry_run],certonly->[_suggest_donation_if_appropriate,_find_domains_or_certname,_get_and_save_cert,_report_new_cert,_csr_get_and_save_cert,_init_le_client,_find_cert],delete->[delete],_handle_subset_cert_request->[_handle_unexpected_key_type_migration],_find_lineage_for_domains->[_handle_identical_cert_request,_handle_subset_cert_request],_find_lineage_for_domains_and_certname->[_handle_identical_cert_request,_handle_unexpected_key_type_migration,_find_lineage_for_domains]] | Set the displayer for the given configuration object. | What do you think about transforming `set_displayer` into a context manager, instead of using atexit ? |
@@ -445,6 +445,12 @@ def backward_transfer_pair(
# do anything and wait for the received lock to expire.
if channel.is_channel_usable(backward_channel, lock.amount, lock_timeout):
message_identifier = message_identifier_from_prng(pseudo_random_generator)
+
+ backward_route_state = RouteState(
+ route=[backward_channel.our_state.address],
+ forward_channel_id=backward_channel.canonical_identifier.channel_identifier,
+ )
+
refund_transfer = channel.send_refundtransfer(
channel_state=backward_channel,
initiator=payer_transfer.initiator,
| [events_for_balanceproof->[is_safe_to_wait,get_payer_channel,get_payee_channel],forward_transfer_pair->[get_lock_amount_after_fees],events_for_expired_pairs->[get_pending_transfer_pairs],handle_onchain_secretreveal->[events_for_balanceproof,set_onchain_secret],handle_init->[mediate_transfer],handle_node_change_network_state->[mediate_transfer],state_transition->[handle_onchain_secretreveal,handle_node_change_network_state,handle_lock_expired,handle_refundtransfer,handle_offchain_secretreveal,handle_unlock,handle_block,clear_if_finalized,sanity_check,handle_init],handle_refundtransfer->[handle_refundtransfer,mediate_transfer],handle_offchain_secretreveal->[secret_learned],handle_unlock->[handle_unlock],handle_block->[events_to_remove_expired_locks,events_for_onchain_secretreveal_if_dangerzone,events_for_expired_pairs],sanity_check->[get_payer_channel,is_send_transfer_almost_equal,get_payee_channel],secret_learned->[events_for_balanceproof,set_offchain_secret,set_offchain_reveal_state,events_for_secretreveal,events_for_onchain_secretreveal_if_closed],mediate_transfer->[forward_transfer_pair,get_payer_channel,filter_reachable_routes,filter_used_routes,backward_transfer_pair],events_for_onchain_secretreveal_if_closed->[get_pending_transfer_pairs,has_secret_registration_started,get_payer_channel],backward_transfer_pair->[get_lock_amount_after_fees],events_for_onchain_secretreveal_if_dangerzone->[get_pending_transfer_pairs,is_safe_to_wait,has_secret_registration_started,get_payer_channel],clear_if_finalized->[get_payee_channel,get_payer_channel]] | Sends a transfer backwards with the same amount and secrethash as the original payer Returns a list of events that are being processed. | I think we should not send a route backwards. IOW, the node sending the refund transfer cannot provide a valid path, so it should not send anything. |
@@ -47,8 +47,10 @@ class GiftCard(CountableDjangoObjectType):
@traced_resolver
def resolve_user(root: models.GiftCard, info):
requestor = get_user_or_app_from_context(info.context)
- if requestor_has_access(requestor, root.user, AccountPermissions.MANAGE_USERS):
- return root.user
+ if requestor_has_access(
+ requestor, root.created_by, AccountPermissions.MANAGE_USERS
+ ):
+ return root.created_by
raise PermissionDenied()
@staticmethod
| [GiftCard->[resolve_user->[requestor_has_access,PermissionDenied,get_user_or_app_from_context],resolve_code->[has_perm],String,Field]] | Resolve a user and code. | `traced_resolver` should only be used in more complex resolvers or when fetching relations, should be dropped here |
@@ -104,8 +104,10 @@ public class DruidMeta extends MetaImpl
{
// Build connection context.
final ImmutableMap.Builder<String, Object> context = ImmutableMap.builder();
- for (Map.Entry<String, String> entry : info.entrySet()) {
- context.put(entry);
+ if (info != null) {
+ for (Map.Entry<String, String> entry : info.entrySet()) {
+ context.put(entry);
+ }
}
openDruidConnection(ch.id, context.build());
}
| [DruidMeta->[createStatement->[createStatement],prepareAndExecute->[prepare],prepare->[createStatement],closeAllConnections->[closeConnection],getDruidStatement->[getDruidConnection],getDruidConnection->[closeConnection],sqlResultSet->[closeStatement,createStatement,prepareAndExecute]]] | Open a connection. | i wonder if this was causing some problem before... looking at other implementations some do check for null on this field, :+1: |
@@ -100,6 +100,13 @@ class ProjMixin(object):
return self
+ def add_eeg_ref(self):
+ """Add an average EEG reference projector if one does not exist
+ """
+ if _needs_eeg_average_ref_proj(self.info):
+ eeg_proj = make_eeg_average_ref_proj(self.info, activate=True)
+ self.add_proj(eeg_proj)
+
def apply_proj(self):
"""Apply the signal space projection (SSP) operators to the data.
| [ProjMixin->[plot_projs_topomap->[plot_projs_topomap]],_read_proj->[Projection],make_eeg_average_ref_proj->[Projection],setup_proj->[_needs_eeg_average_ref_proj,make_eeg_average_ref_proj,make_projector_info,activate_proj],_needs_eeg_average_ref_proj->[_has_eeg_average_ref_proj],_uniquify_projs->[_proj_equal],make_projector_info->[make_projector]] | Add a projection to the data container. Apply the projection to the data and return the object. | What does the `activate` do actually? It's not super clear to me ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.