patch stringlengths 18 160k | callgraph stringlengths 4 179k | summary stringlengths 4 947 | msg stringlengths 6 3.42k |
|---|---|---|---|
@@ -382,8 +382,12 @@ public class FileSystemRepository implements ContentRepository {
if (path == null) {
throw new IllegalArgumentException("No container exists with name " + containerName);
}
+ long capacity = path.toFile().getTotalSpace();
+ if(capacity==0) {
+ throw new RuntimeException("System returned total space of the partition for " + containerName + " is zero byte. Nifi can not create a zero sized FileSystemRepository");
+ }
- return Files.getFileStore(path).getTotalSpace();
+ return capacity;
}
@Override
| [FileSystemRepository->[destroyExpiredArchives->[visitFile->[size,toString,getLastModTime],compare->[compare,getLastModTime],size,remove,getLastModTime,toString,deleteBasedOnTimestamp,getContainerUsableSpace],ArchiveInfo->[toString],write->[close->[close,remove],flush->[flush],write],remove->[getPath,remove],getLastModTime->[getLastModTime],clone->[remove,create,decrementClaimantCount],shutdown->[shutdown],getClaimantCount->[getClaimantCount],decrementClaimantCount->[decrementClaimantCount],getPath->[getPath],size->[size,getPath],ContainerState->[signalCreationReady->[getContainerUsableSpace],waitForArchiveExpiration->[isWaitRequired],isWaitRequired->[getContainerUsableSpace]],incrementClaimantCount->[incrementClaimantCount],DestroyExpiredArchiveClaims->[run->[toString,destroyExpiredArchives,getContainerUsableSpace]],purge->[purge],exportTo->[exportTo],removeIncompleteContent->[removeIncompleteContent],ClaimLengthPair->[hashCode->[hashCode],equals->[getClaim,equals]],importFrom->[importFrom],initializeRepository->[shutdown],ArchiveOrDestroyDestructableClaims->[run->[toString,archive,remove]],getOpenStreamCount->[size],isAccessible->[getPath,getArchivePath],archive->[getArchivePath,remove,archive,close,getPath],deleteBasedOnTimestamp->[getLastModTime],read->[getPath]]] | Gets the capacity of the container. | I have the same concern here as above. In the constructor above, it is likely not a huge deal since it's the constructor and if any Exception gets thrown, NiFi will fail to startup. However, here it is a much bigger concern, as this is called from a few different places where IOException is caught. |
@@ -312,11 +312,11 @@ class BaseClient(object): # pylint: disable=too-many-instance-attributes
:param name: The name of the entity, if the 'EntityName' property is
not included in the connection string.
"""
- address, policy, key, entity = parse_conn_str(conn_str)
+ address, policy, key, entity, transport_type = parse_conn_str(conn_str)
entity = name or entity
address = build_uri(address, entity)
name = address.split('/')[-1]
- return cls(address, name, shared_access_key_name=policy, shared_access_key_value=key, **kwargs)
+ return cls(address, name, shared_access_key_name=policy, shared_access_key_value=key, transport_type=transport_type, **kwargs)
def _get_entity(self):
raise NotImplementedError("Must be implemented by child class.")
| [BaseClient->[get_properties->[_get_entity]],ServiceBusMixin->[create_queue->[create_queue],create_subscription->[create_subscription],delete_subscription->[delete_subscription],delete_queue->[delete_queue],delete_topic->[delete_topic],create_topic->[create_topic]],SenderMixin->[queue_message->[queue_message]]] | Create a new client from a Service Bus connection string. | If it wouldn't be a highly vacuous/infeasible test to write, would be nice to have a quick validation that passing the various transport_types works as expected. |
@@ -32,6 +32,8 @@ import javax.annotation.Nullable;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
+import org.apache.gobblin.data.management.copy.hive.HiveDataset;
+import org.apache.gobblin.data.management.copy.hive.HiveDatasetFinder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
| [CopySource->[createRequestAllocator->[createRequestAllocator],submitUnfulfilledRequestEvents->[submitUnfulfilledRequestEventsHelper]]] | Imports a single object from a managed object. Imports all components of the Gobblin API that implement Copy. | the import ordering needs to consistent with the style file. |
@@ -134,8 +134,7 @@ static const OPENSSL_CTX_METHOD provider_store_method = {
static CRYPTO_ONCE provider_store_init_flag = CRYPTO_ONCE_STATIC_INIT;
DEFINE_RUN_ONCE_STATIC(do_provider_store_init)
{
- return OPENSSL_init_crypto(0, NULL)
- && (provider_store_index =
+ return (provider_store_index =
openssl_ctx_new_index(&provider_store_method)) != -1;
}
| [No CFG could be retrieved] | Creates a new provider store. - Get provider store object. | Why is this not needed? |
@@ -373,8 +373,6 @@ class Test_normalize_path(object):
# it's easiest just to skip this test on Windows altogether.
@pytest.mark.skipif("sys.platform == 'win32'")
def test_resolve_symlinks(self, tmpdir):
- print(type(tmpdir))
- print(dir(tmpdir))
orig_working_dir = os.getcwd()
os.chdir(tmpdir)
try:
| [TestUnpackArchives->[confirm_files->[mode],test_unpack_tgz->[confirm_files],test_unpack_zip->[confirm_files]],test_rmtree_retries_for_3sec->[Failer],TestTempDirectory->[test_deletes_readonly_files->[readonly_file,create_file]],test_rmtree_retries->[Failer]] | Test if symlinks are resolved. | This isn't related to this PR, so I would leave it alone. |
@@ -303,7 +303,8 @@ public class DefaultArchiveDeployer<T extends DeployableArtifact> implements Arc
private void addZombieApp(Artifact artifact) {
if (allResourcesExist(artifact.getResourceFiles())) {
try {
- artifactZombieMap.put(artifact.getArtifactName(), new ZombieArtifact(artifact.getResourceFiles()));
+ artifactZombieMap.put(artifact.getArtifactName(),
+ new ZombieArtifact(artifact.getResourceFiles(), artifact.getDescriptor().getDescriptorFile()));
} catch (Exception e) {
// ignore resource
}
| [DefaultArchiveDeployer->[createArtifact->[createArtifact],ZombieArtifact->[exists->[allResourcesExist],updatedZombieApp->[computeMaxTimestamp],computeMaxTimestamp],preTrackArtifact->[findArtifact],deployPackagedArtifact->[deployPackagedArtifact,undeployArtifact],redeploy->[createArtifact,addZombieApp,trackArtifact,logDeploymentFailure,undeploy],undeploy->[undeploy]]] | Add a ZombieArtifact to the ZombieMap. | When the artifact has redeploymentEnabled=false do not include the descriptor in the list |
@@ -88,7 +88,7 @@ namespace System.IO.Pipelines
_state = null;
}
- public override string ToString()
+ public override readonly string ToString()
{
return $"{nameof(IsCompleted)}: {IsCompleted}";
}
| [PipeCompletion->[IsCompletedOrThrow->[AggressiveInlining,Throw],Reset->[Assert],GetCallbacks->[Assert],AddCallback->[GetCallbacks,Add],TryComplete->[Capture,GetCallbacks],ToString->[nameof],nameof]] | Resets the state of a and returns it. | this assignment to `_state` means that readonly can't be used here. |
@@ -383,10 +383,10 @@ public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCo
return getXml(doc);
}
- private String getPathFromSourceFileDevText(Set<String> paths, String sourceFileDevText) {
- if (paths != null && sourceFileDevText != null) {
+ private String getPathFromSourceText(Set<String> paths, String sourceText) {
+ if (paths != null && sourceText != null) {
for (String path : paths) {
- if (sourceFileDevText.contains(path)) {
+ if (sourceText.contains(path)) {
return path;
}
}
| [LibvirtMigrateCommandWrapper->[execute->[createMigrationURI]]] | Replace the storage in the given xml description with the new one. This method is used to find a path in the source file of the migration. | What do you think about `if (paths != null && !StringUtils.isBlank(sourceText))`? |
@@ -521,7 +521,10 @@ func (s *ReadStep) Apply(preview bool) (resource.Status, StepCompleteFunc, error
}
complete := func() { s.event.Done(&ReadResult{State: s.new}) }
- return resource.StatusOK, complete, nil
+ if resourceError == nil {
+ return resourceStatus, complete, nil
+ }
+ return resourceStatus, complete, resourceError
}
// StepOp represents the kind of operation performed by a step. It evaluates to its string label.
| [Apply->[URN],Prefix->[Color],Plan,Type,Provider,URN] | Apply performs the read step. | It's interesting that the semantics here are to fail a deployment if we ever `Read` a resource that's in an unhealthy state. After thinking about it I think this makes sense, but it's also a little strange since Pulumi wasn't involved in resources that get read via `ReadStep`s. |
@@ -90,7 +90,7 @@ var (
StakingEpoch: big.NewInt(2),
PreStakingEpoch: big.NewInt(1),
QuickUnlockEpoch: big.NewInt(0),
- FiveSecondsEpoch: big.NewInt(0),
+ FiveSecondsEpoch: big.NewInt(17700),
EIP155Epoch: big.NewInt(0),
S3Epoch: big.NewInt(0),
ReceiptLogEpoch: big.NewInt(0),
| [Rules->[IsReceiptLog,IsEIP155,IsCrossLink,IsS3],GasTable->[IsS3]] | This function returns the configuration of the chain parameters that are used to run the Partner network This configuration is a bit of a hack to allow for the use of the chain ID field. | why set to 17700? |
@@ -631,10 +631,15 @@ public class TxDistributionInterceptor extends BaseDistributionInterceptor {
assert !mutationsIterator.hasNext();
}
- private static List<Mutation> getMutationsOnKey(TxInvocationContext ctx, Object key) {
+ private static List<Mutation> getMutationsOnKey(TxInvocationContext ctx, WriteCommand untilCommand, Object key) {
List<Mutation> mutations = new ArrayList<>();
// We don't use getAllModifications() because this goes remote and local mods should not affect it
for (WriteCommand write : ctx.getCacheTransaction().getModifications()) {
+ if (write == untilCommand) {
+ // We've reached this command in the modifications list; this happens when we're replaying a prepared
+ // transaction - see EntryWrappingInterceptor.wrapEntriesForPrepareAndApply
+ break;
+ }
if (write.getAffectedKeys().contains(key)) {
if (write instanceof FunctionalCommand) {
mutations.add(((FunctionalCommand) write).toMutation(key));
| [TxDistributionInterceptor->[remoteGet->[remoteGet],handleTxWriteCommand->[updateMatcherForRetry],RemoteGetAllForWriteHandler->[RemoteGetAllForWriteHandler],configure->[configure],getCommitNodes->[getCommitNodes],TxReadOnlyManyHelper->[copyForRemote->[getMutations]],BaseFunctionalWriteHelper->[copyForRemote->[getMutations]]]] | This method handles the remote keys. | I'm not sure this is relevant, but is it really true? To me it seems like we always call `getMutationsOnKey(ctx, command, key)` on the originator when we don't have the entry in the context. |
@@ -28,6 +28,11 @@ FLAG(uint64,
0,
"Add an optional microsecond delay between table scans");
+FLAG(bool,
+ compat_index_all_extension_columns,
+ true,
+ "Enable INDEX (and thereby constraints) on all extension tables");
+
SHELL_FLAG(bool, planner, false, "Enable osquery runtime planner output");
DECLARE_bool(disable_events);
| [No CFG could be retrieved] | Creates a new object with a next - ID assigned to a table or a constraint. The TableList class is used to determine which tables are in the table list. | I recommend having this as a hidden flag, and calling it `extensions_default_index` (odd but confirms to naming conventions). |
@@ -434,7 +434,7 @@ class PGBKCVOperation(Operation):
fn, args, kwargs = pickler.loads(self.spec.combine_fn)[:3]
self.combine_fn = curry_combine_fn(fn, args, kwargs)
if (getattr(fn.add_input, 'im_func', None)
- is core.CombineFn.add_input.im_func):
+ is core.CombineFn.add_input.__func__):
# Old versions of the SDK have CombineFns that don't implement add_input.
self.combine_fn_add_input = (
lambda a, e: self.combine_fn.add_inputs(a, [e]))
| [SimpleMapTaskExecutor->[execute->[start,finish,create_operation]],_TaggedReceivers->[__missing__->[NullReceiver]],ReadOperation->[start->[output]],Operation->[start->[ConsumerSet],output->[cast]],PGBKOperation->[flush->[output]],ConsumerSet->[receive->[cast]],PGBKCVOperation->[output_key->[output]],FlattenOperation->[process->[output]],create_operation->[ReadOperation,create_pgbk_op,InMemoryWriteOperation,CombineOperation,DoOperation,FlattenOperation],CombineOperation->[process->[output]],DoOperation->[start->[_read_side_inputs,start,_TaggedReceivers],finish->[finish],process->[receive]],DoFnRunnerReceiver->[receive->[process]],FakeCython] | Initialize the object with a . | does 'if (getattr(fn.add_input, 'im_func', None)' still work in python3? |
@@ -395,7 +395,7 @@ func makeRuntime(runtime *Runtime) (err error) {
}
if !foundRuntime {
return errors.Wrapf(ErrInvalidArg,
- "could not find a working runc binary (configured options: %v)",
+ "could not find a working binary (configured options: %v)",
runtime.config.RuntimePath)
}
| [Shutdown->[ID,Wrapf,Unlock,AllContainers,Shutdown,Close,Lock,StopWithTimeout,Errorf],GetConfig->[Copy,RUnlock,To,RLock],Info->[Wrapf,GetInsecureRegistries,storeInfo,GetRegistries,hostInfo],refresh->[ID,Wrapf,Unlock,AllContainers,Close,Lock,Errorf,AllPods,OpenFile,refresh,Refresh],generateName->[LookupContainer,Cause,LookupPod,GetRandomName],refreshRootless->[Run,Command],GetBackend,IsRootless,Unlock,Perm,TempDir,Setenv,refresh,GetRootlessUID,Mode,Copy,IsNotExist,Stat,ReadFile,GetLockfile,To,SetNamespace,Lock,Errorf,Debug,Debugf,Wrapf,Join,EvalSymlinks,Geteuid,GetStore,Shutdown,NewImageRuntimeFromStore,SkipStorageSetup,MkdirAll,refreshRootless,Sys,Getuid,IsDir,Sprintf,Decode,SetStore,InitCNI,Getenv,IsExist] | makeRuntime creates a new runtime based on the given configuration. up containers and storage. | Don't we have a path to the runtime? |
@@ -2231,7 +2231,8 @@ destroy_existing_container(struct migrate_pool_tls *tls, uuid_t cont_uuid)
DP_RC(rc));
}
- rc = ds_cont_tgt_destroy(tls->mpt_pool_uuid, cont_uuid);
+ rc = ds_cont_tgt_destroy_this_xstream(tls->mpt_pool_uuid,
+ cont_uuid);
if (rc != 0) {
D_ERROR("Migrate failed to destroy container "
"prior to reintegration: pool: "DF_UUID
| [No CFG could be retrieved] | Destroys an existing container. Adds a link to the container UUID table to mark the object ID as specified by the object. | You can not call destroy_this_xstream here. I think this is still on xstream 0. so it does need collective call to destroy container on all xstreams. I thought the original sulotion is correct, but you need somehow skip the cont_delete_ec_agg() for this "destory", which is only needed for normal container destory. |
@@ -158,6 +158,7 @@ dc_pool_alloc(unsigned int nr)
goto failed;
}
+ pool->dp_map_version_known = 1;
pool->dp_map_sz = pool_buf_size(nr);
return pool;
| [No CFG could be retrieved] | This function checks if the given number of components in the system is valid. This function is called from the daemon thread and is called by the daemon thread to find the. | just confirm, the initial version should be set as 0 or 1? (seems initially is zero, and every change will +1) |
@@ -8,7 +8,7 @@
<%= @presenter.phone_number_message %>
</p>
-<%= validated_form_for(:login_otp, method: :post) do %>
+<%= validated_form_for('', method: :post) do |f| %>
<% if @presenter.reauthn %>
<%= render 'two_factor_authentication/totp_verification/reauthn' %>
<% end %>
| [No CFG could be retrieved] | Renders a single - time authentication token. Renders a hidden field and a check box for the given nacn. | This feels _weird_ to me, but seems to work as a workaround to avoid SimpleForm inputs from prefixing the "model" name. Without this, the field below becomes `name="login_otp[remember_device]"`, meaning we have to change how we process the input on the server. My impression is that we just made up symbols to use here, since the form isn't actually associated with a model, so the name (or absence of a name) doesn't really matter. |
@@ -152,6 +152,9 @@ class LdapAuthorizer extends AuthorizerBase
}
$filter = '(' . Config::get('auth_ldap_prefix') . '*)';
+ if (Config::get('auth_ldap_Userlist_filter') != null) {
+ $filter = '(' . Config::get('auth_ldap_Userlist_filter') . ')';
+ }
// build group filter
$group_filter = '';
| [LdapAuthorizer->[ldapToUser->[getUserlevel],getUser->[getUserlist],bind->[getFullDn,connect]]] | get a list of users that are members of the current user. | This overrides `$filter` above, is that what you wanted to do? |
@@ -172,6 +172,13 @@ class Media implements AuditableInterface
*/
public function getCollection()
{
+ if (null === $this->collection) {
+ throw new \RuntimeException(sprintf(
+ 'Media entity %d is not associated with a collection',
+ $this->getId()
+ ));
+ }
+
return $this->collection;
}
| [Media->[removeFile->[removeElement]]] | Get the collection of the node. | Why is that needed? That should actually never be the case. |
@@ -34,10 +34,16 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- grpcServer, lis := cmd.NewGrpc(serverType, port)
+ grpcServer, lis := utils.NewGrpc(serverType, *port)
sds := sdsServer.NewSDSServer(keysDirectory)
envoyControlPlane.RegisterSecretDiscoveryServiceServer(grpcServer, sds)
- cmd.GrpcServe(ctx, grpcServer, lis, cancel, serverType)
+ go utils.GrpcServe(ctx, grpcServer, lis, cancel, serverType)
+
+ sigChan := make(chan os.Signal, 1)
+ signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
+ <-sigChan
+
+ glog.Info("Goodbye!")
}
func parseFlags() {
| [Flush,RegisterSecretDiscoveryServiceServer,Int,Error,Sprintf,Background,WithCancel,NewGrpc,Lookup,String,GrpcServe,Parse,NewFlagSet,Set,NewSDSServer] | main is the entry point for the command line interface. | as @vramakrishnan requested - we moved the gRPC tools in pkg/utils |
@@ -7,4 +7,5 @@ package tools
import (
_ "github.com/go-swagger/go-swagger/cmd/swagger"
+ _ "gitea.com/jolheiser/gitea-vet"
)
| [No CFG could be retrieved] | Import a single element from the package. | We can move this to `build/vendor.go` |
@@ -80,15 +80,7 @@ abstract class AbstractEventContext implements EventContext {
*/
@Override
public boolean isTerminated() {
- if (completed) {
- if (!childContexts.isEmpty()) {
- return childContexts.stream().filter(c -> !c.isTerminated()).count() == 0;
- }
-
- return true;
- }
-
- return false;
+ return completed ? childContexts.stream().allMatch(EventContext::isTerminated) : false;
}
/**
| [AbstractEventContext->[getChildContexts->[unmodifiableList],isTerminated->[isEmpty,count],error->[doError],success->[doSuccess],addChildContext->[add],isStreaming->[anyMatch,isEmpty]]] | Checks if the context has been terminated. | In line 51, shouldn't you be setting boolean after completing mono maybe? |
@@ -20,8 +20,11 @@ package beater
import (
"errors"
"fmt"
+<<<<<<< HEAD
"sync"
"syscall"
+=======
+>>>>>>> parent of a78a980da2... [Heartbeat] Setuid to regular user / lower capabilities when possible (#27878)
"time"
"github.com/elastic/beats/v7/heartbeat/config"
| [RunStaticMonitors->[NewFactory,Create,Stop,Start,Errorf,Info,Is],RunReloadableMonitors->[Check,Run,Error,Errorf],RunCentralMgmtMonitors->[MustRegisterList,NewRunnerList],makeAutodiscover->[QueryConfig,NewAutodiscover],Run->[Stop,Geteuid,RunStaticMonitors,RunReloadableMonitors,Getegid,RunCentralMgmtMonitors,Getgroups,NewReloader,Enabled,makeAutodiscover,Start,Info,runRunOnce],runRunOnce->[Warn,NewLogger,Close,NewSyncClient,Errorf,Info,Wait],NewWithLocation,LoadLocation,NewFactory,Done,Unpack,ConfigToStdMonitorFields,RunWrapped,Close,Errorf,Publish,Make,Get,Add] | This function returns a list of all possible types of events that have occurred on the network. New returns a new instance of the beat. | this looks like a merge error |
@@ -351,6 +351,11 @@ public class HoodieDefaultTimeline implements HoodieTimeline {
return details.apply(instant);
}
+ @Override
+ public boolean isEmpty(HoodieInstant instant) {
+ return getInstantDetails(instant).get().length == 0;
+ }
+
@Override
public String toString() {
return this.getClass().getName() + ": " + instants.stream().map(Object::toString).collect(Collectors.joining(","));
| [HoodieDefaultTimeline->[getTimelineOfActions->[filter,HoodieDefaultTimeline],getSavePointTimeline->[HoodieDefaultTimeline],getCleanerTimeline->[HoodieDefaultTimeline],nthFromLastInstant->[nthInstant,countInstants,empty],getRestoreTimeline->[HoodieDefaultTimeline],nthInstant->[countInstants,empty],findInstantsBefore->[HoodieDefaultTimeline],findInstantsAfterOrEquals->[HoodieDefaultTimeline],filterInflights->[HoodieDefaultTimeline],findInstantsAfter->[HoodieDefaultTimeline],filterPendingReplaceTimeline->[HoodieDefaultTimeline],filterCompletedAndCompactionInstants->[HoodieDefaultTimeline],getCompletedReplaceTimeline->[HoodieDefaultTimeline],isBeforeTimelineStarts->[firstInstant],getWriteTimeline->[HoodieDefaultTimeline],findInstantsBeforeOrEquals->[HoodieDefaultTimeline],filterPendingExcludingCompaction->[HoodieDefaultTimeline],findInstantsInRange->[HoodieDefaultTimeline],getRollbackTimeline->[HoodieDefaultTimeline],filterInstantsByAction->[filter],filterPendingCompactionTimeline->[HoodieDefaultTimeline],filter->[filter,HoodieDefaultTimeline],getRollbackAndRestoreTimeline->[getTimelineOfActions],lastInstant->[nthInstant,countInstants,empty],getDeltaCommitTimeline->[HoodieDefaultTimeline],filterCompletedInstants->[HoodieDefaultTimeline],filterInflightsAndRequested->[HoodieDefaultTimeline],filterPendingRollbackTimeline->[HoodieDefaultTimeline]]] | Get details of the given HoodieInstant. | can you help me understand what are the cases this could happen? |
@@ -1359,7 +1359,7 @@ public class ReferenceCountedOpenSslEngine extends SSLEngine implements Referenc
// As rejectRemoteInitiatedRenegotiation() is called in a finally block we also need to check if we shutdown
// the engine before as otherwise SSL.getHandshakeCount(ssl) will throw an NPE if the passed in ssl is 0.
// See https://github.com/netty/netty/issues/7353
- if (!isDestroyed() && SSL.getHandshakeCount(ssl) > 1 &&
+ if (!isDestroyed() && !clientMode && SSL.getHandshakeCount(ssl) > 1 &&
// As we may count multiple handshakes when TLSv1.3 is used we should just ignore this here as
// renegotiation is not supported in TLSv1.3 as per spec.
!SslProtocols.TLS_v1_3.equals(session.getProtocol()) && handshakeState == HandshakeState.FINISHED) {
| [ReferenceCountedOpenSslEngine->[handshakeException->[shutdown],needPendingStatus->[isOutboundDone,isInboundDone],getSSLParameters->[getSSLParameters],AsyncTaskDecorator->[run->[run]],retain->[retain],sslReadErrorResult->[shutdownWithError,needWrapAgain],getOcspResponse->[getOcspResponse],setKeyMaterial->[setKeyMaterial],getDelegatedTask->[AsyncTaskDecorator],wrap->[release,shutdown,singleSrcBuffer,writePlaintextData,isBytesAvailableEnoughForWrap,resetSingleSrcBuffer,wrap],closeInbound->[shutdown],newResult->[shutdown,newResult],TaskDecorator->[run->[run]],selectApplicationProtocol->[selectApplicationProtocol],writeEncryptedData->[release],setSessionId->[setSessionId],beginHandshake->[calculateMaxWrapOverhead],checkSniHostnameMatch->[checkSniHostnameMatch],calculateMaxWrapOverhead->[maxEncryptedPacketLength0],rejectRemoteInitiatedRenegotiation->[shutdown],release->[release],setSSLParameters->[setVerify,setSSLParameters],newResultMayFinishHandshake->[newResult],setVerify->[setVerify],handshake->[shutdownWithError,handshakeException,needWrapAgain,pendingStatus,checkEngineClosed],closeOutbound->[shutdown],unwrap->[readPlaintextData,release,sslPending0,newResultMayFinishHandshake,singleDstBuffer,newResult,unwrap,handleUnwrapException,resetSingleDstBuffer,resetSingleSrcBuffer,writeEncryptedData,singleSrcBuffer],doSSLShutdown->[shutdown],getHandshakeStatus->[pendingStatus],readPlaintextData->[release],shutdownWithError->[shutdownWithError,shutdown],setOcspResponse->[setOcspResponse],toJavaCipherSuitePrefix->[isEmpty],isEndPointVerificationEnabled->[isEmpty],writePlaintextData->[release],refCnt->[refCnt],sslPending0->[sslPending],mayFinishHandshake->[mayFinishHandshake,handshake],touch->[touch],DefaultOpenSslSession->[handshakeFinished->[isDestroyed,isEmpty,calculateMaxWrapOverhead,toJavaCipherSuite],getPeerCertificateChain->[isEmpty],getPeerPort->[getPeerPort],sessionId->[isDestroyed],getProtocol->[isDestroyed],getPeerPrincipal->[getPeerCertificates],notifyUnbound->[new
SSLSessionBindingEvent],getLastAccessedTime->[getCreationTime],getPacketBufferSize->[maxEncryptedPacketLength],getValueNames->[isEmpty],putValue->[newSSLSessionBindingEvent],getPeerCertificates->[isEmpty],getPeerHost->[getPeerHost]],isSessionReused->[isSessionReused],setClientAuth->[setVerify]]] | If the peer has not finished the renegotiation is allowed. | It looks like this would allow unlimited renegotiation for non-boringssl on client-side. Am I reading this right? |
@@ -17,6 +17,10 @@ class Openssl(Package):
parallel = False
def install(self, spec, prefix):
+ # OpenSSL uses a variable APPS in its Makefile. If it happens to be set
+ # in the environment, then this will override what is set in the
+ # Makefile, leading to build errors.
+ del env['APPS']
if spec.satisfies("=darwin-x86_64") or spec.satisfies("=ppc64"):
# This needs to be done for all 64-bit architectures (except Linux,
# where it happens automatically?)
| [Openssl->[install->[join_path,satisfies,Executable,filter_file,config,make],depends_on,version]] | Installs the nagios compiler. | This will raise a `KeyError` if `APPS` is not set. Can you use `env.pop('APPS', None)`? |
@@ -140,6 +140,14 @@ func (snap *Snapshot) VerifyIntegrity() error {
}
urns[urn] = state
+
+ // Also register this state with all of it's alias URNs. Note that there is a period of time between
+ // registering a parent resoruce and registering it's children where the child's `parent` URN will refer to
+ // the aliased name, not the new name. By the end of a successful deployment, these references should all
+ // be updated.
+ for _, alias := range state.Aliases {
+ urns[alias] = state
+ }
}
}
| [NewMagic->[Sprintf,Sum256],VerifyIntegrity->[IsProviderType,NewMagic,ParseReference,Errorf,NewReference]] | VerifyIntegrity checks that the snapshot is valid and that all the resources are not duplicated. Check if there are any missing dependencies and if there are duplicates. | I'll admit, realizing that we needed this makes me nervous. I have not been able to trigger any problems this loosening of the checkpoint integrity causes - but it does feel worrisome that there is now a period of time where `parent` (and probably `dependsOn` and maybe other) references stored in the checkpoint will not resolve correctly if looked up directly. Technically, all the information is present to look them up via `aliases`, but that's not what any normal code will do when interpreting a checkpoint file. @pgavlin Thoughts on what we should do here? |
@@ -231,6 +231,9 @@ public class ErrorHandlerTestCase extends AbstractIntegrationTestCase {
public Event process(Event event) throws MuleException {
Throwable exception = (Throwable) event.getVariable("exception").getValue();
if (exception instanceof MuleException) {
+ if (exception instanceof MessagingException) {
+ exception = new MessagingException(event, exception);
+ }
throw (MuleException) exception;
} else if (exception instanceof RuntimeException) {
throw (RuntimeException) exception;
| [ErrorHandlerTestCase->[callAndThrowException->[callAndThrowException]]] | Process the event. | when can it happen that a MessagingException doesn't have the event set? |
@@ -62,9 +62,9 @@ class Jetpack_Simple_Payments {
function register_gutenberg_block() {
if ( $this->is_enabled_jetpack_simple_payments() ) {
- jetpack_register_block( 'simple-payments' );
+ register_block_type( 'jetpack/simple-payments' );
} else {
- jetpack_set_extension_unavailability_reason( 'simple-payments', 'missing_plan' );
+ Jetpack_Gutenberg::set_extension_unavailable( 'jetpack/simple-payments', 'missing_plan' );
}
}
| [Jetpack_Simple_Payments->[parse_shortcode->[get_blog_id,is_enabled_jetpack_simple_payments],is_enabled_jetpack_simple_payments->[get_blog_id],init_hook_action->[register_scripts_and_styles,register_shortcode]]] | Register the gutenberg block. | Same as above, I think we should check for the `register_block_type` before to use it. Another alternative would be to wrap all those calls in `if ( Jetpack_Gutenberg::is_gutenberg_available() )` checks, like we do for the Contact Form. |
@@ -207,6 +207,10 @@ class Comment < ApplicationRecord
end
end
+ def hover_publish_date
+ created_at.strftime("%B %e %Y %I:%M %P %Z")
+ end
+
def sharemeow_link
user_image = ProfileImage.new(user)
user_image_link = Rails.env.production? ? user_image.get_link : user_image.get_external_link
| [Comment->[sharemeow_link->[title],wrap_timestamps_if_video_present!->[path],bust_cache->[path]]] | Returns a readable publish date sequence for the user. | How about naming this method `publish_date`? To me, `hover_` couples this method to the way it's being presented to the user _(via a hovering action)_. `publish_date` would allow us to have a more generic name, which would still apply if at some point we'd like to display this same value in a different UI manner. Moreover, we already have `readable_publish_date`. This longer version might make sense to consider it the full `publish_date`. Thoughts? |
@@ -94,7 +94,7 @@ class SparseReshapeTest(test.TestCase):
self.assertAllEqual((2, 3 * 4), sp_output.shape)
def testSameShape(self):
- with self.session(use_gpu=False) as sess:
+ with self.session(use_gpu=True) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])
| [EmptySparseTensorReshapeTest->[testImpliedReshapeEmpty1DTensor->[_MakeAndReshapeTensor],testImpliedReshapeEmpty2DTensor->[_MakeAndReshapeTensor],testImpliedDimTogetherWithZeroDimCausesError->[_MakeAndReshapeTensor],testImpliedReshapeEmpty3DTensor->[_MakeAndReshapeTensor],testImpliedReshapeEmpty4DTensor->[_MakeAndReshapeTensor]],SparseReshapeTest->[testFeedDownRankWithInferredDim->[_SparseTensorPlaceholder,_SparseTensorValue_2x3x4],testRaisesIfInferredShapeNotPossible->[_SparseTensorValue_2x3x4],testFeedDownRank->[_SparseTensorPlaceholder,_SparseTensorValue_2x3x4],testProvideStaticallyMismatchedSizes->[_SparseTensorValue_5x6],testFeedMismatchedSizes->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testFeedUpRank->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testStaticShapeInfoPreserved->[_SparseTensorValue_5x6],testFeedMultipleInferredDims->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testFeedDenseReshapeSemantics->[_SparseTensorPlaceholder],testFeedPartialShapes->[_SparseTensorPlaceholder],testSameShape->[_SparseTensorValue_5x6],testUpRank->[_SparseTensorValue_5x6],testPropagatesFullyKnownDenseShapeWhenShapePartiallyKnown->[_SparseTensorValue_2x3x4],testFeedUpRankWithInferredDim->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testFeedNewShapeSameRankWithInferredDim->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testFeedMismatchedSizesWithInferredDim->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testRaisesIfMoreThanOneInferredDim->[_SparseTensorValue_2x3x4],testFeedSameShapeWithInferredDim->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testWorksWellWithTfShape->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testFeedNewShapeSameRank->[_SparseTensorPlaceholder,_SparseTensorValue_5x6],testStaticShapeInfoPreservedWithInferredDims->[_SparseTensorValue_2x3x4],testFeedSameShape->[_SparseTensorPlaceholder,_SparseTensorValue_5x6]]] | Test that the sparse tensor values are the same shape as the input. 
| You no longer need to set `use_gpu=True` explicitly, it default to True. |
@@ -17,7 +17,8 @@ APP_NAME = 'login.gov'.freeze
module Upaya
class Application < Rails::Application
- AppConfig.setup(YAML.safe_load(File.read(Rails.root.join('config', 'application.yml'))))
+ configuration = AppConfigReader.new.read_configuration
+ AppConfig.setup(configuration)
config.load_defaults '6.1'
config.active_record.belongs_to_required_by_default = false
| [Application->[allow,belongs_to_required_by_default,queue_adapter,default_options,join,load_path,domain_name,use,unknown_asset_fallback,time_zone,safe_load,require,origins,to_s,enable_rate_limiting,per_form_csrf_tokens,resource,default_url_options,default_locale,read,present?,try,delete,insert_before,available_locales,load_defaults,setup,urlsafe_csrf_tokens],groups,freeze,require_relative,require] | This module is used to setup the application. The name of the that should be displayed. | could we consider writing out a copy of the resulting config back to the filesystem? I'd like the ability to inspect what config a box has, but if we don't write it out to disk, new values in S3 could override what the existing running processes actually have? Maybe we can set appropriate perms on the file so it's not world-readable? |
@@ -97,8 +97,12 @@ evoked[0].plot_field(maps, time=0.170)
# Compute forward model
# Make source space
-src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
- overwrite=True)
+src_fname = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
+if not os.path.isfile(src_fname):
+ src = mne.setup_source_space('spm', src_fname, spacing='oct6',
+ subjects_dir=subjects_dir, overwrite=True)
+else:
+ src = mne.read_source_spaces(src_fname)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)
| [plot_overlay,plot_events,create_eog_epochs,filter,show_view,make_inverse_operator,make_forward_solution,show,dict,compute_covariance,convert_forward_solution,apply,evoked,epochs_cln,make_field_map,plot,append,print,find_events,data_path,find_bads_eog,plot_components,average,Raw,setup_source_space,set_time,apply_inverse,ICA,Epochs,plot_scores,pick_types] | Plot the noise covariance of the object DSPM map in the current directory. | Also snuck in this few-line edit that speeds up our `plot_spm_faces_dataset.py` example |
@@ -39,7 +39,13 @@ def export_raw(fname, raw, fmt='auto', verbose=None):
from ._eeglab import _export_raw
_export_raw(fname, raw)
elif fmt == 'edf':
- raise NotImplementedError('Export to EDF format not implemented.')
+ from ._edf import _export_raw
+ physical_range = kwargs.get('physical_range', 'auto')
+ _export_raw(fname, raw, physical_range, fmt)
+ elif fmt == 'bdf':
+ from ._edf import _export_raw
+ physical_range = kwargs.get('physical_range', 'auto')
+ _export_raw(fname, raw, physical_range, fmt)
elif fmt == 'brainvision':
raise NotImplementedError('Export to BrainVision not implemented.')
| [export_epochs->[_export_epochs,_infer_check_export_fmt,NotImplementedError],_infer_check_export_fmt->[splitext,append,_validate_type,fmt,ValueError,items,join,next,lower],export_evokeds->[NotImplementedError,isinstance,info,_infer_check_export_fmt,export_evokeds_mff],export_raw->[_infer_check_export_fmt,_export_raw,NotImplementedError]] | Exports a raw sequence of objects to a file in external formats. | deduplicate please (`elif fmt in ('edf', 'bdf')`) |
@@ -242,7 +242,7 @@ public final class DynamicConfigurationProvider extends LifecycleAwareConfigurat
private boolean isExpired(ConfigurationInstance configuration) {
ConfigurationStats stats = configuration.getStatistics();
- return stats.getRunningSources() == 0 && stats.getInflightOperations() == 0
+ return stats.getRunningSources() == 0 && stats.getInflightOperations() == 0 && stats.getActiveComponents() == 0
&& expirationPolicy.isExpired(stats.getLastUsedMillis(), MILLISECONDS);
}
| [DynamicConfigurationProvider->[start->[start],registerConfiguration->[registerConfiguration],createConfiguration->[createConfiguration],isExpired->[isExpired]]] | Checks if the given configuration instance is expired. | same as before, it should be enough to just check for `stats.getActiveComponents()` |
@@ -291,6 +291,17 @@ export class AmpAdNetworkDoubleclickImpl extends AmpA4A {
/** @private {number} */
this.ifi_ = 0;
+
+ /** @private {boolean} */
+ this.isFluid_ = false;
+
+ /** @private {?string} */
+ this.fluidImpressionUrl_ = null;
+ }
+
+ /** @override */
+ isLayoutSupported(layout) {
+ return layout == Layout.FLUID || isLayoutSizeDefined(layout);
}
/** @override */
| [AmpAdNetworkDoubleclickImpl->[extractSize->[getPublisherSpecifiedRefreshInterval,width,height,extractAmpAnalyticsConfig,get,setGoogleLifecycleVarsFromHeaders,Number],getBlockParameters_->[serializeTargeting_,dev,user,isInManualExperiment,assign,join,googleBlockParameters,getMultiSizeDimensions,map],constructor->[resolver,experimentFeatureEnabled,extensionsFor,rejector,getMode,promise,SRA],tearDownSlot->[promise,rejector,removeElement,resolver],executeRtc_->[resolve,timerFor,status,text,xhrFor,user,append,textContent,isSecureUrl,message,verifyRtcConfigMember,now,tryParseJson,isObject,getElementById],shouldPreferentialRenderWithoutCrypto->[experimentFeatureEnabled,CANONICAL_HTTP_EXPERIMENT],initLifecycleReporter->[googleLifecycleReporterFactory],onCreativeRender->[height,dev,addCsiSignalsToAmpAnalyticsConfig,insertAnalyticsElement,isReportingEnabled,setStyles,width],generateAdKey_->[getAttribute,domFingerprintPlain,stringHash32],shouldSendRequestWithoutRtc->[resolve,verifyRtcConfigMember,RTC_FAILURE,parseUrl,reject,match],buildCallback->[getExperimentBranch,dev,getVisibilityState,PAUSED,addExperimentIdToElement,onVisibilityChanged,viewerForDoc,EXPERIMENT,randomlySelectUnsetExperiments],populateAdUrlState->[tryParseJson,Number],getSlotSize->[Number],layoutCallback->[user,getEnclosingContainerTypes],isValidElement->[querySelector,isGoogleAdsA4AValidEnvironment],fireDelayedImpressions->[split,dev,dict,isSecureUrl,createElementWithAttributes],getAdUrl->[getPageLevelParameters_,resolve,all,googleAdUrl,dev,now,assign,isExperimentOn],groupSlotsForSra->[groupAmpAdsByType],onNetworkFailure->[dev,maybeAppendErrorParameter],mergeRtc->[then,resolve,rtcResponse,RTC_SUCCESS,rtcTotalTime,message,success,parseUrl,deepMerge],delayAdRequestEnabled->[experimentFeatureEnabled,DELAYED_REQUEST],initiateSraRequests->[all,dev,shift,lineDelimitedStreamer,attemptCollapse,SAFEFRAME,map,metaJsonCreativeGrouper,hasAdPromise,resetAdUrl,element,length,isCancellation,sraResponseRejector,keys,xhrF
or,checkStillCurrent,assignAdUrlToError,sraResponseResolver,constructSRARequest_,forEach,utf8Encode]],dev,isInManualExperiment,join,encodeURIComponent,map,isArray,googlePageParameters,registerElement,initialSize_,devicePixelRatio,getAttribute,extension,truncAndTimeUrl,adKey_,user,constructSRABlockParameters,now,assign,element,length,serializeItem_,push,getPageLevelParameters_,split,serializeTargeting_,keys,getFirstInstanceValue_,jsonTargeting_,extractFn,forEach,combiner] | private private static final int AMP_AD_KEY_LENGTH = 8 ; Private method for setting up a promise. | Can we set this.isFluid_ here and save having to expose getLayout within src/custom-element.js? |
@@ -67,14 +67,12 @@ func resourceAwsCloudFrontDistribution() *schema.Resource {
"forwarded_values": {
Type: schema.TypeSet,
Required: true,
- Set: forwardedValuesHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cookies": {
Type: schema.TypeSet,
Required: true,
- Set: cookiePreferenceHash,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
| [Printf,Set,DeleteDistribution,Code,ListTagsForResource,GetDistribution,WaitForState,Id,CreateDistributionWithTags,StringInSlice,Errorf,String,SetId,Get,RetryableError,UpdateDistribution,NonRetryableError,Retry] | A state that is used to configure a resource based on a specific configuration block. Required fields in the response schema are required to be present in the response. | Missed during conversion of `Deprecated` to `Removed` in previous pull request. |
@@ -64,7 +64,7 @@ except ImportError:
from distutils.command.build_ext import build_ext
if not all(os.path.exists(path) for path in [
compress_source, crypto_source, chunker_source, hashindex_source,
- platform_linux_source, platform_freebsd_source]):
+ platform_linux_source, platform_freebsd_source]) and not on_rtd:
raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')
| [detect_lz4->[exists,open,read,join],Sdist->[make_distribution->[super,extend],__init__->[super,glob,compile,Exception]],detect_openssl->[exists,open,read,join],,detect_lz4,all,join,Exception,insert,detect_openssl,replace,startswith,Extension,append,print,read,exists,open,get,ImportError,setup,exit] | Detects OpenSSL prefixes. | move the "not on_rtd" to the beginning, it easier to read and more efficient also. |
@@ -40,14 +40,16 @@ class BeamSearch:
max_steps: int = 50,
beam_size: int = 10,
per_node_beam_size: int = None,
+ sampler: Sampler = None,
) -> None:
self._end_index = end_index
self.max_steps = max_steps
self.beam_size = beam_size
self.per_node_beam_size = per_node_beam_size or beam_size
+ self.sampler = sampler
@staticmethod
- def reconstruct_sequences(predictions, backpointers):
+ def _reconstruct_sequences(predictions, backpointers):
# Reconstruct the sequences.
# shape: [(batch_size, beam_size, 1)]
reconstructed_predictions = [predictions[-1].unsqueeze(2)]
| [BeamSearch->[search->[new_step->[old_step],,step,size,topk,where,reversed,warn,gather,view,ConfigurationError,dim,cat,range,predictions,list,append,cast,len,items,reconstruct_sequences,float,isfinite,signature,reshape,new_full,unsqueeze],reconstruct_sequences->[predictions,append,len,range,gather],no_grad]] | Initializes the object with the given end_index, beam_size, and per_node_beam_size. | Missing a description in the class docstring. |
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+class AddColumnQualityToAsset < ActiveRecord::Migration[5.1]
+ def change
+ add_column :assets, :quality, :integer
+ end
+end
| [No CFG could be retrieved] | No Summary Found. | I would call this column file_image_quality to be consistent with rest of the fields |
@@ -340,8 +340,10 @@ func formatImageStreamTags(out *tabwriter.Writer, stream *imageapi.ImageStream)
}
switch {
- case !hasSpecTag || tagRef.From == nil:
- fmt.Fprintf(out, " pushed image\n")
+ case !hasSpecTag:
+ fmt.Fprintf(out, " empty spec tag\n")
+ case tagRef.From == nil:
+ fmt.Fprintf(out, " tag without source image\n")
case tagRef.From.Kind == "ImageStreamTag":
switch {
case tagRef.Reference:
| [ParseDockerImageReference,PrioritizeTags,FollowTagReference,Now,IsZero,Set,Has,Flush,Error,MatchString,Init,NewString,Len,HumanDuration,MustCompile,Sub,Join,Fprintln,BuildConfigs,WebHookURL,LatestObservedTagGeneration,Split,Fprintf,Sprintf,List,String,Insert] | Missing tags: tags that are not scheduled are not included in the image. Updates automatically from a registry. | This isn't empty spec tag. This is "no spec tag" |
@@ -48,3 +48,16 @@ UIR_ARGS = ["always", "set", "Content-Security-Policy",
HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
"Upgrade-Insecure-Requests": UIR_ARGS}
+
+AUTOHSTS_STEPS = [60, 300, 900, 3600, 21600, 43200, 86400]
+"""AutoHSTS increase steps: 5min, 15min, 1h, 6h, 12h, 24h"""
+
+AUTOHSTS_PERMANENT = 31536000
+"""Value for the last max-age of HSTS"""
+
+AUTOHSTS_FREQ = 36000
+"""Minimum time since last increase to perform a new one"""
+
+MANAGED_COMMENT = "DO NOT REMOVE - Managed by Certbot"
+MANAGED_COMMENT_ID = MANAGED_COMMENT+", VirtualHost id: {0}"
+"""Managed by Certbot comments and the VirtualHost identification template"""
| [resource_filename] | Set the header arguments for the HTTP request. | nit: We're missing the first step of 1 min here. |
@@ -433,10 +433,12 @@ class StorageCommonBlobTestAsync(StorageTestCase):
content = await stream.content_as_bytes()
# Assert
- self.assertEqual(content, self.byte_data[:6])
+ self.assertEqual(content, self.byte_data[:5])
@record
def test_get_blob_with_range(self):
+ if TestMode.need_recording_file(self.test_mode):
+ pytest.skip("Issue with the recording")
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_get_blob_with_range())
| [StorageCommonBlobTestAsync->[_test_get_blob_metadata_fail->[_setup,_create_block_blob],_test_lease_blob_with_proposed_lease_id->[_setup,_create_block_blob],_test_get_blob_metadata->[_setup,_create_block_blob],test_shared_write_access_blob->[_test_shared_write_access_blob],_test_get_account_information_with_container_name->[_setup],_test_no_sas_private_blob->[_setup,_create_block_blob],test_lease_blob_acquire_and_renew->[_test_lease_blob_acquire_and_renew],_test_snapshot_blob->[_setup,_create_block_blob],_test_lease_blob_acquire_and_release->[_setup,_create_block_blob],_test_shared_read_access_blob_with_content_query_params->[_setup,_create_block_blob],_test_get_blob_with_existing_blob->[_setup,_create_block_blob],test_get_blob_properties_fail->[_test_get_blob_properties_fail],test_soft_delete_blob_including_all_snapshots->[_test_soft_delete_blob_including_all_snapshots],test_get_blob_with_lease->[_test_get_blob_with_lease],_test_no_sas_public_blob->[_get_container_reference,_setup],test_set_blob_properties_with_existing_blob->[_test_set_blob_properties_with_existing_blob],test_download_to_stream_with_credential->[_test_download_to_stream_with_credential],_create_block_blob->[_get_blob_reference],_create_remote_block_blob->[_get_blob_reference],_test_get_blob_properties_fail->[_setup,_create_block_blob],_test_download_to_stream_with_credential->[_create_remote_block_blob,_setup,_create_remote_container],_test_shared_read_access_blob->[_setup,_create_block_blob],test_delete_blob_snapshot->[_test_delete_blob_snapshot],test_copy_blob_async_private_blob_no_sas->[_test_copy_blob_async_private_blob_no_sas],test_get_blob_with_existing_blob->[_test_get_blob_with_existing_blob],_test_soft_delete_single_blob_snapshot->[_enable_soft_delete,_setup,_assert_blob_not_soft_deleted,_assert_blob_is_soft_deleted,_disable_soft_delete,_create_block_blob],_test_upload_to_url_text_with_credential->[_setup,_get_blob_reference],_test_soft_delete_with_leased_blob->[_enable_soft_delete,_set
up,_assert_blob_not_soft_deleted,_assert_blob_is_soft_deleted,_disable_soft_delete,_create_block_blob],test_download_to_file_with_existing_file_overwrite->[_test_download_to_file_with_existing_file_overwrite],_test_set_blob_properties_with_blob_settings_param->[_setup,_create_block_blob],test_upload_to_url_bytes_with_sas->[_test_upload_to_url_bytes_with_sas],_test_get_blob_with_range->[_setup,_create_block_blob],test_snapshot_blob->[_test_snapshot_blob],_test_lease_blob_break_period->[_setup,_create_block_blob],_test_abort_copy_blob_with_synchronous_copy_fails->[_setup,_create_block_blob],_test_upload_to_url_bytes_with_credential->[_setup,_get_blob_reference],test_get_account_information_with_container_name->[_test_get_account_information_with_container_name],test_upload_to_url_bytes_with_existing_blob_overwrite->[_test_upload_to_url_bytes_with_existing_blob_overwrite],test_blob_container_not_exists->[_test_blob_container_not_exists],_test_soft_delete_only_snapshots_of_blob->[_enable_soft_delete,_setup,_assert_blob_not_soft_deleted,_assert_blob_is_soft_deleted,_disable_soft_delete,_create_block_blob],test_create_blob_with_special_chars->[_test_create_blob_with_special_chars],_test_blob_exists->[_setup,_create_block_blob],_test_delete_blob_snapshot->[_setup,_create_block_blob],_test_no_server_encryption->[_setup,_create_block_blob],test_blob_exists->[_test_blob_exists],_test_blob_not_exists->[_setup,_get_blob_reference],_test_blob_snapshot_not_exists->[_setup,_create_block_blob],test_create_blob_with_lease_id->[_test_create_blob_with_lease_id],test_lease_blob_change_lease_id->[_test_lease_blob_change_lease_id],test_account_sas->[_test_account_sas],test_soft_delete_single_blob_snapshot->[_test_soft_delete_single_blob_snapshot],_test_set_blob_metadata_with_upper_case->[_setup,_create_block_blob],test_blob_not_exists->[_test_blob_not_exists],test_soft_delete_with_leased_blob->[_test_soft_delete_with_leased_blob],_test_download_to_file_with_sas->[_create_remote_block_blo
b,_setup,_create_remote_container],_test_get_user_delegation_key->[_generate_oauth_token],_test_upload_to_url_bytes_with_existing_blob_overwrite->[_setup,_get_blob_reference],_test_lease_blob_with_duration->[_setup,_create_block_blob],test_get_blob_properties_server_encryption->[_test_get_blob_properties_server_encryption],_test_delete_blob_with_existing_blob->[_setup,_create_block_blob],_test_get_blob_with_lease->[_setup,_create_block_blob],test_lease_blob_acquire_and_release->[_test_lease_blob_acquire_and_release],_test_unicode_get_blob_unicode_name->[_setup],test_create_blob_with_metadata->[_test_create_blob_with_metadata],test_abort_copy_blob_with_synchronous_copy_fails->[_test_abort_copy_blob_with_synchronous_copy_fails],test_shared_read_access_blob_with_content_query_params->[_test_shared_read_access_blob_with_content_query_params],_test_get_account_information_with_blob_sas->[_setup,_create_block_blob],setUp->[AiohttpTestTransport],_test_lease_blob_change_lease_id->[_setup,_create_block_blob],test_upload_to_url_file_with_credential->[_test_upload_to_url_file_with_credential],test_copy_blob_with_existing_blob->[_test_copy_blob_with_existing_blob],test_upload_to_url_text_with_credential->[_test_upload_to_url_text_with_credential],_test_download_to_file_with_existing_file->[_create_remote_block_blob,_setup,_create_remote_container],_test_get_blob_properties_with_leased_blob->[_setup,_create_block_blob],_test_soft_delete_blob_without_snapshots->[_enable_soft_delete,_setup,_assert_blob_not_soft_deleted,_assert_blob_is_soft_deleted,_disable_soft_delete,_create_block_blob],_test_delete_blob_snapshots->[_setup,_create_block_blob],_test_shared_write_access_blob->[_setup,_create_block_blob],_test_create_blob_blob_unicode_data->[_setup,_get_blob_reference],test_set_blob_metadata_with_upper_case->[_test_set_blob_metadata_with_upper_case],test_no_sas_public_blob->[_test_no_sas_public_blob],_test_get_blob_properties_server_encryption->[_setup,_create_block_blob],test_downl
oad_to_file_with_sas->[_test_download_to_file_with_sas],test_get_account_information_with_blob_sas->[_test_get_account_information_with_blob_sas],test_blob_snapshot_not_exists->[_test_blob_snapshot_not_exists],test_delete_blob_snapshots->[_test_delete_blob_snapshots],test_sas_signed_identifier->[_test_sas_signed_identifier],_test_get_account_information_with_blob_name->[_setup],test_soft_delete_only_snapshots_of_blob->[_test_soft_delete_only_snapshots_of_blob],test_get_account_information_with_blob_name->[_test_get_account_information_with_blob_name],_test_copy_blob_async_private_blob_no_sas->[_create_remote_block_blob,_setup,_create_remote_container],_test_soft_delete_blob_including_all_snapshots->[_enable_soft_delete,_setup,_assert_blob_not_soft_deleted,_assert_blob_is_soft_deleted,_disable_soft_delete,_create_block_blob],_test_get_blob_server_encryption->[_setup,_create_block_blob],test_lease_blob_acquire_twice_fails->[_test_lease_blob_acquire_twice_fails],test_public_access_blob->[_test_public_access_blob],_test_upload_to_url_file_with_credential->[_setup,_get_blob_reference],_test_get_account_information_with_container_sas->[_setup],test_get_blob_with_non_existing_blob->[_test_get_blob_with_non_existing_blob],test_get_blob_metadata_fail->[_test_get_blob_metadata_fail],test_abort_copy_blob->[_test_abort_copy_blob],test_download_to_file_with_credential->[_test_download_to_file_with_credential],test_sas_access_blob->[_test_sas_access_blob],test_get_account_information->[_test_get_account_information],test_get_blob_server_encryption->[_test_get_blob_server_encryption],_test_delete_blob_with_snapshots->[_setup,_create_block_blob],_test_upload_to_url_bytes_with_existing_blob->[_setup,_get_blob_reference],test_delete_blob_with_existing_blob->[_test_delete_blob_with_existing_blob],_test_shared_delete_access_blob->[_setup,_create_block_blob],_test_get_blob_properties->[_setup,_create_block_blob],test_download_to_file_with_existing_file->[_test_download_to_file_with_exis
ting_file],_test_abort_copy_blob->[_setup,_wait_for_async_copy],_test_set_blob_properties_with_existing_blob->[_setup,_create_block_blob],_test_create_blob_with_special_chars->[_setup],test_get_blob_metadata->[_test_get_blob_metadata],test_no_sas_private_blob->[_test_no_sas_private_blob],test_shared_read_access_blob->[_test_shared_read_access_blob],test_get_account_information_with_container_sas->[_test_get_account_information_with_container_sas],test_create_blob_blob_unicode_data->[_test_create_blob_blob_unicode_data],_test_list_blobs_server_encryption->[_setup,_create_block_blob],test_blob_snapshot_exists->[_test_blob_snapshot_exists],_test_copy_blob_async_private_blob_with_sas->[_create_remote_block_blob,_setup,_create_remote_container,_wait_for_async_copy],test_get_blob_with_snapshot->[_test_get_blob_with_snapshot],_test_get_blob_with_non_existing_blob->[_setup,_get_blob_reference],_test_blob_snapshot_exists->[_setup,_create_block_blob],test_lease_blob_break_period->[_test_lease_blob_break_period],_test_create_blob_with_metadata->[_setup,_get_blob_reference],test_upload_to_url_bytes_with_existing_blob->[_test_upload_to_url_bytes_with_existing_blob],_test_upload_to_url_bytes_with_sas->[_setup,_get_blob_reference],_test_token_credential->[AiohttpTestTransport,_setup,_generate_oauth_token],test_get_blob_with_snapshot_previous->[_test_get_blob_with_snapshot_previous],test_list_blobs_server_encryption->[_test_list_blobs_server_encryption],test_no_server_encryption->[_test_no_server_encryption],test_get_blob_with_range->[_test_get_blob_with_range],_test_account_sas->[_setup,_create_block_blob],test_soft_delete_blob_without_snapshots->[_test_soft_delete_blob_without_snapshots],_test_lease_blob_acquire_and_renew->[_setup,_create_block_blob],_test_public_access_blob->[_get_container_reference,_setup],_test_sas_signed_identifier->[_setup,_create_block_blob],test_create_blob_with_question_mark->[_test_create_blob_with_question_mark],test_copy_blob_async_private_blob_with_s
as->[_test_copy_blob_async_private_blob_with_sas],test_get_user_delegation_key_async->[_test_get_user_delegation_key],test_lease_blob_with_duration->[_test_lease_blob_with_duration],test_token_credential->[_test_token_credential],_test_blob_container_not_exists->[_get_container_reference,_setup,_get_blob_reference],test_set_blob_properties_with_blob_settings_param->[_test_set_blob_properties_with_blob_settings_param],_test_get_blob_with_snapshot_previous->[_setup,_create_block_blob],test_lease_blob_with_proposed_lease_id->[_test_lease_blob_with_proposed_lease_id],_test_get_account_information->[_setup],_test_get_blob_properties_with_snapshot->[_setup,_create_block_blob],test_delete_blob_with_snapshots->[_test_delete_blob_with_snapshots],test_get_blob_properties->[_test_get_blob_properties],test_get_blob_properties_with_snapshot->[_test_get_blob_properties_with_snapshot],_test_download_to_file_with_existing_file_overwrite->[_create_remote_block_blob,_setup,_create_remote_container],_test_create_blob_with_question_mark->[_setup],test_shared_delete_access_blob->[_test_shared_delete_access_blob],test_get_blob_properties_with_leased_blob->[_test_get_blob_properties_with_leased_blob],test_unicode_get_blob_unicode_name->[_test_unicode_get_blob_unicode_name],_test_delete_blob_with_non_existing_blob->[_setup,_get_blob_reference],_test_create_blob_with_lease_id->[_setup,_create_block_blob],_test_lease_blob_acquire_twice_fails->[_setup,_create_block_blob],_test_download_to_file_with_credential->[_create_remote_block_blob,_setup,_create_remote_container],_test_get_blob_with_snapshot->[_setup,_create_block_blob],_test_copy_blob_with_existing_blob->[_setup,_create_block_blob],test_upload_to_url_bytes_with_credential->[_test_upload_to_url_bytes_with_credential],_test_sas_access_blob->[_setup,_create_block_blob],test_delete_blob_with_non_existing_blob->[_test_delete_blob_with_non_existing_blob]]] | A test to get a blob with a specific lease. 
| Do you know what caused the problem for recording? ^_^ |
@@ -18,6 +18,7 @@
#ifdef RGB_MATRIX_ENABLE
+#ifdef REV1
const is31_led PROGMEM g_is31_leds[DRIVER_LED_TOTAL] = {
{0, CS28_SW1, CS30_SW1, CS29_SW1}, /* RGB10 */
{0, CS28_SW2, CS30_SW2, CS29_SW2}, /* RGB11 */
| [No CFG could be retrieved] | Creates an object based on the contents of the passed-in object. Returns an array of the values of the last three color codes that are not recognized. | Everything from Lines 21-120 should be placed in a `keyboards/melgeek/z70ultra/rev1/rev1.c` file. |
@@ -198,9 +198,9 @@ func (repo *Repository) GetCommitByPath(relpath string) (*Commit, error) {
// CommitsRangeSize the default commits range size
var CommitsRangeSize = 50
-func (repo *Repository) commitsByRange(id SHA1, page int) (*list.List, error) {
- stdout, err := NewCommand("log", id.String(), "--skip="+strconv.Itoa((page-1)*CommitsRangeSize),
- "--max-count="+strconv.Itoa(CommitsRangeSize), prettyLogFormat).RunInDirBytes(repo.Path)
+func (repo *Repository) commitsByRange(id SHA1, page, pageSize int) (*list.List, error) {
+ stdout, err := NewCommand("log", id.String(), "--skip="+strconv.Itoa((page-1)*pageSize),
+ "--max-count="+strconv.Itoa(pageSize), prettyLogFormat).RunInDirBytes(repo.Path)
if err != nil {
return nil, err
}
| [GetBranchCommit->[GetCommit,GetBranchCommitID],CommitsBetweenIDs->[CommitsBetween,GetCommit],GetBranchCommitID->[GetRefCommitID],getCommitByPathWithID->[getCommit],GetTagCommit->[GetTagCommitID,GetCommit],getCommitsBefore->[commitsBefore],getCommitsBeforeLimit->[commitsBefore],GetCommit->[getCommit,ConvertToSHA1]] | commitsByRange returns list of commits with given SHA1 in given page range. | This may preclude a cool technique (I've literally just considered): How do you know if there are more pages if you can't just get a simple count from the start? If you request one more than you actually want and get it you actually know there's at least one more page. The use of page and pagesize means that only things which can be easily totally enumerated can be paged - an ability to know if there is at least one more thing might be helpful where a pre count can't be easily determined. |
@@ -350,8 +350,16 @@ namespace ILCompiler.PEWriter
/// we're performing the same transformation on Windows where it is a no-op.
/// </summary>
/// <param name="outputStream"></param>
- private void UpdateSectionRVAs(Stream outputStream)
+ private void UpdateSectionRVAs(Stream outputStream, int? updateSectionAlign)
{
+ if (updateSectionAlign.HasValue)
+ {
+ outputStream.Seek(DosHeaderSize + PESignatureSize + COFFHeaderSize + OffsetOfSectionAlign, SeekOrigin.Begin);
+ byte[] alignBytes = BitConverter.GetBytes(updateSectionAlign.Value);
+ Debug.Assert(alignBytes.Length == sizeof(int));
+ outputStream.Write(alignBytes, 0, alignBytes.Length);
+ }
+
int peHeaderSize =
OffsetOfChecksum +
sizeof(int) + // Checksum
| [R2RPEBuilder->[SetCorHeader->[SetCorHeader],UpdateSectionRVAs->[Write],SetWin32Resources->[SetWin32Resources],AddObjectData->[AddObjectData],AddSections->[AddSections],SetDebugDirectory->[SetDebugDirectory],GetSymbolFilePosition->[GetSymbolFilePosition],ApplyMachineOSOverride->[Write],SetPEHeaderTimeStamp->[Write]]] | Update the section RVA and the size of the image. | We already have similar PE header patch-up functions, `ApplyMachineOSOverride` and `SetPEHeaderTimeStamp` in this file. Could you follow their pattern? |
@@ -199,7 +199,7 @@ public class Analyzer extends DefaultTraversalVisitor<Node, AnalysisContext> {
@Override
protected Node visitAliasedRelation(AliasedRelation node, AnalysisContext context) {
String structuredDataSourceName = ((Table) node.getRelation()).getName().getSuffix()
- .toUpperCase();
+ ;
if (metaStore.getSource(structuredDataSourceName) ==
null) {
throw new KQLException(structuredDataSourceName + " does not exist.");
| [Analyzer->[visitSelect->[IllegalArgumentException,getFromDataSources,of,getSelectItems,getRightAlias,getName,KQLException,QualifiedNameReference,get,getLeftAlias,getExpression,isEmpty,getRight,addSelectItem,fields,name,getJoin],fetchKeyFieldName->[KQLException,getFieldName,getSuffix],visitCast->[process,getExpression],visitJoin->[process,getSchema,KQLException,getExpression,setJoin,getType,getTopicName,getSource,name,getKeyField,StructuredDataSourceNode,fetchKeyFieldName,getDataSourceType,PlanNodeId,JoinNode,getLeft,getRight,getAlias,getSuffix,get,toUpperCase],visitQualifiedNameReference->[visitExpression],analyzeWhere->[setWhereExpression],analyzeWindowExpression->[setWindowExpression],visitGroupBy->[getGroupingElements],analyzeGroupBy->[addAll,get,getGroupingElements],visitTable->[equalsIgnoreCase,getParentType,KQLStream,getSuffix,getName,setIntoKafkaTopicName,KQLException,setInto,get,endsWith,KQLSTDOUT,substring,toString,setIntoFormat,length,startsWith,setIntoAvroSchemaFilePath],visitAliasedRelation->[add,getSource,KQLException,toUpperCase],visitQuerySpecification->[equalsIgnoreCase,KQLStream,process,getIntoFormat,KQLJsonTopicSerDe,KQLAvroTopicSerDe,getIntoAvroSchemaFilePath,analyzeWindowExpression,getSelect,getIntoKafkaTopicName,getInto,AnalysisContext,isPresent,KQLTopic,KQLCsvTopicSerDe,getFromDataSources,getName,analyzeWhere,analyzeGroupBy,get,getKqlTopicSerDe,setInto]]] | Visit an AliasedRelation node. This method will check if the node is a known relation. | seems like this should be on line 201 |
@@ -1,5 +1,8 @@
<?php
+$pkgs_id= array();
+$pkgs_db_id= array();
+
// RPM
if (!empty($agent_data['rpm'])) {
echo "\nRPM Packages: ";
| [No CFG could be retrieved] | This function parses the RPM data and returns an array of package IDs and package info. | a little picky, but could you change it to: `pkgs_id = [];` first, to match code guidelines, and initialize arrays in new style :-) |
@@ -830,8 +830,7 @@ int BIO_lookup(const char *host, const char *service,
if (se == NULL) {
#ifndef OPENSSL_SYS_WINDOWS
- BIOerr(BIO_F_BIO_LOOKUP, ERR_R_SYS_LIB);
- ERR_add_error_data(1, hstrerror(h_errno));
+ SYSerr(SYS_F_GETSERVBYNAME, errno);
#else
SYSerr(SYS_F_GETSERVBYNAME, WSAGetLastError());
#endif
| [No CFG could be retrieved] | Private functions region: a function for creating a linked list of hostent elements. | Why not `1000 + h_errno` here? |
@@ -232,6 +232,11 @@ public class Patterns
{
return property("subquery", LateralJoinNode::getSubquery);
}
+
+ public static Property<LateralJoinNode, Lookup, Expression> filter()
+ {
+ return property("filter", LateralJoinNode::getFilter);
+ }
}
public static class Limit
| [Patterns->[output->[typeOf],Apply->[correlation->[property]],Values->[rows->[property]],filter->[typeOf],window->[typeOf],join->[typeOf],aggregation->[typeOf],assignUniqueId->[typeOf],topN->[typeOf],applyNode->[typeOf],Limit->[count->[property]],rowNumber->[typeOf],Sample->[sampleRatio->[property],sampleType->[property]],Aggregation->[groupingColumns->[property],step->[property]],enforceSingleRow->[typeOf],semiJoin->[typeOf],tableScan->[typeOf],values->[typeOf],sample->[typeOf],markDistinct->[typeOf],sources->[property,collect,toImmutableList],Join->[type->[property]],limit->[typeOf],sort->[typeOf],project->[typeOf],TopN->[step->[property],count->[property]],delete->[typeOf],exchange->[typeOf],indexSource->[typeOf],source->[getOnlyElement,resolve,of,size,optionalProperty,empty,getSources],LateralJoin->[correlation->[property],subquery->[property]],lateralJoin->[typeOf],tableWriterNode->[typeOf],spatialJoin->[typeOf],Exchange->[scope->[property]],tableFinish->[typeOf],union->[typeOf]]] | subquery - > subquery - > count. | you could also add pattern for empty critieria (equal to `TRUE_LITERAL`), as it looks to be used quite frequently. |
@@ -122,6 +122,13 @@ public class ConfigKey<T> {
if (obj instanceof ConfigKey) {
ConfigKey<?> that = (ConfigKey<?>)obj;
return this._name.equals(that._name);
+ }
+ return false;
+ }
+
+ public boolean isSameKeyAs(Object obj) {
+ if(this.equals(obj)) {
+ return true;
} else if (obj instanceof String) {
String key = (String)obj;
return key.equals(_name);
| [ConfigKey->[equals->[toString,equals],valueIn->[value],value->[isDynamic,defaultValue,key],hashCode->[hashCode],valueOf->[multiplier,valueOf,type]]] | Compares this ConfigKey with another ConfigKey. | `this.getClass().equals(obj.getClass())` is preferred over `instanceof` because `instanceof` only verifies that the two objects are the compatible types, not that the classes themselves are loaded from a common class loader. If two objects have the same type but their underlying classes are loaded from different class loaders, casting the objects will fail. Given that web containers play games with class loaders and often load the same jar multiple times, unexpected behavior and runtime exceptions (e.g. apparent duplicate Map entries, ClassCastExceptions, etc) occurs. |
@@ -402,8 +402,15 @@ public class HistoryLog extends JFrame {
}
@VisibleForTesting
- static String parsePlayerNameFromDiceRollMessage(final String message) {
- return message.contains(" roll ") ? message.substring(0, message.indexOf(" roll ")) : message;
+ static String parseHitDifferentialKeyFromDiceRollMessage(final String message) {
+ final Pattern diceRollPattern = Pattern.compile("^(\\w+) roll(?: (\\w+))? dice");
+ final Matcher matcher = diceRollPattern.matcher(message);
+ if (matcher.find()) {
+ return matcher.group(1) + " " + Optional.ofNullable(matcher.group(2)).orElse("regular");
+ }
+
+ final int lastColonIndex = message.lastIndexOf(" :");
+ return (lastColonIndex != -1) ? message.substring(0, lastColonIndex) : message;
}
/**
| [HistoryLog->[printRemainingTurn->[toString,getPlayerId],printTerritorySummary->[printTerritorySummary,toString,getPlayerId],getProduction->[getProduction],getPlayerId->[getPlayerId],toString->[toString],printProductionSummary->[toString],printDiceStatistics->[toString]]] | Parse the player name from a dice roll message. | Can player names contain spaces? If so, this regex will need to be updated accordingly. |
@@ -77,7 +77,7 @@ def download_pairs(datadir, pairs: List[str]) -> bool:
"""For each pairs passed in parameters, download 1 and 5 ticker intervals"""
for pair in pairs:
try:
- for interval in [1, 5]:
+ for interval in [1, 5, 30, 60, 1440]:
download_backtesting_testdata(datadir, pair=pair, interval=interval)
except BaseException:
logger.info('Failed to download the pair: "{pair}", Interval: {interval} min'.format(
| [download_backtesting_testdata->[make_testdata_path],load_data->[load_tickerdata_file]] | Download the pairs passed in as parameters. | In this case it would make sense for this function to download only the expected ticker intervals. |
@@ -2075,7 +2075,7 @@ func (pt *ProgramTester) prepareDotNetProject(projinfo *engine.Projinfo) error {
version := r.Replace(file)
err = pt.runCommand("dotnet-add-package",
- []string{dotNetBin, "add", "package", dep, "-v", version, "-s", localNuget}, cwd)
+ []string{dotNetBin, "add", "package", dep, "-v", version}, cwd)
if err != nil {
return fmt.Errorf("failed to add dependency on %s: %w", dep, err)
}
| [runVirtualEnvCommand->[runCommand],pythonCmd->[getPythonBin],testEdit->[PreviewAndUpdate,query],preparePythonProjectWithPipenv->[runPipenvCommand],prepareNodeJSProject->[runYarnCommand],yarnLinkPackageDeps->[runYarnCommand],TestLifeCycleInitAndDestroy->[TestLifeCyclePrepare,TestCleanUp],TestLifeCycleDestroy->[GetStackNameWithOwner,GetDebugUpdates,runPulumiCommand],runPythonCommand->[pythonCmd,runCommand],installPipPackageDeps->[runVirtualEnvCommand,runPipenvCommand],pipenvCmd->[getPipenvBin],performExtraRuntimeValidation->[GetStackName,runPulumiCommand],copyTestToTemporaryDirectory->[GetStackName,getBin,runYarnCommand],prepareGoProject->[getGoBin,runCommand],prepareDotNetProject->[getDotNetBin,runCommand],query->[GetDebugUpdates,runPulumiCommand],GetStackNameWithOwner->[GetStackName],runPipenvCommand->[pipenvCmd,runCommand],preparePythonProject->[runPythonCommand,runVirtualEnvCommand],runPulumiCommand->[runCommand,pulumiCmd,getPipenvBin],runYarnCommand->[yarnCmd,runCommand],TestPreviewUpdateAndEdits->[GetDebugUpdates,runPulumiCommand],String->[String],yarnCmd->[getYarnBin],pulumiCmd->[getBin,GetDebugLogLevel],PreviewAndUpdate->[GetDebugUpdates,runPulumiCommand],exportImport->[runPulumiCommand],TestLifeCycleInitialize->[GetStackName,GetStackNameWithOwner,runPulumiCommand],prepareProjectDir->[getProjinfo,prepareProject],GetStackName] | prepareDotNetProject prepares a dotnet project for use with the program. | I agree that this is probably the problem. It works locally on my Mac, so my initial suspicion is that `localNuget` points to the wrong place on the Mac test suite. But we use `localNuget` to get the version, so it must be correct. --- Either way, we can revert to unblock the release and then fix the test. |
@@ -46,9 +46,15 @@ public interface ShardSpec
ShardSpecLookup getLookup(List<ShardSpec> shardSpecs);
/**
- * Get the possible range of each dimension for the rows this shard contains.
+ * Get dimensions who have possible range for the rows this shard contains.
*
- * @return map of dimensions to its possible range. Dimensions with unknown possible range are not mapped
+ * @return list of dimensions who has its possible range. Dimensions with unknown possible range are not listed
*/
- Map<String, RangeSet<String>> getDomain();
+ List<String> getDomainDimensions();
+
+ /**
+ * if given domain ranges are not possible in this shard, return false; otherwise return true;
+ * @return possibility of in domain
+ */
+ boolean possibleInDomain(Map<String, RangeSet<String>> domain);
}
| [No CFG could be retrieved] | Get the domain map. | What is the motivation for removing the domain method? It might be nice to have that around; otherwise, ShardSpec can never return a domain. |
@@ -226,10 +226,16 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, userID string, re
// We expect to be always able to convert the label matchers back to Prometheus ones.
// In case we fail (unexpected) the error will not include the matchers, but the core
// logic doesn't break.
- matchers, _ := ingester_client.FromLabelMatchers(req.Matchers)
return nil, validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, util.LabelMatchersToString(matchers), chunksLimit))
}
}
+ for _, series := range resp.Chunkseries {
+ //Add series, with fingerprint inside of limiter
+ limitErr := queryLimiter.AddFingerPrint(series.Labels, matchers)
+ if limitErr != nil {
+ return nil, limitErr
+ }
+ }
result.Chunkseries = append(result.Chunkseries, resp.Chunkseries...)
result.Timeseries = append(result.Timeseries, resp.Timeseries...)
| [queryIngesterStream->[QueryStream],queryIngesters->[Query]] | queryIngesterStream sends a stream of samples from multiple ingesters. Parse the query results and merge any missing chunks and time series This function will return a TimeSeries object with the Chunkseries and Timeseries objects populated. | We should do the same for `resp.Timeseries` too, no? |
@@ -38,6 +38,7 @@ module Admin
def update
@tag = Tag.find(params[:id])
if @tag.update(tag_params)
+ ::Tags::AliasRetagWorker.perform_async(@tag.id) if tag_alias_updated?
flash[:success] = "#{@tag.name} tag successfully updated!"
else
flash[:error] = "The tag update failed: #{@tag.errors_as_sentence}"
| [TagsController->[new->[new],create->[new],update->[update]]] | Updates the tag with the given id. | There may be a lot of tags, so an async worker seemed like the right choice. The worker is supposed to be idempotent, so it's possible to restart it if it gets interrupted for any reason. |
@@ -47,6 +47,11 @@ public class DagManagerUtils {
return new FlowId().setFlowGroup(flowGroup).setFlowName(flowName);
}
+ static long getFlowExecutionId(Dag<JobExecutionPlan> dag) {
+ return Long.parseLong(dag.getNodes().get(0).getValue().getJobSpec().getConfigAsProperties()
+ .getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));
+ }
+
static long getFlowExecId(Dag<JobExecutionPlan> dag) {
Config jobConfig = dag.getStartNodes().get(0).getValue().getJobSpec().getConfig();
return jobConfig.getLong(ConfigurationKeys.FLOW_EXECUTION_ID_KEY);
| [DagManagerUtils->[getJobSpec->[getJobSpec],generateFlowIdInString->[getFlowId],getNext->[getExecutionStatus],getExecutionStatus->[getExecutionStatus],getFailureOption->[getJobConfig],generateDagId->[getFlowId,getFlowExecId],getFullyQualifiedDagName->[getFlowId,getFlowExecId]]] | Get the flow id from the Dag. | This already exists! We have getFlowExecId() method in DagManagerUtils. |
@@ -510,11 +510,13 @@ public class VertxCoreRecorder {
}
public ThreadFactory createThreadFactory() {
+ ClassLoader tccl = Thread.currentThread().getContextClassLoader();
AtomicInteger threadCount = new AtomicInteger(0);
return runnable -> {
VertxThread thread = VertxThreadFactory.INSTANCE.newVertxThread(runnable,
"executor-thread-" + threadCount.getAndIncrement(), true, 0, null);
thread.setDaemon(true);
+ thread.setContextClassLoader(tccl);
return thread;
};
}
| [VertxCoreRecorder->[destroy->[destroy],bossSupplier->[get->[get]],setEventBusOptions->[setEventBusOptions],deleteDirectory->[deleteDirectory],VertxSupplier->[get->[initialize]],executionContextHandler->[runWith->[run]],calculateEventLoopThreads->[calculateDefaultIOThreads],getRandomDirectory->[getRandomDirectory],setAddressResolverOptions->[setAddressResolverOptions]]] | Creates a thread factory that creates a VertxThread. | @stuartwdouglas this change doesn't look related? Is it something you want in? |
@@ -856,6 +856,17 @@ describe('input', function() {
expect(inputElm).toBeValid();
});
+ it('should allow four or more digits in year', function() {
+ var inputElm = helper.compileInput('<input type="week" ng-model="value" ng-model-options="{timezone: \'UTC\'}"/>');
+
+ helper.changeInputValueTo('10123-W03');
+ expect(+$rootScope.value).toBe(Date.UTC(10123, 0, 21));
+
+ $rootScope.$apply(function() {
+ $rootScope.value = new Date(Date.UTC(10321, 0, 21));
+ });
+ expect(inputElm.val()).toBe('10321-W03');
+ });
it('should use UTC if specified in the options', function() {
var inputElm = helper.compileInput('<input type="week" ng-model="value" ng-model-options="{timezone: \'UTC\'}" />');
| [No CFG could be retrieved] | Tests that a week-type input accepts years with four or more digits. | You should use a different value. |
@@ -36,10 +36,10 @@ class BaseStateSchema(ObjectSchema):
context = fields.Dict(key=fields.Str(), values=JSONCompatible(), allow_none=True)
message = fields.String(allow_none=True)
- _result = Nested(StateResultSchema, allow_none=False, value_selection_fn=get_safe)
+ _result = Nested(StateResultSchema, allow_none=False, value_selection_fn=None)
cached_inputs = fields.Dict(
key=fields.Str(),
- values=Nested(StateResultSchema, value_selection_fn=get_safe),
+ values=Nested(StateResultSchema, value_selection_fn=None),
allow_none=True,
)
| [ScheduledSchema->[DateTime],RetryingSchema->[Int],get_safe->[get],CachedSchema->[DateTime,JSONCompatible],QueuedSchema->[DateTime],MappedSchema->[create_object->[range,super,pop],Integer,Nested],BaseStateSchema->[create_object->[super,pop],Str,Dict,JSONCompatible,String,Nested],MetaStateSchema->[Nested],LoopedSchema->[Int]] | Create a new object from the given data. | On a pairing call with @cicdw there was an implication that somewhere around here (or maybe it was just that his memory about it was triggered around here) we need to be worried about maintaining deserialization of old style `State._result` in pipeline logic, so calling that out here in case we want to have the discussion at this location. |
@@ -1539,12 +1539,13 @@ class _BaseRaw(ProjMixin, ContainsMixin, UpdateChannelsMixin,
def plot_psd(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
proj=False, n_fft=2048, picks=None, ax=None,
color='black', area_mode='std', area_alpha=0.33,
- n_overlap=0, dB=True, show=True, n_jobs=1, verbose=None):
+ n_overlap=0, dB=True, average=False, show=True,
+ n_jobs=1, verbose=None):
return plot_raw_psd(self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
proj=proj, n_fft=n_fft, picks=picks, ax=ax,
color=color, area_mode=area_mode,
area_alpha=area_alpha, n_overlap=n_overlap,
- dB=dB, show=show, n_jobs=n_jobs)
+ dB=dB, average=average, show=show, n_jobs=n_jobs)
@copy_function_doc_to_method_doc(plot_raw_psd_topo)
def plot_psd_topo(self, tmin=0., tmax=None, fmin=0, fmax=100, proj=False,
| [_start_writing_raw->[append],ToDataFrameMixin->[to_data_frame->[_get_check_picks]],_check_update_montage->[append],_write_raw->[_write_raw],_BaseRaw->[notch_filter->[notch_filter],apply_function->[_check_fun],_preload_data->[_read_segment],crop->[_update_times],__setitem__->[_parse_get_set_params],resample->[_update_times,resample],append->[_read_segment,_update_times],estimate_rank->[time_as_index],apply_hilbert->[apply_function],__getitem__->[_read_segment,_parse_get_set_params],save->[time_as_index]]] | Plot the power spectral density (PSD) of the raw data. | API change, should default to `True` or `None` with deprecation cycle |
@@ -331,7 +331,7 @@ EVENTS = {
"TAKE CONTROL",
"Lane Departure Detected",
AlertStatus.userPrompt, AlertSize.mid,
- Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt, 1., 2., 3.),
+ Priority.LOW, VisualAlert.steerRequired, AudibleAlert.chimePrompt1, 1., 2., 3.),
},
# ********** events only containing alerts that display while engaged **********
| [calibration_incomplete_alert->[Alert],below_steer_speed_alert->[Alert],no_gps_alert->[Alert],wrong_car_mode_alert->[NoEntryAlert],NoEntryAlert,EngagementAlert,ImmediateDisableAlert,SoftDisableAlert,Alert] | Alert event definitions; the lane departure alert's audible chime is changed. | why did this change? |
@@ -559,6 +559,13 @@ public abstract class AbstractConfig implements Serializable {
postProcessRefresh();
}
+ private void invokeSetParameters(Map<String, String> values) {
+ Map<String, String> map = invokeGetParameters(getClass(), this);
+ map = map == null ? new HashMap<>() : map;
+ map.putAll(values);
+ invokeSetParameters(getClass(), this, map);
+ }
+
private boolean isIgnoredAttribute(Class<? extends AbstractConfig> clazz, String propertyName) {
Method getter = null;
String capitalizePropertyName = propertyName.substring(0, 1).toUpperCase() + propertyName.substring(1);
| [AbstractConfig->[equals->[getBeanInfo,calculateAttributeFromGetter,isWritableProperty,equals],getPrefixes->[getId,getPluralTagName],refresh->[invokeSetParameters,invokeGetParameters,getSubProperties,getId,getPrefixes,isParametersSetter,extractPropertyName,convert],hashCode->[hashCode],getBeanInfo->[getBeanInfo],getTypePrefix->[getTagName],getMetaData->[appendAttributes],toString->[getTagName,calculateAttributeFromGetter,toString],getPluralTagName->[getTagName],appendParameters->[appendParameters]]] | refresh method. Checks if the property is ignored. | It is better to check argument `Map<String, String> values`, If it is null or empty, just return |
@@ -333,6 +333,9 @@ getent group daos_admins >/dev/null || groupadd -r daos_admins
%{_libdir}/*.a
%changelog
+* Fri Dec 13 2019 Jeff Olivier <jeffrey.v.olivier@intel.com> - 0.7.0-2
+- Remove openmpi, pmix, and hwloc build requirement, use packages
+
* Thu Dec 05 2019 Johann Lombardi <johann.lombardi@intel.com> - 0.7.0-1
- Version bump up to 0.7.0
| [No CFG could be retrieved] | RPM spec changelog entry documenting removal of the openmpi, pmix, and hwloc build requirements. | Adding `hwloc`[`-devel`] build requirement here though, aren't we? |
@@ -299,7 +299,7 @@ module Engine
bergslagen_bonus(icons),
orefields_bonus(icons),
sveabolaget_bonus(route, stops),
- gkb_bonus(stops)].map { |b| b[:revenue] }.each { |r| revenue += r }
+ gkb_bonus(route, stops)].map { |b| b[:revenue] }.each { |r| revenue += r }
return revenue unless route.train.name == 'E'
| [G18SJ->[perform_nationalization->[select],setup->[select],nationalize_major->[transfer_home_token,remove_reservation,transfer_non_home_tokens],remove_main_line_bonus->[main_line_lay?]]] | Returns the revenue for a given route and stops. | route already has stops, so you don't need to pass it in separately right? |
@@ -255,6 +255,14 @@ EVENTS: Dict[int, Dict[str, Union[Alert, Callable[[Any, messaging.SubMaster, boo
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
},
+ EventName.startupOneplus: {
+ ET.PERMANENT: Alert(
+ "WARNING: Original EON deprecated",
+ "Upgrade to comma two",
+ AlertStatus.userPrompt, AlertSize.mid,
+ Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., 15.),
+ },
+
EventName.invalidLkasSetting: {
ET.PERMANENT: Alert(
"Stock LKAS is turned on",
| [calibration_incomplete_alert->[Alert],below_steer_speed_alert->[Alert],no_gps_alert->[Alert],wrong_car_mode_alert->[NoEntryAlert],NormalPermanentAlert,NoEntryAlert,EngagementAlert,ImmediateDisableAlert,SoftDisableAlert,Alert] | Alert event definitions; adds a permanent startup alert for deprecated OnePlus/EON devices. | Maybe instead of "Update to comma two" say "Device will no longer update" |
@@ -1900,6 +1900,13 @@ class TestVersionSubmitDistribution(TestSubmitBase):
response, reverse('devhub.submit.version.agreement', args=[self.addon.slug])
)
+ def test_site_permission_not_allowed(self):
+ self.addon.update(type=amo.ADDON_SITE_PERMISSION)
+ response = self.client.get(self.url)
+ assert response.status_code == 403
+ response = self.client.post(self.url)
+ assert response.status_code == 403
+
class TestVersionSubmitAutoChannel(TestSubmitBase):
"""Just check we chose the right upload channel. The upload tests
| [TestAddonSubmitDetails->[test_submit_success_required_with_content_optimization->[is_success,get_addon,get_dict],test_set_privacy_nomsg->[is_success,get_addon,get_dict],test_source_submission_notes_shown->[generate_source_zip,post],test_submit_categories_max->[post,get_dict],test_submit_categories_add->[is_success,get_addon,get_dict],test_submit_categories_addandremove->[post,get_addon,get_dict],setUp->[get_addon],test_submit_success_optional_fields->[is_success,get_addon,get_dict],test_submit_categories_remove->[post,get_addon,get_dict],test_submit_success_optional_fields_with_content_optimization->[is_success,get_addon,get_dict],test_submit_categories_required->[post,get_dict],test_submit_success_required->[is_success,get_addon,get_dict],test_set_builtin_license_no_log->[is_success,get_addon,get_dict],test_license_error->[post,get_dict],test_source_submission_notes_not_shown_by_default->[post]],TestStaticThemeSubmitDetails->[setUp->[get_addon],test_submit_success_optional_fields->[is_success,get_addon,get_dict],test_submit_categories_set->[is_success,get_addon,get_dict],test_submit_categories_change->[post,get_addon,get_dict],test_submit_success_required->[is_success,get_addon,get_dict],test_set_builtin_license_no_log->[is_success,get_addon,get_dict],test_license_error->[post,get_dict]],VersionSubmitUploadMixin->[test_static_theme_wizard->[post,get_next_url],test_addon_version_is_blocked->[post],test_static_theme_wizard_unsupported_properties->[post,get_next_url],test_unique_version_num->[post],test_same_version_if_previous_is_awaiting_review->[post],test_same_version_if_previous_is_deleted->[post],post->[post],test_missing_compatibility_apps->[post],test_same_version_if_previous_is_rejected->[post]],TestVersionSubmitDetails->[test_submit_details_unlisted_should_redirect->[get_addon],test_can_cancel_review->[post,get_addon],test_public_addon_stays_public_even_if_had_missing_metadata->[post],test_submit_empty_is_okay->[post,get_addon],setUp->[get_addon],test_sub
mit_static_theme_should_redirect->[get_addon],test_submit_success->[post,get_addon]],TestSubmitBase->[get_version->[get_addon]],DetailsPageMixin->[test_name_summary_lengths_long->[is_success],test_submit_details_unlisted_should_redirect->[get_addon],test_can_cancel_review->[post,get_addon],test_submit_slug_invalid->[post],test_submit_slug_required->[post],test_submit_summary_length->[post],test_summary_auto_cropping_content_optimization->[is_success,get_addon],test_name_auto_cropping_content_optimization->[is_success,get_addon],is_success->[post,get_addon],test_submit_summary_symbols_only->[post,get_addon],test_nomination_date_set_only_once->[get_version,get_addon,is_success],test_submit_name_length->[post],test_submit_summary_required->[post],test_name_summary_lengths_short->[is_success],test_name_summary_lengths_content_optimization->[post,is_success,get_addon],test_submit_name_existing->[is_success],test_submit_name_symbols_only->[post,get_addon]],TestVersionSubmitAutoChannel->[test_unlisted_last_uses_unlisted_upload->[post],test_listed_last_uses_listed_upload->[post],test_no_versions_redirects_to_distribution->[post]],TestAddonSubmitSource->[test_no_source->[post,get_version],test_with_bad_source_not_an_actual_archive->[generate_source_garbage,post,get_version],test_submit_source_targz->[post,generate_source_tar,get_version],test_with_non_compressed_tar->[post,generate_source_tar,get_version],test_say_yes_but_dont_submit_source_fails->[post,get_version],test_submit_source_tgz->[post,generate_source_tar,get_version],test_submit_source_in_memory_upload->[generate_source_zip,post,get_version],test_with_bad_source_extension->[generate_source_zip,post,get_version],test_submit_source->[generate_source_zip,post,get_version],test_submit_source_in_memory_upload_with_targz->[post,generate_source_tar,get_version],test_say_no_but_submit_source_anyway_fails->[generate_source_zip,post,get_version],test_logging_failed_validation->[post,get_version],test_no_logging_without_sour
ce->[post],test_with_bad_source_broken_archive->[generate_source_zip,post,get_version],test_submit_source_tarbz2->[post,generate_source_tar,get_version],setUp->[get_version],test_with_bad_source_broken_archive_compressed_tar->[post,generate_source_tar,get_version],post->[post],test_logging->[generate_source_zip,post,get_version]],TestAddonSubmitUpload->[test_unlisted_name_not_unique->[post,get_addon_count],test_redirect_back_to_agreement_if_restricted_by_reputation->[post],test_static_theme_wizard_listed->[post],test_missing_compatible_apps->[post],test_static_theme_submit_unlisted->[post],test_static_theme_submit_listed->[post],test_name_not_unique_between_types->[post,get_addon_count],test_new_addon_is_already_blocked->[post],post->[post],test_static_theme_wizard_unlisted->[post],test_success_listed->[post],test_success_unlisted->[post],test_redirect_back_to_agreement_if_restricted->[post],test_unique_name->[post]],TestVersionSubmitUploadUnlisted->[test_success->[post,get_next_url]],TestVersionSubmitUploadListed->[test_langpack_requires_permission->[post],test_theme_experiment_inside_webext_upload_without_permission->[post],test_success->[post,get_next_url],test_experiment_inside_webext_upload_without_permission->[post],test_incomplete_addon_now_nominated->[post],test_redirect_if_addon_is_invisible->[post]],TestVersionSubmitDistribution->[test_listed_not_available_if_addon_is_invisible->[post],test_unlisted_redirects_to_next_step->[post],test_unlisted_redirects_to_next_step_if_addon_is_invisible->[post],test_listed_redirects_to_next_step->[post]],TestVersionSubmitSource->[setUp->[get_version,get_addon]],TestVersionSubmitFinish->[setUp->[get_addon]]] | Test if a device has read agreement. | nit: a mixin for this? Seems a bit un-DRY-y to have the same chunk of code 4 times. |
@@ -53,7 +53,7 @@ Condition::Pointer AugmentedLagrangianMethodFrictionlessComponentsMortarContactC
PropertiesType::Pointer pProperties,
GeometryType::Pointer pMasterGeom) const
{
- return Kratos::make_intrusive< AugmentedLagrangianMethodFrictionlessComponentsMortarContactCondition<TDim,TNumNodes, TNormalVariation > >( NewId, pGeom, pProperties, pMasterGeom );
+ return Kratos::make_intrusive< AugmentedLagrangianMethodFrictionlessComponentsMortarContactCondition<TDim,TNumNodes, TNormalVariation, TNumNodesMaster > >( NewId, pGeom, pProperties, pMasterGeom );
}
/************************************* DESTRUCTOR **********************************/
| [No CFG could be retrieved] | Creates a new AugmentedLagrangianMethodFrictionlessComponentsMort. | Out of curiosity, why do you need to template it to the number of nodes of the master? |
@@ -9,6 +9,6 @@ namespace Dynamo.Graph.Nodes.CustomNodes
{
IEnumerable<CustomNodeInfo> AddUninitializedCustomNodesInPath(string path, bool isTestMode, bool isPackageMember = false);
Guid GuidFromPath(string path);
- bool TryGetFunctionWorkspace(Guid id, bool isTestMode, out ICustomNodeWorkspaceModel ws);
+ bool TryGetFunctionWorkspace(Guid id, bool isTestMode, out ICustomNodeWorkspaceModel ws, Engine.LibraryServices libraryServices = null);
}
}
| [No CFG could be retrieved] | AddUninitializedCustomNodesInPath adds any custom nodes that are not initialized in the given path. | is library services actually optional? |
@@ -254,6 +254,8 @@ class ApiHandler(RequestHandler):
# Redirect initial poster/banner thumb to default images
if which[0:6] == 'poster':
default_image_name = 'poster.png'
+ elif which[0:6] == 'fanart':
+ default_image_name = 'fanart.png'
else:
default_image_name = 'banner.png'
| [CMD_History->[run->[_epResult->[],_historyDate_to_dateTimeForm,_rename_element,_get_status_Strings,_responds,_get_quality_string],__init__->[check_params,__init__]],CMD_SickBeardGetMessages->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_SickBeardSearchTVDB->[__init__->[check_params,__init__]],CMD_Shows->[run->[_epResult->[],_responds,_get_quality_string,CMD_ShowCache],__init__->[check_params,__init__]],CMD_Show->[run->[_epResult->[],_responds,_get_quality_string,_mapQuality],__init__->[check_params,__init__]],CMD_ShowUpdate->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_Backlog->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_EpisodeSetStatus->[run->[_epResult->[_get_status_Strings],_responds,_epResult,ApiError],__init__->[check_params,__init__]],CMD_SickBeardSearchIndexers->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_SickBeardCheckScheduler->[run->[_epResult->[],_responds,_ordinal_to_dateForm],__init__->[__init__]],CMD_ShowGetQuality->[run->[_epResult->[],_responds,_mapQuality],__init__->[check_params,__init__]],CMD_ShowAddExisting->[run->[_epResult->[],_responds,CMD_SickBeardSearchIndexers],__init__->[check_params,__init__]],CMD_SickBeardRestart->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_SickBeardSetDefaults->[run->[_epResult->[],_responds,ApiError],__init__->[check_params,__init__]],CMD_HistoryTrim->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_Help->[run->[_epResult->[],_responds,get],__init__->[check_params,__init__]],CMD_SickBeardGetRootDirs->[run->[_epResult->[],_getRootDirs,_responds],__init__->[__init__]],CMD_SickBeardCheckVersion->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_SickBeard->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_SickBeardPing->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_ShowCache->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_ShowAddNew->[run->[_epResult->[],_responds,CMD
_SickBeardSearchIndexers,ApiError],__init__->[check_params,__init__]],ApiCall->[check_params->[get]],CMD_Logs->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_ShowPause->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_ShowSeasons->[run->[_epResult->[],_responds,_get_quality_string,_get_status_Strings],__init__->[check_params,__init__]],CMD_HistoryClear->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_SickBeardGetDefaults->[run->[_epResult->[],_responds,_mapQuality],__init__->[__init__]],CMD_SickBeardUpdate->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_ShowRefresh->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_ShowGetBanner->[run->[_epResult->[],showPoster],__init__->[check_params,__init__]],CMD_SickBeardSearchTVRAGE->[__init__->[check_params,__init__]],CMD_ShowSeasonList->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_SickBeardShutdown->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_Episode->[run->[_epResult->[],ApiError,_get_status_Strings,_sizeof_fmt,_responds,_get_quality_string],__init__->[check_params,__init__]],CMD_Failed->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],TVDBShorthandWrapper->[__init__->[check_params,__init__]],CMD_SickBeardDeleteRootDir->[run->[_epResult->[],_getRootDirs,_responds],__init__->[check_params,__init__]],CMD_ShowStats->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_SickBeardAddRootDir->[run->[_epResult->[],_getRootDirs,_responds],__init__->[check_params,__init__]],ApiHandler->[call_dispatcher->[get]],CMD_PostProcess->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_ShowGetPoster->[run->[_epResult->[],showPoster],__init__->[check_params,__init__]],CMD_SickBeardPauseBacklog->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_Exceptions->[run->[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_ShowDelete->[run-
>[_epResult->[],_responds],__init__->[check_params,__init__]],CMD_EpisodeSearch->[run->[_epResult->[],_responds,_get_quality_string],__init__->[check_params,__init__]],CMD_ComingEpisodes->[run->[_epResult->[],_responds,_get_quality_string],__init__->[check_params,__init__]],CMD_ShowsStats->[run->[_epResult->[],_responds],__init__->[__init__]],CMD_ShowSetQuality->[run->[_epResult->[],_responds,_get_quality_string],__init__->[check_params,__init__]],CMD_SubtitleSearch->[run->[_epResult->[],_responds],__init__->[check_params,__init__]]] | Display poster image. | I added this from `webserve.py`. I'm not sure if it is required in the API or not. |
@@ -124,12 +124,12 @@ void VtkOutput::PrintOutput()
/***********************************************************************************/
/***********************************************************************************/
-void VtkOutput::WriteModelPartToFile(const ModelPart& rModelPart, const bool IsSubModelPart)
+void VtkOutput::WriteModelPartToFile(const ModelPart& rModelPart, const bool IsSubModelPart, const std::string& rOutputFilename)
{
Initialize(rModelPart);
// Make the file stream object
- const std::string output_file_name = GetOutputFileName(rModelPart, IsSubModelPart);
+ const std::string output_file_name = (rOutputFilename == "") ? GetOutputFileName(rModelPart, IsSubModelPart) : rOutputFilename;
std::ofstream output_file;
if (mFileFormat == VtkOutput::FileFormat::VTK_ASCII) {
output_file.open(output_file_name, std::ios::out | std::ios::trunc);
| [WriteConditionResultsToFile->[IsCompatibleVariable],PrintOutput->[PrepareGaussPointResults],WriteNodalResultsToFile->[IsCompatibleVariable],WriteModelPartWithoutNodesToFile->[WriteModelPartToFile],WriteElementResultsToFile->[IsCompatibleVariable]] | Write ModelPart to file. | Why completely replace the file name? Why not use the input as a prefix to the name from GetOutputFileName? |
@@ -206,6 +206,15 @@ func (t *tether) Start() error {
}
extraconfig.Encode(t.sink, t.config)
+ log.Info("Populating ext4 block device list now.")
+ deviceManager := fs.Ext4DeviceManager{}
+ DeviceMap, err := deviceManager.GetDeviceByLbel(linuxBlockDevicePath)
+ log.Infof("Found ext4 block devices : %s", DeviceMap)
+
+ if err != nil {
+ log.Errorf("error while trying to identify mountable volumes: %s", err)
+ }
+
//process the filesystem mounts - this is performed after networks to allow for network mounts
for k, v := range t.config.Mounts {
if v.Source.Scheme != "label" {
| [Register->[Infof],launch->[Join,Unlock,Error,Begin,WriteFile,Sprintf,New,SessionLog,End,ProcessEnv,Start,MultiReader,Lock,Errorf,Base,MkdirAll,EncodeWithPrefix,Debugf],handleSessionExit->[Join,Unlock,Unix,Begin,Sprintf,Now,Remove,End,Close,UTC,Lock,HandleSessionExit,Base,Wait,EncodeWithPrefix],Flush->[End,Encode,Begin],cleanup->[Stop,Infof,Begin,Cleanup,Warnf,End,SetOutput,stopReaper],Stop->[End,Begin],removeChildPid->[Lock,Unlock],UpdateNetworkEndpoint->[End,Errorf,Begin],lenChildPid->[Lock,Unlock],Start->[Unlock,Apply,Warnf,MountLabel,Signal,Encode,Info,EncodeWithPrefix,launch,Error,New,End,Lock,Errorf,Reload,Debugf,cleanup,Infof,SetHostname,Begin,Sprintf,Decode,Background,setup],setup->[Join,Infof,Begin,childReaper,Sprintf,End,Start,SetOutput,MultiWriter,Log,Errorf,Base,Getpid,WriteFile,Setup],forkHandler->[Println,Begin,End,Notify,Errorf,Fork,Info],Reload->[Infof],Join,MapSink,New,GetLevel,Encode,Info,Debugf] | Start the tether loop process the sessions and launch the processes if necessary Reloads the config from all extensions. | Very `ext` specific. How do you intend to extend this to support NFS or NTFS? We should hide these implementations in `BlockDevices`. |
@@ -1,14 +1,14 @@
module SessionTimeoutWarningHelper
def session_timeout_frequency
- (AppConfig.env.session_check_frequency || 150).to_i
+ IdentityConfig.store.session_check_frequency
end
def session_timeout_start
- (AppConfig.env.session_check_delay || 30).to_i
+ IdentityConfig.store.session_check_delay
end
def session_timeout_warning
- (AppConfig.env.session_timeout_warning_seconds || 30).to_i
+ IdentityConfig.store.session_timeout_warning_seconds
end
def timeout_refresh_path
| [time_left_in_session->[t,distance_of_time_in_words],session_timeout_start->[to_i],session_timeout_frequency->[to_i],timeout_refresh_path->[html_safe],session_timeout_warning->[to_i],session_modal->[new,user_fully_authenticated?]] | timeouts for session check. | This and `session_timeout_start` had their intended defaults switched I think? `application.yml.default` has a frequency of 30 and a delay of 150, which is the inverse of what it has here. Since these configurations were set in all environments, it should be safe to remove the fallback values in the class, even though they're different. |
@@ -19,12 +19,16 @@ import (
func (wh *mutatingWebhook) createPatch(pod *corev1.Pod, req *v1beta1.AdmissionRequest, proxyUUID uuid.UUID) ([]byte, error) {
namespace := req.Namespace
+ // Tracks the success of the current certificate issued for envoy to connect to XDS
+ success := true
// Issue a certificate for the proxy sidecar - used for Envoy to connect to XDS (not Envoy-to-Envoy connections)
cn := catalog.NewCertCommonNameWithProxyID(proxyUUID, pod.Spec.ServiceAccountName, namespace)
+ defer certXdsIssueTimeTrack(time.Now(), cn.String(), &success)
log.Info().Msgf("Patching POD spec: service-account=%s, namespace=%s with certificate CN=%s", pod.Spec.ServiceAccountName, namespace, cn)
bootstrapCertificate, err := wh.certManager.IssueCertificate(cn, constants.XDSCertificateValidityPeriod)
if err != nil {
log.Error().Err(err).Msgf("Error issuing bootstrap certificate for Envoy with CN=%s", cn)
+ success = false
return nil, err
}
| [createPatch->[IssueCertificate,Msg,Error,FormatBool,Itoa,NewCertCommonNameWithProxyID,Sprintf,createEnvoyBootstrapConfig,Marshal,Msgf,isMetricsEnabled,String,ExpectProxy,Inc,Info,Err],Marshal,Msgf,PatchResponseFromRaw,Err] | createPatch creates a patch for the given pod This function is used to add the Envoy sidecar to the pod spec and add the. | Could you rename this to `certIssuanceSuccess`, otherwise in the context of the the `createPatch` function, the existing variable name can be confusing. |
@@ -468,6 +468,17 @@ public class ESAuditBackend extends AbstractAuditBackend implements AuditBackend
return syncLogCreationEntries(provider, repoId, path, recurs);
}
+ public SearchResponse search(SearchRequest request) {
+ String[] indices = request.indices();
+ if (indices == null && indices.length != 1) {
+ throw new IllegalStateException("Search on audit must include index name: " + request);
+ }
+ if (!getESIndexName().equals(indices[0])) {
+ throw new IllegalStateException("Search on audit must be on audit index: " + request);
+ }
+ return esClient.search(request);
+ }
+
protected QueryBuilder buildFilter(PredicateDefinition[] predicates, DocumentModel searchDocumentModel) {
if (searchDocumentModel == null) {
| [ESAuditBackend->[buildQuery->[getSearchRequestBuilder],syncLogCreationEntries->[syncLogCreationEntries],buildSearchQuery->[getSearchRequestBuilder,buildFilter],ensureUIDSequencer->[getSearchRequestBuilder],nativeQuery->[buildQuery,buildLogEntries],expandQueryVariables->[expandQueryVariables],onApplicationStarted->[isMigrationDone,getClient],queryLogsByPage->[getSearchRequestBuilder,queryLogsByPage,buildLogEntries],getApplicationStartedOrder->[getApplicationStartedOrder],getClient->[getClient]]] | This method is overridden to provide a filter that can be used to filter the log creation entries This method is used to build a filterBuilder based on the parameters of the predicate. | `indices == null && indices.length != 1` is wrong, do you mean `||` ? |
@@ -89,6 +89,7 @@ public class PackageStateJson {
this.arch = archIn;
this.packageStateId = Optional.empty();
this.versionConstraintId = Optional.empty();
+ this.packageEvr = evrIn;
}
/**
| [PackageStateJson->[convertToPackageState->[getName,getEpoch,getArch,getVersion,getRelease]]] | PackageStateJson - A JSON representation of a single package state. This method returns the package name. | Why do we still store epoch, version and release as single values? Could we just change the getters to return packageEvr.getXXX() ? |
@@ -0,0 +1,5 @@
+<?php
+$hardware = snmp_get($device, 'deviceDescr.0', '-Ovq', 'IOMEGANAS-MIB');
+$version = 'v1';
+$serial = 'N/A';
+?>
| [No CFG could be retrieved] | No Summary Found. | How come this is hardcoded to v1? |
@@ -493,6 +493,8 @@ type IdentityProvider struct {
UseAsChallenger bool `json:"challenge"`
// UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against
UseAsLogin bool `json:"login"`
+ // MappingMethod determines how identities from this provider are mapped to users
+ MappingMethod string `json:"mappingMethod"`
// Provider contains the information about how to set up a specific identity provider
Provider runtime.RawExtension `json:"provider"`
}
| [No CFG could be retrieved] | The master public URL is the public URL for logging and metrics. | Make an enumerated `string` type to make the intent obvious. |
@@ -692,7 +692,7 @@ ds_mgmt_drpc_pool_extend(Drpc__Call *drpc_req, Drpc__Response *drpc_resp)
D_GOTO(out_list, rc = -DER_NOMEM);
rc = ds_mgmt_pool_extend(uuid, svc_ranks, rank_list, "pmem",
- req->scmbytes, req->nvmebytes,
+ scm_bytes, nvme_bytes,
req->n_faultdomains, req->faultdomains);
if (rc != 0)
| [No CFG could be retrieved] | This function unpacks the inner request from the drpc request and then calls ds_mgmt Handle the extended pool request. | So is the long-term plan to modify these APIs to accept a uint64_t array? |
@@ -133,7 +133,8 @@ namespace Microsoft.Internal
{
IEnumerable<MethodInfo> declaredMethods = type.GetDeclaredMethods();
- Type baseType = type.BaseType;
+ Type? baseType = type.BaseType;
+ Debug.Assert(baseType != null);
if (baseType.UnderlyingSystemType != typeof(object))
{
return declaredMethods.Concat(baseType.GetAllMethods());
| [ReflectionServices->[GetAllMethods->[GetAllMethods],GetAllFields->[GetAllFields],IsVisible->[IsVisible],Assembly->[Assembly],GetDisplayName->[GetDisplayName]]] | Get all methods of a given type. | Why not declare it non-nullable and bang the `type.BaseType` call given it is only used one time after this. |
@@ -4,10 +4,8 @@ import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
-
__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
-
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
| [SqueezeNet->[__init__->[Fire]],squeezenet1_1->[SqueezeNet],squeezenet1_0->[SqueezeNet]] | Creates a class which initializes the object with the given parameters. Initialize the SqueezeNet. | can you revert those unnecessary line changes and space changes in this file? |
@@ -270,8 +270,17 @@ describe('amp-app-banner', () => {
});
});
- it('should remove banner if safari', () => {
+ it('should remove banner if safari and not embedded', () => {
+ isSafari = true;
+ isEmbedded = false;
+ return getAppBanner().then(banner => {
+ expect(banner.parentElement).to.be.null;
+ });
+ });
+
+ it('should remove banner if safari and embedded', () => {
isSafari = true;
+ isEmbedded = true;
return getAppBanner().then(banner => {
expect(banner.parentElement).to.be.null;
});
| [No CFG could be retrieved] | Determines that the given banner is present in the DOM and that it is not missing. Android and Chrome require that the following methods are called. | hmmm according to the description here and below we will remove banner as long as it is safari? Really |
@@ -78,6 +78,7 @@ STAGE_DEFINITION = {
Any(str, OUT_PSTAGE_DETAILED_SCHEMA)
],
Optional(StageParams.PARAM_PLOTS): [Any(str, PLOT_PSTAGE_SCHEMA)],
+ Optional(StageParams.PARAM_DVCLIVE): [Any(str, DVCLIVE_PSTAGE_SCHEMA)],
}
FOREACH_IN = {
| [Optional,Any,Schema,Required] | A stage definition is a schema of the same type as the pipeline. | Since there can only be 1 dvclive entry, maybe we could limit it in schema too in addition to your assert? |
@@ -139,9 +139,9 @@ class Indexable_Post_Type_Presentation extends Indexable_Presentation {
/**
* Filter: 'wpseo_opengraph_show_publish_date' - Allow showing publication date for other post types.
*
- * @api bool Whether or not to show publish date.
- *
* @param string $post_type The current URL's post type.
+ *
+ * @api bool Whether or not to show publish date.
*/
if ( ! apply_filters( 'wpseo_opengraph_show_publish_date', false, $this->post_type->get_post_type( $this->context->post ) ) ) {
return '';
| [Indexable_Post_Type_Presentation->[generate_twitter_creator->[get],generate_og_description->[strip_shortcodes,get_the_excerpt],generate_robots->[get_base_values,after_generate,is_indexable],generate_title->[get],generate_og_article_author->[get_the_author_meta],generate_meta_description->[get],generate_twitter_description->[get_the_excerpt],generate_og_article_published_time->[get_post_type]]] | Generate the published time for the Open Graph article. | What is the reason for this change? As far as I can remember we first show the `@api` and then the `@param`. Can you revert this? |
@@ -25,6 +25,7 @@ class EmailDigest
private
def get_users
+ # TODO: [@msarit] update query to call email_digest_periodic from correct table
User.registered.where(email_digest_periodic: true).where.not(email: "")
end
end
| [EmailDigest->[send_periodic_digest_email->[send_periodic_digest_email]]] | get users not registered with email digest periodic. | I need help reworking this query; `email_digest_periodic` now lives in `users_notification_setting`, not `users`. My attempts to call `.joins(:users_notification_settings)` on `User` did not work. I really need to strengthen my DB skills. @citizen428 @rhymes |
@@ -9295,4 +9295,15 @@ LowererMD::LowerTypeof(IR::Instr* typeOfInstr)
typeOfInstr->InsertBefore(helperLabel);
typeOfInstr->InsertAfter(doneLabel);
m_lowerer->LowerUnaryHelperMem(typeOfInstr, IR::HelperOp_Typeof);
-}
\ No newline at end of file
+}
+
+#if DBG
+//
+// Helps in debugging of fast paths.
+//
+void LowererMD::GenerateDebugBreak( IR::Instr * insertInstr )
+{
+ IR::Instr *int3 = IR::Instr::New(Js::OpCode::DEBUGBREAK, insertInstr->m_func);\
+ insertInstr->InsertBefore(int3);\
+}
+#endif
\ No newline at end of file
| [No CFG could be retrieved] | typeOf is the type of helper that is being used. | Better call it debugBreakInstr? Also remove the tailing `\`? |
@@ -216,7 +216,10 @@ class LanguageModelingAdapter(Adapter):
def process(self, raw, identifiers=None, frame_meta=None):
raw_output = self._extract_predictions(raw, frame_meta)
result = []
- for identifier, token_output in zip(identifiers, raw_output[self.logits_out]):
+ logits = raw_output[self.logits_out]
+ if len(logits.shape) == 4:
+ logits = logits[0]
+ for identifier, token_output in zip(identifiers, logits):
result.append(LanguageModelingPrediction(identifier, token_output))
return result
| [MachineTranslationAdapter->[process->[int,append,_clean,argwhere,get,MachineTranslationPrediction,_extract_predictions,ValueError,transpose,format,zip,enumerate],parameters->[NumberField,super,PathField,update],configure->[split,dict,len,get_value_from_config,read_txt,enumerate]],LanguageModelingAdapter->[process->[_extract_predictions,LanguageModelingPrediction,append,zip],parameters->[StringField,super,update],configure->[get_value_from_config]],BertTextClassification->[process->[normal,append,matmul,_extract_predictions,zip,zeros,ClassificationPrediction],parameters->[NumberField,StringField,super,update],configure->[get_value_from_config]],QuestionAnsweringAdapter->[process->[QuestionAnsweringPrediction,append,_extract_predictions,flatten,zip],parameters->[StringField,super,update],configure->[get_value_from_config]],_clean->[strip,sub,join,split],QuestionAnsweringEmbeddingAdapter->[process->[_extract_predictions,QuestionAnsweringEmbeddingPrediction,append,zip],parameters->[StringField,super,update],configure->[get_value_from_config]],NonAutoregressiveMachineTranslationAdapter->[process->[append,lstrip,MachineTranslationPrediction,_extract_predictions,decode,replace,zip,values],parameters->[PathField,update,BoolField,super,StringField],configure->[SentencePieceBPETokenizer,str,get_value_from_config,isinstance,raise_error]],UnsupportedPackage] | Process the raw prediction and return a sequence of LanguageModelingPrediction objects. | could you please provide example which shape expected and how it works? |
@@ -76,6 +76,11 @@ import {vsyncFor} from './vsync';
* \/
* State: <IN VIEWPORT>
*
+ * The preconnectCallback is called when the systems thinks it is good
+ * to preconnect to hosts needed by an element. It will never be called
+ * before buildCallback and it might be called multiple times including
+ * after layoutCallback.
+ *
* Additionally whenever the dimensions of an element might have changed
* AMP remeasures its dimensions and calls `onLayoutMeasure` on the
* element instance. This can be used to do additional style calculations
| [No CFG could be retrieved] | A base class for all custom DOM elements that are defined in the mix. Replies the layout and resources for a given window object. | can we add it to the state diagram :smile_cat: |
@@ -93,6 +93,11 @@ static int msblob2key_decode(void *vctx, OSSL_CORE_BIO *cin, int selection,
void *key = NULL;
int ok = 0;
+ if (in == NULL) {
+ ERR_raise(ERR_LIB_PEM, ERR_R_MALLOC_FAILURE);
+ goto end;
+ }
+
if (BIO_read(in, hdr_buf, 16) != 16) {
ERR_raise(ERR_LIB_PEM, PEM_R_KEYBLOB_TOO_SHORT);
goto next;
| [msblob2key_ctx_st->[OPENSSL_zalloc],void->[OPENSSL_free,ossl_rsa_set0_libctx,PROV_LIBCTX_OF],int->[OSSL_PARAM_construct_utf8_string,ossl_do_blob_header,OSSL_PARAM_construct_octet_string,ossl_pw_set_ossl_passphrase_cb,data_cb,adjust_key,export,BIO_read,read_public_key,ossl_bio_new_from_core_bio,ossl_blob_length,OSSL_PARAM_construct_int,ERR_set_mark,OPENSSL_malloc,ossl_prov_get_keymgmt_export,read_private_key,ERR_raise,OSSL_PARAM_construct_end,OPENSSL_free,memset,BIO_free,free_key,ERR_pop_to_mark],IMPLEMENT_MSBLOB] | Decode a key blob using the key - based library. Reads a key from the stream. private function of the next key in the chain. | You can just return 0 here. all the calls after the end label are no-ops in this case. Also drop the ERR_raise() as BIO_new that is called inside the ossl_bio_new_from_core_bio() will raise an error anyway. |
@@ -54,6 +54,11 @@ public class SketchBufferAggregator implements BufferAggregator
createNewUnion(buf, position, false);
}
+ /**
+ * This method uses locks because it can be used during indexing,
+ * and Druid can call aggregate() and get() concurrently
+ * https://github.com/apache/incubator-druid/pull/3956
+ */
@Override
public void aggregate(ByteBuffer buf, int position)
{
| [SketchBufferAggregator->[getMemory->[get],createNewUnion->[get],relocate->[get,createNewUnion],get->[get],getOrCreateUnion->[get]]] | Initializes the union with the given buffer and position. | Please make proper sentences with punctuation. |
@@ -48,13 +48,11 @@ namespace Microsoft.Xna.Framework.Audio
static SoundEffectInstancePool()
{
- // Reduce garbage generation by allocating enough capacity for the maximum playing instances
-#if WINDOWS || (WINRT && !WINDOWS_PHONE) || LINUX || WEB || ANGLE
- _playingInstances = new List<SoundEffectInstance>();
-#else
- _playingInstances = new List<SoundEffectInstance>(MAX_PLAYING_INSTANCES);
-#endif
- _pooledInstances = new List<SoundEffectInstance>();
+ // Reduce garbage generation by allocating enough capacity for
+ // the maximum playing instances or at least some reasonable value.
+ var maxInstances = MAX_PLAYING_INSTANCES < 1024 ? MAX_PLAYING_INSTANCES : 1024;
+ _playingInstances = new List<SoundEffectInstance>(maxInstances);
+ _pooledInstances = new List<SoundEffectInstance>(maxInstances);
}
/// <summary>
| [SoundEffectInstancePool->[Remove->[Add],Add->[Add],Update->[Add]]] | private static class _playingInstances _pooledInstances Adds the specified instance to the list of playing instances if it is a pooled instance. If. | Could use `Math.Min(MAX_PLAYING_INSTANCES, 1024);` But its the same really :) |
@@ -47,6 +47,12 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *ContainerCLIOpts, args []string
if err != nil {
return err
}
+ if s.ResourceLimits == nil {
+ s.ResourceLimits = &specs.LinuxResources{}
+ }
+ if s.ResourceLimits.Memory == nil {
+ s.ResourceLimits.Memory = &specs.LinuxMemory{}
+ }
if m := c.Memory; len(m) > 0 {
ml, err := units.RAMInBytes(m)
if err != nil {
| [Duration,RAMInBytes,Wrap,HasPrefix,ValidateSysctls,UsernsMode,ParseUlimit,GetAllLabels,ParseBool,New,ParseIDMapping,Errorf,SplitN,Wrapf,Join,ParseSlice,Base,ParseUint,ParseFile,validate,Unmarshal,Environ,FromHumanSize,ParseSignal,ParseDuration] | FillOutSpecGen fills out the specgen. SpecGenerator from command line flags. RAMInBytes returns the next reserved memory value in the resource limits. | Do we need logic to clear this for the rootless v1 case? |
@@ -431,14 +431,14 @@ def test_api_profit(botclient, mocker, ticker, fee, markets, limit_buy_order, li
'latest_trade_date': 'just now',
'latest_trade_timestamp': ANY,
'profit_all_coin': 6.217e-05,
- 'profit_all_fiat': 0,
+ 'profit_all_fiat': ANY,
'profit_all_percent': 6.2,
'profit_all_percent_mean': 6.2,
'profit_all_ratio_mean': 0.06201058,
'profit_all_percent_sum': 6.2,
'profit_all_ratio_sum': 0.06201058,
'profit_closed_coin': 6.217e-05,
- 'profit_closed_fiat': 0,
+ 'profit_closed_fiat': ANY,
'profit_closed_percent': 6.2,
'profit_closed_ratio_mean': 0.06201058,
'profit_closed_percent_mean': 6.2,
| [test_api_performance->[assert_response,client_get],test_api_trades->[assert_response,client_get],test_api_status->[assert_response,client_get],test_api_balance->[assert_response,client_get],test_api_count->[assert_response,client_get],test_api_stop_workflow->[client_post,assert_response],test_api_show_config->[assert_response,client_get],test_api_reloadconf->[client_post,assert_response],test_api_blacklist->[assert_response,client_post,client_get],test_api_stopbuy->[client_post,assert_response],test_api_version->[assert_response,client_get],test_api_forcebuy->[client_post,assert_response],test_api_edge_disabled->[assert_response,client_get],test_api_daily->[assert_response,client_get],test_api_profit->[assert_response,client_get],test_api_not_found->[client_post,assert_response],test_api_unauthorized->[client_get,assert_response],test_api_forcesell->[client_post,assert_response],test_api_token_refresh->[client_post,assert_response],test_api_token_login->[client_post,assert_response],test_api_whitelist->[assert_response,client_get]] | Test API profit. This function returns a dict of all the values of all the properties of a node in the. | I think instead of using ANY i would use the real outcome (which is `0.76748865` in both instances). It's mocked in conftest, and price is set to 12345.0. |
@@ -2349,6 +2349,12 @@ def batch_norm(input,
moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not.
fuse_with_relu (bool): if True, this OP performs relu after batch norm.
+ use_global_stats(bool, Default False): Whether to use global mean and
+ variance. In inference or test mode, set use_global_stats to true
+ or is_test to true, and the behavior is equivalent.
+ In train mode, when setting use_global_stats True, the global mean
+ and variance are also used during train time,
+ the BN acts as scaling and shiffting.
Returns:
Variable: A tensor variable which is the result after applying batch normalization on the input.
| [ctc_greedy_decoder->[topk],image_resize->[_is_list_or_turple_],sequence_first_step->[sequence_pool],logical_xor->[_logical_op],elementwise_pow->[_elementwise_op],elementwise_min->[_elementwise_op],elementwise_max->[_elementwise_op],logical_not->[_logical_op],conv2d->[_get_default_param_initializer],logical_and->[_logical_op],sequence_last_step->[sequence_pool],elementwise_add->[_elementwise_op],elementwise_sub->[_elementwise_op],lstm_unit->[fc],elementwise_mul->[_elementwise_op],logical_or->[_logical_op],elementwise_div->[_elementwise_op],dice_loss->[reduce_sum,one_hot,reduce_mean],matmul->[__check_input],resize_bilinear->[image_resize],resize_nearest->[image_resize],image_resize_short->[image_resize],conv3d->[_get_default_param_initializer]] | Batch normalization of a single . A base function for training or training batch normalization. Creates a single n - dimensional cross - validation cross - validation on the input. Append activation to the batch_norm_out if the sequence has no missing values. | what does it mean by "global"? All BN use the same mean and variance? Or the BN use the moving mean and variable? |
@@ -464,6 +464,15 @@ func (t *tether) Start() error {
}
defer t.cleanup()
+ defer func() {
+ // NOTE: this must not be checked in - results in unknown states as
+ // we're suppressing the panic from rolling up the stack
+ e := recover()
+ if e != nil {
+ log.Errorf("Logging panic: %s: %s", e, debug.Stack())
+ }
+ }()
+
// initial entry, so seed this
t.reload <- struct{}{}
for range t.reload {
| [Start->[processSessions,initializeSessions,cleanup,setup,setHostname,setLogLevel,setNetworks,reloadExtensions,setMounts],handleSessionExit->[cleanupSession],launch->[loggingLocked,cleanupSession]] | Start is the main loop of the tether This is a strict ordering of the sessions. | Should this be removed now? Or is this here to help with the CI run? |
@@ -275,7 +275,14 @@ public abstract class Trigger<J extends Item> implements Describable<Trigger<?>>
if (t.tabs.check(cal)) {
LOGGER.log(Level.CONFIG, "cron triggered {0}", p);
try {
+ long begin_time = System.currentTimeMillis();
t.run();
+ long end_time = System.currentTimeMillis();
+ if ((end_time - begin_time) > CRON_THRESHOLD) {
+ LOGGER.log(Level.WARNING, "cron trigger " + t.getClass().getName() + ".run() triggered by {0} spent too much time ({1}) in its execution, " +
+ "other timers can be affected",
+ new Object[] {p, Util.getTimeSpanString(end_time - begin_time)});
+ }
} catch (Throwable e) {
// t.run() is a plugin, and some of them throw RuntimeException and other things.
// don't let that cancel the polling activity. report and move on.
| [Trigger->[getProjectActions->[getProjectAction],for_->[all,getDescriptor],checkTriggers->[run->[run],run]]] | Check all triggers and trigger them if they are not already in the system. check if there is a missing config in the job. | We should either use plain old string concatenation or parameter replacement. Using both give us disadvantages from both worlds. |
@@ -710,6 +710,7 @@ module.exports = class binance extends Exchange {
},
},
},
+ 'option': {},
},
'commonCurrencies': {
'BCC': 'BCC', // kept for backward-compatibility https://github.com/ccxt/ccxt/issues/4848
| [No CFG could be retrieved] | This method returns an array of base - number units in a sequence of 16 - bit integers This method returns an array of possible currencies. | This doesn't work |
@@ -50,6 +50,14 @@ public final class ServletContextPath {
this.contextPath = contextPath;
}
+ /**
+ * Returns a concatenation of a servlet context path stored in the given {@code context} and a
+ * given {@code spanName}.
+ *
+ * <p>If there is no servlet path stored in the context, returns {@code spanName}. Servlet context
+ * path and span name are concatenated verbatim without any attempt to ensure proper usage of path
+ * separators. The latter is the responsibility of the caller of this method.
+ */
public static String prepend(Context context, String spanName) {
ServletContextPath servletContextPath = context.get(CONTEXT_KEY);
if (servletContextPath != null) {
| [ServletContextPath->[init->[ServletContextPath]]] | Prepend the span name to the context path if it exists. | While this is completely true it is also somewhat useless. It would be better to describe that span name should be either an empty string or a string starting with a slash. Normalizing or enforcing this scheme in the method is also an option. |
@@ -228,8 +228,14 @@ def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1,
solver : str | function
Either a function which takes as its inputs the sparse predictor
matrix X and the observation matrix Y, and returns the coefficient
- matrix b; or a string. If str, must be ``'cholesky'``, in which case
- the solver used is ``linalg.solve(dot(X.T, X), dot(X.T, y))``.
+ matrix b; or a string.
+ If a string, if ``'cholesky'``, the solver used is
+ ``linalg.solve(dot(X.T, X), dot(X.T, y))``. If ``'incremental'``,
+ an incremental solver is used that may be slightly numerically off
+ and is usually much slower, but can be used for very large tasks with
+ many variables and/or high sampling rates in case they overload the
+ default solver.
+
Returns
-------
| [_clean_rerp_input->[nonzero,unique,tocsr,range,setdiff1d,_reject_data_segments],linear_regression_raw->[solver->[,solve],_clean_rerp_input,_prepare_rerp_data,ValueError,isinstance,format,solver,_make_evokeds,_prepare_rerp_preds],_prepare_rerp_data->[int,pick_info,len,copy,set,ValueError,pick_types],linear_regression->[namedtuple,EvokedArray,warn,RuntimeError,info,enumerate,get_data,isgenerator,copy,zeros,range,product,len,lm,ValueError,isinstance,_fit_lm,next,pick_types,array],_make_evokeds->[dict,float,EvokedArray],_fit_lm->[sqrt,log10,dict,product,empty_like,range,abs,dot,cdf,diag,len,ValueError,clip,sign,zip,finfo,inv,lstsq,reshape],_prepare_rerp_preds->[,list,int,hstack,dict,append,get,asarray,len,where,ValueError,isinstance,in1d,ones,dia_matrix]] | Linear regression - based evoked potentials. Required parameters for the n - window regression. Returns a dictionary of evoked objects with the ER [ F / P ]s. | indicate recommendation for sparse input only? |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.