patch
stringlengths
18
160k
callgraph
stringlengths
4
179k
summary
stringlengths
4
947
msg
stringlengths
6
3.42k
@@ -984,15 +984,16 @@ public class DagManager extends AbstractIdleService { } if (!requesterCheck) { - throw new IOException(requesterMessage); + throw new IOException(requesterMessage.toString()); } } /** * Increment quota by one for the given map and key. + * We need synchronization on this method because quotaMap is shared among all the {@link DagManagerThread}s. * @return true if quota is not reached for this user or user is whitelisted, false otherwise. */ - private boolean incrementMapAndCheckQuota(Map<String, Integer> quotaMap, String user, DagNode<JobExecutionPlan> dagNode) { + synchronized private boolean incrementMapAndCheckQuota(Map<String, Integer> quotaMap, String user, DagNode<JobExecutionPlan> dagNode) { String key = DagManagerUtils.getUserQuotaKey(user, dagNode); int jobCount = quotaMap.getOrDefault(key, 0);
[DagManager->[DagManagerThread->[killJobIfOrphaned->[cancelDagNode],slaKillIfNeeded->[cancelDagNode],onFlowSuccess->[toString],cleanUp->[toString,hasRunningJobs,deleteJobState],onJobFinish->[submitNext],initialize->[toString],getRunningJobsCounter->[toString],onFlowFailure->[toString],cleanUpDag->[cleanUp]],setActive->[createDagStateStore,addDag],FailedDagRetentionThread->[run->[cleanUp]],handleKillFlowEvent->[killFlow]]]
Checks if the quota for the given DagNode is exceeded. if jobCount is greater than quota for user.
the `requesterToJobCount` (aka `quotaMap`) is already a `ConcurrentHashMap`. seems reasonable synchronization granularity, sine AFAICT every requester's quota is separate. synchronizing here (on the `DMThread` instance anyway wouldn't ensure atomicity *across* different threads. you're right on the need for concurrency control though. to ensure atomic check-then-set semantics (and never drop/miss an increment), use the three-param version of `.replace`, rather than `.put`. (`AtomicInteger.compareAndSet` would afford the same, but is overkill, since CHM already effectively synchronizes on `key`.) to account for the potential rejection to `.replace`, you'll need a loop within.
@@ -296,6 +296,15 @@ class GDriveFileSystem(FSSpecWrapper): # pylint:disable=abstract-method return super()._with_bucket(path) + def _strip_bucket(self, entry): + try: + bucket, path = entry.split("/", 1) + except ValueError: + # If there is no path attached, only returns + # the bucket (top-level). + bucket, path = entry, None + return path or bucket + def upload_fobj(self, fobj, to_info, **kwargs): rpath = self._with_bucket(to_info) self.makedirs(os.path.dirname(rpath))
[GDriveFileSystem->[upload_fobj->[upload_fobj,_with_bucket],fs->[_validate_credentials,GDriveAuthError]]]
Create a new object with the given path and bucket.
we have similar implementation in the `ObjectFSWrapper` ? what is the difference here?
@@ -30,10 +30,16 @@ public class FileSystemLimiterKey extends SharedLimiterKey { public static final String RESOURCE_LIMITED_PREFIX = "filesystem"; private final URI uri; + public final String serviceName; public FileSystemLimiterKey(URI uri) { - super(RESOURCE_LIMITED_PREFIX + "/" + getFSIdentifier(uri)); + this(uri, null); + } + + public FileSystemLimiterKey(URI uri, String serviceName) { + super(RESOURCE_LIMITED_PREFIX + "/" + getFSIdentifier(uri) + "/" + serviceName); this.uri = uri; + this.serviceName = serviceName; } private static String getFSIdentifier(URI uri) {
[FileSystemLimiterKey->[hashCode->[hashCode],equals->[equals]]]
Get the FS identifier for the given URI.
When `serviceName==null`, the last token should not be present, right?
@@ -65,6 +65,13 @@ public class HttpConfiguration { @ConfigItem public OptionalInt ioThreads; + /** + * If this is true then only a virtual channel will be set up for vertx web. + * We have this switch for testing purposes. + */ + @ConfigItem(defaultValue = "false") + public boolean virtual; + public int determinePort(LaunchMode launchMode) { return launchMode == LaunchMode.TEST ? testPort : port; }
[No CFG could be retrieved]
Determines the port for the given launch mode.
defaultValue not needed. It will be automatically default to false.
@@ -27,7 +27,10 @@ class OpenaiTransformerBytePairIndexer(TokenIndexer[int]): encoder: Dict[str, int] = None, byte_pairs: List[Tuple[str, str]] = None, n_ctx: int = 512, - model_path: str = None) -> None: + model_path: str = None, + namespace: str = 'openai_transformer') -> None: + self._namespace = namespace + self._added_to_vocabulary = False too_much_information = model_path and (encoder or byte_pairs) too_little_information = not model_path and not (encoder and byte_pairs)
[OpenaiTransformerBytePairIndexer->[tokens_to_indices->[byte_pair_encode]]]
Initialize a BPE - like object from a. bpe file. Context manager for the context manager.
I started out wondering why there wasn't a docstring modification to add this parameter. Then I clicked up and realized there was no parameter section in the docstring. Then I wondered what in the world `n_ctx` meant, and what it was doing (`num_contexts`, probably? `num_context_words`? but what does that have to do with BPE?). After looking through the code and remembering a prior conversation, I _think_ it's here because the model can only handle 512 tokens at a time (`max_sequence_length`?), and you're hosed if you have more than that, and you're catching it here to give an early error message...?
@@ -11,7 +11,8 @@ class PyDill(PythonPackage): homepage = "https://github.com/uqfoundation/dill" url = "https://pypi.io/packages/source/d/dill/dill-0.2.6.zip" - + + version('0.2.7', sha256='ddda0107e68e4eb1772a9f434f62a513c080c7171bd0dd6fb65d992788509812') version('0.2.6', sha256='6c1ccca68be483fa8c66e85a89ffc850206c26373aa77a97b83d8d0994e7f1fd') version('0.2.5', sha256='e82b3db7b9d962911c9c2d5cf2bb4a04f43933f505a624fb7dc5f68b949f0a5c') version('0.2.4', sha256='db68929eef0e886055d6bcd86f830141c1f653ddbf5d081c086e9d1c45efb334')
[PyDill->[depends_on,version]]
Create a single object of type with all of the available version numbers. requires on - build.
Can you change this extension to `dill-0.2.7.tar.gz`? That will allow `spack checksum` to pick up new versions in the future.
@@ -206,6 +206,13 @@ class JsonConfigSource implements ConfigSourceInterface }); } + /** + * @param string $method + * @param mixed|array $args + * @param callable $fallback + * + * @return void + */ protected function manipulateJson($method, $args, $fallback) { $args = func_get_args();
[JsonConfigSource->[addProperty->[manipulateJson],addRepository->[manipulateJson],removeLink->[manipulateJson],getName->[getPath],removeRepository->[manipulateJson],manipulateJson->[validateSchema,write,arrayUnshiftRef,read,exists,getContents,getPath],addLink->[manipulateJson],removeProperty->[manipulateJson],addConfigSetting->[manipulateJson],removeConfigSetting->[manipulateJson]]]
Removes a link from the configuration file. Updates composer. json with a new version if it doesn t exist. check if file is empty and set chmod to 0600.
`mixed|array` is strictly the same than `mixed`
@@ -158,7 +158,12 @@ def compute_epochs_psd(epochs, picks=None, fmin=0, fmax=np.inf, n_fft=256, exclude='bads') n_fft, n_overlap = _check_nfft(len(epochs.times), n_fft, n_overlap) - data = epochs.get_data()[:, picks] + if tmin is not None or tmax is not None: + time_mask = _time_mask(epochs.times, tmin, tmax) + else: + time_mask = Ellipsis + + data = epochs.get_data()[:, picks][..., time_mask] if proj: proj, _ = make_projector_info(epochs.info) if picks is not None:
[compute_raw_psd->[int,make_projector_info,parallel,parallel_func,info,arange,len,my_pwelch,time_as_index,float,_check_nfft,dot,array],_pwelch->[welch_fun],_compute_psd->[psd,array],compute_epochs_psd->[int,make_projector_info,parallel_func,get_data,arange,len,zip,empty,array_split,my_pwelch,info,float,pick_types,_check_nfft,dot,parallel]]
Compute power spectral density with average periodograms. Compute the missing window sizes for all nanoseconds.
FYI if you want to maximize efficiency, we should allow `_time_mask(..., allow_slice=False)` since here you can do `allow_slice=True` to avoid one copy. We could also consider doing it for `picks`. It would avoid memory copies. Something to consider if you're trying to boost efficiency here.
@@ -304,7 +304,8 @@ namespace ProtoFFI if (null != func) { func.IsStatic = true; - RegisterFunctionPointer(func.Name, f, null, func.ReturnType); + if (!IsReflectionContext) + RegisterFunctionPointer(func.Name, f, null, func.ReturnType); classnode.Procedures.Add(func); } }
[CLRModuleType->[ParseMethod->[isPropertyAccessor,GetProtoCoreType,isOverloadedOperator],FunctionDefinitionNode->[SupressesImport,GetProtoCoreType],GetProtoCoreType->[GetProtoCoreType],ClassDeclNode->[SetTypeAttributes],ParseArgumentDeclaration->[GetProtoCoreType],RegisterFunctionPointer->[GetFunctionPointers],SupressesImport->[SupressesImport],ParseFieldDeclaration->[GetImportedType,SupressesImport]],FFIMethodAttributes->[TryGetTypeAttributes,SetTypeAttributes],CLRDLLModule->[CodeBlockNode->[GetTypes,GetEmptyTypes,SupressesImport,EnsureDisposeMethod],Type->[GetTypes,GetImplemetationType],GetTypes->[GetTypes],FFIFunctionPointer->[GetFunctionPointers],GetFunctionPointers->[GetProtoCoreType,GetFunctionPointers]]]
Parse an enum type.
please use braces - this code is very old, that is why it does not follow the code guide.
@@ -234,6 +234,9 @@ namespace System.Threading ? AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.EnableWorkerTracking", false) : GetEnableWorkerTrackingNative(); + internal static readonly bool EnableDispatchAutoreleasePool = + AppContextConfigHelper.GetBooleanConfig("System.Threading.ThreadPool.EnableDispatchAutoreleasePool", false); + [MethodImpl(MethodImplOptions.InternalCall)] internal static extern bool CanSetMinIOCompletionThreads(int ioCompletionThreads);
[ThreadPool->[EnsureGateThreadRunning->[EnsureGateThreadRunning],UnsafeQueueNativeOverlapped->[PostQueuedCompletionStatus],GetMaxThreads->[GetMaxThreads],GetOrCreateThreadLocalCompletionCountObject->[GetOrCreateThreadLocalCompletionCountObject],NotifyWorkItemComplete->[NotifyWorkItemComplete],SetMinThreads->[SetMinThreads],GetMinThreads->[GetMinThreads],SetMaxThreads->[SetMaxThreads],RegisteredWaitHandle->[Unregister->[],SetNativeRegisteredWaitHandle->[],SetNativeRegisteredWaitHandle,OnBeforeRegister],ReportThreadStatus->[ReportThreadStatus],NotifyWorkItemProgress->[NotifyWorkItemProgress],GetAvailableThreads->[GetAvailableThreads]],CompleteWaitThreadPoolWorkItem->[CompleteWait->[CompleteWait]],RegisteredWaitHandle->[Unregister->[IsValidHandle],SetNativeRegisteredWaitHandle->[IsValidHandle],IsValidHandle]]
private static final int WORKER_COUNTER_INDEX = 0 ;.
Nit: Static fields should have `s_` prefix. It should be method or a property. It would be nice if the naming is consistent with WorkerTracing switch right above this.
@@ -114,8 +114,9 @@ def fetch_all(): def fetch_all_recordio(path): - for module_name in filter(lambda x: not x.startswith("__"), - dir(paddle.dataset)): + for module_name in [ + x for x in dir(paddle.dataset) if not x.startswith("__") + ]: if "convert" in dir( importlib.import_module("paddle.dataset.%s" % module_name)) and \ not module_name == "common":
[fetch_all_recordio->[must_mkdirs],download->[md5file],convert->[write_data,reader],must_mkdirs]
Fetch all recordio modules from path.
(x for x in ...)
@@ -109,8 +109,9 @@ public class QuartzScheduler implements Scheduler { for (Scheduled scheduled : method.getSchedules()) { String name = triggerNameSequence.getAndIncrement() + "_" + method.getInvokerClassName(); JobBuilder jobBuilder = JobBuilder.newJob(InvokerJob.class) - .withIdentity(name, Scheduler.class.getName()).usingJobData(INVOKER_KEY, - method.getInvokerClassName()); + .withIdentity(name, Scheduler.class.getName()) + .usingJobData(INVOKER_KEY, method.getInvokerClassName()) + .requestRecovery(); ScheduleBuilder<?> scheduleBuilder; String cron = scheduled.cron().trim();
[QuartzScheduler->[start->[start],InvokerJob->[execute->[getPreviousFireTime->[getPreviousFireTime],getNextFireTime->[getNextFireTime]]]]]
Creates a new job based on the given bundle. Checks if the scheduled task is empty.
So previously the `shouldRecover` property was false. What are the implications of enabling it by default?
@@ -0,0 +1,7 @@ +from .models import Page + + +def pages_visible_to_user(user): + if user.is_authenticated and user.is_active and user.is_staff: + return Page.objects.all() + return
[No CFG could be retrieved]
No Summary Found.
Missing `return Page.objects.public()`?
@@ -251,6 +251,9 @@ func runServ(c *cli.Context) error { if err = private.UpdateDeployKeyUpdated(key.ID, repo.ID); err != nil { fail("Internal error", "UpdateDeployKey: %v", err) } + + os.Setenv(models.EnvPusherName, username) + os.Setenv(models.EnvPusherID, fmt.Sprintf("%d", repo.OwnerID)) } else { user, err = private.GetUserByKeyID(key.ID) if err != nil {
[DelLogger,GetPublicKeyByID,NewWithClaims,GetUserByKeyID,Now,Close,Setenv,UpdatePublicKeyUpdated,Encode,IsSet,Exit,Add,HasDeployKey,UpdateDeployKeyUpdated,Error,ShowSubcommandHelp,Args,Compare,NewEncoder,DumpMemProfileForUsername,MustInt64,DumpCPUProfileForUsername,NewGitLogger,Run,Bool,SplitN,TrimSpace,HasSuffix,Join,Fprintln,Unix,Contains,InitWiki,ToLower,CheckUnitUser,SignedString,Split,MkdirAll,Trim,Command,BinVersion,Fprintf,TrimSuffix,Sprintf,GetRepositoryByOwnerAndName,Replace,Fatal,String,Getenv,StrTo,NewContext]
check key permission if checks if user has access to requested level and if it has access to requested level.
What about using `repo.Owner.NameToLower` (I think), just for consistency?
@@ -1995,8 +1995,8 @@ describes.realWin('CustomElement', {amp: true}, (env) => { it('should NOT turn on when enters viewport but already laid out', () => { stubInA4A(false); const toggle = env.sandbox.spy(element, 'toggleLoading'); - element.layoutCount_ = 1; element.viewportCallback(true); + element.layoutCount_ = 1; clock.tick(1000); expect(toggle).to.have.not.been.called; });
[No CFG could be retrieved]
Checks that the element is in viewport. should toggle loading off after layout complete.
This should probably be its own unit test (one for each conditional). The outer conditional check should check that both `toggleLoading` and `timer.delay` are not called.
@@ -1219,7 +1219,7 @@ module Engine @graph end - def upgrade_cost(tile, hex, entity) + def upgrade_cost(tile, hex, entity, spender) ability = entity.all_abilities.find do |a| a.type == :tile_discount && (!a.hexes || a.hexes.include?(hex.name))
[Base->[end_game!->[format_currency],par_price_str->[format_currency],current_entity->[current_entity],log_cost_discount->[format_currency],float_corporation->[format_currency],after_par->[format_currency,place_home_token,all_companies_with_ability],remove_train->[remove_train],init_hexes->[abilities],token_string->[count_available_tokens],place_home_token->[home_token_locations],initialize_actions->[filtered_actions],rust_trains!->[rust,rust?],close_corporation->[current_entity],liquidity->[liquidity],round_description->[total_rounds],crowded_corps->[train_limit],meta->[meta],trains->[trains],initialize->[init_optional_rules],total_emr_buying_power->[emergency_issuable_cash,liquidity],close_companies_on_train!->[abilities],rust->[remove_train],process_single_action->[setup,process_action],init_company_abilities->[shares,abilities],action_processed->[close_corporation],buy_train->[trains],payout_companies->[format_currency],train_limit->[train_limit],ability_right_time?->[current_entity],log_share_price->[format_currency],event_close_companies!->[abilities],active_step->[active_step],check_programmed_actions->[player_log]],load->[load]]
Returns the graph of nodes for the given entity.
could be less change if you did entity, spender = nil spender ||= entity
@@ -660,6 +660,12 @@ public class SaltUtils { String key = result.keySet().iterator().next(); serverAction.setResultMsg(result.get(key).getAsJsonObject().get("comment").getAsString()); } + else if (action instanceof BaseVirtualizationPoolAction) { + // Tell VirtNotifications that we got a pool action change, passing action + VirtNotifications.spreadActionUpdate(action); + // Intentionally don't get only the comment since the changes value could be interesting + serverAction.setResultMsg(getJsonResultWithPrettyPrint(jsonResult)); + } else { serverAction.setResultMsg(getJsonResultWithPrettyPrint(jsonResult)); }
[SaltUtils->[updateSystemInfo->[updateSystemInfo],createImagePackageFromSalt->[createImagePackageFromSalt,parsePackageEvr],prerequisiteIsCompleted->[prerequisiteIsCompleted],decodeSaltErr->[decodeStdMessage],applyChangesFromStateApply->[applyChangesFromStateModule],packageToKey->[packageToKey],SaltUtils]]
Updates the server action with the given parameters. Create the result of the action based on the action status. This method is called when the action is executed successfully. This method is called when a channel task is executed.
How big is typically the job result (in case of both success and error)? (I suppose it's pretty small.)
@@ -227,6 +227,14 @@ class _PackageBuilder(object): return node.pref +def remove_folder_raising(folder): + try: + rmdir(folder) + except OSError as e: + raise ConanException("%s\n\nCouldn't remove folder, might be busy or open\n" + "Close any app using it, and retry" % str(e)) + + def _handle_system_requirements(conan_file, pref, cache, out): """ check first the system_reqs/system_requirements.txt existence, if not existing check package/sha1/
[_PackageBuilder->[build_package->[_package,_build,_get_build_folder,_prepare_sources],_get_build_folder->[build_id]],build_id->[build_id],BinaryInstaller->[install->[_build],_build_package->[_PackageBuilder,build_package],_build->[_handle_system_requirements,raise_package_not_found_error],_handle_node_cache->[_node_concurrently_installed]]]
Handle system requirements.
Make this function _private_, (`_remove_folder_raising`) or move it to `conans.client.tools.files`
@@ -11,7 +11,7 @@ namespace System.Diagnostics.CodeAnalysis /// This allows tools to understand which methods are unsafe to call when removing unreferenced /// code from an application. /// </remarks> - [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor, Inherited = false)] + [AttributeUsage(AttributeTargets.Method | AttributeTargets.Constructor | AttributeTargets.Class, Inherited = false)] #if SYSTEM_PRIVATE_CORELIB public #else
[RequiresUnreferencedCodeAttribute->[Constructor,Method]]
RequiresUnreferencedCodeAttribute provides a simple interface to the requires unreferenced code attribute.
Why just class, rather than also, say, struct? We should review the API change as part of API review, though mainly just to ensure we've properly discussed exactly what targets we want to enable, not because anyone would have a real problem with it. cc: @terrajobst
@@ -103,6 +103,9 @@ var $$AnimateQueueProvider = ['$animateProvider', function($animateProvider) { var activeAnimationsLookup = new $$HashMap(); var disabledElementsLookup = new $$HashMap(); var animationsEnabled = null; + // $document might be mocked and won't include a real document. + // Providing an empty object will prevent property read errors + var rawDocument = $document[0] || {}; function postDigestTaskFactory() { var postDigestCalled = false;
[No CFG could be retrieved]
Creates a function that checks if a new animation has already been running and if so calls it Watch for the last un - downloaded template.
"... mocked out in tests ..."
@@ -622,7 +622,9 @@ def test_task_runner_performs_retries_for_short_delays(client): global_list.append(0) raise ValueError("oops") + client.get_task_run_info.side_effect = [MagicMock(version=i) for i in range(4, 7)] res = CloudTaskRunner(task=noop).run( + context={"task_run_version": 1}, state=None, upstream_states={}, executor=prefect.engine.executors.LocalExecutor(),
[test_task_runner_sets_mapped_state_prior_to_executor_mapping->[MyExecutor]]
Test task runner does retries for short delays.
Previously this test failed because `versions = [1, 2, 3, 1, 2]`
@@ -37,6 +37,7 @@ import ( "github.com/vmware/govmomi/vim25/types" "github.com/vmware/vic/cmd/vic-machine/common" "github.com/vmware/vic/lib/config" + "github.com/vmware/vic/lib/config/executor" "github.com/vmware/vic/lib/install/data" "github.com/vmware/vic/pkg/errors" "github.com/vmware/vic/pkg/trace"
[basics->[NoteIssue],getDatastore->[NoteIssue],registries->[NoteIssue],certificate->[NoteIssue],Validate->[ListIssues,datacenter],sessionValid->[checkSessionSet,NoteIssue],managedbyVC->[NoteIssue],credentials->[NoteIssue],certificateAuthorities->[NoteIssue],compatibility->[sessionValid,NoteIssue],ValidateTarget->[ListIssues,datacenter],getAllDatastores->[getDatastore],checkDatastoresAreWriteable->[getAllDatastores,NoteIssue]]
This function is used to import a single object from a VMware environment. returns a Validator that validates a given .
this import is not used
@@ -91,8 +91,9 @@ angular.module('zeppelinWebApp').controller('NavCtrl', function($scope, $rootSco }); }; - $scope.search = function() { - $location.url(/search/ + $scope.searchTerm); + $scope.search = function(searchTerm) { + console.log('search term ', searchTerm); + $location.url(/search/ + searchTerm); }; function loadNotes() {
[No CFG could be retrieved]
function to show the login dialog.
Can you remove console.log from here.
@@ -170,6 +170,7 @@ export class AmpAdXOriginIframeHandler { this.element_.warnOnMissingOverflow = false; } this.handleResize_( + data['id'], data['height'], data['width'], source,
[No CFG could be retrieved]
Initializes the object with the given id. Handle one - time request to get the rendered entity identifier.
I'm not very familiar with this code. Can you explain when this has an id or not, and why there is no id for `renderStartMsgHandler_`?
@@ -51,9 +51,15 @@ class WPSEO_GooglePlus { * Output the Google+ specific description */ public function description() { + $desc = ''; if ( is_singular() ) { $desc = WPSEO_Meta::get_value( 'google-plus-description' ); + } + elseif ( is_category() || is_tag() || is_tax() ) { + $desc = WPSEO_Taxonomy_Meta::get_meta_without_term( 'google-plus-description' ); + } + if ( ! empty( $desc ) && is_string( $desc ) ) { /** * Filter: 'wpseo_googleplus_desc' - Allow developers to change the Google+ specific description output *
[No CFG could be retrieved]
Get the description of the node.
The empty check seems not necessary when you use is_string
@@ -18,6 +18,7 @@ #include "processes/find_nodal_neighbours_process.h" #include "custom_processes/spr_error_process.h" #include "utilities/variable_utils.h" +#include "utilities/stl_io.h" namespace Kratos {
[No CFG could be retrieved]
Provides a function to create a new object. Variable for the superconvergent stresses.
Does this file belong in this PR?
@@ -26,6 +26,10 @@ import paddle.fluid as fluid import paddle.fluid.core as core import paddle.fluid.framework as framework from paddle.fluid.executor import Executor +from models.model_base import get_decay_learning_rate +from models.model_base import get_regularization +from models.model_base import set_error_clip +from models.model_base import set_gradient_clip def lstm_step(x_t, hidden_t_prev, cell_t_prev, size):
[lstm_step->[linear],seq_to_seq_net->[lstm_decoder_with_attention->[lstm_step,simple_attention],lstm_decoder_with_attention,bi_lstm_encoder],get_model->[seq_to_seq_net]]
LSTM step of the LSTM algorithm.
model_base is not uploaded?
@@ -29,7 +29,7 @@ class TagAdjustment < ApplicationRecord end def article_tag_list - errors.add(:tag_id, "selected for removal is not a current live tag.") if adjustment_type == "removal" && article.tag_list.exclude?(tag_name) + errors.add(:tag_id, "selected for removal is not a current live tag.") if adjustment_type == "removal" && article.tag_list.none? { |tag| tag.casecmp(tag_name).zero? } errors.add(:base, "4 tags max per article.") if adjustment_type == "addition" && article.tag_list.count > 3 end end
[TagAdjustment->[user_permissions->[add,has_privilege_to_adjust?],article_tag_list->[add,exclude?,count],has_privilege_to_adjust?->[has_role?],validate,has_many,validates,belongs_to]]
Checks if the current tag is not a live tag and if it is a max number of.
Shoutout to Rubocop for this sorcery...I had it as `downcase`
@@ -604,6 +604,8 @@ public class TxInterceptor<K, V> extends DDAsyncInterceptor implements JmxStatis log.tracef("Rolling back remote transaction %s because either already completed (%s) or originator no longer in the cluster (%s).", globalTransaction, alreadyCompleted, originatorMissing); } + // The rollback command only marks the transaction as completed in invokeAsync() + txTable.markTransactionCompleted(globalTransaction, false); RollbackCommand rollback = commandsFactory.buildRollbackCommand(command.getGlobalTransaction()); return invokeNextAndFinally(ctx, rollback, (rCtx, rCommand, rv1, throwable1) -> { RemoteTransaction remoteTx = ((TxInvocationContext<RemoteTransaction>) rCtx).getCacheTransaction();
[TxInterceptor->[visitReadOnlyKeyCommand->[enlistIfNeeded],TransactionAwareCloseableIterator->[close->[close],getNextFromIterator->[getKey,fromEntry,next,hasNext]],visitReadOnlyManyCommand->[enlistIfNeeded],TransactionAwareKeyCloseableIterator->[remove->[remove]],enlist->[enlist],visitEntrySetCommand->[spliterator->[size,spliterator],iterator->[iterator]],visitKeySetCommand->[spliterator->[spliterator],iterator->[iterator]],TransactionAwareEntryCloseableIterator->[getKey->[getKey],remove->[getKey,remove]],replayRemoteTransactionIfNeeded->[handlePrepareCommand]]]
Verify if the remote transaction is not already in the cluster. rollback the transaction if any.
Is this the only place that marks transaction for rollback when the lock command fails? Or is it up to the user to handle exception being thrown by the operation and mark the tx?
@@ -35,7 +35,7 @@ class StyleTransferConverter(BaseFormatConverter): return parameters def configure(self): - self.image_dir = self.get_value_from_config('images_dir') + self.image_dir = Path(self.get_value_from_config('images_dir')) def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs): content_check_errors = [] if check_content else None
[StyleTransferConverter->[convert->[list,StyleTransferAnnotation,iterdir,append,ConverterReturn],parameters->[super,PathField,update],configure->[get_value_from_config]]]
Configure the class.
Why is this needed? I thought `images_dir` being a `PathField` meant that the value would already be a `Path`.
@@ -282,7 +282,8 @@ export class Resource { * for details. */ build() { - if (this.blacklisted_ || !this.element.isUpgraded()) { + if (this.blacklisted_ || !this.element.isUpgraded() + || !this.resources_.shouldBuildNow()) { return; } try {
[No CFG could be retrieved]
Replies the unique identifier of a resource. Replies the owner element of the resource.
This check can be moved up into `Resources#buildOrScheduleBuildForResource_`
@@ -65,10 +65,12 @@ public class Timestamps public static final long PICOSECONDS_PER_DAY = PICOSECONDS_PER_HOUR * 24; public static final long SECONDS_PER_MINUTE = 60; public static final long MINUTES_PER_HOUR = 60; + public static final long HOURS_PER_DAY = 24; + public static final long SECONDS_PER_DAY = SECONDS_PER_MINUTE * MINUTES_PER_HOUR * HOURS_PER_DAY; private Timestamps() {} - static long round(long value, int magnitude) + public static long round(long value, int magnitude) { return roundDiv(value, POWERS_OF_TEN[magnitude]) * POWERS_OF_TEN[magnitude]; }
[Timestamps->[epochMicrosToMillisWithRounding->[roundDiv]]]
round value to the nearest power of 2.
inline (let's define when becomes useufl)
@@ -275,6 +275,10 @@ export class AmpImg extends BaseElement { /** @override */ unlayoutCallback() { + if (AmpImg.V1()) { + return; + } + if (this.unlistenError_) { this.unlistenError_(); this.unlistenError_ = null;
[No CFG could be retrieved]
Private methods - functions - functions - functions private static final String ABAAACAkQBADs = ;.
There's a "Interrupt retrieval of incomplete images to free network resources" optimization below. Do we want to support that in V1?
@@ -240,7 +240,11 @@ class Pyboard: setattr(Pyboard, "exec", Pyboard.exec_) def execfile(filename, device='/dev/ttyACM0', baudrate=115200, user='micro', password='python'): - pyb = Pyboard(device, baudrate, user, password) + try: + pyb = Pyboard(device, baudrate, user, password) + except PyboardError as er: + print(er) + return pyb.enter_raw_repl() output = pyb.execfile(filename) stdout_write_bytes(output)
[Pyboard->[exec_->[exec_raw,PyboardError],exit_raw_repl->[write],enter_raw_repl->[inWaiting,read,write,read_until,PyboardError],follow->[read_until,PyboardError],execfile->[exec_,read],__init__->[TelnetToSerial],exec_raw->[follow,exec_raw_no_follow],read_until->[inWaiting,read],close->[close],exec_raw_no_follow->[read,write,read_until,PyboardError]],TelnetToSerial->[close->[close],write->[write],__init__->[PyboardError]],execfile->[exit_raw_repl,enter_raw_repl,stdout_write_bytes,execfile,Pyboard,close],main->[execbuffer->[exit_raw_repl,enter_raw_repl,stdout_write_bytes,exec_raw,Pyboard,close],execbuffer,stdout_write_bytes,close,Pyboard,follow,read],main]
Execute a file on the device.
Since execfile should be executed from within a python script I'd say it should raise an exception on error so that it can be handled by the caller.
@@ -98,7 +98,7 @@ class SuluAdminExtension extends Extension implements PrependExtensionInterface 'fos_js_routing', [ 'routes_to_expose' => [ - 'c?get_.*', + '.+\.c?get_.*', 'sulu_admin.metadata', ], ]
[SuluAdminExtension->[prepend->[getParameter,prependExtensionConfig,hasExtension],load->[setParameter,addTag,getAlias,processConfiguration,loadFieldTypeOptions,getDefinition,load],loadFieldTypeOptions->[addMethodCall]]]
Prepends a configuration with the configuration chain. get_localizations - get_localizations - get_teasers - get_.
I would make the prefix optional. `(.+\.)?c?get_.*`
@@ -20,14 +20,12 @@ describe CustomField, type: :model do it { should belong_to :user } it { should belong_to :team } it { should belong_to(:last_modified_by).class_name('User') } - it { should have_many :sample_custom_fields } end describe 'Should be a valid object' do before do @user = create :user, email: 'example_one@adsf.com' @team = create :team - @samples_table = create :samples_table, user: @user, team: @team end it { should validate_presence_of :name }
[class_name,is_at_most,to,create,have_db_column,build,describe,in_array,to_not,build_stubbed,before,eq,validate_presence_of,have_many,it,require,belong_to,should]
Private helper methods missing_custom_field_stubbed is stubbed so we can t use it directly.
Again, I think whole `custom_field` stuff should be deleted, including this spec.
@@ -87,16 +87,14 @@ public class DeleteViewCommand extends CLICommand { throw e; } - final String errorMsg = String.format(view_s + ": " + e.getMessage()); - stderr.println(errorMsg); + stderr.println(view_s + ": " + e.getMessage()); errorOccurred = true; - continue; } } if (errorOccurred) { throw new AbortException(CLI_LISTPARAM_SUMMARY_ERROR_TEXT); } - return 0; + return CLIReturnCodeStandard.OK; } }
[DeleteViewCommand->[run->[IllegalArgumentException,getViewName,getMessage,AbortException,size,IllegalStateException,checkPermission,ViewOptionHandler,getOwner,println,format,addAll,deleteView,getDisplayName,getView,canDelete],getShortDescription->[DeleteViewCommand_ShortDescription]]]
Delete view with duplicates.
Why removing this?
@@ -831,11 +831,12 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named usernsMode := "" if c.config.UserNsCtr != "" { usernsMode = fmt.Sprintf("container:%s", c.config.UserNsCtr) - } else { + } else if ctrSpec.Linux != nil { // Locate the spec's user namespace. // If there is none, it's default - the empty string. // If there is one, it's "private" if no path, or "ns:" if // there's a path. + for _, ns := range ctrSpec.Linux.Namespaces { if ns.Type == spec.UserNamespace { if ns.Path != "" {
[inspectLocked->[ID,Wrapf,GetDriverData,Layer,getContainerInspectData,Container],getInspectMounts->[ID,Wrapf,Volume,Driver,MountPoint],generateInspectContainerConfig->[Hostname,Join],getContainerInspectData->[specFromState,Error,IsInfra,sortUserVolumes,getInspectMounts,generateInspectContainerConfig,Dependencies,getContainerNetworkInfo,String,generateInspectContainerHostConfig,Errorf,rwSize,GetHealthCheckLog,rootFsSize],generateInspectContainerHostConfig->[Join,Sprintf,FindDeviceNodes,New,Warnf,LastCap,List,ToUpper,String,Split],Inspect->[inspectLocked,syncContainer,Lock,Unlock]]
generateInspectContainerHostConfig generates the container host config Initialize a single node in the system This method is used to populate the object variables that are not defined in the spec. This function is used to populate the host config with default values.
Are these ifs really Coverity issues?
@@ -232,4 +232,11 @@ public class ParagraphTest { } + + @Test + public void deserializeParagraphFromDifferentDateFormat() { + Paragraph paragraph = Note.GSON + .fromJson("{\"dateUpdated\":\"2017-09-01 00:01:02.345\"}", Paragraph.class); + System.out.println(paragraph.dateUpdated); + } }
[ParagraphTest->[returnUnchangedResultsWithDifferentUser->[any,jobRun,mock,assertNotEquals,anyString,toString,getRequiredReplName,thenReturn,add,onOutputUpdateAll,Paragraph,newArrayList,AuthenticationInfo,getUserParagraph,assertEquals,spy,getUser,anyList,getListener,setAuthenticationInfo,getScriptBody,getRepl,InterpreterResultMessage],should_extract_variable_from_angular_object_registry->[extractVariablesFromAngularRegistry,build,thenReturn,mock,get,getId,Paragraph,assertEquals,put],scriptBodyWithoutReplName->[getScriptBody,assertEquals],replNameEndsWithWhitespace->[assertEquals,getRequiredReplName],replNameAndNoBody->[getScriptBody,assertEquals,getRequiredReplName],returnDefaultParagraphWithNewUser->[assertNotNull,getReturn,setResult,getUserParagraph,Paragraph,assertEquals],scriptBodyWithReplName->[getScriptBody,assertEquals],replSingleCharName->[getScriptBody,assertEquals,getRequiredReplName]]]
mock the results of a command that return changed results with different user. This method is called when a job is being run. It will check if the two paragraphs.
Can we have a better unit test for that? I don't think `System.out.println` is enough. Can we use something like `assert`?
@@ -659,7 +659,7 @@ func (fbo *folderBranchOps) commitHeadLocked( id := fbo.id() log := fbo.log go func() { - err := diskMDCache.Commit(ctx, id, rev) + err := diskMDCache.Commit(context.Background(), id, rev) if err != nil { log.CDebugf(ctx, "Error commiting revision %d: %+v", rev, err) }
[handleTLFBranchChange->[setHeadSuccessorLocked,isUnmergedLocked,id,Lock,handleUnflushedEditNotifications,Unlock,setBranchIDLocked],SetInitialHeadFromServer->[kickOffPartialSyncIfNeeded,kickOffRootBlockFetch,waitForRootBlockFetch,id,Lock,validateHeadLocked,identifyOnce,getHead,Unlock,setInitialHeadTrustedLocked],runUnlessShutdown->[newCtxWithFBOID],setMtimeLocked->[notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowUnixNano],processMissedLookup->[makeFakeDirEntry],setHeadSuccessorLocked->[id,Lock,setHeadLocked,Unlock,setInitialHeadTrustedLocked],getUnmergedMDUpdatesLocked->[id,getCurrMDRevision],pathFromNodeForRead->[pathFromNodeHelper],updateLastGetHeadTimestamp->[Lock,Unlock],makeFakeDirEntry->[String,nowUnixNano],applyMDUpdates->[Lock,applyMDUpdatesLocked,Unlock],getCachedDirOpsCount->[Lock,Unlock],canonicalPath->[String,pathFromNodeForRead,PathType],onMDFlush->[newCtxWithFBOID,handleMDFlush],getHead->[commitHeadLocked,updateLastGetHeadTimestamp],finalizeResolution->[Lock,finalizeResolutionLocked,Unlock],getProtocolSyncConfigUnlocked->[getProtocolSyncConfig],commitHeadLocked->[id],getDirChildren->[pathFromNodeForRead,getMDForReadNeedIdentify],finalizeMDRekeyWriteLocked->[setHeadSuccessorLocked,Lock,waitForJournalLocked,Unlock,setBranchIDLocked],makeRecentFilesSyncConfig->[id],createEntryLocked->[syncDirUpdateOrSignal,checkForUnlinkedDir,id,branch,canonicalPath,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowUnixNano],handleEditNotifications->[sendEditNotifications,makeEditNotifications],SetInitialHeadToNew->[initMDLocked,Lock,identifyOnce,Unlock],makeEncryptedPartialPathsLocked->[id,getProtocolSyncConfig],maybeUnembedAndPutBlocks->[id],getMDForWriteLockedForFilename->[getMDForWriteOrRekeyLocked],setInitialHeadTrustedLocked->[setHeadLocked],doFastForwardLocked->[setHeadSuccessorLocked,kickOffRootBlockFetch,waitForRootBlockFetch],locallyFinalizeTLF->[id,Lock,Unlock,setHeadSuccessorLocked],blockUnmergedWrites->[Lock],
doFavoritesOp->[deleteFromFavorites,addToFavoritesByHandle,addToFavorites],finalizeResolutionLocked->[newCtxWithFBOID,id,Lock,notifyOneOpLocked,finalizeBlocks,handleEditNotifications,handleUnflushedEditNotifications,setHeadConflictResolvedLocked,Unlock,setBranchIDLocked],getAndApplyNewestUnmergedHead->[setHeadSuccessorLocked,id,Lock,notifyBatchLocked,Unlock],checkNodeForRead->[branch,GetTLFHandle,checkNode],ClearPrivateFolderMD->[id,Lock,Unlock],SetSyncConfig->[kickOffRootBlockFetch,id,Lock,kickOffPartialSync,branch,getHead,Unlock,makeEncryptedPartialPathsLocked],waitForJournalLocked->[id],rekeyLocked->[getAndApplyMDUpdates,finalizeMDRekeyWriteLocked,isUnmergedLocked,getMDForRekeyWriteLocked,getHead],removeEntryLocked->[checkForUnlinkedDir,notifyAndSyncOrSignal,unrefEntryLocked],SetEx->[checkNodeForWrite,doMDWriteWithRetryUnlessCanceled,setExLocked],GetEditHistory->[id,getHead],kickOffPartialSync->[Lock,Unlock,doPartialSync],notifyOneOpLocked->[id,pathFromNodeForRead,getUnlinkPathBeforeUpdatingPointers,searchForNode],getProtocolSyncConfig->[id],getMDForReadHelper->[getMDForRead],unstageLocked->[getAndApplyMDUpdates,isUnmergedLocked,id,finalizeMDWriteLocked,notifyBatchLocked,undoUnmergedMDUpdatesLocked,getSuccessorMDForWriteLocked,maybeUnembedAndPutBlocks],checkNodeForWrite->[String,checkNode],notifyOneOp->[Lock,Unlock,notifyOneOpLocked],setExLocked->[notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowUnixNano],getMDForReadNeedIdentify->[getMDForReadHelper],getCurrMDRevision->[getCurrMDRevisionLocked],getMDForMigrationLocked->[getMDForWriteOrRekeyLocked],finalizeGCOp->[setBranchIDLocked,setHeadSuccessorLocked,Lock,finalizeBlocks,getSuccessorMDForWriteLocked,Unlock,maybeUnembedAndPutBlocks],commitFlushedMD->[id,kickOffPartialSyncIfNeeded,kickOffRootBlockFetch],Rename->[renameLocked,checkNodeForWrite,doMDWriteWithRetryUnlessCanceled],Lock->[Lock],setHeadPredecessorLocked->[setHeadLocked],CreateDir->[checkNodeForWrite,doMDWriteWithRetry
UnlessCanceled,createEntryLocked],RemoveEntry->[removeEntryLocked,doMDWriteWithRetryUnlessCanceled,checkNodeForWrite,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename],getMDForReadNoIdentify->[getMDForReadHelper],makeEditNotifications->[id],kickOffPartialSyncIfNeeded->[getProtocolSyncConfigUnlocked,makeRecentFilesSyncConfig,kickOffPartialSync],getMDForRekeyWriteLocked->[getMDForWriteOrRekeyLocked],Lookup->[checkNodeForRead,lookup,transformReadError],notifyAndSyncOrSignal->[syncDirUpdateOrSignal],finalizeBlocks->[id],pathFromNodeForMDWriteLocked->[pathFromNodeHelper],statEntry->[transformReadError,pathFromNodeForRead,getMDForReadNoIdentify,checkNodeForRead,statUsingFS,getMDForReadNeedIdentify],MigrateToImplicitTeam->[Lock,Unlock,finalizeMDRekeyWriteLocked,getMDForMigrationLocked],getSuccessorMDForWriteLocked->[getSuccessorMDForWriteLockedForFilename],SetMtime->[setMtimeLocked,checkNodeForWrite,doMDWriteWithRetryUnlessCanceled],createLinkLocked->[checkForUnlinkedDir,notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename,nowUnixNano],GetTLFHandle->[getHead],applyMDUpdatesLocked->[kickOffPartialSyncIfNeeded,setHeadSuccessorLocked,kickOffRootBlockFetch,isUnmergedLocked,waitForRootBlockFetch,Lock,getJournalPredecessorRevision,notifyOneOpLocked,getCurrMDRevisionLocked,Unlock],recomputeEditHistory->[getEditMessages,id],getAndApplyMDUpdates->[id,getLatestMergedRevision],removeDirLocked->[removeEntryLocked,pathFromNodeForMDWriteLocked,getMDForWriteLockedForFilename],SyncAll->[syncAllLocked,doMDWriteWithRetryUnlessCanceled],getUnlinkPathBeforeUpdatingPointers->[pathFromNodeForRead],waitForAndProcessUpdates->[getAndApplyMDUpdates,maybeFastForward],sendEditNotifications->[id,getConvID],Shutdown->[Shutdown],Reset->[Lock,Unlock,GetTLFHandle],registerForUpdatesShouldFireNow->[Lock,Unlock],Write->[signalWrite,checkNodeForWrite,getMDForRead,Write],Read->[transformReadError,pathFromNodeForRead,checkNodeForRead,Read,getMDForReadNeedIdentify],setHead
Locked->[commitFlushedMD,id,getJournalPredecessorRevision,validateHeadLocked,startMonitorChat,setBranchIDLocked],GetNodeMetadata->[id,statEntry],undoUnmergedMDUpdatesLocked->[getUnmergedMDUpdatesLocked,Lock,id,setHeadPredecessorLocked,setLatestMergedRevisionLocked,undoMDUpdatesLocked,Unlock,setBranchIDLocked],syncAllLocked->[id,finalizeMDWriteLocked,startSyncLocked,branch,getSuccessorMDForWriteLocked,getHead],CreateFile->[id,checkNodeForWrite,doMDWriteWithRetryUnlessCanceled,createEntryLocked],PushStatusChange->[PushStatusChange],doPartialSync->[syncOneNode],registerAndWaitForUpdates->[ctxWithFBOID,locallyFinalizeTLF,Lock,runUnlessShutdown,Unlock],GetUpdateHistory->[id,String],getTrustedHead->[commitHeadLocked,updateLastGetHeadTimestamp],getUnmergedMDUpdates->[id,Lock,Unlock,getCurrMDRevision],doMDWriteWithRetry->[Lock,Unlock,maybeWaitForSquash],handleEditActivity->[getEditMessages,recomputeEditHistory,initEditChatChannels,String],syncAllUnlocked->[syncAllLocked,Lock,Unlock],checkForUnlinkedDir->[String],getConvID->[id,Lock,Unlock],lookup->[statUsingFS,getMDForReadNeedIdentify,processMissedLookup],ForceFastForward->[newCtxWithFBOID,kickOffPartialSyncIfNeeded,doFastForwardLocked,id,Lock,Unlock],RemoveDir->[checkNodeForWrite,doMDWriteWithRetryUnlessCanceled,removeDirLocked,RemoveDir],SyncFromServer->[syncAllUnlocked,getAndApplyMDUpdates,isUnmerged,id],monitorEditsChat->[newCtxWithFBOID,Lock,recomputeEditHistory,Unlock,handleEditActivity],maybeFastForward->[kickOffPartialSyncIfNeeded,doFastForwardLocked,isUnmergedLocked,id,Lock,getJournalPredecessorRevision,isUnmerged,Unlock],getMDForWriteOrRekeyLocked->[id,Lock,identifyOnce,setHeadLocked,getTrustedHead,Unlock],registerForUpdates->[id,registerForUpdatesShouldFireNow,getLatestMergedRevision],backgroundFlusher->[id,getCachedDirOpsCount,SyncAll,runUnlessShutdown],isUnmerged->[Lock,Unlock],GetDirChildren->[getDirChildren,checkNodeForRead,transformReadError],getSuccessorMDForWriteLockedForFilename->[getMDForWriteLockedForFi
lename],getRootNode->[Lock,Unlock,getMDForRead,getMDForWriteOrRekeyLocked],TeamAbandoned->[newCtxWithFBOID,locallyFinalizeTLF],statUsingFS->[String,makeFakeDirEntry],GetSyncConfig->[id,branch,getHead,getProtocolSyncConfigUnlocked],CreateLink->[checkNodeForWrite,doMDWriteWithRetryUnlessCanceled,createLinkLocked],newCtxWithFBOID->[ctxWithFBOID],unblockUnmergedWrites->[Unlock],initMDLocked->[id,Lock,setNewInitialHeadLocked,getHead,Unlock,maybeUnembedAndPutBlocks],doMDWriteWithRetryUnlessCanceled->[doMDWriteWithRetry],getJournalPredecessorRevision->[id,String],finalizeMDWriteLocked->[setHeadSuccessorLocked,isUnmergedLocked,id,Lock,finalizeBlocks,handleEditNotifications,handleUnflushedEditNotifications,Unlock,setBranchIDLocked],TeamNameChanged->[id,newCtxWithFBOID,Unlock,Lock],initEditChatChannels->[id,String],transformReadError->[id,GetTLFHandle],onTLFBranchChange->[newCtxWithFBOID,handleTLFBranchChange],markForReIdentifyIfNeeded->[Lock,Unlock],setNewInitialHeadLocked->[setHeadLocked],identifyOnce->[Lock,Unlock],setHeadConflictResolvedLocked->[setHeadLocked],handleUnflushedEditNotifications->[id,makeEditNotifications],Truncate->[Truncate,checkNodeForWrite,getMDForRead,signalWrite],getMDForRead->[getTrustedHead,identifyOnce],unstageAfterFailedResolution->[Lock,Unlock,unstageLocked],getMostRecentFullyMergedMD->[getMDForReadHelper,getJournalPredecessorRevision,id],syncDirUpdateOrSignal->[signalWrite],renameLocked->[checkForUnlinkedDir,notifyAndSyncOrSignal,pathFromNodeForMDWriteLocked,unrefEntryLocked,getMDForWriteLockedForFilename,nowUnixNano],Stat->[statEntry],handleMDFlush->[commitFlushedMD,id,Lock,setLatestMergedRevisionLocked,handleEditNotifications,Unlock],getMDForReadNeedIdentifyOnMaybeFirstAccess->[Lock,Unlock,getMDForRead,getMDForWriteOrRekeyLocked],UnstageForTesting->[doMDWriteWithRetry,isUnmerged,unstageLocked],undoMDUpdatesLocked->[Lock,setHeadPredecessorLocked,notifyOneOpLocked,getCurrMDRevisionLocked,Unlock],Unlock->[Unlock],String]
commitHeadLocked commits the head of the current node.
Maybe do that delayed cancellation thing here to preserve the tags? Also do you think we should still have some sort of timeout here?
@@ -4,6 +4,7 @@ class WP_Test_Jetpack_Sync_Module_Stats extends WP_Test_Jetpack_Sync_Base { function test_sends_stats_data_on_heartbeat() { + $this->markTestIncomplete( 'Stalls' ); $heartbeat = Jetpack_Heartbeat::init(); $heartbeat->cron_exec(); $this->sender->do_sync();
[WP_Test_Jetpack_Sync_Module_Stats->[test_sends_stats_data_on_heartbeat->[do_sync,assertEquals,cron_exec,get_most_recent_event]]]
test sending stats data on heartbeat.
what's going wrong with this one, any ideas?
@@ -142,6 +142,16 @@ public class PutHBaseRecord extends AbstractPutHBase { .allowableValues(NULL_FIELD_EMPTY, NULL_FIELD_SKIP) .build(); + protected static final PropertyDescriptor VISIBILITY_RECORD_PATH = new PropertyDescriptor.Builder() + .name("put-hb-rec-visibility-record-path") + .displayName("Visibility String Record Path Root") + .description("A record path that points to part of the record which contains a path to a mapping of visibility strings to record paths") + .required(false) + .addValidator(Validator.VALID) + .build(); + + protected RecordPathCache recordPathCache; + @Override public final List<PropertyDescriptor> getSupportedPropertyDescriptors() { final List<PropertyDescriptor> properties = new ArrayList<>();
[PutHBaseRecord->[onTrigger->[addBatch],createPut->[asBytes]]]
Replies the list of supported property descriptors.
I like this configuration. Should we add a similar property to PutHBaseJson, in order to pass the visibility label within the input JSON tree? Similar to the `Column Family` property. What do you think?
@@ -72,3 +72,7 @@ func TestFetchEventContents(t *testing.T) { assert.EqualValues(t, 17, statusCodes["200"]) assert.EqualValues(t, 1, statusCodes["404"]) } + +func TestData(t *testing.T) { + mbtest.TestDataFiles(t, "traefik", "health", mbtest.TestDataConfig(t)) +}
[Header,Write,EqualValues,GetErrors,GetEvents,NewServer,ReadFile,Println,HandlerFunc,Close,NoError,NewReportingMetricSetV2Error,Fetch,Nil,Set,WriteHeader]
Assert that t is 200 and 404.
Why `mbtest.TestDataConfig(t)` is not called directly from the `TestData` function which already has `t`?
@@ -179,10 +179,8 @@ public class ImagingComponent extends DefaultComponent implements ImagingService try (CloseableFile cf = blob.getCloseableFile(ext)) { imageInfo = ImageIdentifier.getInfo(cf.getFile().getAbsolutePath()); } - } catch (CommandNotAvailable | CommandException e) { - log.error("Failed to get ImageInfo for file " + blob.getFilename(), e); - } catch (IOException e) { - log.error("Failed to transfer file " + blob.getFilename(), e); + } catch (CommandNotAvailable | CommandException | IOException e) { + log.error("Failed to get ImageInfo for file {}", blob.getFilename(), e); } return imageInfo; }
[ImagingComponent->[convertToPDF->[convertToPDF],crop->[crop],computeOriginalJpegView->[getImageInfo,wrapBlob],rotate->[rotate],getPictureConversions->[getPictureConversions],resize->[resize],computeViewFor->[computeViewFor,getImageInfo,getImageMimeType],computeViewsFor->[getPictureConversions,computeViewsFor,getImageInfo,computeView,getImageMimeType],computeView->[computeView,getImageInfo,getConfigurationValue],getPictureConversion->[getPictureConversion],callPictureConversionChain->[wrapBlob]]]
Get the ImageInfo for the given blob.
We're not interested in the different cases?
@@ -54,7 +54,15 @@ const CAPTION_PROPS = { * @return {PreactDef.Renderable} */ export function LightboxGalleryProviderWithRef( - {children, onAfterClose, onAfterOpen, onBeforeOpen, render}, + { + children, + onAfterClose, + onAfterOpen, + onBeforeOpen, + onToggleCaption, + onViewGrid, + render, + }, ref ) { const classes = useStyles();
[No CFG could be retrieved]
Provides a single with a reference to the container. missing count - > count.
FMI/suspicion: What is `onAfterOpen` for? Why do we need it? I'm suspicious of having both onBeforeOpen/onAfterOpen
@@ -137,7 +137,17 @@ final class EagerLoadingExtension implements QueryCollectionExtensionInterface, continue; } - if ((false === $propertyMetadata->isReadableLink() || false === $propertyMetadata->isReadable()) && false === $propertyMetadata->getAttribute('fetchEager', false)) { + if ($inAttributes = isset($context[AbstractNormalizer::ATTRIBUTES][$association])) { + // prepare the child context + $context[AbstractNormalizer::ATTRIBUTES] = $context[AbstractNormalizer::ATTRIBUTES][$association]; + } else { + unset($context[AbstractNormalizer::ATTRIBUTES]); + } + + if ( + ((!$inAttributes && false === $propertyMetadata->isReadableLink()) || false === $propertyMetadata->isReadable()) && + false === $propertyMetadata->getAttribute('fetchEager', false) + ) { continue; }
[EagerLoadingExtension->[addSelect->[addSelect],joinRelations->[joinRelations]]]
Joins the relations of the specified resource class. Adds the join conditions for the association.
cs is strange here
@@ -459,6 +459,14 @@ func resourceAwsGlueCrawlerRead(d *schema.ResourceData, meta interface{}) error return nil } + arn := arn.ARN{ + Partition: meta.(*AWSClient).partition, + Service: "glue", + Region: meta.(*AWSClient).region, + AccountID: meta.(*AWSClient).accountid, + Resource: fmt.Sprintf("crawler/%s", d.Id()), + }.String() + d.Set("arn", arn) d.Set("name", crawlerOutput.Crawler.Name) d.Set("database_name", crawlerOutput.Crawler.DatabaseName) d.Set("role", crawlerOutput.Crawler.Role)
[StringInSlice,NonRetryableError,Set,GetCrawler,UpdateCrawler,Error,GetOk,DeleteCrawler,Errorf,SetId,RetryableError,CreateCrawler,NormalizeJsonString,Id,Get,Printf,StringValue,Print,TrimPrefix,String,Parse,Retry]
This function reads a single Glue crawler and updates the resource data with it s properties. UpdateBehavior updates the schema_change_policy and the Dynamodb targets if any.
I would be careful to not shadow the import name by calling this `crawlerARN :=` or something.
@@ -10,6 +10,8 @@ import org.jboss.jandex.Type.Kind; abstract class AbstractGenerator { static final String DEFAULT_PACKAGE = Arc.class.getPackage().getName() + ".generator"; + static final String UNDERSCORE = "_"; + static final String SYNTHETIC_SUFFIX = "Synthetic"; /** * Create a generated bean name from a bean package. When bean is located
[AbstractGenerator->[getBaseName->[substring,lastIndexOf,getSimpleName],getPackageName->[isProducerField,isProducerMethod,packageName,isEmpty,kind,startsWith,name],isReflectionFallbackNeeded->[isPrivate,equals,isProtected,flags,isPackagePrivate],generatedNameFromTarget->[replace,isEmpty],isPackagePrivate->[isPrivate,isPublic,isProtected],getName]]
Creates a generated bean name from a base name and suffix. Checks if a method is private protected or non - public on a different package.
Smaller class names mean smaller JARs :wink: Maybe use the abbreviation `Syn`?
@@ -21,8 +21,6 @@ static struct pm_runtime_data *prd; void pm_runtime_init(void) { - trace_pm("pm_runtime_init()"); - prd = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(*prd)); spinlock_init(&prd->lock);
[pm_runtime_disable->[platform_pm_runtime_disable,tracev_pm],pm_runtime_init->[trace_pm,spinlock_init,platform_pm_runtime_init,rzalloc],pm_runtime_get->[platform_pm_runtime_get,tracev_pm],pm_runtime_get_sync->[platform_pm_runtime_get,tracev_pm],pm_runtime_is_active->[platform_pm_runtime_is_active,tracev_pm],pm_runtime_enable->[platform_pm_runtime_enable,tracev_pm],pm_runtime_put->[platform_pm_runtime_put,tracev_pm],pm_runtime_put_sync->[platform_pm_runtime_put,tracev_pm]]
- - - - - - - - - - - - - - - - - -.
Maybe verbose trace? Or in case of debugging future PM-related issues such prints would be added by whoever is doing the debugging?
@@ -292,6 +292,10 @@ public class NxQueryBuilder { protected String guessFieldType(String field) { String fieldType; + if (ES_SCORE_FIELD.equals(field)) { + // this special field should not have an unmappedType + return null; + } try { SchemaManager schemaManager = Framework.getService(SchemaManager.class); fieldType = schemaManager.getField(field).getType().getName();
[NxQueryBuilder->[getEsAggregates->[getAggregateFilterId,getAggregateFilterExceptFor],getFetcher->[isFetchFromElasticsearch],updateRequest->[getEsAggregates,makeQuery,getSortBuilders,isFetchFromElasticsearch,getLimit,getAggregateFilter]]]
Guess the field type.
Is this strictly related to the RestClient switch? I would prefer a separate commit for all unrelated cleanups...
@@ -74,6 +74,7 @@ async function doBuild(extraArgs = {}) { await compileCoreRuntime(options); } else { await compileAllJs(options); + await buildVendorConfigs(options); } await buildExtensions(options); if (!argv.watch) {
[No CFG could be retrieved]
Builds the given AMP library. Builds runtime with the EXPERIMENT constant set to true.
For consistency with `dist`, I think this should happen after `buildExtensions()`, and be gated by `if (!argv.core_runtime_only)`.
@@ -14,7 +14,7 @@ RSpec.describe Search::Postgres::Tag, type: :service do result = described_class.search_documents(tag.name) expect(result.first.keys).to match_array( - %w[id name hotness_score rules_html supported short_summary], + %i[id name hotness_score rules_html supported short_summary], ) end
[create,to,search_documents,include,describe,match_array,save!,not_to,eq,it,require,name,map]
describe all tags to recalculate the score.
This value changing from strings to symbols is a non-issue since there's one more step before we return the tags to the UI (there's another step making the JSON output)?
@@ -248,6 +248,12 @@ class EpochsVectorizer(TransformerMixin): class PSDEstimator(TransformerMixin): """Compute power spectrum density (PSD) using a multi-taper method + This structures data so that it can be easily incorporated into + scikit-learn pipelines. It relies heavily on the multitaper_psd + function found in the time_frequency module. Running the transform + method will create attributes for the estimated psd as well as the + frequencies used, on top of return these values. + Parameters ---------- sfreq : float
[PSDEstimator->[transform->[type,multitaper_psd,atleast_3d,ValueError,isinstance,reshape],fit->[ValueError,isinstance,type]],EpochsVectorizer->[inverse_transform->[reshape,ValueError,isinstance,type],transform->[type,atleast_3d,ValueError,isinstance,reshape],fit->[ValueError,isinstance,type]],Scaler->[inverse_transform->[type,atleast_3d,iteritems,ValueError,isinstance],transform->[type,atleast_3d,iteritems,ValueError,isinstance],__init__->[dict],fit->[type,dict,mean,atleast_3d,ValueError,isinstance,items,pick_types]],FilterEstimator->[transform->[band_pass_filter,type,low_pass_filter,atleast_3d,band_stop_filter,ValueError,isinstance,high_pass_filter],__init__->[_check_type_picks],fit->[type,ValueError,isinstance,float,pick_types]]]
Returns a new array with the non - zero values for the given number of epoch - dependent A class constructor for a object.
can you update `See Also`?
@@ -68,7 +68,7 @@ type containerInfo struct { // If not nil, the container is part of the pod. We can use the // podInfo to extract the relevant data. - pod *podInfo + Pod *podInfo } const containerTemplate = headerTemplate + `
[Executable,GetBool,NArg,Warnf,Now,Delims,SetInterspersed,Wrap,Strings,Format,New,Errorf,Bool,BoolP,ID,Join,Execute,Name,Lookup,NewFlagSet,Config,StopTimeout,Sprintf,Changed,String,Parse]
This function returns the name of the ethernet unit that is part of the pod. ContainerUnit generates a systemd unit for the specified container.
I don't think exporting is necessary
@@ -44,7 +44,8 @@ class SimpleFcLayer(fluid.dygraph.Layer): class TestDyToStaticSaveInferenceModel(unittest.TestCase): - def test_save_inference_model(self): + # TODO(Aurelius84): disable temporarily, need new save_inference interface + def _test_save_inference_model(self): fc_size = 20 x = np.random.random((fc_size, fc_size)).astype('float32')
[TestDyToStaticSaveInferenceModel->[test_save_inference_model->[SimpleFcLayer]]]
Test save inference model.
Why? Is this due to the program is wrapped as dygraph layer so we cannot save inference model directly?
@@ -1138,7 +1138,7 @@ public class ProTerritoryManager { combinedUnits.addAll(patd.getMaxAmphibUnits()); ProLogger.debug("Removing territory that we can't successfully attack: " + t + ", maxWin%=" + patd.getMaxBattleResult().getWinPercentage() + ", maxAttackers=" + combinedUnits.size()); - result.remove(t); + result.remove(attackMap.get(t)); for (final Set<Territory> territories : unitAttackMap.values()) { territories.remove(t); }
[ProTerritoryManager->[getMaxScrambleCount->[getMaxScrambleCount],findScrambleOptions->[compare->[compare]],findAirMoveOptions->[findNavalMoveOptions],findAlliedAttackOptions->[findAttackOptions],removePotentialTerritoriesThatCantBeConquered->[removeTerritoriesThatCantBeConquered],removeTerritoriesThatCantBeConquered->[removeTerritoriesThatCantBeConquered],findEnemyAttackOptions->[findAttackOptions],findEnemyDefendOptions->[findDefendOptions]]]
Remove territories that can t be conquered. Checks if the territory is a capital or a factory. Checks if the allied player has a territory and if so attempts to strafe This method is called to determine which territory we can attack.
Re-use this variable: `final ProTerritory patd = attackMap.get(t);`? If so, would also be good to give the variable a more meaningful name as well.
@@ -414,6 +414,7 @@ module Engine def new_operating_round(round_num = 1) @log << "-- Operating Round #{@turn}.#{round_num} --" + @corporations.reject(&:floated?).each { |c| c.push_revenue_history!(nil, round_num) } Round::Operating.new( @corporations.select(&:floated?).sort, game: self,
[Base->[current_entity->[current_entity],trains->[trains],process_action->[process_action],end_game->[format_currency],inspect->[title],rollback->[clone]]]
Initialize a new operating round.
Hm, I'm not sure I like this. What if revenue history was more detailed?
@@ -1538,7 +1538,17 @@ namespace Js // else use enumerator to extract keys from source else { - AssignForGenericObjects(from, to, scriptContext); + DynamicObject* fromObj = JavascriptOperators::TryFromVar<DynamicObject>(from); + DynamicObject* toObj = JavascriptOperators::TryFromVar<DynamicObject>(to); + bool cloned = false; + if (toObj && fromObj && toObj->GetType() == scriptContext->GetLibrary()->GetObjectType()) + { + cloned = toObj->TryCopy(fromObj); + } + if(!cloned) + { + AssignForGenericObjects(from, to, scriptContext); + } } }
[No CFG could be retrieved]
4. Assign to. Gets the next key in the chain.
say we failed to clone for the first source, do we still attempt to do for the next sources?
@@ -401,6 +401,8 @@ bool ClientLauncher::launch_game(std::string &error_message, if (menudata.name == "" && !simple_singleplayer_mode) { error_message = gettext("Please choose a name!"); + if (skip_main_menu) + errorstream << error_message << std::endl; return false; }
[main_menu->[isMenuActive],run->[what->[what],get_instance]]
Main menu entry point for the game This function is called when a user enters a menu item. It is called by the This function checks if a required sub - game is selected and if so checks if the sub.
This check can be omitted. See below: The error message is always written to `errorstream` after setting its content. Players who didn't skip the main menu should see this message too.
@@ -299,11 +299,7 @@ class Jetpack_Client { $_path = preg_replace( '/^\//', '', $path ); // Use GET by default whereas `remote_request` uses POST - if ( isset( $filtered_args['method'] ) && strtoupper( $filtered_args['method'] === 'POST' ) ) { - $request_method = 'POST'; - } else { - $request_method = 'GET'; - } + $request_method = ( isset( $filtered_args['method'] ) ) ? $filtered_args['method'] : 'GET'; $validated_args = array_merge( $filtered_args, array( 'url' => sprintf( '%s://%s/rest/v%s/%s', $proto, JETPACK__WPCOM_JSON_API_HOST, $version, $_path ),
[Jetpack_Client->[_wp_remote_request->[get_error_message],remote_request->[sign_request]]]
Sends a request to the WPCOM API as a blog.
Oh! Looks like I'm also fixing this little bug: `strtoupper( $filtered_args['method'] === 'POST' )`. That's probably supposed to read `strtoupper( $filtered_args['method'] ) === 'POST'`.
@@ -18,6 +18,11 @@ -%> const webpackConfig = require('../../../webpack/webpack.test.js'); +var ChromiumRevision = require('puppeteer/package.json').puppeteer.chromium_revision; +var Downloader = require('puppeteer/utils/ChromiumDownloader'); +var revisionInfo = Downloader.revisionInfo(Downloader.currentPlatform(), ChromiumRevision); +process.env.CHROMIUM_BIN = revisionInfo.executablePath; + const WATCH = process.argv.indexOf('--watch') > -1; module.exports = (config) => {
[No CFG could be retrieved]
A module that exports a single that can be found in the browser. Options for the karma reporters.
use `const` please
@@ -408,6 +408,7 @@ def getitem_list(context, builder, sig, args): index = args[1] index = inst.fix_index(index) + inst.guard_index(index, msg="getitem out of range") result = inst.getitem(index) return impl_ret_borrowed(context, builder, sig.return_type, result)
[list_insert->[move,clamp_index,setitem,resize,fix_index,ListInstance],list_append->[resize,ListInstance,setitem],list_pop->[move,getitem,resize,fix_index,ListInstance,guard_index],list_add->[ListInstance,getitem,allocate,setitem],_list_extend_list->[resize,ListInstance,getitem,setitem],list_extend->[_list_extend_list],list_mul_inplace->[resize,ListInstance,getitem,setitem],list_eq->[ListInstance,getitem],ListIterInstance->[_payload->[get_list_payload],from_list->[ListInstance]],list_is->[ListInstance],ListInstance->[move->[set_dirty,_gep],inititem->[_gep],_payload->[get_list_payload],setitem->[set_dirty,_gep],__init__->[get_itemsize],resize->[get_itemsize,_payload_realloc,set_dirty],allocate->[allocate_ex],allocate_ex->[get_itemsize]],getitem_list->[fix_index,ListInstance,getitem],getslice_list->[getitem,inititem,fix_slice,allocate,ListInstance],build_list->[allocate,setitem],_ListPayloadMixin->[guard_index->[is_out_of_bounds],getitem->[_gep],fix_slice->[fix_slice]],iternext_listiter->[ListIterInstance,getitem],list_sort->[load_sorts],list_mul->[ListInstance,getitem,allocate,setitem],list_clear->[resize,ListInstance],list_len->[ListInstance],getiter_list->[from_list],setitem_list->[move,getitem,fix_slice,setitem,resize,fix_index,ListInstance]]
Retrieve a list item from a list.
Just had a look at `guard_index` which calls into `is_out_of_bounds`, seems like the underflow condition is triggered by a negative index? Should this not be valid so long as the magnitude is `<= len(list)`? If this is the case, it doesn't need fixing here and can be done independently.
@@ -182,6 +182,12 @@ class CheckListTest { } } + @Test + void rank_of_should_reflect_check_order() { + assertThat(CheckList.rankOf(new AssertionArgumentOrderCheck())) + .isLessThan(CheckList.rankOf(new UnusedTestRuleCheck())); + } + @Test void rules_targeting_tests_should_have_tests_tag() throws Exception { Set<Class<? extends JavaCheck>> testChecks = new HashSet<>(CheckList.getJavaTestChecks());
[CheckListTest->[rules_targeting_tests_should_have_tests_tag->[getKey],test->[define,CustomRulesDefinition]]]
Enforce check list registration. if nesque is null return nesque.
test title is misleading. What about `rankOf_should_reflect_check_order()`?
@@ -65,14 +65,12 @@ func (org *User) GetMembers() error { return err } - org.Members = make([]*User, len(ous)) + var ids = make([]int64, len(ous)) for i, ou := range ous { - org.Members[i], err = GetUserByID(ou.Uid) - if err != nil { - return err - } + ids[i] = ou.Uid } - return nil + org.Members, err = GetUsersByIDs(ids) + return err } // AddMember adds new member to organization.
[GetUserRepositories->[GetUserTeamIDs],GetUserMirrorRepositories->[GetUserTeamIDs],GetUserTeamIDs->[getUserTeams],GetTeams->[getTeams],GetUserTeams->[getUserTeams],GetTeam->[getTeam],getOwnerTeam->[getTeam],GetOwnerTeam->[getOwnerTeam],RemoveOrgRepo->[removeOrgRepo],GetOwnerTeam]
GetMembers - get all members of an organization.
but why ignore the errors?
@@ -610,6 +610,14 @@ func (b *Botanist) generateSecrets() ([]interface{}, error) { fmt.Sprintf("kubernetes.default.svc.%s", gardenv1beta1.DefaultDomain), b.Shoot.InternalClusterDomain, } + + etcdCertDNSNames := []string{ + fmt.Sprintf("etcd-%s-0", common.EtcdRoleMain), + fmt.Sprintf("etcd-%s-0", common.EtcdRoleEvents), + fmt.Sprintf("etcd-%s-0.etcd-%s", common.EtcdRoleMain, common.EtcdRoleMain), + fmt.Sprintf("etcd-%s-0.etcd-%s", common.EtcdRoleEvents, common.EtcdRoleEvents), + } + if b.Shoot.ExternalClusterDomain != nil { apiServerCertDNSNames = append(apiServerCertDNSNames, *(b.Shoot.Info.Spec.DNS.Domain), *(b.Shoot.ExternalClusterDomain)) }
[DeploySecrets->[DecodeCertificate,CreateSecret,createTLSSecret,ListSecrets,Unlock,Sprintf,computeSecretsCheckSums,generateSecrets,Lock,Errorf,DecodePrivateKey,createControlPlaneSecret,createRSASecret,Split,EncodePrivateKey],createTLSSecret->[CreateSecret],appendLoadBalancerIngresses->[ParseIP,Warn],createRSASecret->[CreateSecret,EncodePrivateKey],computeSecretsCheckSums->[Marshal,ComputeSHA256Hex],generateSecrets->[Sprintf,GetServiceNetwork,MonocularEnabled,GetIngressFQDN,ComputeClusterIP,ParseIP],createControlPlaneSecret->[appendLoadBalancerIngresses,CreateSecret,EncodeBase64,computeAPIServerURL],DeleteGardenSecrets->[DeleteSecret,IsNotFound],RenderLocalTemplate,Int,GenerateKey,AddDate,Sprintf,Now,Bytes,NewPublicKey,NewInt,GenerateRandomString,Command,MarshalAuthorizedKey,EncodePrivateKey,Lsh,EncodeCertificate,Run,CreateCertificate,Trim]
generateSecrets generates the secrets for the API server This is a convenience method for creating a new SecretManager instance. This is a convenience method for creating a new object that represents a bunch of objects that can This is a constructor for a Secret object that can be used to create a new ClusterManager The default configuration for all secrets.
It's better to use a headless kubernetes service and the API server to talk to `etcd-{role}-0.etcd-{role}`
@@ -1765,9 +1765,11 @@ export class AmpA4A extends AMP.BaseElement { /** * Returns base object that will be written to cross-domain iframe name * attribute. + * @param {boolean=} optIsSafeframe Whether creative is rendering into + * a safeframe. * @return {!JsonObject} */ - getAdditionalContextMetadata() { + getAdditionalContextMetadata(optIsSafeframe) { return /** @type {!JsonObject} */ ({}); } }
[AmpA4A->[extractSize->[user,get,Number],tryExecuteRealTimeConfig_->[user,maybeExecuteRealTimeConfig],constructor->[AmpAdUIHandler,AmpAdXOriginIframeHandler,dev,generateSentinel,getBinaryTypeNumericalCode,getBinaryType,protectFunctionWrapper,now],tearDownSlot->[dev],renderViaNameAttrOfXOriginIframe_->[NAMEFRAME,utf8Decode,dev,checkStillCurrent,user,getContextMetadata,dict,getDefaultBootstrapBaseUrl,stringify,SAFEFRAME,reject,length],preconnectCallback->[forEach,getDefaultBootstrapBaseUrl],attemptToRenderCreative->[round,resolve,dev,checkStillCurrent,user,cancellation],maybeTriggerAnalyticsEvent_->[round,assign,triggerAnalyticsEvent],renderNonAmpCreative_->[then,NAMEFRAME,resolve,incrementLoadingAds,user,assertHttpsUrl,SAFEFRAME],viewportCallback->[setFriendlyIframeEmbedVisible],refresh->[dev,refreshEndCallback,timerFor,resourcesForDoc],buildCallback->[loadKeyset,round,signatureVerifierFor,AmpAdUIHandler,dev,insertAnalyticsElement,viewerForDoc],renderAmpCreative_->[round,setFriendlyIframeEmbedVisible,dev,whenIniLoaded,createElementWithAttributes,installFriendlyIframeEmbed,dict,body,protectFunctionWrapper,minifiedCreative,win,setStyle,customStylesheets,iframe,push,customElementExtensions,checkStillCurrent,getTimingDataAsync,installUrlReplacementsForEmbed],getAmpAdMetadata_->[customElementExtensions,isArray,dev,customStylesheets,isSecureUrl,slice,lastIndexOf,images,minifiedCreative,parseJson,length,isObject],onCrossDomainIframeCreated->[dev],verifyStillCurrent->[cancellation],sendXhrRequest->[xhrFor,dev,adUrl,frameGetDisabled],renderOutsideViewport->[is3pThrottled,getAmpAdRenderOutsideViewport],isLayoutSupported->[isLayoutSizeDefined],forceCollapse->[dev],renderViaCachedContentIframe_->[dict,stringify,getContextMetadata,xhrFor],registerAlpHandler_->[viewerForDoc,document,handleClick,isExperimentOn],iframeRenderHelper_->[AmpAdXOriginIframeHandler,dict,dev,protectFunctionWrapper,assign,createElementWithAttributes],shouldInitializePromiseChain_->[height,dev,user,FLUID,isA
dPositionAllowed,width],populatePostAdResponseExperimentFeatures_->[dev,length,split],getSigningServiceNames->[getMode],initiateAdRequest->[signatureVerifierFor,status,href,dev,CRYPTO_UNAVAILABLE,getMode,images,headers,ERROR_SIGNATURE_MISMATCH,reject,CLIENT_CACHE,arrayBuffer,utf8Decode,byteLength,user,extensionsFor,customStylesheets,isSecureUrl,UNVERIFIED,OK,customElementExtensions,resolve,checkStillCurrent,tryDecodeUriComponent,ERROR_KEY_NOT_FOUND,preloadExtension,viewerForDoc],promiseErrorHandler_->[isCancellation,duplicateErrorIfNecessary,user,assignAdUrlToError,dev,ignoreStack,message,getMode,random],getNonAmpCreativeRenderingMethod->[dev,platformFor,isEnumValue,SAFEFRAME],resumeCallback->[isMeasureRequested,hasBeenMeasured],BaseElement],AD_RENDER_START,AD_REQUEST_START,dict,AD_RENDER_END,indexOf,apply,unshift,args,substring,AD_RESPONSE_END,AD_IFRAME_LOADED]
Returns additional context metadata.
You'll want to call this opt_isSafeframe, or the linter will complain.
@@ -16,8 +16,10 @@ class WordStemmer(Registrable): """ default_implementation = 'pass_through' - def stem_word(self, word: str) -> str: - """Converts a word to its lemma""" + def stem_word(self, word: Token) -> Token: + """ + `Modifies` the ``text`` field in the input token, and also returns the same token. + """ raise NotImplementedError @classmethod
[PorterStemmer->[__init__->[NltkPorterStemmer],stem_word->[stem]],WordStemmer->[from_params->[by_name,assert_empty,list_available,pop_choice]],register]
Converts a word to its lemma.
looking at the two implementations below, one of them does return the same token, one of them doesn't. which is the correct behavior?
@@ -40,7 +40,14 @@ class Listings extends Component { this.state = { first: 12, search: getStateFromQuery(props), - sort: 'featured' + // currently sort is activated through hardcoding the sort state + // order = 'asc' or 'desc' + // I have made it possible to sort by other values but its not tested + // target: 'price.amount' is the current goal + // this will be updated to be user set when the front end is completed + // graphql is expecting string values so to disable use empty strings + sort: { target: '', order: '' } + // sort: { target: 'price.amount', order: 'asc' } } }
[No CFG could be retrieved]
Component that returns a list of all the listings in the system. XmlSerializer for the section.
Could we just have a var for 'sort' and another for 'order' instead of nesting those properties inside a 'sort' object?
@@ -583,6 +583,9 @@ class Var(SymbolNode, Statement): is_classmethod = False is_property = False is_settable_property = False + # Set to true when this variable refers to a module we were unable to + # parse for some reason (eg a silenced module) + is_import = False def __init__(self, name: str, type: 'mypy.types.Type' = None) -> None: self._name = name
[Decorator->[fullname->[fullname],serialize->[serialize],name->[name],deserialize->[Decorator,deserialize]],TypeVarExpr->[serialize->[serialize],deserialize->[TypeVarExpr,deserialize]],method_type_with_fallback->[function_type],NameExpr->[serialize->[serialize],deserialize->[deserialize,NameExpr]],Argument->[deserialize->[Argument,deserialize],serialize->[serialize],_initialization_statement->[name],set_line->[set_line]],SymbolNode->[deserialize->[deserialize]],ClassDef->[serialize->[serialize],is_generic->[is_generic],deserialize->[ClassDef,deserialize]],namedtuple_type_info->[TypeInfo],function_type->[name],SymbolTable->[serialize->[serialize],deserialize->[deserialize,SymbolTable]],OverloadedFuncDef->[serialize->[serialize],deserialize->[deserialize,OverloadedFuncDef],__init__->[set_line]],merge->[MroError],FuncDef->[serialize->[serialize],deserialize->[FuncDef,deserialize]],TypeInfo->[__repr__->[fullname],has_readable_member->[get],all_subtypes->[all_subtypes],serialize->[fullname,serialize],__getitem__->[get],get->[get],get_var_or_getter->[get_var],get_var_or_setter->[get_var],has_base->[fullname],deserialize->[TypeInfo,deserialize],_calculate_is_enum->[fullname],__str__->[fullname]],Var->[serialize->[serialize],deserialize->[Var,deserialize]],linearize_hierarchy->[direct_base_classes,fullname,linearize_hierarchy],FuncItem->[set_line->[set_line]],MypyFile->[serialize->[serialize],deserialize->[deserialize,MypyFile]],SymbolTableNode->[fullname->[fullname],serialize->[fullname,serialize],deserialize->[deserialize,SymbolTableNode]]]
Initialize a missing object.
Maybe rename this to is_suppressed_import or is_silent_import?
@@ -125,14 +125,13 @@ public class SocketClient extends AbstractSiteToSiteClient { } final EndpointConnection connectionState = pool.getEndpointConnection(direction, getConfig()); - if (connectionState == null) { - return null; - } final Transaction transaction; try { transaction = connectionState.getSocketClientProtocol().startTransaction( connectionState.getPeer(), connectionState.getCodec(), direction); + } catch (final NoContentException e) { + return null; } catch (final Throwable t) { pool.terminate(connectionState); throw new IOException("Unable to create Transaction to communicate with " + connectionState.getPeer(), t);
[SocketClient->[isSecure->[isSecure],createTransaction->[receive->[receive],error->[error],confirm->[confirm],getCommunicant->[getCommunicant],complete->[complete],cancel->[cancel],send->[send],getState->[getState],getPortIdentifier]]]
Create a new transaction in the specified transfer direction. Send a message to the server.
The previous closing curly bracket and this `catch` should be on the same line.
@@ -95,7 +95,7 @@ URLS_V1 = [ def api_response(result, status_code=HTTPStatus.OK): response = make_response(( - json.dumps(result), + json.dumps(result) if result else '', status_code, {'mimetype': 'application/json', 'Content-Type': 'application/json'} ))
[RestAPI->[connect->[api_error,api_response],get_connection_managers_info->[get_connection_managers_info,api_response],token_swap->[api_error,api_response,token_swap],_deposit->[deposit,api_error,api_response],close->[close,api_response],get_partners_by_token->[api_response,get_channel_list],open->[api_error,api_response,open],leave->[api_response],register_token->[register_token,api_error,api_response],initiate_transfer->[api_error,api_response],patch_channel->[_close,_settle,api_error,get_channel,_deposit],_settle->[api_error,api_response],deposit->[deposit,api_error,api_response],get_channel->[api_response,get_channel],get_our_address->[api_response],get_network_events->[normalize_events_list,get_network_events,api_response],_close->[close,api_error,api_response],get_channel_events->[get_channel_events,normalize_events_list,api_response],get_token_network_events->[normalize_events_list,get_token_network_events,api_response],get_tokens_list->[get_tokens_list,api_response],get_channel_list->[api_response,get_channel_list]],APIServer->[start->[start],stop->[stop],run->[run],__init__->[restapi_setup_type_converters,restapi_setup_urls]]]
Returns a response with a object.
This doesn't really fix the problem; if this function is called with `status_code=HTTPStatus.NO_CONTENT` the error will persist.
@@ -84,7 +84,7 @@ def test_remove_and_add(database, module_type): return rm_cli_args = ['rm', '-y', 'mpileaks'] - module_files = _module_files(module_type, 'mpileaks') + module_files = _module_files(module_type, 'mpileaks ^mpich') for item in module_files: assert os.path.exists(item)
[test_find_recursive->[split,module,len],test_find->[module],test_setdefault_command->[dirname,module_configuration,realpath,Spec,join,writer_cls,exists,islink,module],test_remove_and_add->[exists,_module_files,module],test_deprecated_command->[raises,module],test_find_fails_on_non_existing_packages->[module],ensure_module_files_are_there->[use_configuration,use_repo,SpackCommand,use_store,module],_module_files->[Spec,writer_cls],test_find_fails_on_multiple_matches->[module],test_exit_with_failure->[raises,module],test_find_recursive_blacklisted->[module,module_configuration],test_loads_recursive_blacklisted->[split,any,module_configuration,match,module],usefixtures,fixture,regression,SpackCommand,parametrize]
Tests removing and adding a tcl module file.
why's the extra `^mpich` needed?
@@ -530,7 +530,7 @@ namespace Pulumi.Automation /// <summary> /// Gets the current set of Stack outputs from the last <see cref="UpAsync(UpOptions?, CancellationToken)"/>. /// </summary> - private async Task<ImmutableDictionary<string, OutputValue>> GetOutputAsync(CancellationToken cancellationToken) + public async Task<ImmutableDictionary<string, OutputValue>> GetOutputAsync(CancellationToken cancellationToken = default) { await this.Workspace.SelectStackAsync(this.Name).ConfigureAwait(false);
[WorkspaceStack->[GetConfigValueAsync->[GetConfigValueAsync],Dispose->[Dispose],GetConfigAsync->[GetConfigAsync],RefreshConfigAsync->[RefreshConfigAsync],InlineLanguageHost->[ValueTask->[Dispose],GetPortAsync->[Task]]]]
Asynchronously gets the output of the stack.
Do we want to keep the singular name `GetOutputAsync` or use plural `GetOutputsAsync`? Other APIs have `outputs`
@@ -93,9 +93,9 @@ func Home(ctx *context.Context) { } var opts = models.FindOrgMembersOpts{ - OrgID: org.ID, - PublicOnly: true, - Limit: 25, + OrgID: org.ID, + PublicOnly: true, + ListOptions: models.ListOptions{PageSize: 25}, } if ctx.User != nil {
[CountOrgMembers,HTML,Error,SetParams,NotFound,SetDefaultParams,HandleOrgAssignment,HasOrgVisible,SearchRepository,ServerError,Written,IsUserSiteAdmin,DisplayName,IsOrgMember,Query,FindOrgMembers,QueryInt,NewPagination,Params,Trim]
Show all the items in the system that match the given keyword. count - Count of users in repo.
Without setting the Page here, that limit does nothing.
@@ -4399,7 +4399,7 @@ class Graph(object): for controller in self._control_dependencies_stack: controller.add_op(op) - def control_dependencies(self, control_inputs): + def control_dependencies(self, *control_inputs): """Returns a context manager that specifies control dependencies. Use with the `with` keyword to specify that all operations constructed
[Graph->[_attr_scope->[type],_create_op_from_tf_operation->[_check_not_finalized,Operation],create_op->[_check_not_finalized,_name_from_scope_name,Operation,_NodeDef],_record_op_seen_by_control_dependencies->[add_op],_apply_device_functions->[_set_device],name_scope->[__exit__->[],__enter__->[],_name_from_scope_name],_as_graph_element_locked->[type,_as_graph_element],_add_new_tf_operations->[_control_flow_post_processing,_create_op_from_tf_operation,_add_control_inputs],_create_op_helper->[type,set_shapes_for_outputs,_set_device,value,get_attr,_set_attr,colocation_groups,_add_op],add_to_collection->[_check_not_finalized],_next_id->[_check_not_finalized],get_operation_by_name->[type,as_graph_element],add_to_collections->[add_to_collection],clear_collection->[_check_not_finalized],_ControlDependenciesController->[__exit__->[_set_control_flow_context],__enter__->[_set_control_flow_context,_get_control_flow_context]],_as_graph_def->[get_shape,_copy_functions_to_graph_def],as_graph_def->[_as_graph_def],__init__->[uid],get_tensor_by_name->[type,as_graph_element],_add_op->[_check_not_finalized],control_dependencies->[_current_control_dependencies,_ControlDependenciesController,as_graph_element],_get_tensor_by_tf_output->[_get_operation_by_tf_operation],_control_dependencies_for_inputs->[op_in_group],colocate_with->[internal_convert_to_tensor_or_indexed_slices],get_operations->[values],_get_operation_by_tf_operation->[_get_operation_by_name_unsafe],_copy_functions_to_graph_def->[values]],default_session->[get_controller],eager_run->[run,enable_eager_execution],get_collection_ref->[get_default_graph],internal_convert_n_to_tensor->[internal_convert_to_tensor],_create_c_op->[_as_tf_output],_DefaultStack->[get_controller->[type]],init_scope->[control_dependencies,_GetGlobalDefaultGraph,get_name_scope,name_scope],name_scope->[__exit__->[__exit__],__enter__->[_get_graph_from_inputs,as_default,__exit__,__enter__,name_scope]],_eval_using_default_session->[run,get_default_session],g
et_gradient_function->[get_attr],_get_graph_from_inputs->[type,get_default_graph,_assert_same_graph,_as_graph_element],add_to_collection->[get_default_graph],add_to_collections->[get_default_graph],_DefaultGraphStack->[_GetGlobalDefaultGraph->[Graph]],_set_shapes_for_outputs_c_api->[set_shape,_as_tf_output],get_default_session->[get_default],get_name_scope->[get_default_graph],enable_eager_execution->[type],reset_default_graph->[reset,is_cleared],Tensor->[__repr__->[get_shape],_override_operator->[_override_helper],_shape_tuple->[_shape_as_list],__init__->[uid],__str__->[get_shape]],get_collection->[get_default_graph],convert_n_to_tensor->[internal_convert_n_to_tensor],internal_convert_to_tensor->[_error_prefix],_run_using_default_session->[run,get_default_session],get_default_graph->[get_default],IndexedSlices->[__neg__->[IndexedSlices]],control_dependencies->[_NullContextmanager],set_shapes_for_outputs->[_set_shapes_for_outputs,_set_shapes_for_outputs_c_api],_EagerTensorBase->[__repr__->[numpy_text],__float__->[numpy],cpu->[_copy],__int__->[numpy],_copy->[grad_fun->[_copy],_copy_to_device],shape->[_shape_tuple],_shape_as_list->[_shape_tuple],__array__->[numpy],__nonzero__->[__bool__],gpu->[_copy],__bool__->[_shape_tuple,cpu],__format__->[numpy],__str__->[numpy_text]],Operation->[_add_control_input->[_add_control_inputs],_set_device->[_device_string],_update_input->[_add_consumer,_tf_input,_as_tf_output],_add_input->[_add_consumer],__init__->[_add_consumer,_create_c_op,Tensor],_recompute_node_def->[_as_node_def_input],_control_inputs->[_remove_all_control_inputs,_add_control_inputs],_input_types->[_tf_input],inputs->[_InputList]],colocate_with->[_NullContextmanager,device,internal_convert_to_tensor,colocate_with],internal_convert_to_tensor_or_indexed_slices->[internal_convert_to_tensor],op_scope->[name_scope],get_all_collection_keys->[get_default_graph],_NodeDef->[device,_device_string],internal_convert_n_to_tensor_or_indexed_slices->[internal_convert_to_tensor_or_
indexed_slices],get_stats_for_node_def->[OpStats],device->[device],_set_shapes_for_outputs->[set_shape],convert_n_to_tensor_or_indexed_slices->[internal_convert_n_to_tensor_or_indexed_slices],_DefaultGraphStack,_DefaultStack,register_dense_tensor_like_type,register_tensor_conversion_function]
Records that a given op has been seen by all registered control dependencies. Returns a context manager that can be used to manage all control dependencies of all operations in the.
The docstring for control_inputs could use an update :) Also, doesn't this break the behavior of `tf.control_dependencies(None)`?
@@ -69,7 +69,7 @@ class CsDhcp(CsDataBag): if not self.cl.is_redundant() or self.cl.is_master(): if restart_dnsmasq: - CsHelper.service("dnsmasq", "restart") + CsHelper.service("dnsmasq", "reload") else: CsHelper.start_if_stopped("dnsmasq") CsHelper.service("dnsmasq", "reload")
[CsDhcp->[process->[CsFile,get_device_info,write_hosts,preseed,is_master,service,configure_server,is_redundant,add,delete_leases,commit,start_if_stopped,repopulate],write_hosts->[CsFile,debug,info,add,is_changed,commit,repopulate],preseed->[add_host,address,get_hostname,is_router],configure_server->[get_domain,search,is_vpc,get_netmask,get_dns,get_gateway,str,CsGuestNetwork,i,join],del_host->[pop],add->[randint,IPAddress,entry,add,add_host],delete_leases->[split,error,append,str,del_host,info,open,execute]]]
Process the nanomsg file and populate hosts and dhcp options. This function returns DHCP - option - tag line.
@andrijapanicsb "reload" works only if the service is started. this is why we have line 74
@@ -96,13 +96,13 @@ public class PostProjectAnalysisTasksExecutor implements ComputationStepExecutor return; } - ProjectAnalysis projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED); + ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED); for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) { executeTask(projectAnalysis, postProjectAnalysisTask); } } - private static void executeTask(ProjectAnalysis projectAnalysis, PostProjectAnalysisTask postProjectAnalysisTask) { + private static void executeTask(ProjectAnalysisImpl projectAnalysis, PostProjectAnalysisTask postProjectAnalysisTask) { try { postProjectAnalysisTask.finished(projectAnalysis); } catch (Exception e) {
[PostProjectAnalysisTasksExecutor->[getAnalysisDate->[getAnalysisDate],executeTask->[finished]]]
This method is called when all the tasks in the chain have been executed.
let `getAnalysisDate()` be `getAnalysis()` and save the ugly mutable variable
@@ -850,7 +850,13 @@ static void getTileInfo( layer.material_flags |= MATERIAL_FLAG_BACKFACE_CULLING; } - if (!data->m_smooth_lighting) { + bool fullbright = g_settings->getBool("fullbright_mode"); + + // fullbright mode (solid nodes) + if (fullbright) { + // full artificial light, no sunlight + lights[0] = lights[1] = lights[2] = lights[3] = 0xff00; + } else if (!data->m_smooth_lighting) { lights[0] = lights[1] = lights[2] = lights[3] = getFaceLight(n0, n1, face_dir, ndef); } else {
[getInteriorLight->[getInteriorLight], m_minimap_mapblock->[final_color_blend,get_sunlight_color],void->[getNodeTile,getSmoothLightSolid,getFaceLight],final_color_blend->[final_color_blend,get_sunlight_color],getNodeTile->[getNodeTileN],getFaceLight->[getFaceLight],fill->[fillBlockDataBegin,fillBlockData],animate->[final_color_blend,get_sunlight_color]]
Get the tile info for a specific node in a mesh. lights are the lights that are in the smooth light.
This setting getter is too slow to use for every tile; fullbright mode needs to be cached in a bool.
@@ -246,7 +246,8 @@ int ca_main(int argc, char **argv) const char *serialfile = NULL, *subj = NULL; char *prog, *startdate = NULL, *enddate = NULL; char *dbfile = NULL, *f, *randfile = NULL; - char buf[3][BSIZE]; + char new_cert[BSIZE] = { 0 }; + char tmp[10 + 1] = "\0"; char *const *pp; const char *p; int create_ser = 0, free_key = 0, total = 0, total_done = 0;
[No CFG could be retrieved]
A configuration file with the following methods - signature - signature - signature - signature - signature - The main entry point for the ca_main.
interesting, might be better to put `new_cert[0] = '\0';` at the start of the code, because this requires the whole cert buffer to be initialized. maybe.
@@ -10,7 +10,7 @@ module Settings setting( :community_name, type: :string, - default: ApplicationConfig["COMMUNITY_NAME"] || "New Forem", + default: ApplicationConfig["COMMUNITY_NAME"], validates: { format: { with: /\A[^[<|>]]+\Z/,
[Community->[setting,year,table_name]]
The Community class.
We remove "New Forem", because when a creator signs up and then goes through their onboarding, we want to show them a blank field or what was set by their `ApplicationConfig["COMMUNITY_NAME"]`. "New Forem" no longer makes sense with this onboarding step at the start.
@@ -62,3 +62,12 @@ class FilterInputObjectType(InputObjectType): field_type.kwargs = kwargs args[name] = field_type return args + + +class ChannelFilterInputObjectType(FilterInputObjectType): + channel = Argument( + String, description="Specifies the channel in which to sort the data.", + ) + + class Meta: + abstract = True
[FilterInputObjectType->[__init_subclass_with_meta__->[update,get_filtering_args_from_filterset,super,yank_fields_from_attrs,InputObjectTypeOptions],get_filtering_args_from_filterset->[dict,getattr,convert_form_field,get_filterset_class,items]]]
Retrieve the filtering arguments from a FilterSet and produce the arguments to pass to a Graphene.
I am not sure if the description is correct. Maybe something like that: `Specifies the channel by which the data should be sorted.`
@@ -245,7 +245,12 @@ function jetpack_og_get_image( $width = 200, $height = 200, $max_images = 4 ) { $image['src'] = jetpack_site_icon_url( null, '512' ); } - // Fourth fall back, blank image + // Fourth fall back, Core Site Icon. Added in WP 4.3. + if ( empty( $image ) && ( function_exists( 'has_site_icon') && has_site_icon() ) ) { + $image['src'] = get_site_icon_url( null, '512' ); + } + + // Finally fall back, blank image if ( empty( $image ) ) { $image['src'] = apply_filters( 'jetpack_open_graph_image_default', 'https://s0.wp.com/i/blank.jpg' ); }
[No CFG could be retrieved]
Get an image from Facebook This function is used to display the avatar of a site. Filter for Jetpack Open Graph Image.
I think this could be done a bit better since jetpack_site_icon_url and get_site_icon_url should always return the same thing. We can check if one exists and, if it doesn't, call the `jetpack_site_icon_url` function.
@@ -38,6 +38,8 @@ public class FileStructure { COVERAGES("coverages-", Domain.PB), TESTS("tests-", Domain.PB), COVERAGE_DETAILS("coverage-details-", Domain.PB), + FILE_DEPENDENCIES("file-dependencies-", Domain.PB), + MODULE_DEPENDENCIES("module-dependencies-", Domain.PB), SOURCE("source-", ".txt"); private static final String PB = ".pb";
[FileStructure->[metadataFile->[File],fileFor->[File],exists,IllegalArgumentException,isDirectory]]
FileStructure provides a FileStructure for the given analysis report. Returns the file for the metadata. pb file for the given domain and component reference.
where are the writer/reader for module dependencies ?
@@ -23,6 +23,7 @@ from ..product.utils import ( increase_stock, ) from ..product.utils.digital_products import get_default_digital_content_settings +from ..shipping.utils import applicable_shipping_methods from . import events
[order_needs_automatic_fullfilment->[order_line_needs_automatic_fulfillment],cancel_fulfillment->[update_order_status],update_order_prices->[recalculate_order],automatically_fulfill_digital_lines->[order_line_needs_automatic_fulfillment,fulfill_order_line]]
Check if given line is digital and should be automatically fulfilled.
I know that we are doing that in almost all parts of the code but... Personally - I don't think that we should mix in and import utils files from different modules. It generates circular imports (now or in the future); a utils file should contain only helper logic used internally in the base module. If you need to import a utils function in a different module, it means that the function shouldn't be in the `utils` file
@@ -19,7 +19,6 @@ package org.apache.beam.sdk.loadtests; import static java.lang.String.format; import static java.util.Optional.empty; -import static java.util.Optional.of; import java.io.IOException; import java.util.List;
[JobFailure->[lookForMetricResultFailure->[JobFailure],lookForInvalidState->[JobFailure]]]
Creates a new object. Look for failure in pipeline.
Could you make this change for `Optional.empty` and `String.format` as well? It looks like the `BadImport` check doesn't catch these, but they're in the same spirit.
@@ -106,6 +106,12 @@ class Command(object): if options.exists_action: os.environ['PIP_EXISTS_ACTION'] = ''.join(options.exists_action) + if not ssl and options.allow_no_ssl: + os.environ['PIP_ALLOW_NO_SSL'] = '1' + + if options.cert_path: + os.environ['PIP_CERT_PATH'] = options.cert_path + if options.require_venv: # If a venv is required check if it can really be found if not os.environ.get('VIRTUAL_ENV'):
[Command->[_copy_option_group->[_copy_options],main->[setup_logging,merge_options]]]
Entry point for the command line interface. This function stores the complete log in a file.
Why do we need to set an environment variable here? The option system in pip was built so it can accept env vars and pass them as part of the optparse options to the functions that need it, not the other way around.
@@ -720,7 +720,7 @@ namespace System.Windows.Forms // Region visualRegion = new Region(visualRectangle); - if (_links != null && _links.Count == 1) + if (_links is not null && _links.Count == 1) { Links[0].VisualRegion = visualRegion; }
[LinkLabel->[PerformClick->[OnLinkClicked,LinkInText,ConvertToCharIndex],OnFontChanged->[InvalidateLinkFonts,InvalidateTextLayout,OnFontChanged],AdjustCharacterRangesForSurrogateChars->[ConvertToCharIndex],PaintLink->[IsOneLink],OnAutoEllipsisChanged->[InvalidateTextLayout,OnAutoEllipsisChanged],Link->[EnsureRun],EnsureRun->[ConvertToCharIndex],OnMouseLeave->[OnMouseLeave,InvalidateLink],OnMouseUp->[OnMouseUp,InvalidateLink],OnPaintBackground->[OnPaintBackground],SetBoundsCore->[InvalidateTextLayout,SetBoundsCore],InvalidateLink->[IsOneLink],OnGotFocus->[OnGotFocus,InvalidateLink],OnMouseDown->[OnMouseDown,InvalidateLink],OnPaddingChanged->[InvalidateTextLayout,OnPaddingChanged],OnLostFocus->[InvalidateLink,OnLostFocus],OnPaint->[EnsureRun,IsOneLink],OnTextChanged->[InvalidateTextLayout,OnTextChanged],OnEnabledChanged->[InvalidateTextLayout,OnEnabledChanged],OnTextAlignChanged->[OnTextAlignChanged,InvalidateTextLayout],ResetLinkColor->[InvalidateLink],Select->[GetNextLinkIndex,Select],CreateHandle->[CreateHandle],GetNextLinkIndex->[LinkInText,ConvertToCharIndex],OnKeyDown->[OnKeyDown],OnMouseMove->[OnMouseMove,InvalidateLink],ProcessDialogKey->[ProcessDialogKey],UpdateSelectability->[LinkInText,ConvertToCharIndex],OnAutoSizeChanged->[InvalidateTextLayout,OnAutoSizeChanged],WndProc->[WmSetCursor,WndProc],InvalidateTextLayout]]
Checks if the label has not been run. Private functions - TextLayout and TextFormat.
Can't ``_links.Count == 1`` be changed to ``_links.Count is 1``?
@@ -45,6 +45,14 @@ class SearchServiceClient(HeadersMixin): # pylint: disable=too-many-public-meth def __init__(self, endpoint, credential, **kwargs): # type: (str, AzureKeyCredential, **Any) -> None + try: + if endpoint.lower().startswith('http') and not endpoint.lower().startswith('https'): + raise ValueError("Endpoint should be secure. Use https.") + if not endpoint.lower().startswith('http'): + endpoint = "https://" + endpoint + except AttributeError: + raise ValueError("Endpoint must be a string") + self._endpoint = endpoint # type: str self._credential = credential # type: AzureKeyCredential self._client = _SearchServiceClient(
[SearchServiceClient->[close->[close],__exit__->[__exit__],__enter__->[__enter__],get_service_statistics->[get_service_statistics]]]
Initializes the object with the specified endpoint and credential.
Is there scenario were a valid endpoint would not start by `http`?
@@ -7,10 +7,18 @@ * https://www.openssl.org/source/license.html */ +#include <string.h> #include <openssl/core_names.h> #include <openssl/params.h> +#include <openssl/evp.h> +#include "internal/sizes.h" +#include "internal/param_build_set.h" #include "crypto/rsa.h" +#ifdef _WIN32 +# define strcasecmp _stricmp +#endif + /* * The intention with the "backend" source file is to offer backend support * for legacy backends (EVP_PKEY_ASN1_METHOD and EVP_PKEY_METHOD) and provider
[No CFG could be retrieved]
region Private Encryption Functions END of function RSA_set_key.
Why are you doing this when it is defined somewhere already?
@@ -394,13 +394,13 @@ public class FunctionRegistry .maximumSize(1000) .expireAfterWrite(1, HOURS) .build(CacheLoader.from(key -> ((SqlScalarFunction) key.getFunction()) - .specialize(key.getBoundVariables(), key.getArity(), typeManager, this))); + .specialize(key.getBoundVariables(), key.getArity(), metadata.getTypeManager(), this))); specializedAggregationCache = CacheBuilder.newBuilder() .maximumSize(1000) .expireAfterWrite(1, HOURS) .build(CacheLoader.from(key -> ((SqlAggregationFunction) key.getFunction()) - .specialize(key.getBoundVariables(), key.getArity(), typeManager, this))); + .specialize(key.getBoundVariables(), key.getArity(), metadata.getTypeManager(), this))); specializedWindowCache = CacheBuilder.newBuilder() .maximumSize(1000)
[FunctionRegistry->[resolveOperator->[resolveFunction],getCoercion->[getCoercion,getScalarFunctionImplementation],MagicLiteralFunction->[specialize->[isDeterministic]],ApplicableFunction->[toString->[toString]],isRegistered->[getScalarFunctionImplementation],FunctionMap->[get->[get]],list->[list],selectMostSpecificFunctions->[selectMostSpecificFunctions],mangleOperatorName->[mangleOperatorName]]]
Construct a cache for the specialized functions. window for missing values.
Why not keep the field? The code and diff would be smaller.
@@ -137,7 +137,7 @@ namespace System.Diagnostics var getRequestedDataUsingContext = listener.GetRequestedDataUsingContext; if (getRequestedDataUsingContext != null) { - ActivityCreationOptions<ActivityContext> aco = new ActivityCreationOptions<ActivityContext>(this, name, context, kind, tags, links); + ActivityCreationOptions<ActivityContext> aco = new ActivityCreationOptions<ActivityContext>(this, name, context == default && Activity.Current != null ? Activity.Current.Context : context, kind, tags, links); ActivityDataRequest dr = getRequestedDataUsingContext(ref aco); if (dr > dateRequest) {
[ActivitySource->[StartActivity->[StartActivity]],SynchronizedList->[AddIfNotExist->[Add],Add->[Add],Remove->[Remove]]]
Start an activity. returns a that can be used to identify the activity.
Fetching Activity.Current has some perf overhead to access the async-local. Rather than repeat this operation twice per listener I'd recommend computing the parent Activity and context once outside the loop and then reusing it for all the listener callbacks and passing it into CreateAndStart. As a sidenote not for this PR we still need to spend some more time analyzing performance. I notice the current implementation is going to allocate a closure for the delegate we are passing to EnumWithFunc and the goal is to make sampled-out Activities be a zero-allocation code-path.
@@ -385,6 +385,7 @@ export class AmpList extends AMP.BaseElement { */ ssrTemplate_(refresh) { let request; + assertHttpsUrl(this.element.getAttribute('src'), this.element); // Construct the fetch init data that would be called by the viewer // passed in as the 'originalRequest'. return requestForBatchFetch(
[No CFG could be retrieved]
Provides a way to handle the case where a list of items has been rendered and the fallback Get the items of the element.
Why is this line necessary? In what cases is the URL not HTTPS?
@@ -75,16 +75,12 @@ def spec(parser, args): print(spec.to_yaml()) continue - # Print some diagnostic info by default. + kwargs['hashes'] = False # Always False for input spec print("Input spec") print("--------------------------------") print(spec.tree(**kwargs)) - print("Normalized") - print("--------------------------------") - spec.normalize() - print(spec.tree(**kwargs)) - + kwargs['hashes'] = args.long or args.very_long print("Concretized") print("--------------------------------") spec.concretize()
[spec->[normalize,print,concretize,parse_specs,tree,to_yaml],setup_parser->[add_common_arguments,add_argument]]
Parse input and print a single .
I think this is good but it would make sense to move the above setting: `'hashes': False, # Always false for input spec` Into this loop as well. That is clearer and I think that without that the hashes will be printed for the next input spec.
@@ -33,13 +33,16 @@ import io.confluent.ksql.parser.tree.Sink; import io.confluent.ksql.serde.SerdeOption; import io.confluent.ksql.util.AggregateExpressionRewriter; import io.confluent.ksql.util.KsqlException; +import io.confluent.ksql.util.TableFunctionExpressionRewriter; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +// CHECKSTYLE_RULES.OFF: ClassDataAbstractionCoupling public class QueryAnalyzer { + // CHECKSTYLE_RULES.ON: ClassDataAbstractionCoupling private final Analyzer analyzer; private final MetaStore metaStore;
[QueryAnalyzer->[analyzeAggregate->[processSelectExpressions,AggregateAnalyzer,processHavingExpression,enforceAggregateRules,AggregateExpressionRewriter,isEmpty,getDefaultArgument,collect,joining,MutableAggregateAnalysis,KsqlException,ifPresent,processGroupByExpression],processSelectExpressions->[rewriteWith,getExpression,getSelectExpressions,processSelect,addFinalSelectExpression],processHavingExpression->[processHaving,rewriteWith,setHavingExpression],enforceAggregateRules->[getGroupByExpressions,difference,toList,isPresent,copyOf,getAggregateSelectFields,isEmpty,collect,KsqlException,getNonAggregateHavingFields],analyze->[validate,isStatic,analyze],processGroupByExpression->[getGroupByExpressions,processGroupBy],StaticQueryValidator,Analyzer,ContinuousQueryValidator,requireNonNull]]
Method to import all components of a class from a base class. Construct a new instance of the SerdeOptions class.
Rather than disable the coupling check, would it be possible to refactor this class so that there is less coupling? e.g. would it make sense to move some of the logic in `analyzeAggregate` into the `AggregateAnalyzer` class? (No idea, that's just a thought).
@@ -91,14 +91,12 @@ public class LogoutResourceIT { private WebTestClient webTestClient; <%_ } _%> - private OidcIdToken idToken; - @BeforeEach public void before() <% if (!reactive) { %>throws Exception <% } %>{ Map<String, Object> claims = new HashMap<>(); claims.put("groups", Collections.singletonList("ROLE_USER")); claims.put("sub", 123); - this.idToken = new OidcIdToken(ID_TOKEN, Instant.now(), Instant.now().plusSeconds(60), claims); + OidcIdToken idToken = new OidcIdToken(ID_TOKEN, Instant.now(), Instant.now().plusSeconds(60), claims); <%_ if (!reactive) { _%> SecurityContextHolder.getContext().setAuthentication(authenticationToken(idToken));
[No CFG could be retrieved]
A LogoutResource is a REST controller that logs out the user. It is a login resource Logout of the current session.
idToken is used in getLogoutInformation method
@@ -90,7 +90,9 @@ public class SparkInterpreter extends Interpreter { "spark", SparkInterpreter.class.getName(), new InterpreterPropertyBuilder() - .add("spark.app.name", "Zeppelin", "The name of spark application.") + .add("spark.app.name", + getSystemDefault("SPARK_APP_NAME", "spark.app.name", "Zeppelin"), + "The name of spark application.") .add("master", getSystemDefault("MASTER", "spark.master", "local[*]"), "Spark master uri. ex) spark://masterhost:7077")
[SparkInterpreter->[getProgress->[getJobGroup],interpretInput->[toString,interpret],getSQLContext->[useHiveContext,getSparkContext],getProgressFromStage_1_0x->[getProgressFromStage_1_0x],getProgressFromStage_1_1x->[getProgressFromStage_1_1x],close->[close],cancel->[getJobGroup],open->[getDependencyResolver,getSQLContext,getDepInterpreter,getSparkContext],interpret->[getJobGroup,interpret]]]
Creates a spark interpreter for Zeppelin. The object that holds the context of the current query.
Why not set spark.app.name from the interpreter config?
@@ -644,8 +644,8 @@ func (ur *UpResult) GetPermalink() (string, error) { // GetPermalink returns the permalink URL in the Pulumi Console for the update // or refresh operation. This will error for alternate, local backends. func GetPermalink(stdout string) (string, error) { - const permalinkSearchStr = "View Live: " - var startRegex = regexp.MustCompile(permalinkSearchStr) + const permalinkSearchStr = "View Live: |Permalink: " + startRegex := regexp.MustCompile(permalinkSearchStr) var endRegex = regexp.MustCompile("\n") // Find the start of the permalink in the output.
[RefreshConfig->[Name,Workspace,RefreshConfig],Destroy->[Name,Workspace],Info->[Name,Workspace],Refresh->[Name,Workspace],SetConfig->[Name,Workspace,SetConfig],Cancel->[Name,Workspace],History->[Name,Workspace],Outputs->[Name,Workspace],Up->[Name,Workspace],RemoveConfig->[Name,Workspace,RemoveConfig],SetAllConfig->[Name,Workspace,SetAllConfig],Preview->[Name,Workspace],GetConfig->[Name,Workspace,GetConfig],runPulumiCmdSync->[Name,Workspace],GetAllConfig->[Name,Workspace,GetAllConfig],Run->[GetConfig],RemoveAllConfig->[RemoveAllConfig,Workspace,Name],Import->[Name,Workspace],Export->[Name,Workspace]]
GetPermalink returns the permalink URL for the given expected state transition. Op is the kind of operation being performed.
`View Live:` is only relevant to app.pulumi.com backend. Other backends fail to get the permalink.
@@ -363,11 +363,11 @@ namespace System.Xml.Schema } if (!v1Compat) { - if (o1._targetNamespace == string.Empty) + if (o1._targetNamespace.Length == 0) { // clause 6 - o1 is negation of absent nslist = o2.Clone(); } - else if (o2._targetNamespace == string.Empty) + else if (o2._targetNamespace.Length == 0) { //clause 6 - o1 is negation of absent nslist = o1.Clone(); }
[NamespaceListV1Compat->[Allows->[Allows]],NamespaceList->[ToString->[ToString],Allows->[Allows]]]
Intersection method. if o1 is absent or o2 is expressible return a clone of o1.
Same question for these; how do we know they're non-null?
@@ -1317,8 +1317,10 @@ public class ElasticsearchIO { spec.getTypeFn() != null ? spec.getTypeFn().apply(parsedDocument) : null, spec.getIdFn() != null ? spec.getIdFn().apply(parsedDocument) : null, spec.getUsePartialUpdate() ? DEFAULT_RETRY_ON_CONFLICT : null); + SimpleModule module = new SimpleModule(); + module.addSerializer(DocumentMetadata.class, new DocumentMetadataSerializer()); + OBJECT_MAPPER.registerModule(module); return OBJECT_MAPPER.writeValueAsString(metadata); - } else { return "{}"; // use configuration and auto-generated document IDs }
[ElasticsearchIO->[ConnectionConfiguration->[withSocketAndRetryTimeout->[build],create->[build],withKeystorePath->[build],populateDisplayData->[isTrustSelfSignedCerts,getUsername,getType,getConnectTimeout,getKeystorePath,getIndex,getSocketAndRetryTimeout],withUsername->[build],withPassword->[build],withTrustSelfSignedCerts->[build],createClient->[customizeRequestConfig->[setConnectTimeout,getConnectTimeout,getSocketAndRetryTimeout],isTrustSelfSignedCerts,builder,getUsername,getKeystorePassword,getKeystorePath,build,getSocketAndRetryTimeout,getAddresses,getPassword],withConnectTimeout->[build],withKeystorePassword->[build]],Write->[withMaxBatchSizeBytes->[build],expand->[getConnectionConfiguration],withConnectionConfiguration->[build],withRetryConfiguration->[build],withTypeFn->[build],withIdFn->[build],withIndexFn->[build],withMaxBatchSize->[build],WriteFn->[handleRetry->[test],flushBatch->[test,checkForErrors,getType,getUsePartialUpdate,getIndex,getRetryConfiguration],closeClient->[close],processElement->[getMaxBatchSize,getMaxBatchSizeBytes,getDocumentMetadata,getUsePartialUpdate],setup->[createClient,getConnectionConfiguration,getRetryConfiguration,getMaxDuration],getDocumentMetadata->[getIdFn,getTypeFn,getUsePartialUpdate,getIndexFn,DocumentMetadata]],withUsePartialUpdate->[build]],getBackendVersion->[createClient,parseResponse],checkForErrors->[parseResponse],Read->[withBatchSize->[build],withScrollKeepalive->[build],expand->[getConnectionConfiguration],withConnectionConfiguration->[build],populateDisplayData->[getBatchSize,populateDisplayData,getScrollKeepalive,getQuery,isWithMetadata],withQuery->[withQuery,build],withMetadata->[build]],RetryConfiguration->[DefaultRetryPredicate->[test->[errorCodePresent],errorCodePresent->[parseResponse]],withRetryPredicate->[build],create->[build]],BoundedElasticsearchSource->[getEstimatedSizeBytes->[getType,getConnectionConfiguration,getIndex,getQuery,createClient],split->[getConnectionConfiguration,BoundedElasticsearchSour
ce],populateDisplayData->[populateDisplayData],validate->[validate],queryCount->[parseResponse],getStats->[createClient,parseResponse,getIndex]],BoundedElasticsearchReader->[start->[getBatchSize,getType,getScrollKeepalive,parseResponse,getIndex,getQuery,createClient],close->[close],advance->[parseResponse,getScrollKeepalive,updateScrollId],readNextBatchAndReturnFirstDocument->[isWithMetadata]]]]
Returns the document metadata if the document is not yet parsed.
This shouldn't be done **here**, should it?
@@ -406,7 +406,9 @@ public class ActiveMQActivationSpec extends ConnectionFactoryProperties implemen } else if ("AUTO_ACKNOWLEDGE".equalsIgnoreCase(value) || "Auto-acknowledge".equalsIgnoreCase(value)) { acknowledgeMode = Session.AUTO_ACKNOWLEDGE; } else { - throw new IllegalArgumentException("Unsupported acknowledgement mode " + value); + final String message = "Unsupported acknowledgement mode " + value; + logger.warn(message); + throw new IllegalArgumentException(message); } }
[ActiveMQActivationSpec->[getJndiParams->[getJndiParams],equals->[equals],isUseJNDI->[isUseJNDI],setDestinationLookup->[setDestination,setUseJNDI],validate->[isSubscriptionDurable],hashCode->[hashCode],getSetupAttempts->[getSetupAttempts],getParsedJndiParams->[getParsedJndiParams],getSetupInterval->[getSetupInterval],toString->[getAcknowledgeMode,toString],getDestinationLookup->[getDestination],getPassword->[getPassword]]]
Sets the acknowledge mode.
As warn requires a registered logger and logger code