content_type stringclasses 8
values | main_lang stringclasses 7
values | message stringlengths 1 50 | sha stringlengths 40 40 | patch stringlengths 52 962k | file_count int64 1 300 |
|---|---|---|---|---|---|
Javascript | Javascript | use factory superclass in route#model hook | ba1c747b668e23c2fca0e19970f3a042a375a399 | <ide><path>packages/ember-routing/lib/system/route.js
<ide> Ember.Route = Ember.Object.extend({
<ide> if (!name && sawParams) { return params; }
<ide> else if (!name) { return; }
<ide>
<del> var modelClass = this.container.lookupFactory('model:' + name);
<add> var modelClass = this.container.lookupFactory('model:' + name).superclass;
<ide> var namespace = get(this, 'router.namespace');
<ide>
<ide> Ember.assert("You used the dynamic segment " + name + "_id in your router, but " + namespace + "." + classify(name) + " did not exist and you did not override your route's `model` hook.", modelClass);
<ide><path>packages/ember-routing/tests/system/route_test.js
<ide> test("default model utilizes the container to acquire the model factory", functi
<ide>
<ide> post = {};
<ide>
<del> Post = {
<add> Post = Ember.Object.extend();
<add> Post.reopenClass({
<ide> find: function(id) {
<ide> return post;
<ide> }
<del> };
<add> });
<ide>
<ide> container = {
<ide> lookupFactory: lookupFactory
<ide> test("default model utilizes the container to acquire the model factory", functi
<ide> function lookupFactory(fullName) {
<ide> equal(fullName, "model:post", "correct factory was looked up");
<ide>
<del> return Post;
<add> return Post.extend();
<ide> }
<ide>
<ide> });
<ide> test("controllerFor uses route's controllerName if specified", function() {
<ide> routeOne.controllerName = 'test';
<ide>
<ide> equal(routeTwo.controllerFor('one'), testController);
<del>});
<ide>\ No newline at end of file
<add>});
<ide><path>packages/ember/tests/routing/basic_test.js
<ide> test("currentRouteName is a property installed on ApplicationController that can
<ide> transitionAndCheck('each.other', 'be.excellent.to.each.other', 'each.other');
<ide> });
<ide>
<add>test("Route model hook finds the same model as a manual find", function() {
<add> var Post;
<add> App.Post = Ember.Object.extend();
<add> App.Post.reopenClass({
<add> find: function() {
<add> Post = this;
<add> return {};
<add> }
<add> });
<add>
<add> Router.map(function() {
<add> this.route('post', { path: '/post/:post_id' });
<add> });
<add>
<add> bootApplication();
<add>
<add> handleURL('/post/1');
<add>
<add> equal(App.Post, Post);
<add>}); | 3 |
Javascript | Javascript | use a runtime module to inject system polyfill | bfb9df6c20b0aeaccba311ed7f6a4609f1880417 | <ide><path>buildin/system.js
<del>// Provide a "System" global.
<del>module.exports = {
<del> // Make sure import is only used as "System.import"
<del> import: function() {
<del> throw new Error("System.import cannot be used indirectly");
<del> }
<del>};
<ide><path>lib/RuntimeGlobals.js
<ide> exports.hmrDownloadUpdateHandlers = "__webpack_require__.hmrC";
<ide> * object with all hmr module data for all modules
<ide> */
<ide> exports.hmrModuleData = "__webpack_require__.hmrD";
<add>
<add>/**
<add> * the System polyfill object
<add> */
<add>exports.system = "__webpack_require__.System";
<ide><path>lib/dependencies/ConstDependency.js
<ide> const NullDependency = require("./NullDependency");
<ide> /** @typedef {import("../util/createHash").Hash} Hash */
<ide>
<ide> class ConstDependency extends NullDependency {
<add> /**
<add> * @param {string} expression the expression
<add> * @param {TODO} range the source range
<add> * @param {string[]=} runtimeRequirements runtime requirements
<add> */
<ide> constructor(expression, range, runtimeRequirements) {
<ide> super();
<ide> this.expression = expression;
<ide><path>lib/dependencies/SystemPlugin.js
<ide> const {
<ide> evaluateToString,
<ide> expressionIsUnsupported,
<del> getModulePath,
<ide> toConstantDependency
<ide> } = require("../JavascriptParserHelpers");
<add>const RuntimeGlobals = require("../RuntimeGlobals");
<ide> const WebpackError = require("../WebpackError");
<add>const ConstDependency = require("./ConstDependency");
<ide> const ProvidedDependency = require("./ProvidedDependency");
<del>const systemBuildin = require.resolve("../../buildin/system");
<add>const SystemRuntimeModule = require("./SystemRuntimeModule");
<ide>
<ide> class SystemPlugin {
<ide> constructor(options) {
<ide> class SystemPlugin {
<ide> new ProvidedDependency.Template()
<ide> );
<ide>
<add> compilation.hooks.runtimeRequirementInModule
<add> .for(RuntimeGlobals.system)
<add> .tap("SystemPlugin", (module, set) => {
<add> set.add(RuntimeGlobals.require);
<add> });
<add>
<add> compilation.hooks.runtimeRequirementInTree
<add> .for(RuntimeGlobals.system)
<add> .tap("SystemPlugin", (chunk, set) => {
<add> compilation.addRuntimeModule(chunk, new SystemRuntimeModule());
<add> });
<add>
<ide> const handler = (parser, parserOptions) => {
<ide> if (parserOptions.system === undefined || !parserOptions.system) {
<ide> return;
<ide> class SystemPlugin {
<ide> setNotSupported("System.register");
<ide>
<ide> parser.hooks.expression.for("System").tap("SystemPlugin", expr => {
<del> const dep = new ProvidedDependency(
<del> getModulePath(parser.state.module.context, systemBuildin),
<del> "System",
<del> null,
<del> expr.range
<del> );
<add> const dep = new ConstDependency(RuntimeGlobals.system, expr.range, [
<add> RuntimeGlobals.system
<add> ]);
<ide> dep.loc = expr.loc;
<ide> parser.state.module.addDependency(dep);
<ide> return true;
<ide><path>lib/dependencies/SystemRuntimeModule.js
<add>/*
<add> MIT License http://www.opensource.org/licenses/mit-license.php
<add> Author Florent Cailhol @ooflorent
<add>*/
<add>
<add>"use strict";
<add>
<add>const RuntimeGlobals = require("../RuntimeGlobals");
<add>const RuntimeModule = require("../RuntimeModule");
<add>const Template = require("../Template");
<add>
<add>const SystemPolyfill = Template.asString([
<add> `${RuntimeGlobals.system} = {`,
<add> Template.indent([
<add> "import: function () {",
<add> Template.indent([
<add> "throw new Error('System.import cannot be used indirectly');"
<add> ]),
<add> "}"
<add> ]),
<add> "}"
<add>]);
<add>
<add>class SystemRuntimeModule extends RuntimeModule {
<add> constructor() {
<add> super("system");
<add> }
<add>
<add> /**
<add> * @returns {string} runtime code
<add> */
<add> generate() {
<add> return SystemPolyfill;
<add> }
<add>}
<add>
<add>module.exports = SystemRuntimeModule; | 5 |
Javascript | Javascript | fix whitespace issues | 6a68d646297b5264fc3c0672fc58dcbf64523bdc | <ide><path>lib/path.js
<ide> if (isWindows) {
<ide> resolvedTail = normalizeArray(resolvedTail.split(/[\\\/]+/).filter(f),
<ide> !resolvedAbsolute).join('\\');
<ide>
<del> // If device is a drive letter, we'll normalize to lower case.
<del> if (resolvedDevice && resolvedDevice.charAt(1) === ':')
<del> resolvedDevice = resolvedDevice[0].toLowerCase() +
<del> resolvedDevice.substr(1);
<add> // If device is a drive letter, we'll normalize to lower case.
<add> if (resolvedDevice && resolvedDevice.charAt(1) === ':') {
<add> resolvedDevice = resolvedDevice[0].toLowerCase() +
<add> resolvedDevice.substr(1);
<add> }
<ide>
<ide> return (resolvedDevice + (resolvedAbsolute ? '\\' : '') + resolvedTail) ||
<ide> '.'; | 1 |
PHP | PHP | fix psalm errors | b094090907b11b28102e7f714835616d03737df1 | <ide><path>src/Console/Exception/NoOptionException.php
<ide> */
<ide> namespace Cake\Console\Exception;
<ide>
<add>use Throwable;
<add>
<ide> /**
<ide> * Exception raised with suggestions
<ide> */
<ide> class NoOptionException extends ConsoleException
<ide> * Constructor.
<ide> *
<ide> * @param string $message The string message.
<del> * @param null $suggestions The code of the error, is also the HTTP status code for the error.
<del> * @param array $code Either the string of the error message, or an array of attributes
<add> * @param array $suggestions The code of the error, is also the HTTP status code for the error.
<add> * @param int|null $code Either the string of the error message, or an array of attributes
<ide> * @param \Throwable|null $previous the previous exception.
<ide> */
<ide> public function __construct( | 1 |
Ruby | Ruby | add another comment | 749c877f032f4990581eec636ffcf1a826e5b4e9 | <ide><path>Library/Homebrew/os/mac/xcode.rb
<ide> module Xcode
<ide>
<ide> # Locate the "current Xcode folder" via xcode-select. See:
<ide> # man xcode-select
<del> # NOTE!! use Xcode.prefix rather than this generally!
<add> # TODO Should this be moved to OS::Mac? As of 10.9 this is referred to
<add> # as the "developer directory", and be either a CLT or Xcode instance.
<ide> def folder
<ide> @folder ||= `xcode-select -print-path 2>/dev/null`.strip
<ide> end | 1 |
Text | Text | add v2.8.0-beta.2 to changelog.md | 868e29c9840592a81f8a67e32262fe28976d762e | <ide><path>CHANGELOG.md
<ide> # Ember Changelog
<ide>
<add>### 2.8.0-beta.2 (August 1, 2016)
<add>
<add>- [#13887](https://github.com/emberjs/ember.js/pull/13887) [BUGFIX] Add assertions for illegal component invocations.
<add>- [#13892](https://github.com/emberjs/ember.js/pull/13892) [CLEANUP] Remove `View#createElement` / `View#destroyElement`.
<add>- [#13895](https://github.com/emberjs/ember.js/pull/13895) [BUGFIX] Fix template meta lookup for nested tagless and blockless components.
<add>- [#13911](https://github.com/emberjs/ember.js/pull/13911) [BUGFIX] Avoid using clobbering `.env` property on components.
<add>- [#13913](https://github.com/emberjs/ember.js/pull/13913) [BUGFIX] Disallow paths beginning with @ in templates.
<add>- [#13920](https://github.com/emberjs/ember.js/pull/13920) [BUGFIX] Add more info to the `Ember.Binding` deprecation.
<add>
<ide> ### 2.8.0-beta.1 (July 25, 2016)
<ide>
<ide> - [#13757](https://github.com/emberjs/ember.js/pull/13757) / [#13773](https://github.com/emberjs/ember.js/pull/13773) [CLEANUP] Remove legacy view layer features. | 1 |
Javascript | Javascript | change variable naming | 09cd3335c85c9b1dd5d20f996d5771f541c55ca3 | <ide><path>packages/ember-metal/lib/set_properties.js
<ide> import keys from "ember-metal/keys";
<ide> ```
<ide>
<ide> @method setProperties
<del> @param self
<del> @param {Object} hash
<del> @return self
<add> @param obj
<add> @param {Object} properties
<add> @return obj
<ide> */
<del>export default function setProperties(self, hash) {
<add>export default function setProperties(obj, properties) {
<ide> changeProperties(function() {
<del> var props = keys(hash);
<del> var prop;
<add> var props = keys(properties);
<add> var propertyName;
<ide>
<ide> for (var i = 0, l = props.length; i < l; i++) {
<del> prop = props[i];
<add> propertyName = props[i];
<ide>
<del> set(self, prop, hash[prop]);
<add> set(obj, propertyName, properties[propertyName]);
<ide> }
<ide> });
<del> return self;
<add> return obj;
<ide> } | 1 |
Text | Text | add changelog entry | ddcb3eaf877300229130648eebde964c9de17274 | <ide><path>railties/CHANGELOG.md
<add>* Enable HSTS with IncludeSudomains header for new applications.
<add>
<add> *Egor Homakov*, *Prathamesh Sonpatki*
<add>
<ide> ## Rails 5.0.0.beta3 (February 24, 2016) ##
<ide>
<ide> * Alias `rake` with `rails_command` in the Rails Application Templates API | 1 |
PHP | PHP | add theme to mailable properties | 44879ecf372236f2446483da687f5eea7434c978 | <ide><path>src/Illuminate/Mail/Mailable.php
<ide> class Mailable implements MailableContract, Renderable
<ide> */
<ide> public $callbacks = [];
<ide>
<add> /**
<add> * The name of the theme that should be used when formatting the message.
<add> *
<add> * @var string|null
<add> */
<add> public $theme;
<add>
<ide> /**
<ide> * The name of the mailer that should send the message.
<ide> * | 1 |
Python | Python | remove pytest dependency. | 0cb163865a4c761c226b151283309eedb2b1ca4d | <ide><path>transformers/tests/optimization_tf_test.py
<ide> from __future__ import print_function
<ide>
<ide> import unittest
<del>import pytest
<ide>
<ide> from transformers import is_tf_available
<ide>
<add>from .utils import require_tf
<add>
<ide> if is_tf_available():
<ide> import tensorflow as tf
<ide> from tensorflow.python.eager import context
<ide> from tensorflow.python.framework import ops
<ide> from transformers import (create_optimizer, GradientAccumulator)
<del>else:
<del> pytestmark = pytest.mark.skip("Require TensorFlow")
<ide>
<add>
<add>@require_tf
<ide> class OptimizationFTest(unittest.TestCase):
<ide> def assertListAlmostEqual(self, list1, list2, tol):
<ide> self.assertEqual(len(list1), len(list2)) | 1 |
Ruby | Ruby | fix typo in fortran environment variables | 399d73e10037885d0970ee1a7fa89ee2cf876bf8 | <ide><path>Library/Homebrew/extend/ENV.rb
<ide> def fortran
<ide>
<ide> if ARGV.include? '--default-fortran-flags'
<ide> self['FCFLAGS'] = self['CFLAGS'] unless self['FCFLAGS']
<del> self['FFFLAGS'] = self['CFLAGS'] unless self['FFFLAGS']
<add> self['FFLAGS'] = self['CFLAGS'] unless self['FFLAGS']
<ide> elsif not self['FCFLAGS'] or self['FFLAGS']
<ide> opoo <<-EOS
<ide> No Fortran optimization information was provided. You may want to consider | 1 |
Python | Python | remove debug print | ee9bbc4041a493d78edf31298e8042639401b8f8 | <ide><path>libcloud/test/storage/test_local.py
<ide> def remove_tmp_file(self, tmppath):
<ide>
<ide> @unittest.skipIf(platform.system().lower() == 'windows', 'Unsupported on Windows')
<ide> def test_lock_local_storage(self):
<del> print("aaaa")
<ide> # 1. Acquire succeeds
<ide> lock = LockLocalStorage("/tmp/a")
<ide> with lock: | 1 |
Ruby | Ruby | override the initializers instead of using tap | 58f9c4f096c92518a81093aa9912314b14873a66 | <ide><path>actionmailer/lib/action_mailer/parameterized.rb
<ide> def initialize(mailer, params)
<ide>
<ide> def method_missing(method_name, *args)
<ide> if @mailer.action_methods.include?(method_name.to_s)
<del> ActionMailer::Parameterized::MessageDelivery.new(@mailer, method_name, *args).tap { |pmd| pmd.params = @params }
<add> ActionMailer::Parameterized::MessageDelivery.new(@mailer, method_name, @params, *args)
<ide> else
<ide> super
<ide> end
<ide> end
<ide> end
<ide>
<ide> class MessageDelivery < ActionMailer::MessageDelivery # :nodoc:
<del> attr_accessor :params
<add> def initialize(mailer_class, action, params, *args)
<add> super(mailer_class, action, *args)
<add> @params = params
<add> end
<ide>
<ide> private
<ide> def processed_mailer
<ide> @processed_mailer ||= @mailer_class.new.tap do |mailer|
<del> mailer.params = params
<add> mailer.params = @params
<ide> mailer.process @action, *@args
<ide> end
<ide> end | 1 |
Ruby | Ruby | remove duplicate function | dbe460ef04ddc8b043988879730414e31600db53 | <ide><path>Library/Homebrew/formula_support.rb
<ide> def include? name
<ide> @args.include? '--' + name
<ide> end
<ide>
<del> def using? name
<del> @args.include? '--' + name
<del> end
<del>
<ide> def head?
<ide> @args.flag? '--HEAD'
<ide> end | 1 |
Python | Python | add possibility to maintain full copies of files | 57461ac0b4e4f7349c2437fcf8d4115014d6ceda | <ide><path>examples/tensorflow/question-answering/utils_qa.py
<ide> def postprocess_qa_predictions(
<ide> null_score_diff_threshold: float = 0.0,
<ide> output_dir: Optional[str] = None,
<ide> prefix: Optional[str] = None,
<add> is_world_process_zero: bool = True,
<ide> ):
<ide> """
<ide> Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
<ide> def postprocess_qa_predictions(
<ide> scores_diff_json = collections.OrderedDict()
<ide>
<ide> # Logging.
<add> logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)
<ide> logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
<ide>
<ide> # Let's loop over all the examples!
<ide><path>utils/check_copies.py
<ide> PATH_TO_DOCS = "docs/source"
<ide> REPO_PATH = "."
<ide>
<add># Mapping for files that are full copies of others (keys are copies, values the file to keep them up to data with)
<add>FULL_COPIES = {"examples/tensorflow/question-answering/utils_qa.py": "examples/pytorch/question-answering/utils_qa.py"}
<add>
<ide>
<ide> def _should_continue(line, indent):
<ide> return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\):\s*$", line) is not None
<ide> def check_copies(overwrite: bool = False):
<ide> check_model_list_copy(overwrite=overwrite)
<ide>
<ide>
<add>def check_full_copies(overwrite: bool = False):
<add> diffs = []
<add> for target, source in FULL_COPIES.items():
<add> with open(source, "r", encoding="utf-8") as f:
<add> source_code = f.read()
<add> with open(target, "r", encoding="utf-8") as f:
<add> target_code = f.read()
<add> if source_code != target_code:
<add> if overwrite:
<add> with open(target, "w", encoding="utf-8") as f:
<add> print(f"Replacing the content of {target} by the one of {source}.")
<add> f.write(source_code)
<add> else:
<add> diffs.append(f"- {target}: copy does not match {source}.")
<add>
<add> if not overwrite and len(diffs) > 0:
<add> diff = "\n".join(diffs)
<add> raise Exception(
<add> "Found the following copy inconsistencies:\n"
<add> + diff
<add> + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
<add> )
<add>
<add>
<ide> def get_model_list():
<ide> """Extracts the model list from the README."""
<ide> # If the introduction or the conclusion of the list change, the prompts may need to be updated.
<ide> def check_model_list_copy(overwrite=False, max_per_line=119):
<ide> args = parser.parse_args()
<ide>
<ide> check_copies(args.fix_and_overwrite)
<add> check_full_copies(args.fix_and_overwrite) | 2 |
Python | Python | fix pipeline analysis on remove pipe | afe4a428f78abe45d6104d74ef42a066570fa43d | <ide><path>spacy/language.py
<ide> def remove_pipe(self, name):
<ide> """
<ide> if name not in self.pipe_names:
<ide> raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
<add> removed = self.pipeline.pop(self.pipe_names.index(name))
<ide> if ENABLE_PIPELINE_ANALYSIS:
<ide> analyze_all_pipes(self.pipeline)
<del> return self.pipeline.pop(self.pipe_names.index(name))
<add> return removed
<ide>
<ide> def __call__(self, text, disable=[], component_cfg=None):
<ide> """Apply the pipeline to some text. The text can span multiple sentences,
<ide><path>spacy/tests/pipeline/test_analysis.py
<ide> def test_analysis_validate_attrs_valid():
<ide> def test_analysis_validate_attrs_invalid(attr):
<ide> with pytest.raises(ValueError):
<ide> validate_attrs([attr])
<add>
<add>
<add>def test_analysis_validate_attrs_remove_pipe():
<add> """Test that attributes are validated correctly on remove."""
<add> spacy.language.ENABLE_PIPELINE_ANALYSIS = True
<add>
<add> @component("c1", assigns=["token.tag"])
<add> def c1(doc):
<add> return doc
<add>
<add> @component("c2", requires=["token.pos"])
<add> def c2(doc):
<add> return doc
<add>
<add> nlp = Language()
<add> nlp.add_pipe(c1)
<add> with pytest.warns(UserWarning):
<add> nlp.add_pipe(c2)
<add> with pytest.warns(None) as record:
<add> nlp.remove_pipe("c2")
<add> assert not record.list | 2 |
Text | Text | fix bug in `readable.unshift` code example | ee38bbd4cfc9d515b185fb406ffce5919e63bafa | <ide><path>doc/api/stream.md
<ide> function parseHeader(stream, callback) {
<ide> let chunk;
<ide> while (null !== (chunk = stream.read())) {
<ide> const str = decoder.write(chunk);
<del> if (str.match(/\n\n/)) {
<add> if (str.includes('\n\n')) {
<ide> // Found the header boundary.
<ide> const split = str.split(/\n\n/);
<ide> header += split.shift();
<ide> function parseHeader(stream, callback) {
<ide> stream.unshift(buf);
<ide> // Now the body of the message can be read from the stream.
<ide> callback(null, header, stream);
<del> } else {
<del> // Still reading the header.
<del> header += str;
<add> return;
<ide> }
<add> // Still reading the header.
<add> header += str;
<ide> }
<ide> }
<ide> } | 1 |
Javascript | Javascript | remove useless dependencies in tests | 4da1cc3b81048180eff99637d407ccd9dd07ec6d | <ide><path>test/ng/compileSpec.js
<ide> describe('$compile', function() {
<ide> ));
<ide>
<ide> it('should not load cross domain templates by default', inject(
<del> function($compile, $httpBackend, $rootScope, $sce) {
<add> function($compile, $rootScope) {
<ide> expect(function() {
<ide> $compile('<div class="crossDomainTemplate"></div>')($rootScope);
<ide> }).toThrowMinErr('$sce', 'insecurl', 'Blocked loading resource from url not allowed by $sceDelegate policy. URL: http://example.com/should-not-load.html');
<ide> }
<ide> ));
<ide>
<ide> it('should trust what is already in the template cache', inject(
<del> function($compile, $httpBackend, $rootScope, $templateCache, $sce) {
<add> function($compile, $httpBackend, $rootScope, $templateCache) {
<ide> $httpBackend.expect('GET', 'http://example.com/should-not-load.html').respond('<span>example.com/remote-version</span>');
<ide> $templateCache.put('http://example.com/should-not-load.html', '<span>example.com/cached-version</span>');
<ide> element = $compile('<div class="crossDomainTemplate"></div>')($rootScope); | 1 |
Text | Text | fix types and description for dns.resolvetxt | 3888a576ac83239d8d3d2fc985e7ea1856dac83e | <ide><path>doc/api/dns.md
<ide> records. The type and structure of individual results varies based on `rrtype`:
<ide> | `'PTR'` | pointer records | {string} | [`dns.resolvePtr()`][] |
<ide> | `'SOA'` | start of authority records | {Object} | [`dns.resolveSoa()`][] |
<ide> | `'SRV'` | service records | {Object} | [`dns.resolveSrv()`][] |
<del>| `'TXT'` | text records | {string} | [`dns.resolveTxt()`][] |
<add>| `'TXT'` | text records | {string[]} | [`dns.resolveTxt()`][] |
<ide> | `'ANY'` | any records | {Object} | [`dns.resolveAny()`][] |
<ide>
<ide> On error, `err` is an [`Error`][] object, where `err.code` is one of the
<ide> added: v0.1.27
<ide> - `hostname` {string}
<ide> - `callback` {Function}
<ide> - `err` {Error}
<del> - `addresses` {string[]}
<add> - `records` {string[][]}
<ide>
<ide> Uses the DNS protocol to resolve text queries (`TXT` records) for the
<del>`hostname`. The `addresses` argument passed to the `callback` function is
<del>is a two-dimensional array of the text records available for `hostname` (e.g.,
<add>`hostname`. The `records` argument passed to the `callback` function is a
<add>two-dimensional array of the text records available for `hostname` (e.g.,
<ide> `[ ['v=spf1 ip4:0.0.0.0 ', '~all' ] ]`). Each sub-array contains TXT chunks of
<ide> one record. Depending on the use case, these could be either joined together or
<ide> treated separately. | 1 |
Text | Text | remove the redundant "[" before "this" word | 4e7d3e718e941c7e774cbbad36388fdee043618e | <ide><path>guide/english/algorithms/algorithm-performance/index.md
<ide> Therefore we can say that the best case time complexity of bubble sort is O(_n_)
<ide> Examining the worst case scenario where the array is in reverse order, the first iteration will make _n_ comparisons while the next will have to make _n_ - 1 comparisons and so on until only 1 comparison must be made.
<ide> The big-O notation for this case is therefore _n_ * [(_n_ - 1) / 2] which = 0.5*n*^2 - 0.5*n* = O(_n_^2) as the _n_^2 term dominates the function which allows us to ignore the other term in the function.
<ide>
<del>We can confirm this analysis using [this handy big-O cheat sheet</a> that features the big-O time complexity of many commonly used data structures and algorithms
<add>We can confirm this analysis using this handy big-O cheat sheet</a> that features the big-O time complexity of many commonly used data structures and algorithms
<ide>
<ide> It is very apparent that while for small use cases this time complexity might be alright, at a large scale bubble sort is simply not a good solution for sorting.
<ide> This is the power of big-O notation: it allows developers to easily see the potential bottlenecks of their application, and take steps to make these more scalable. | 1 |
Python | Python | fix assertion to check actual content | 6734eb1d09a99dc519e89a59e2086cef09a87098 | <ide><path>tests/operators/test_trigger_dagrun.py
<ide> def test_trigger_dagrun_operator_conf(self):
<ide> with create_session() as session:
<ide> dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
<ide> assert len(dagruns) == 1
<del> assert dagruns[0].conf, {"foo": "bar"}
<add> assert dagruns[0].conf == {"foo": "bar"}
<ide>
<ide> def test_trigger_dagrun_operator_templated_invalid_conf(self):
<ide> """Test passing a conf that is not JSON Serializable raise error."""
<ide> def test_trigger_dagrun_operator_templated_conf(self):
<ide> with create_session() as session:
<ide> dagruns = session.query(DagRun).filter(DagRun.dag_id == TRIGGERED_DAG_ID).all()
<ide> assert len(dagruns) == 1
<del> assert dagruns[0].conf, {"foo": TEST_DAG_ID}
<add> assert dagruns[0].conf == {"foo": TEST_DAG_ID}
<ide>
<ide> def test_trigger_dagrun_with_reset_dag_run_false(self):
<ide> """Test TriggerDagRunOperator with reset_dag_run.""" | 1 |
PHP | PHP | pull routes after booting application | c33453e97f93a7ed0eb019e86ae8bedf9ddff9e3 | <ide><path>app/src/Providers/RouteServiceProvider.php
<ide> public function before()
<ide> */
<ide> public function map()
<ide> {
<del> require app_path().'/src/Http/routes.php';
<add> $this->app->booted(function()
<add> {
<add> require app('path.src').'/Http/routes.php';
<add> });
<ide> }
<ide>
<ide> }
<ide>\ No newline at end of file | 1 |
Javascript | Javascript | initialize elements in an array from left to right | 966f6d831f9469a917601f9a10604612cd7bd792 | <ide><path>src/ng/parse.js
<ide> ASTCompiler.prototype = {
<ide> self.if(self.notNull(right), function() {
<ide> self.addEnsureSafeFunction(right);
<ide> forEach(ast.arguments, function(expr) {
<del> self.recurse(expr, undefined, undefined, function(argument) {
<add> self.recurse(expr, self.nextId(), undefined, function(argument) {
<ide> args.push(self.ensureSafeObject(argument));
<ide> });
<ide> });
<ide> ASTCompiler.prototype = {
<ide> self.addEnsureSafeObject(self.member(left.context, left.name, left.computed));
<ide> expression = self.member(left.context, left.name, left.computed) + ast.operator + right;
<ide> self.assign(intoId, expression);
<del> recursionFn(expression);
<add> recursionFn(intoId || expression);
<ide> });
<ide> }, 1);
<ide> break;
<ide> case AST.ArrayExpression:
<ide> args = [];
<ide> forEach(ast.elements, function(expr) {
<del> self.recurse(expr, undefined, undefined, function(argument) {
<add> self.recurse(expr, self.nextId(), undefined, function(argument) {
<ide> args.push(argument);
<ide> });
<ide> });
<ide> ASTCompiler.prototype = {
<ide> case AST.ObjectExpression:
<ide> args = [];
<ide> forEach(ast.properties, function(property) {
<del> self.recurse(property.value, undefined, undefined, function(expr) {
<add> self.recurse(property.value, self.nextId(), undefined, function(expr) {
<ide> args.push(self.escape(
<ide> property.key.type === AST.Identifier ? property.key.name :
<ide> ('' + property.key.value)) +
<ide><path>test/ng/parseSpec.js
<ide> describe('parser', function() {
<ide> expect(scope.$eval('a + \n b.c + \r "\td" + \t \r\n\r "\r\n\n"')).toEqual("abc\td\r\n\n");
<ide> });
<ide>
<add>
<add> // https://github.com/angular/angular.js/issues/10968
<add> it('should evaluate arrays literals initializers left-to-right', inject(function($parse) {
<add> var s = {c:function() {return {b: 1}; }};
<add> expect($parse("e=1;[a=c(),d=a.b+1]")(s)).toEqual([{b: 1}, 2]);
<add> }));
<add>
<add> it('should evaluate function arguments left-to-right', inject(function($parse) {
<add> var s = {c:function() {return {b: 1}; }, i: function(x, y) { return [x, y];}};
<add> expect($parse("e=1;i(a=c(),d=a.b+1)")(s)).toEqual([{b: 1}, 2]);
<add> }));
<add>
<add> it('should evaluate object properties expressions left-to-right', inject(function($parse) {
<add> var s = {c:function() {return {b: 1}; }};
<add> expect($parse("e=1;{x: a=c(), y: d=a.b+1}")(s)).toEqual({x: {b: 1}, y: 2});
<add> }));
<add>
<add>
<ide> describe('sandboxing', function() {
<ide> describe('Function constructor', function() {
<ide> it('should not tranverse the Function constructor in the getter', function() { | 2 |
PHP | PHP | extract a protected method | 0c46ccf657b81fc7338b9da6eb04625419013147 | <ide><path>src/View/Input/Radio.php
<ide> public function render($data) {
<ide> $options = (array)$data['options'];
<ide> }
<ide>
<del> $escape = $data['escape'];
<ide> if (!empty($data['empty'])) {
<ide> $empty = $data['empty'] === true ? 'empty' : $data['empty'];
<ide> $options = ['' => $empty] + $options;
<ide> public function render($data) {
<ide>
<ide> $opts = [];
<ide> foreach ($options as $val => $text) {
<del> if (is_int($val) && isset($text['text'], $text['value'])) {
<del> $radio = $text;
<del> $text = $radio['text'];
<del> } else {
<del> $radio = ['value' => $val, 'text' => $text];
<del> }
<del> $radio['name'] = $data['name'];
<del>
<del> if (empty($radio['id'])) {
<del> $radio['id'] = Inflector::slug($radio['name'] . '_' . $radio['value']);
<del> }
<del>
<del> if (isset($data['value']) && strval($data['value']) === strval($radio['value'])) {
<del> $radio['checked'] = true;
<del> }
<del>
<del> if ($this->_isDisabled($radio, $data['disabled'])) {
<del> $radio['disabled'] = true;
<del> }
<del>
<del> $label = $this->_renderLabel($radio, $data['label'], $escape);
<del>
<del> $input = $this->_templates->format('radio', [
<del> 'name' => $radio['name'],
<del> 'value' => $escape ? h($radio['value']) : $radio['value'],
<del> 'attrs' => $this->_templates->formatAttributes($radio, ['name', 'value', 'text']),
<del> ]);
<del>
<del> $opts[] = $this->_templates->format('radioContainer', [
<del> 'input' => $input,
<del> 'label' => $label,
<del> ]);
<add> $opts[] = $this->_renderInput($val, $text, $data);
<ide> }
<ide> return implode('', $opts);
<ide> }
<ide> protected function _isDisabled($radio, $disabled) {
<ide> return (!is_array($disabled) || in_array((string)$radio['value'], $disabled, !$isNumeric));
<ide> }
<ide>
<add>/**
<add> * Renders a single radio input and label.
<add> *
<add> * @param string|int $val The value of the radio input.
<add> * @param string|array $text The label text, or complex radio type.
<add> * @param array $data Additional options for input generation.
<add> * @return string.
<add> */
<add> protected function _renderInput($val, $text, $data) {
<add> $escape = $data['escape'];
<add> if (is_int($val) && isset($text['text'], $text['value'])) {
<add> $radio = $text;
<add> $text = $radio['text'];
<add> } else {
<add> $radio = ['value' => $val, 'text' => $text];
<add> }
<add> $radio['name'] = $data['name'];
<add>
<add> if (empty($radio['id'])) {
<add> $radio['id'] = Inflector::slug($radio['name'] . '_' . $radio['value']);
<add> }
<add>
<add> if (isset($data['value']) && strval($data['value']) === strval($radio['value'])) {
<add> $radio['checked'] = true;
<add> }
<add>
<add> if ($this->_isDisabled($radio, $data['disabled'])) {
<add> $radio['disabled'] = true;
<add> }
<add>
<add> $label = $this->_renderLabel($radio, $data['label'], $escape);
<add>
<add> $input = $this->_templates->format('radio', [
<add> 'name' => $radio['name'],
<add> 'value' => $escape ? h($radio['value']) : $radio['value'],
<add> 'attrs' => $this->_templates->formatAttributes($radio, ['name', 'value', 'text']),
<add> ]);
<add>
<add> return $this->_templates->format('radioContainer', [
<add> 'input' => $input,
<add> 'label' => $label,
<add> ]);
<add> }
<add>
<ide> /**
<ide> * Renders a label element for a given radio button.
<ide> * | 1 |
Javascript | Javascript | remove reference to dprecation (no longer applies) | 02e3f0a00c78174ce6cc24bf6e7fe0d83a89c7d6 | <ide><path>packages/ember-runtime/lib/mixins/array.js
<ide> export default Mixin.create(Enumerable, {
<ide> return an enumerable that maps automatically to the named key on the
<ide> member objects.
<ide>
<del> @each should only be used in a non-terminal context, and is deprecated when
<del> used as a leaf node. Example:
<add> @each should only be used in a non-terminal context. Example:
<ide>
<ide> ```javascript
<ide> myMethod: computed('posts.@each.author', function(){ | 1 |
Ruby | Ruby | make unsafesubversiondownloadstrategy a no-op | 023bddbd046c7dce0f9e56344f9fb5e258e0f59f | <ide><path>Library/Homebrew/download_strategy.rb
<ide> def get_externals
<ide> end
<ide> end
<ide>
<del> def fetch_args
<del> []
<del> end
<del>
<ide> def fetch_repo target, url, revision=nil, ignore_externals=false
<ide> # Use "svn up" when the repository already exists locally.
<ide> # This saves on bandwidth and will have a similar effect to verifying the
<ide> # cache as it will make any changes to get the right revision.
<ide> svncommand = target.directory? ? 'up' : 'checkout'
<del> args = ['svn', svncommand] + fetch_args
<add> args = ['svn', svncommand]
<ide> args << url unless target.directory?
<ide> args << target
<ide> args << '-r' << revision if revision
<ide> def clone_repo
<ide>
<ide> # @deprecated
<ide> StrictSubversionDownloadStrategy = SubversionDownloadStrategy
<del>
<ide> # @deprecated
<del>class UnsafeSubversionDownloadStrategy < SubversionDownloadStrategy
<del> def fetch_args
<del> %w[--non-interactive --trust-server-cert]
<del> end
<del> private :fetch_args
<del>end
<add>UnsafeSubversionDownloadStrategy = SubversionDownloadStrategy
<ide>
<ide> class GitDownloadStrategy < VCSDownloadStrategy
<ide> SHALLOW_CLONE_WHITELIST = [ | 1 |
Python | Python | use lowercase test_ for concrete test cases | 98d501f9375de5e2977e66c47ab1ee43039b0671 | <ide><path>celery/tests/test_app/test_app_amqp.py
<ide> from celery.tests.utils import AppCase
<ide>
<ide>
<del>class TestMsgOptions(AppCase):
<add>class test_extract_message_options(AppCase):
<ide>
<ide> def test_MSG_OPTIONS(self):
<ide> self.assertTrue(MSG_OPTIONS)
<ide><path>celery/tests/test_app/test_celery.py
<ide> import celery
<ide>
<ide>
<del>class TestInitFile(Case):
<add>class test_celery_package(Case):
<ide>
<ide> def test_version(self):
<ide> self.assertTrue(celery.VERSION)
<ide><path>celery/tests/test_app/test_loaders.py
<ide> def read_configuration(self):
<ide> return {"foo": "bar", "CELERY_IMPORTS": ("os", "sys")}
<ide>
<ide>
<del>class TestLoaders(AppCase):
<add>class test_loaders(AppCase):
<ide>
<ide> def test_get_loader_cls(self):
<ide>
<ide> def test_load_settings(self):
<ide> self.assertIs(loaders.load_settings(), self.app.conf)
<ide>
<ide>
<del>class TestLoaderBase(Case):
<add>class test_LoaderBase(Case):
<ide> message_options = {"subject": "Subject",
<ide> "body": "Body",
<ide> "sender": "x@x.com",
<ide> def test_cmdline_config_ValueError(self):
<ide> self.loader.cmdline_config_parser(["broker.port=foobar"])
<ide>
<ide>
<del>class TestDefaultLoader(Case):
<add>class test_DefaultLoader(Case):
<ide>
<ide> def test_wanted_module_item(self):
<ide> l = default.Loader()
<ide><path>celery/tests/test_backends/__init__.py
<ide> from celery.tests.utils import Case
<ide>
<ide>
<del>class TestBackends(Case):
<add>class test_backends(Case):
<ide>
<ide> def test_get_backend_aliases(self):
<ide> expects = [("amqp", AMQPBackend),
<ide><path>celery/tests/test_backends/test_mongodb.py
<ide> MONGODB_COLLECTION = "collection1"
<ide>
<ide>
<del>class TestBackendMongoDb(Case):
<add>class test_MongoBackend(Case):
<ide>
<ide> def setUp(self):
<ide> if pymongo is None:
<ide><path>celery/tests/test_backends/test_redis.py
<ide> def emit_no_redis_msg(reason):
<ide> return emit_no_redis_msg("not configured")
<ide>
<ide>
<del>class TestRedisBackend(Case):
<add>class test_RedisBackend(Case):
<ide>
<ide> def test_mark_as_done(self):
<ide> tb = get_redis_or_SkipTest()
<ide> def test_mark_as_failure(self):
<ide> self.assertIsInstance(tb.get_result(tid3), KeyError)
<ide>
<ide>
<del>class TestRedisBackendNoRedis(Case):
<add>class test_RedisBackend_without_redis(Case):
<ide>
<ide> def test_redis_None_if_redis_not_installed(self):
<ide> prev = sys.modules.pop("celery.backends.redis")
<ide><path>celery/tests/test_concurrency/test_concurrency_gevent.py
<del>from __future__ import absolute_import
<del>
<del>from nose import SkipTest
<del>
<del>from celery.concurrency.gevent import TaskPool
<del>from celery.tests.utils import unittest
<del>
<del>
<del>class GeventCase(unittest.TestCase):
<del>
<del> def setUp(self):
<del> try:
<del> self.gevent = __import__("gevent")
<del> except ImportError:
<del> raise SkipTest(
<del> "gevent not installed, skipping related tests.")
<del>
<del>
<del>class test_TaskPool(GeventCase):
<del>
<del> def test_grow(self):
<del> pool = TaskPool(10)
<del> pool.start()
<del> self.assertEqual(pool._pool.size, 10)
<del> pool.grow()
<del> self.assertEqual(pool._pool.size, 11)
<del>
<del> def test_grow_many(self):
<del> pool = TaskPool(10)
<del> pool.start()
<del> self.assertEqual(pool._pool.size, 10)
<del> pool.grow(2)
<del> self.assertEqual(pool._pool.size, 12)
<del>
<del> def test_shrink(self):
<del> pool = TaskPool(10)
<del> pool.start()
<del> self.assertEqual(pool._pool.size, 10)
<del> pool.shrink()
<del> self.assertEqual(pool._pool.size, 9)
<del>
<del> def test_shrink_many(self):
<del> pool = TaskPool(10)
<del> pool.start()
<del> self.assertEqual(pool._pool.size, 10)
<del> pool.shrink(2)
<del> self.assertEqual(pool._pool.size, 8)
<del>
<del> def test_num_processes(self):
<del> pool = TaskPool(10)
<del> pool.start()
<del> pool.apply_async(lambda x: x, (2, ), {})
<del> self.assertEqual(pool.num_processes, 1)
<ide><path>celery/tests/test_concurrency/test_gevent.py
<ide> )
<ide>
<ide> from celery.tests.utils import Case, mock_module
<add>gevent_modules = (
<add> "gevent",
<add> "gevent.monkey",
<add> "gevent.greenlet",
<add> "gevent.pool",
<add> "greenlet",
<add>)
<ide>
<ide>
<ide> class GeventCase(Case):
<ide> def setUp(self):
<ide> class test_gevent_patch(GeventCase):
<ide>
<ide> def test_is_patched(self):
<del> monkey_patched = []
<del> from gevent import monkey
<del> prev_monkey_patch = monkey.patch_all
<del> monkey.patch_all = lambda: monkey_patched.append(True)
<del> prev_gevent = sys.modules.pop("celery.concurrency.gevent", None)
<del> os.environ.pop("GEVENT_NOPATCH")
<del> try:
<del> import celery.concurrency.gevent # noqa
<del> self.assertTrue(monkey_patched)
<del> finally:
<del> sys.modules["celery.concurrency.gevent"] = prev_gevent
<del> os.environ["GEVENT_NOPATCH"] = "yes"
<del> monkey.patch_all = prev_monkey_patch
<del>
<del>
<del>gevent_modules = (
<del> "gevent",
<del> "gevent.monkey",
<del> "gevent.greenlet",
<del> "gevent.pool",
<del> "greenlet",
<del>)
<add> with mock_module(*gevent_modules):
<add> monkey_patched = []
<add> from gevent import monkey
<add> prev_monkey_patch = monkey.patch_all
<add> monkey.patch_all = lambda: monkey_patched.append(True)
<add> prev_gevent = sys.modules.pop("celery.concurrency.gevent", None)
<add> os.environ.pop("GEVENT_NOPATCH")
<add> try:
<add> import celery.concurrency.gevent # noqa
<add> self.assertTrue(monkey_patched)
<add> finally:
<add> sys.modules["celery.concurrency.gevent"] = prev_gevent
<add> os.environ["GEVENT_NOPATCH"] = "yes"
<add> monkey.patch_all = prev_monkey_patch
<ide>
<ide>
<ide> class test_Schedule(Case):
<ide>
<ide> def test_sched(self):
<ide> with mock_module(*gevent_modules):
<del> @patch("gevent.greenlet.Greenlet")
<add> @patch("gevent.greenlet")
<ide> @patch("gevent.greenlet.GreenletExit")
<del> def do_test(Greenlet, GreenletExit):
<add> def do_test(GreenletExit, greenlet):
<add> greenlet.Greenlet = object
<ide> x = Schedule()
<add> greenlet.Greenlet = Mock()
<ide> x._Greenlet.spawn_later = Mock()
<ide> x._GreenletExit = KeyError
<ide> entry = Mock()
<ide><path>celery/tests/test_concurrency/test_pool.py
<ide> def raise_something(i):
<ide> return ExceptionInfo(sys.exc_info())
<ide>
<ide>
<del>class TestTaskPool(Case):
<add>class test_TaskPool(Case):
<ide>
<ide> def setUp(self):
<ide> try:
<ide><path>celery/tests/test_events/__init__.py
<ide> def has_event(self, kind):
<ide> return False
<ide>
<ide>
<del>class TestEvent(Case):
<add>class test_Event(Case):
<ide>
<ide> def test_constructor(self):
<ide> event = events.Event("world war II")
<ide> self.assertEqual(event["type"], "world war II")
<ide> self.assertTrue(event["timestamp"])
<ide>
<ide>
<del>class TestEventDispatcher(Case):
<add>class test_EventDispatcher(Case):
<ide>
<ide> def setUp(self):
<ide> self.app = app_or_default()
<ide> def test_enabled_disable(self):
<ide> connection.close()
<ide>
<ide>
<del>class TestEventReceiver(Case):
<add>class test_EventReceiver(Case):
<ide>
<ide> def setUp(self):
<ide> self.app = app_or_default()
<ide><path>celery/tests/test_events/test_events_cursesmon.py
<ide> def getmaxyx(self):
<ide> return self.y, self.x
<ide>
<ide>
<del>class TestCursesDisplay(Case):
<add>class test_CursesDisplay(Case):
<ide>
<ide> def setUp(self):
<ide> try:
<ide><path>celery/tests/test_task/__init__.py
<ide> def retry_task_customexc(arg1, arg2, kwarg=1, **kwargs):
<ide> return current.retry(countdown=0, exc=exc)
<ide>
<ide>
<del>class TestTaskRetries(Case):
<add>class test_task_retries(Case):
<ide>
<ide> def test_retry(self):
<ide> retry_task.__class__.max_retries = 3
<ide> def test_max_retries_exceeded(self):
<ide> self.assertEqual(retry_task.iterations, 2)
<ide>
<ide>
<del>class TestCeleryTasks(Case):
<add>class test_tasks(Case):
<ide>
<ide> def test_unpickle_task(self):
<ide> import pickle
<ide> def test_get_logger(self):
<ide> self.assertTrue(logger)
<ide>
<ide>
<del>class TestTaskSet(Case):
<add>class test_TaskSet(Case):
<ide>
<ide> @with_eager_tasks
<ide> def test_function_taskset(self):
<ide> def test_named_taskset(self):
<ide> self.assertTrue(res.taskset_id.startswith(prefix))
<ide>
<ide>
<del>class TestTaskApply(Case):
<add>class test_apply_task(Case):
<ide>
<ide> def test_apply_throw(self):
<ide> with self.assertRaises(KeyError):
<ide> def my_periodic():
<ide> pass
<ide>
<ide>
<del>class TestPeriodicTask(Case):
<add>class test_periodic_tasks(Case):
<ide>
<ide> def test_must_have_run_every(self):
<ide> with self.assertRaises(NotImplementedError):
<ide><path>celery/tests/test_task/test_context.py
<ide> def run(self):
<ide> self.result = get_context_as_dict(self.ctx)
<ide>
<ide>
<del>class TestTaskContext(Case):
<add>class test_Context(Case):
<ide>
<ide> def test_default_context(self):
<ide> # A bit of a tautological test, since it uses the same
<ide><path>celery/tests/test_task/test_registry.py
<ide> from celery.tests.utils import Case
<ide>
<ide>
<del>class TestTask(Task):
<add>class MockTask(Task):
<ide> name = "celery.unittest.test_task"
<ide>
<ide> def run(self, **kwargs):
<ide> return True
<ide>
<ide>
<del>class TestPeriodicTask(PeriodicTask):
<add>class MockPeriodicTask(PeriodicTask):
<ide> name = "celery.unittest.test_periodic_task"
<ide> run_every = 10
<ide>
<ide> def run(self, **kwargs):
<ide> return True
<ide>
<ide>
<del>class TestTaskRegistry(Case):
<add>class test_TaskRegistry(Case):
<ide>
<ide> def assertRegisterUnregisterCls(self, r, task):
<ide> with self.assertRaises(r.NotRegistered):
<ide> def test_task_registry(self):
<ide> self.assertIsInstance(r, dict,
<ide> "TaskRegistry is mapping")
<ide>
<del> self.assertRegisterUnregisterCls(r, TestTask)
<del> self.assertRegisterUnregisterCls(r, TestPeriodicTask)
<add> self.assertRegisterUnregisterCls(r, MockTask)
<add> self.assertRegisterUnregisterCls(r, MockPeriodicTask)
<ide>
<del> r.register(TestPeriodicTask)
<del> r.unregister(TestPeriodicTask.name)
<del> self.assertNotIn(TestPeriodicTask, r)
<del> r.register(TestPeriodicTask)
<add> r.register(MockPeriodicTask)
<add> r.unregister(MockPeriodicTask.name)
<add> self.assertNotIn(MockPeriodicTask, r)
<add> r.register(MockPeriodicTask)
<ide>
<ide> tasks = dict(r)
<del> self.assertIsInstance(tasks.get(TestTask.name), TestTask)
<del> self.assertIsInstance(tasks.get(TestPeriodicTask.name),
<del> TestPeriodicTask)
<add> self.assertIsInstance(tasks.get(MockTask.name), MockTask)
<add> self.assertIsInstance(tasks.get(MockPeriodicTask.name),
<add> MockPeriodicTask)
<ide>
<del> self.assertIsInstance(r[TestTask.name], TestTask)
<del> self.assertIsInstance(r[TestPeriodicTask.name],
<del> TestPeriodicTask)
<add> self.assertIsInstance(r[MockTask.name], MockTask)
<add> self.assertIsInstance(r[MockPeriodicTask.name],
<add> MockPeriodicTask)
<ide>
<del> r.unregister(TestTask)
<del> self.assertNotIn(TestTask.name, r)
<del> r.unregister(TestPeriodicTask)
<del> self.assertNotIn(TestPeriodicTask.name, r)
<add> r.unregister(MockTask)
<add> self.assertNotIn(MockTask.name, r)
<add> r.unregister(MockPeriodicTask)
<add> self.assertNotIn(MockPeriodicTask.name, r)
<ide>
<del> self.assertTrue(TestTask().run())
<del> self.assertTrue(TestPeriodicTask().run())
<add> self.assertTrue(MockTask().run())
<add> self.assertTrue(MockPeriodicTask().run())
<ide><path>celery/tests/test_task/test_result.py
<ide> def make_mock_taskset(size=10):
<ide> return [AsyncResult(task["id"]) for task in tasks]
<ide>
<ide>
<del>class TestAsyncResult(AppCase):
<add>class test_AsyncResult(AppCase):
<ide>
<ide> def setup(self):
<ide> self.task1 = mock_task("task1", states.SUCCESS, "the")
<ide> def get_many(self, *args, **kwargs):
<ide> return ((id, {"result": i}) for i, id in enumerate(self.ids))
<ide>
<ide>
<del>class TestTaskSetResult(AppCase):
<add>class test_TaskSetResult(AppCase):
<ide>
<ide> def setup(self):
<ide> self.size = 10
<ide> def test_completed_count(self):
<ide> self.assertEqual(self.ts.completed_count(), len(self.ts))
<ide>
<ide>
<del>class TestPendingAsyncResult(AppCase):
<add>class test_pending_AsyncResult(AppCase):
<ide>
<ide> def setup(self):
<ide> self.task = AsyncResult(uuid())
<ide> def test_result(self):
<ide> self.assertIsNone(self.task.result)
<ide>
<ide>
<del>class TestFailedTaskSetResult(TestTaskSetResult):
<add>class test_failed_AsyncResult(TestTaskSetResult):
<ide>
<ide> def setup(self):
<ide> self.size = 11
<ide> def test_failed(self):
<ide> self.assertTrue(self.ts.failed())
<ide>
<ide>
<del>class TestTaskSetPending(AppCase):
<add>class test_pending_TaskSet(AppCase):
<ide>
<ide> def setup(self):
<ide> self.ts = TaskSetResult(uuid(), [
<ide> def run(self, x, y):
<ide> raise KeyError("xy")
<ide>
<ide>
<del>class TestEagerResult(AppCase):
<add>class test_EagerResult(AppCase):
<ide>
<ide> def test_wait_raises(self):
<ide> res = RaisingTask.apply(args=[3, 3])
<ide><path>celery/tests/test_task/test_task_abortable.py
<ide> def run(self, **kwargs):
<ide> return True
<ide>
<ide>
<del>class TestAbortableTask(Case):
<add>class test_AbortableTask(Case):
<ide>
<ide> def test_async_result_is_abortable(self):
<ide> t = MyAbortableTask()
<ide><path>celery/tests/test_task/test_task_http.py
<ide> def unknown_response():
<ide> return _response(dumps({"status": "u.u.u.u", "retval": True}))
<ide>
<ide>
<del>class TestEncodings(Case):
<add>class test_encodings(Case):
<ide>
<ide> def test_utf8dict(self):
<ide> uk = "foobar"
<ide> def test_utf8dict(self):
<ide> self.assertIsInstance(value, str)
<ide>
<ide>
<del>class TestMutableURL(Case):
<add>class test_MutableURL(Case):
<ide>
<ide> def test_url_query(self):
<ide> url = http.MutableURL("http://example.com?x=10&y=20&z=Foo")
<ide> def test_set_query(self):
<ide> self.assertEqual(url.query, {"zzz": "xxx"})
<ide>
<ide>
<del>class TestHttpDispatch(Case):
<add>class test_HttpDispatch(Case):
<ide>
<ide> def test_dispatch_success(self):
<ide> logger = logging.getLogger("celery.unittest")
<ide> def test_dispatch_POST(self):
<ide> self.assertEqual(d.dispatch(), 100)
<ide>
<ide>
<del>class TestURL(Case):
<add>class test_URL(Case):
<ide>
<ide> def test_URL_get_async(self):
<ide> with eager_tasks():
<ide><path>celery/tests/test_utils/test_pickle.py
<ide> def __init__(self, message, status_code=10):
<ide> Exception.__init__(self, message, status_code)
<ide>
<ide>
<del>class TestPickle(Case):
<add>class test_Pickle(Case):
<ide>
<ide> def test_pickle_regular_exception(self):
<ide> exc = None
<ide><path>celery/tests/test_utils/test_serialization.py
<ide> from celery.tests.utils import Case, mask_modules
<ide>
<ide>
<del>class TestAAPickle(Case):
<add>class test_AAPickle(Case):
<ide>
<ide> def test_no_cpickle(self):
<ide> prev = sys.modules.pop("celery.utils.serialization", None)
<ide><path>celery/tests/test_utils/test_utils_info.py
<ide> QUEUE_FORMAT2 = """. queue2: exchange:exchange2 (type2) binding:bind2"""
<ide>
<ide>
<del>class TestInfo(Case):
<add>class test_Info(Case):
<ide>
<ide> def test_textindent(self):
<ide> self.assertEqual(indent(RANDTEXT, 4), RANDTEXT_RES)
<ide><path>celery/tests/test_worker/test_worker_heartbeat.py
<ide> def cancel(self, entry):
<ide> entry.cancel()
<ide>
<ide>
<del>class TestHeart(Case):
<add>class test_Heart(Case):
<ide>
<ide> def test_stop(self):
<ide> timer = MockTimer() | 21 |
Text | Text | fix cache_timestamp_format default value in guides | 4a84942e0b99807d1320cf2e4563ede53cf75b82 | <ide><path>guides/source/configuring.md
<ide> All these configuration options are delegated to the `I18n` library.
<ide>
<ide> * `config.active_record.lock_optimistically` controls whether Active Record will use optimistic locking and is `true` by default.
<ide>
<del>* `config.active_record.cache_timestamp_format` controls the format of the timestamp value in the cache key. Default is `:nsec`.
<add>* `config.active_record.cache_timestamp_format` controls the format of the timestamp value in the cache key. Default is `:usec`.
<ide>
<ide> * `config.active_record.record_timestamps` is a boolean value which controls whether or not timestamping of `create` and `update` operations on a model occur. The default value is `true`.
<ide> | 1 |
Javascript | Javascript | remove settimeout (covered by misc/timeout.js) | fef35fc4f1da9e06790d5b3b34b3bcdb085dd3a2 | <ide><path>benchmark/settimeout.js
<del>console.log("wait...");
<del>var done = 0;
<del>var N = 5000000;
<del>var begin = new Date();
<del>for (var i = 0; i < N; i++) {
<del> setTimeout(function () {
<del> if (++done == N) {
<del> var end = new Date();
<del> console.log("smaller is better");
<del> console.log("startup: %d", start - begin);
<del> console.log("done: %d", end - start);
<del> }
<del> }, 1000);
<del>}
<del>var start = new Date(); | 1 |
Javascript | Javascript | fix regression #372 | 528e89693b15154913f2fb0d2df9da7b8ce052d6 | <ide><path>pdf.js
<ide> var PartialEvaluator = (function() {
<ide> }
<ide>
<ide> if (fontDict.has('ToUnicode')) {
<add> encodingMap['empty'] = true;
<ide> var cmapObj = xref.fetchIfRef(fontDict.get('ToUnicode'));
<ide> if (IsName(cmapObj)) {
<ide> error('ToUnicode file cmap translation not implemented'); | 1 |
Python | Python | fix doc/source/conf.py to work with python 3 | 9d8722b5bc76ecb2fe74a8e8dd3a7b1c2c83985b | <ide><path>doc/source/conf.py
<ide> version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
<ide> # The full version, including alpha/beta/rc tags.
<ide> release = numpy.__version__
<del>print version, release
<add>print("%s %s" % (version, release))
<ide>
<ide> # There are two options for replacing |today|: either, you set today to some
<ide> # non-false value, then it is used:
<ide> except ImportError:
<ide> pass
<ide> else:
<del> print "NOTE: linkcode extension not found -- no links to source generated"
<add> print("NOTE: linkcode extension not found -- no links to source generated")
<ide>
<ide> def linkcode_resolve(domain, info):
<ide> """ | 1 |
PHP | PHP | remove needless line of code | 26aeb1155063f9e4169dc5820d18fe159c066989 | <ide><path>lib/Cake/Utility/Sanitize.php
<ide> public static function clean($data, $options = array()) {
<ide> $data = str_replace("\r", "", $data);
<ide> }
<ide>
<del> $data = str_replace("'", "'", str_replace("!", "!", $data));
<del>
<ide> if ($options['unicode']) {
<ide> $data = preg_replace("/&#([0-9]+);/s", "&#\\1;", $data);
<ide> } | 1 |
Ruby | Ruby | add factory methods for empty alias trackers | bfc776f7bb114e90cf91f16f5892be636ed2f0c8 | <ide><path>activerecord/lib/active_record/associations/alias_tracker.rb
<ide> module Associations
<ide> class AliasTracker # :nodoc:
<ide> attr_reader :aliases, :connection
<ide>
<add> def self.empty(connection)
<add> new connection, Hash.new(0)
<add> end
<add>
<add> def self.create(connection, table_joins)
<add> if table_joins.empty?
<add> empty connection
<add> else
<add> aliases = Hash.new { |h,k|
<add> h[k] = initial_count_for(connection, k, table_joins)
<add> }
<add> new connection, aliases
<add> end
<add> end
<add>
<add> def self.initial_count_for(connection, name, table_joins)
<add> # quoted_name should be downcased as some database adapters (Oracle) return quoted name in uppercase
<add> quoted_name = connection.quote_table_name(name).downcase
<add>
<add> counts = table_joins.map do |join|
<add> if join.is_a?(Arel::Nodes::StringJoin)
<add> # Table names + table aliases
<add> join.left.downcase.scan(
<add> /join(?:\s+\w+)?\s+(\S+\s+)?#{quoted_name}\son/
<add> ).size
<add> else
<add> join.left.table_name == name ? 1 : 0
<add> end
<add> end
<add>
<add> counts.sum
<add> end
<add>
<ide> # table_joins is an array of arel joins which might conflict with the aliases we assign here
<del> def initialize(connection, table_joins)
<del> @aliases = Hash.new { |h,k| h[k] = initial_count_for(k, table_joins) }
<del> @connection = connection
<add> def initialize(connection, aliases)
<add> @aliases = aliases
<add> @connection = connection
<ide> end
<ide>
<ide> def aliased_table_for(table_name, aliased_name)
<ide> def aliased_name_for(table_name, aliased_name)
<ide>
<ide> private
<ide>
<del> def initial_count_for(name, table_joins)
<del> # quoted_name should be downcased as some database adapters (Oracle) return quoted name in uppercase
<del> quoted_name = connection.quote_table_name(name).downcase
<del>
<del> counts = table_joins.map do |join|
<del> if join.is_a?(Arel::Nodes::StringJoin)
<del> # Table names + table aliases
<del> join.left.downcase.scan(
<del> /join(?:\s+\w+)?\s+(\S+\s+)?#{quoted_name}\son/
<del> ).size
<del> else
<del> join.left.table_name == name ? 1 : 0
<del> end
<del> end
<del>
<del> counts.sum
<del> end
<del>
<ide> def truncate(name)
<ide> name.slice(0, connection.table_alias_length - 2)
<ide> end
<ide><path>activerecord/lib/active_record/associations/association_scope.rb
<ide> def scope(association, connection)
<ide> reflection = association.reflection
<ide> scope = klass.unscoped
<ide> owner = association.owner
<del> alias_tracker = AliasTracker.new(connection, [])
<add> alias_tracker = AliasTracker.empty connection
<ide>
<ide> scope.extending! Array(reflection.options[:extend])
<ide> add_constraints(scope, owner, klass, reflection, alias_tracker)
<ide><path>activerecord/lib/active_record/associations/join_dependency.rb
<ide> def self.walk_tree(associations, hash)
<ide> # joins # => []
<ide> #
<ide> def initialize(base, associations, joins)
<del> @alias_tracker = AliasTracker.new(base.connection, joins)
<add> @alias_tracker = AliasTracker.create(base.connection, joins)
<ide> @alias_tracker.aliased_name_for(base.table_name, base.table_name) # Updates the count for base.table_name to 1
<ide> tree = self.class.make_tree associations
<ide> @join_root = JoinBase.new base, build(tree, base) | 3 |
Javascript | Javascript | fix extend_prototypes for sproutcore-datetime | 2885a044938380e1e0ed78534f76a830297df1e1 | <ide><path>packages/sproutcore-datetime/lib/datetime.js
<ide> SC.DateTime.reopenClass(SC.Comparable,
<ide> if ( opts.month === 2 && opts.day > 29 ){
<ide> return null;
<ide> }
<del> if ([4,6,9,11].contains(opts.month) && opts.day > 30) {
<add> if (jQuery.inArray(opts.month, [4,6,9,11]) > -1 && opts.day > 30) {
<ide> return null;
<ide> }
<ide> }
<ide><path>packages/sproutcore-datetime/tests/datetime_test.js
<ide> function timeShouldBeEqualToHash(t, h, message) {
<ide> return;
<ide> }
<ide>
<del> equals(get(t, 'year'), h.year , message.fmt('year'));
<del> equals(get(t, 'month'), h.month, message.fmt('month'));
<del> equals(get(t, 'day'), h.day, message.fmt('day'));
<del> equals(get(t, 'hour'), h.hour, message.fmt('hour'));
<del> equals(get(t, 'minute'), h.minute, message.fmt('minute'));
<del> equals(get(t, 'second'), h.second, message.fmt('second'));
<del> equals(get(t, 'millisecond'), h.millisecond, message.fmt('millisecond'));
<del> equals(get(t, 'timezone'), h.timezone, message.fmt('timezone'));
<add> equals(get(t, 'year'), h.year , SC.String.fmt(message, 'year'));
<add> equals(get(t, 'month'), h.month, SC.String.fmt(message, 'month'));
<add> equals(get(t, 'day'), h.day, SC.String.fmt(message, 'day'));
<add> equals(get(t, 'hour'), h.hour, SC.String.fmt(message, 'hour'));
<add> equals(get(t, 'minute'), h.minute, SC.String.fmt(message, 'minute'));
<add> equals(get(t, 'second'), h.second, SC.String.fmt(message, 'second'));
<add> equals(get(t, 'millisecond'), h.millisecond, SC.String.fmt(message, 'millisecond'));
<add> equals(get(t, 'timezone'), h.timezone, SC.String.fmt(message, 'timezone'));
<ide> }
<ide>
<ide> function formatTimezone(offset) { | 2 |
Javascript | Javascript | consolidate logic for press event component | 051513bfa064c0d5d058482db4cc3a32955b9ec5 | <ide><path>packages/react-events/src/Press.js
<ide> import type {
<ide> ReactResponderDispatchEventOptions,
<ide> } from 'shared/ReactTypes';
<ide> import {REACT_EVENT_COMPONENT_TYPE} from 'shared/ReactSymbols';
<add>import {
<add> getEventPointerType,
<add> getEventCurrentTarget,
<add> isEventPositionWithinTouchHitTarget,
<add>} from './utils';
<ide>
<ide> const CAPTURE_PHASE = 2;
<ide>
<ide> type PointerType = '' | 'mouse' | 'keyboard' | 'pen' | 'touch';
<ide> type PressState = {
<ide> isActivePressed: boolean,
<ide> isActivePressStart: boolean,
<del> isAnchorTouched: boolean,
<ide> isLongPressed: boolean,
<ide> isPressed: boolean,
<ide> isPressWithinResponderRegion: boolean,
<ide> type PressState = {
<ide> right: number,
<ide> top: number,
<ide> |}>,
<del> shouldSkipMouseAfterTouch: boolean,
<add> ignoreEmulatedMouseEvents: boolean,
<ide> };
<ide>
<ide> type PressEventType =
<ide> function calculateResponderRegion(target, props) {
<ide> };
<ide> }
<ide>
<del>function getPointerType(nativeEvent: any) {
<del> const {type, pointerType} = nativeEvent;
<del> if (pointerType != null) {
<del> return pointerType;
<del> }
<del> if (type.indexOf('mouse') > -1) {
<del> return 'mouse';
<del> }
<del> if (type.indexOf('touch') > -1) {
<del> return 'touch';
<del> }
<del> if (type.indexOf('key') > -1) {
<del> return 'keyboard';
<del> }
<del> return '';
<del>}
<del>
<ide> function isPressWithinResponderRegion(
<ide> nativeEvent: $PropertyType<ReactResponderEvent, 'nativeEvent'>,
<ide> state: PressState,
<ide> const PressResponder = {
<ide> didDispatchEvent: false,
<ide> isActivePressed: false,
<ide> isActivePressStart: false,
<del> isAnchorTouched: false,
<ide> isLongPressed: false,
<ide> isPressed: false,
<ide> isPressWithinResponderRegion: true,
<ide> const PressResponder = {
<ide> pressStartTimeout: null,
<ide> pressTarget: null,
<ide> responderRegion: null,
<del> shouldSkipMouseAfterTouch: false,
<add> ignoreEmulatedMouseEvents: false,
<ide> };
<ide> },
<ide> onEvent(
<ide> const PressResponder = {
<ide> if (phase === CAPTURE_PHASE) {
<ide> return false;
<ide> }
<add>
<ide> const nativeEvent: any = event.nativeEvent;
<add> const pointerType = getEventPointerType(event);
<ide> const shouldStopPropagation =
<ide> props.stopPropagation === undefined ? true : props.stopPropagation;
<ide>
<ide> switch (type) {
<del> /**
<del> * Respond to pointer events and fall back to mouse.
<del> */
<add> // START
<ide> case 'pointerdown':
<del> case 'mousedown': {
<del> if (!state.isPressed && !state.shouldSkipMouseAfterTouch) {
<del> const pointerType = getPointerType(nativeEvent);
<del> state.pointerType = pointerType;
<add> case 'keydown':
<add> case 'keypress':
<add> case 'mousedown':
<add> case 'touchstart': {
<add> if (!state.isPressed) {
<add> if (type === 'pointerdown' || type === 'touchstart') {
<add> state.ignoreEmulatedMouseEvents = true;
<add> }
<add>
<add> // Ignore unrelated key events
<add> if (pointerType === 'keyboard') {
<add> if (!isValidKeyPress(nativeEvent.key)) {
<add> return shouldStopPropagation;
<add> }
<add> }
<ide>
<del> // Ignore pressing on hit slop area with mouse
<del> if (
<del> (pointerType === 'mouse' || type === 'mousedown') &&
<del> context.isPositionWithinTouchHitTarget(
<del> target.ownerDocument,
<del> nativeEvent.x,
<del> nativeEvent.y,
<del> )
<del> ) {
<del> return false;
<add> // Ignore emulated mouse events and mouse pressing on touch hit target
<add> // area
<add> if (type === 'mousedown') {
<add> if (
<add> state.ignoreEmulatedMouseEvents ||
<add> isEventPositionWithinTouchHitTarget(event, context)
<add> ) {
<add> return shouldStopPropagation;
<add> }
<ide> }
<ide>
<ide> // Ignore any device buttons except left-mouse and touch/pen contact
<ide> if (nativeEvent.button > 0) {
<ide> return shouldStopPropagation;
<ide> }
<ide>
<add> state.pointerType = pointerType;
<ide> state.pressTarget = target;
<ide> state.isPressWithinResponderRegion = true;
<ide> dispatchPressStartEvents(context, props, state);
<ide> context.addRootEventTypes(target.ownerDocument, rootEventTypes);
<ide> return shouldStopPropagation;
<add> } else {
<add> // Prevent spacebar press from scrolling the window
<add> if (isValidKeyPress(nativeEvent.key) && nativeEvent.key === ' ') {
<add> nativeEvent.preventDefault();
<add> return shouldStopPropagation;
<add> }
<ide> }
<del> return false;
<add> return shouldStopPropagation;
<ide> }
<add>
<add> // MOVE
<ide> case 'pointermove':
<ide> case 'mousemove':
<ide> case 'touchmove': {
<ide> if (state.isPressed) {
<del> if (state.shouldSkipMouseAfterTouch) {
<add> // Ignore emulated events (pointermove will dispatch touch and mouse events)
<add> // Ignore pointermove events during a keyboard press
<add> if (state.pointerType !== pointerType) {
<ide> return shouldStopPropagation;
<ide> }
<ide>
<del> const pointerType = getPointerType(nativeEvent);
<del> state.pointerType = pointerType;
<del>
<ide> if (state.responderRegion == null) {
<del> let currentTarget = (target: any);
<del> while (
<del> currentTarget.parentNode &&
<del> context.isTargetWithinEventComponent(currentTarget.parentNode)
<del> ) {
<del> currentTarget = currentTarget.parentNode;
<del> }
<ide> state.responderRegion = calculateResponderRegion(
<del> currentTarget,
<add> getEventCurrentTarget(event, context),
<ide> props,
<ide> );
<ide> }
<del>
<ide> if (isPressWithinResponderRegion(nativeEvent, state)) {
<ide> state.isPressWithinResponderRegion = true;
<ide> if (props.onPressMove) {
<ide> const PressResponder = {
<ide> }
<ide> return false;
<ide> }
<add>
<add> // END
<ide> case 'pointerup':
<del> case 'mouseup': {
<add> case 'keyup':
<add> case 'mouseup':
<add> case 'touchend': {
<ide> if (state.isPressed) {
<del> if (state.shouldSkipMouseAfterTouch) {
<del> state.shouldSkipMouseAfterTouch = false;
<del> return shouldStopPropagation;
<add> // Ignore unrelated keyboard events
<add> if (pointerType === 'keyboard') {
<add> if (!isValidKeyPress(nativeEvent.key)) {
<add> return false;
<add> }
<ide> }
<ide>
<del> const pointerType = getPointerType(nativeEvent);
<del> state.pointerType = pointerType;
<del>
<ide> const wasLongPressed = state.isLongPressed;
<del>
<ide> dispatchPressEndEvents(context, props, state);
<ide>
<ide> if (state.pressTarget !== null && props.onPress) {
<ide> const PressResponder = {
<ide> }
<ide> context.removeRootEventTypes(rootEventTypes);
<ide> return shouldStopPropagation;
<del> }
<del> state.isAnchorTouched = false;
<del> state.shouldSkipMouseAfterTouch = false;
<del> return false;
<del> }
<del>
<del> /**
<del> * Touch event implementations are only needed for Safari, which lacks
<del> * support for pointer events.
<del> */
<del> case 'touchstart': {
<del> if (!state.isPressed) {
<del> // We bail out of polyfilling anchor tags, given the same heuristics
<del> // explained above in regards to needing to use click events.
<del> if (isAnchorTagElement(target)) {
<del> state.isAnchorTouched = true;
<del> return shouldStopPropagation;
<del> }
<del> const pointerType = getPointerType(nativeEvent);
<del> state.pointerType = pointerType;
<del> state.pressTarget = target;
<del> state.isPressWithinResponderRegion = true;
<del> dispatchPressStartEvents(context, props, state);
<del> context.addRootEventTypes(target.ownerDocument, rootEventTypes);
<del> return shouldStopPropagation;
<del> }
<del> return false;
<del> }
<del> case 'touchend': {
<del> if (state.isAnchorTouched) {
<del> state.isAnchorTouched = false;
<del> return shouldStopPropagation;
<del> }
<del> if (state.isPressed) {
<del> const pointerType = getPointerType(nativeEvent);
<del> state.pointerType = pointerType;
<del>
<del> const wasLongPressed = state.isLongPressed;
<del>
<del> dispatchPressEndEvents(context, props, state);
<del>
<del> if (type !== 'touchcancel' && props.onPress) {
<del> // Find if the X/Y of the end touch is still that of the original target
<del> const changedTouch = nativeEvent.changedTouches[0];
<del> const doc = (target: any).ownerDocument;
<del> const fromTarget = doc.elementFromPoint(
<del> changedTouch.screenX,
<del> changedTouch.screenY,
<del> );
<del> if (
<del> fromTarget !== null &&
<del> context.isTargetWithinEventComponent(fromTarget)
<del> ) {
<del> if (
<del> !(
<del> wasLongPressed &&
<del> props.onLongPressShouldCancelPress &&
<del> props.onLongPressShouldCancelPress()
<del> )
<del> ) {
<del> dispatchEvent(context, state, 'press', props.onPress);
<del> }
<del> }
<del> }
<del> state.shouldSkipMouseAfterTouch = true;
<del> context.removeRootEventTypes(rootEventTypes);
<del> return shouldStopPropagation;
<del> }
<del> return false;
<del> }
<del>
<del> /**
<del> * Keyboard interaction support
<del> * TODO: determine UX for metaKey + validKeyPress interactions
<del> */
<del> case 'keydown':
<del> case 'keypress': {
<del> if (isValidKeyPress(nativeEvent.key)) {
<del> if (state.isPressed) {
<del> // Prevent spacebar press from scrolling the window
<del> if (nativeEvent.key === ' ') {
<del> nativeEvent.preventDefault();
<del> }
<del> } else {
<del> const pointerType = getPointerType(nativeEvent);
<del> state.pointerType = pointerType;
<del> state.pressTarget = target;
<del> dispatchPressStartEvents(context, props, state);
<del> context.addRootEventTypes(target.ownerDocument, rootEventTypes);
<del> }
<del> return shouldStopPropagation;
<del> }
<del> return false;
<del> }
<del> case 'keyup': {
<del> if (state.isPressed && isValidKeyPress(nativeEvent.key)) {
<del> const wasLongPressed = state.isLongPressed;
<del> dispatchPressEndEvents(context, props, state);
<del> if (state.pressTarget !== null && props.onPress) {
<del> if (
<del> !(
<del> wasLongPressed &&
<del> props.onLongPressShouldCancelPress &&
<del> props.onLongPressShouldCancelPress()
<del> )
<del> ) {
<del> dispatchEvent(context, state, 'press', props.onPress);
<del> }
<del> }
<del> context.removeRootEventTypes(rootEventTypes);
<del> return shouldStopPropagation;
<add> } else if (type === 'mouseup' && state.ignoreEmulatedMouseEvents) {
<add> state.ignoreEmulatedMouseEvents = false;
<ide> }
<ide> return false;
<ide> }
<ide>
<add> // CANCEL
<add> case 'contextmenu':
<ide> case 'pointercancel':
<ide> case 'scroll':
<ide> case 'touchcancel': {
<ide> if (state.isPressed) {
<del> state.shouldSkipMouseAfterTouch = false;
<del> dispatchPressEndEvents(context, props, state);
<del> context.removeRootEventTypes(rootEventTypes);
<add> if (type === 'contextmenu' && props.preventDefault !== false) {
<add> nativeEvent.preventDefault();
<add> } else {
<add> state.ignoreEmulatedMouseEvents = false;
<add> dispatchPressEndEvents(context, props, state);
<add> context.removeRootEventTypes(rootEventTypes);
<add> }
<ide> return shouldStopPropagation;
<ide> }
<ide> return false;
<ide> const PressResponder = {
<ide> }
<ide> return false;
<ide> }
<del>
<del> case 'contextmenu': {
<del> if (state.isPressed) {
<del> if (props.preventDefault !== false) {
<del> nativeEvent.preventDefault();
<del> } else {
<del> state.shouldSkipMouseAfterTouch = false;
<del> dispatchPressEndEvents(context, props, state);
<del> context.removeRootEventTypes(rootEventTypes);
<del> }
<del> return shouldStopPropagation;
<del> }
<del> return false;
<del> }
<ide> }
<ide> return false;
<ide> },
<ide><path>packages/react-events/src/__tests__/Press-test.internal.js
<ide> describe('Event responder: Press', () => {
<ide> );
<ide> });
<ide>
<del> it('ignores browser emulated "mousedown" event', () => {
<add> it('ignores browser emulated events', () => {
<ide> ref.current.dispatchEvent(createPointerEvent('pointerdown'));
<add> ref.current.dispatchEvent(createPointerEvent('touchstart'));
<ide> ref.current.dispatchEvent(createPointerEvent('mousedown'));
<ide> expect(onPressStart).toHaveBeenCalledTimes(1);
<ide> });
<ide> describe('Event responder: Press', () => {
<ide> });
<ide>
<ide> it('is called after "pointerup" event', () => {
<del> ref.current.dispatchEvent(createPointerEvent('pointerdown'));
<ide> ref.current.dispatchEvent(
<del> createPointerEvent('pointerup', {pointerType: 'pen'}),
<add> createPointerEvent('pointerdown', {pointerType: 'pen'}),
<ide> );
<add> ref.current.dispatchEvent(createPointerEvent('pointerup'));
<ide> expect(onPressEnd).toHaveBeenCalledTimes(1);
<ide> expect(onPressEnd).toHaveBeenCalledWith(
<ide> expect.objectContaining({pointerType: 'pen', type: 'pressend'}),
<ide> );
<ide> });
<ide>
<del> it('ignores browser emulated "mouseup" event', () => {
<add> it('ignores browser emulated events', () => {
<add> ref.current.dispatchEvent(
<add> createPointerEvent('pointerdown', {pointerType: 'touch'}),
<add> );
<ide> ref.current.dispatchEvent(createPointerEvent('touchstart'));
<add> ref.current.dispatchEvent(createPointerEvent('pointerup'));
<ide> ref.current.dispatchEvent(createPointerEvent('touchend'));
<add> ref.current.dispatchEvent(createPointerEvent('mousedown'));
<ide> ref.current.dispatchEvent(createPointerEvent('mouseup'));
<ide> expect(onPressEnd).toHaveBeenCalledTimes(1);
<ide> expect(onPressEnd).toHaveBeenCalledWith(
<ide> describe('Event responder: Press', () => {
<ide> });
<ide>
<ide> it('is called after "pointerup" event', () => {
<del> ref.current.dispatchEvent(createPointerEvent('pointerdown'));
<ide> ref.current.dispatchEvent(
<del> createPointerEvent('pointerup', {pointerType: 'pen'}),
<add> createPointerEvent('pointerdown', {pointerType: 'pen'}),
<ide> );
<add> ref.current.dispatchEvent(createPointerEvent('pointerup'));
<ide> expect(onPress).toHaveBeenCalledTimes(1);
<ide> expect(onPress).toHaveBeenCalledWith(
<ide> expect.objectContaining({pointerType: 'pen', type: 'press'}),
<ide> describe('Event responder: Press', () => {
<ide> bottom: 500,
<ide> right: 500,
<ide> });
<del> ref.current.dispatchEvent(createPointerEvent('pointerdown'));
<add> ref.current.dispatchEvent(
<add> createPointerEvent('pointerdown', {pointerType: 'touch'}),
<add> );
<ide> ref.current.dispatchEvent(
<ide> createPointerEvent('pointermove', {
<ide> pointerType: 'touch',
<ide> describe('Event responder: Press', () => {
<ide> expect.objectContaining({pointerType: 'touch', type: 'pressmove'}),
<ide> );
<ide> });
<add>
<add> it('is not called if "pointermove" occurs during keyboard press', () => {
<add> const onPressMove = jest.fn();
<add> const ref = React.createRef();
<add> const element = (
<add> <Press onPressMove={onPressMove}>
<add> <div ref={ref} />
<add> </Press>
<add> );
<add> ReactDOM.render(element, container);
<add>
<add> ref.current.getBoundingClientRect = () => ({
<add> top: 50,
<add> left: 50,
<add> bottom: 500,
<add> right: 500,
<add> });
<add> ref.current.dispatchEvent(createKeyboardEvent('keydown', {key: 'Enter'}));
<add> ref.current.dispatchEvent(
<add> createPointerEvent('pointermove', {
<add> pointerType: 'mouse',
<add> pageX: 55,
<add> pageY: 55,
<add> }),
<add> );
<add> expect(onPressMove).not.toBeCalled();
<add> });
<add>
<add> it('ignores browser emulated events', () => {
<add> const onPressMove = jest.fn();
<add> const ref = React.createRef();
<add> const element = (
<add> <Press onPressMove={onPressMove}>
<add> <div ref={ref} />
<add> </Press>
<add> );
<add> ReactDOM.render(element, container);
<add>
<add> ref.current.getBoundingClientRect = () => ({
<add> top: 50,
<add> left: 50,
<add> bottom: 500,
<add> right: 500,
<add> });
<add> ref.current.dispatchEvent(
<add> createPointerEvent('pointerdown', {pointerType: 'touch'}),
<add> );
<add> ref.current.dispatchEvent(createPointerEvent('touchstart'));
<add> ref.current.dispatchEvent(
<add> createPointerEvent('pointermove', {
<add> pointerType: 'touch',
<add> pageX: 55,
<add> pageY: 55,
<add> }),
<add> );
<add> ref.current.dispatchEvent(createPointerEvent('touchmove'));
<add> ref.current.dispatchEvent(createPointerEvent('mousemove'));
<add> expect(onPressMove).toHaveBeenCalledTimes(1);
<add> });
<ide> });
<ide>
<ide> describe('press with movement', () => { | 2 |
Python | Python | pin version of tf and torch | 206b78d4850d3c6fe85a015654293fc4b803ed7b | <ide><path>setup.py
<ide>
<ide> # keras2onnx and onnxconverter-common version is specific through a commit until 1.7.0 lands on pypi
<ide> extras["tf"] = [
<del> "tensorflow",
<add> "tensorflow>=2.0",
<ide> "onnxconverter-common",
<ide> "keras2onnx"
<ide> # "onnxconverter-common @ git+git://github.com/microsoft/onnxconverter-common.git@f64ca15989b6dc95a1f3507ff6e4c395ba12dff5#egg=onnxconverter-common",
<ide> # "keras2onnx @ git+git://github.com/onnx/keras-onnx.git@cbdc75cb950b16db7f0a67be96a278f8d2953b48#egg=keras2onnx",
<ide> ]
<ide> extras["tf-cpu"] = [
<del> "tensorflow-cpu",
<add> "tensorflow-cpu>=2.0",
<ide> "onnxconverter-common",
<ide> "keras2onnx"
<ide> # "onnxconverter-common @ git+git://github.com/microsoft/onnxconverter-common.git@f64ca15989b6dc95a1f3507ff6e4c395ba12dff5#egg=onnxconverter-common",
<ide> # "keras2onnx @ git+git://github.com/onnx/keras-onnx.git@cbdc75cb950b16db7f0a67be96a278f8d2953b48#egg=keras2onnx",
<ide> ]
<del>extras["torch"] = ["torch"]
<add>extras["torch"] = ["torch>=1.0"]
<ide> extras["onnxruntime"] = ["onnxruntime>=1.4.0", "onnxruntime-tools>=1.4.2"]
<ide>
<ide> extras["serving"] = ["pydantic", "uvicorn", "fastapi", "starlette"]
<ide> setup(
<ide> name="transformers",
<ide> version="3.1.0",
<del> author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
<add> author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors",
<ide> author_email="thomas@huggingface.co",
<ide> description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch",
<ide> long_description=open("README.md", "r", encoding="utf-8").read(), | 1 |
Text | Text | use hook inside of function component | b760c8dd0a992cd4fceb15cc603de86f2457988f | <ide><path>docs/api-reference/next/image.md
<ide> The Ref must point to a DOM element or a React component that [forwards the Ref]
<ide> import Image from 'next/image'
<ide> import React from 'react'
<ide>
<del>const lazyRoot = React.useRef(null)
<add>const Example = () => {
<add> const lazyRoot = React.useRef(null)
<ide>
<del>const Example = () => (
<del> <div ref={lazyRoot} style={{ overflowX: 'scroll', width: '500px' }}>
<del> <Image lazyRoot={lazyRoot} src="/one.jpg" width="500" height="500" />
<del> <Image lazyRoot={lazyRoot} src="/two.jpg" width="500" height="500" />
<del> </div>
<del>)
<add> return (
<add> <div ref={lazyRoot} style={{ overflowX: 'scroll', width: '500px' }}>
<add> <Image lazyRoot={lazyRoot} src="/one.jpg" width="500" height="500" />
<add> <Image lazyRoot={lazyRoot} src="/two.jpg" width="500" height="500" />
<add> </div>
<add> )
<add>}
<ide> ```
<ide>
<ide> **Example pointing to a React component** | 1 |
Ruby | Ruby | fix svnadmin calls on linux | 69c1b4fcf984c6da97534a59bd917d3ddd14936c | <ide><path>Library/Homebrew/test/unpack_strategy/subversion_spec.rb
<ide> let(:path) { working_copy }
<ide>
<ide> before do
<del> safe_system "xcrun", "svnadmin", "create", repo
<add> svnadmin = ["svnadmin"]
<add> svnadmin = ["xcrun", *svnadmin] if OS.mac? && MacOS.version >= :catalina
<add> safe_system(*svnadmin, "create", repo)
<ide> safe_system "svn", "checkout", "file://#{repo}", working_copy
<ide>
<ide> FileUtils.touch working_copy/"test" | 1 |
PHP | PHP | improve test coverage | 4bd2251385ba49b0886830a8e1e27833ed34c1fb | <ide><path>tests/Support/SupportCollectionTest.php
<ide> public function testExcept()
<ide> $this->assertEquals(['first' => 'Taylor', 'email' => 'taylorotwell@gmail.com'], $data->except('last')->all());
<ide> }
<ide>
<add> public function testExceptSelf()
<add> {
<add> $data = new Collection(['first' => 'Taylor', 'last' => 'Otwell']);
<add> $this->assertEquals(['first' => 'Taylor', 'last' => 'Otwell'], $data->except($data)->all());
<add> }
<add>
<ide> public function testPluckWithArrayAndObjectValues()
<ide> {
<ide> $data = new Collection([(object) ['name' => 'taylor', 'email' => 'foo'], ['name' => 'dayle', 'email' => 'bar']]);
<ide> public function testTake()
<ide> $this->assertEquals(['taylor', 'dayle'], $data->all());
<ide> }
<ide>
<add> public function testPut()
<add> {
<add> $data = new Collection(['name' => 'taylor', 'email' => 'foo']);
<add> $data = $data->put('name', 'dayle');
<add> $this->assertEquals(['name' => 'dayle', 'email' => 'foo'], $data->all());
<add> }
<add>
<add> public function testPutWithNoKey()
<add> {
<add> $data = new Collection(['taylor', 'shawn']);
<add> $data = $data->put(null, 'dayle');
<add> $this->assertEquals(['taylor', 'shawn', 'dayle'], $data->all());
<add> }
<add>
<ide> public function testRandom()
<ide> {
<ide> $data = new Collection([1, 2, 3, 4, 5, 6]);
<ide><path>tests/Support/SupportMessageBagTest.php
<ide> public function testMessagesAreAdded()
<ide> $this->assertEquals(['bust'], $messages['boom']);
<ide> }
<ide>
<add> public function testKeys()
<add> {
<add> $container = new MessageBag;
<add> $container->setFormat(':message');
<add> $container->add('foo', 'bar');
<add> $container->add('foo', 'baz');
<add> $container->add('boom', 'bust');
<add> $this->assertEquals(['foo', 'boom'], $container->keys());
<add> }
<add>
<ide> public function testMessagesMayBeMerged()
<ide> {
<ide> $container = new MessageBag(['username' => ['foo']]);
<ide> public function testHasIndicatesExistence()
<ide> $this->assertFalse($container->has('bar'));
<ide> }
<ide>
<add> public function testHasWithKeyNull()
<add> {
<add> $container = new MessageBag;
<add> $container->setFormat(':message');
<add> $container->add('foo', 'bar');
<add> $this->assertTrue($container->has(null));
<add> }
<add>
<ide> public function testHasAnyIndicatesExistence()
<ide> {
<ide> $container = new MessageBag;
<ide> public function testFormatIsRespected()
<ide> $this->assertEquals('foo bar', $container->first('foo'));
<ide> }
<ide>
<add> public function testUnique()
<add> {
<add> $container = new MessageBag;
<add> $container->setFormat(':message');
<add> $container->add('foo', 'bar');
<add> $container->add('foo2', 'bar');
<add> $container->add('boom', 'baz');
<add> $this->assertEquals([0 => 'bar', 2 => 'baz'], $container->unique());
<add> }
<add>
<ide> public function testMessageBagReturnsCorrectArray()
<ide> {
<ide> $container = new MessageBag;
<ide> public function testCountReturnsCorrectValue()
<ide> public function testCountable()
<ide> {
<ide> $container = new MessageBag;
<del>
<ide> $container->add('foo', 'bar');
<ide> $container->add('boom', 'baz');
<ide>
<ide> public function testFirstFindsMessageForWildcardKey()
<ide> $container = new MessageBag;
<ide> $container->setFormat(':message');
<ide> $container->add('foo.bar', 'baz');
<del> $messages = $container->getMessages();
<ide> $this->assertEquals('baz', $container->first('foo.*'));
<ide> }
<add>
<add> public function testIsEmptyTrue()
<add> {
<add> $container = new MessageBag;
<add> $this->assertTrue($container->isEmpty());
<add> }
<add>
<add> public function testIsEmptyFalse()
<add> {
<add> $container = new MessageBag;
<add> $container->add('foo.bar', 'baz');
<add> $this->assertFalse($container->isEmpty());
<add> }
<add>
<add> public function testIsNotEmptyTrue()
<add> {
<add> $container = new MessageBag;
<add> $container->add('foo.bar', 'baz');
<add> $this->assertTrue($container->isNotEmpty());
<add> }
<add>
<add> public function testIsNotEmptyFalse()
<add> {
<add> $container = new MessageBag;
<add> $this->assertFalse($container->isNotEmpty());
<add> }
<add>
<add> public function testToString()
<add> {
<add> $container = new MessageBag;
<add> $container->add('foo.bar', 'baz');
<add> $this->assertEquals('{"foo.bar":["baz"]}', (string) $container);
<add> }
<add>
<add> public function testGetFormat()
<add> {
<add> $container = new MessageBag;
<add> $container->setFormat(':message');
<add> $this->assertEquals(':message', $container->getFormat());
<add> }
<ide> }
<ide><path>tests/Support/SupportViewErrorBagTest.php
<add><?php
<add>
<add>namespace Illuminate\Tests\Support;
<add>
<add>use PHPUnit\Framework\TestCase;
<add>use Illuminate\Support\MessageBag;
<add>use Illuminate\Support\ViewErrorBag;
<add>
<add>class SupportViewErrorBagTest extends TestCase
<add>{
<add> public function testHasBagTrue()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', new MessageBag(['msg1', 'msg2']));
<add> $this->assertTrue($viewErrorBag->hasBag());
<add> }
<add>
<add> public function testHasBagFalse()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $this->assertFalse($viewErrorBag->hasBag());
<add> }
<add>
<add> public function testGet()
<add> {
<add> $messageBag = new MessageBag();
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag = $viewErrorBag->put('default', $messageBag);
<add> $this->assertEquals($messageBag, $viewErrorBag->getBag('default'));
<add> }
<add>
<add> public function testGetBagWithNew()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $this->assertInstanceOf(MessageBag::class, $viewErrorBag->getBag('default'));
<add> }
<add>
<add> public function testGetBags()
<add> {
<add> $messageBag1 = new MessageBag();
<add> $messageBag2 = new MessageBag();
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', $messageBag1);
<add> $viewErrorBag->put('default2', $messageBag2);
<add> $this->assertEquals([
<add> 'default' => $messageBag1,
<add> 'default2' => $messageBag2,
<add> ], $viewErrorBag->getBags());
<add> }
<add>
<add> public function testPut()
<add> {
<add> $messageBag = new MessageBag();
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag = $viewErrorBag->put('default', $messageBag);
<add> $this->assertEquals(['default' => $messageBag], $viewErrorBag->getBags());
<add> }
<add>
<add> public function testAnyTrue()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', new MessageBag(['message']));
<add> $this->assertTrue($viewErrorBag->any());
<add> }
<add>
<add> public function testAnyFalse()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', new MessageBag());
<add> $this->assertFalse($viewErrorBag->any());
<add> }
<add>
<add> public function testAnyFalseWithEmptyErrorBag()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $this->assertFalse($viewErrorBag->any());
<add> }
<add>
<add> public function testCount()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', new MessageBag(['message', 'second']));
<add> $this->assertEquals(2, $viewErrorBag->count());
<add> }
<add>
<add> public function testCountWithNoMessagesInMessageBag()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', new MessageBag());
<add> $this->assertEquals(0, $viewErrorBag->count());
<add> }
<add>
<add> public function testCountWithNoMessageBags()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $this->assertEquals(0, $viewErrorBag->count());
<add> }
<add>
<add> public function testDynamicCallToDefaultMessageBag()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->put('default', new MessageBag(['message', 'second']));
<add> $this->assertEquals(['message', 'second'], $viewErrorBag->all());
<add> }
<add>
<add> public function testDynamicallyGetBag()
<add> {
<add> $messageBag = new MessageBag();
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag = $viewErrorBag->put('default', $messageBag);
<add> $this->assertEquals($messageBag, $viewErrorBag->default);
<add> }
<add>
<add> public function testDynamicallyPutBag()
<add> {
<add> $messageBag = new MessageBag();
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag->default2 = $messageBag;
<add> $this->assertEquals(['default2' => $messageBag], $viewErrorBag->getBags());
<add> }
<add>
<add> public function testToString()
<add> {
<add> $viewErrorBag = new ViewErrorBag();
<add> $viewErrorBag = $viewErrorBag->put('default', new MessageBag(['message' => 'content']));
<add> $this->assertEquals('{"message":["content"]}', (string) $viewErrorBag);
<add> }
<add>} | 3 |
PHP | PHP | add default address in mail config | ab453a7e82fcb348eae7bc453fddf321a4741db4 | <ide><path>config/mail.php
<ide> |
<ide> */
<ide>
<del> 'from' => ['address' => null, 'name' => null],
<add> 'from' => [
<add> 'address' => 'hello@example.com',
<add> 'name' => 'Example',
<add> ],
<ide>
<ide> /*
<ide> |-------------------------------------------------------------------------- | 1 |
Javascript | Javascript | remove unnecessary serialization | 9629caefb7e6423020d035b0b213a1d266917fc2 | <ide><path>src/workspace.js
<ide> module.exports = class Workspace extends Model {
<ide> left: this.paneContainers.left.serialize(),
<ide> right: this.paneContainers.right.serialize(),
<ide> bottom: this.paneContainers.bottom.serialize()
<del> },
<del> hasActiveTextEditor: this.hasActiveTextEditor
<add> }
<ide> }
<ide> }
<ide>
<ide> module.exports = class Workspace extends Model {
<ide> this.paneContainers.center.deserialize(state.paneContainer, deserializerManager)
<ide> }
<ide>
<del> if (state.hasActiveTextEditor != null) {
<del> this.hasActiveTextEditor = state.hasActiveTextEditor
<del> }
<add> this.hasActiveTextEditor = this.getActiveTextEditor() != null
<ide>
<ide> this.updateWindowTitle()
<ide> } | 1 |
Text | Text | add entry to url.parse() changes metadata | 04e45db1726e1d2c11ecbf8cbacc82c88672fc83 | <ide><path>doc/api/url.md
<ide> The formatting process operates as follows:
<ide> <!-- YAML
<ide> added: v0.1.25
<ide> changes:
<add> - version: v11.14.0
<add> pr-url: https://github.com/nodejs/node/pull/26941
<add> description: The `pathname` property on the returned URL object is now `/`
<add> when there is no path and the protocol scheme is `ws:` or
<add> `wss:`.
<ide> - version: v11.0.0
<ide> pr-url: https://github.com/nodejs/node/pull/22715
<ide> description: The Legacy URL API is deprecated. Use the WHATWG URL API. | 1 |
PHP | PHP | remove unnecessary temporary variable | 04b751f3f274d7ae454cb703482ca38911947f81 | <ide><path>src/Illuminate/Database/Eloquent/Concerns/HasEvents.php
<ide> public static function withoutEventDispatcher(callable $callback)
<ide> static::unsetEventDispatcher();
<ide>
<ide> try {
<del> $result = $callback();
<add> return $callback();
<ide> } finally {
<ide> if ($dispatcher) {
<ide> static::setEventDispatcher($dispatcher);
<ide> }
<ide> }
<del>
<del> return $result;
<ide> }
<ide> } | 1 |
Ruby | Ruby | clarify usage of object.acts_like? | 5e69b7e0001ad0da7392a8465515873c516d3ead | <ide><path>activesupport/lib/active_support/core_ext/object/acts_like.rb
<ide> # frozen_string_literal: true
<ide>
<ide> class Object
<del> # A duck-type assistant method. For example, Active Support extends Date
<del> # to define an <tt>acts_like_date?</tt> method, and extends Time to define
<del> # <tt>acts_like_time?</tt>. As a result, we can do <tt>x.acts_like?(:time)</tt> and
<del> # <tt>x.acts_like?(:date)</tt> to do duck-type-safe comparisons, since classes that
<del> # we want to act like Time simply need to define an <tt>acts_like_time?</tt> method.
<add> # Provides a way to check whether some class acts like some other class based on the existence of
<add> # an appropriately-named marker method.
<add> #
<add> # A class that provides the same interface as <tt>SomeClass</tt> may define a marker method named
<add> # <tt>acts_like_some_class?</tt> to signal its compatibility to callers of
<add> # <tt>acts_like?(:some_class)</tt>.
<add> #
<add> # For example, Active Support extends <tt>Date</tt> to define an <tt>acts_like_date?</tt> method,
<add> # and extends <tt>Time</tt> to define <tt>acts_like_time?</tt>. As a result, developers can call
<add> # <tt>x.acts_like?(:time)</tt> and <tt>x.acts_like?(:date)</tt> to test duck-type compatibility,
<add> # and classes that are able to act like <tt>Time</tt> can also define an <tt>acts_like_time?</tt>
<add> # method to interoperate.
<add> #
<add> # Note that the marker method is only expected to exist. It isn't called, so its body or return
<add> # value are irrelevant.
<add> #
<add> # ==== Example: A class that provides the same interface as <tt>String</tt>
<add> #
<add> # This class may define:
<add> #
<add> # class Stringish
<add> # def acts_like_string?
<add> # end
<add> # end
<add> #
<add> # Then client code can query for duck-type-safeness this way:
<add> #
<add> # Stringish.new.acts_like?(:string) # => true
<add> #
<ide> def acts_like?(duck)
<ide> case duck
<ide> when :time | 1 |
Text | Text | update ticks.callback documentation | a4cc21f9a919ab9f80fc133a309820b6f6c4f599 | <ide><path>docs/axes/labelling.md
<ide> Namespace: `options.scales[scaleId].title`, it defines options for the scale tit
<ide>
<ide> ## Creating Custom Tick Formats
<ide>
<del>It is also common to want to change the tick marks to include information about the data type. For example, adding a dollar sign ('$'). To do this, you need to override the `ticks.callback` method in the axis configuration.
<del>In the following example, every label of the Y-axis would be displayed with a dollar sign at the front.
<add>It is also common to want to change the tick marks to include information about the data type. For example, adding a dollar sign ('$').
<add>To do this, you need to override the `ticks.callback` method in the axis configuration.
<add>
<add>The method receiver 3 arguments:
<add>
<add>* `value` - the tick value in the **internal data format** of the associated scale.
<add>* `index` - the tick index in the ticks array.
<add>* `ticks` - the array containing all of the [tick objects](../api/interfaces/tick).
<add>
<add>The call to the method is scoped to the scale. `this` inside the method is the scale object.
<ide>
<ide> If the callback returns `null` or `undefined` the associated grid line will be hidden.
<ide>
<add>:::tip
<add>The [category axis](../axes/cartesian/category), which is the default x-axis for line and bar charts, uses the `index` as internal data format. For accessing the label, use `this.getLabelForValue(value)`. [API: getLabelForValue](../api/classes/scale.html#getlabelforvalue)
<add>:::
<add>
<add>In the following example, every label of the Y-axis would be displayed with a dollar sign at the front.
<add>
<ide> ```javascript
<ide> var chart = new Chart(ctx, {
<ide> type: 'line',
<ide> var chart = new Chart(ctx, {
<ide> });
<ide> ```
<ide>
<del>The third parameter passed to the callback function is an array of labels, but in the time scale, it is an array of `{label: string, major: boolean}` objects.
<add>Related samples:
<add>
<add>* [Tick configuration sample](../samples/scale-options/ticks) | 1 |
PHP | PHP | avoid fatal error by throwing exception | fe475a1a304857e6230b4a6bf96d437a601e81b9 | <ide><path>src/Utility/ObjectRegistry.php
<ide> public function load($objectName, $config = []) {
<ide> $objectName = $config['className'];
<ide> }
<ide> $className = $this->_resolveClassName($objectName);
<del> if (!$className) {
<add> if (!$className || (is_string($className) && !class_exists($className))) {
<ide> list($plugin, $objectName) = pluginSplit($objectName);
<ide> $this->_throwMissingClassError($objectName, $plugin);
<ide> }
<ide><path>tests/TestCase/Datasource/ConnectionManagerTest.php
<ide> public function testConfigVariants($settings) {
<ide> */
<ide> public function testConfigInvalidOptions() {
<ide> ConnectionManager::config('test_variant', [
<del> 'className' => 'HerpDerp'
<add> 'className' => 'Herp\Derp'
<ide> ]);
<ide> ConnectionManager::get('test_variant');
<ide> } | 2 |
PHP | PHP | add model option to command | 255b8fc30036c3fd05544d4d6bf9b57e3fd64592 | <ide><path>src/Illuminate/Foundation/Console/ObserverMakeCommand.php
<ide> namespace Illuminate\Foundation\Console;
<ide>
<ide> use Illuminate\Console\GeneratorCommand;
<add>use Illuminate\Support\Str;
<add>use Symfony\Component\Console\Input\InputOption;
<ide>
<ide> class ObserverMakeCommand extends GeneratorCommand
<ide> {
<ide> class ObserverMakeCommand extends GeneratorCommand
<ide> */
<ide> protected $type = 'Observer';
<ide>
<add> /**
<add> * Build the class with the given name.
<add> *
<add> * @param string $name
<add> * @return string
<add> */
<add> protected function buildClass($name)
<add> {
<add> $stub = parent::buildClass($name);
<add>
<add> $model = $this->option('model');
<add>
<add> return $model ? $this->replaceModel($stub, $model) : $stub;
<add> }
<add>
<ide> /**
<ide> * Get the stub file for the generator.
<ide> *
<ide> * @return string
<ide> */
<ide> protected function getStub()
<ide> {
<del> return __DIR__.'/stubs/observer.stub';
<add> return $this->option('model')
<add> ? __DIR__.'/stubs/observer.stub'
<add> : __DIR__.'/stubs/observer.plain.stub';
<add> }
<add>
<add> /**
<add> * Replace the model for the given stub.
<add> *
<add> * @param string $stub
<add> * @param string $model
<add> * @return string
<add> */
<add> protected function replaceModel($stub, $model)
<add> {
<add> $model = str_replace('/', '\\', $model);
<add>
<add> $namespaceModel = $this->laravel->getNamespace().$model;
<add>
<add> if (Str::startsWith($model, '\\')) {
<add> $stub = str_replace('NamespacedDummyModel', trim($model, '\\'), $stub);
<add> } else {
<add> $stub = str_replace('NamespacedDummyModel', $namespaceModel, $stub);
<add> }
<add>
<add> $stub = str_replace(
<add> "use {$namespaceModel};\nuse {$namespaceModel};", "use {$namespaceModel};", $stub
<add> );
<add>
<add> $model = class_basename(trim($model, '\\'));
<add>
<add> $dummyModel = $model;
<add>
<add> $stub = str_replace('DocDummyModel', Str::snake($dummyModel, ' '), $stub);
<add>
<add> $stub = str_replace('DummyModel', $model, $stub);
<add>
<add> return str_replace('dummyModel', Str::camel($dummyModel), $stub);
<ide> }
<ide>
<ide> /**
<ide> protected function getDefaultNamespace($rootNamespace)
<ide> {
<ide> return $rootNamespace.'\Observers';
<ide> }
<add>
<add> /**
<add> * Get the console command arguments.
<add> *
<add> * @return array
<add> */
<add> protected function getOptions()
<add> {
<add> return [
<add> ['model', 'm', InputOption::VALUE_OPTIONAL, 'The model that the observer applies to.'],
<add> ];
<add> }
<ide> } | 1 |
PHP | PHP | modify auth and secure methods with new exceptions | e1ca3348a8a947478cd4ff7e97721f7a6529558d | <ide><path>src/Controller/Component/SecurityComponent.php
<ide>
<ide> use Cake\Controller\Component;
<ide> use Cake\Controller\Controller;
<add>use Cake\Controller\Exception\AuthSecurityException;
<ide> use Cake\Controller\Exception\SecurityException;
<ide> use Cake\Event\Event;
<ide> use Cake\Network\Exception\BadRequestException;
<ide> public function startup(Event $event)
<ide> $controller = $event->subject();
<ide> $this->session = $this->request->session();
<ide> $this->_action = $this->request->params['action'];
<del> $this->_secureRequired($controller);
<del> $this->_authRequired($controller);
<add> try {
<add> $this->_secureRequired($controller);
<add> } catch (SecurityException $se) {
<add> $this->blackHole($controller, $se->getType(), $se);
<add> }
<add> try {
<add> $this->_authRequired($controller);
<add> } catch (AuthSecurityException $ase) {
<add> $this->blackHole($controller, $ase->getType(), $ase);
<add> }
<add>
<ide>
<ide> $hasData = !empty($this->request->data);
<ide> $isNotRequestAction = (
<ide> public function startup(Event $event)
<ide> if ($this->_config['validatePost']) {
<ide> try {
<ide> $this->_validatePost($controller);
<del> } catch (SecurityException $ex) {
<del> return $this->blackHole($controller, 'auth', $ex);
<add> } catch (SecurityException $se) {
<add> return $this->blackHole($controller, $se->getType(), $ex);
<ide> }
<ide> }
<ide> }
<ide> protected function _secureRequired(Controller $controller)
<ide>
<ide> if (in_array($this->_action, $requireSecure) || $requireSecure === ['*']) {
<ide> if (!$this->request->is('ssl')) {
<del> if (!$this->blackHole($controller, 'secure')) {
<del> return null;
<del> }
<add> throw new SecurityException(
<add> 'Request is not SSL and the action is required to be secure'
<add> );
<ide> }
<ide> }
<ide> }
<ide> protected function _authRequired(Controller $controller)
<ide>
<ide> if (in_array($this->request->params['action'], $requireAuth) || $requireAuth == ['*']) {
<ide> if (!isset($controller->request->data['_Token'])) {
<del> if (!$this->blackHole($controller, 'auth')) {
<del> return false;
<del> }
<add> throw new AuthSecurityException(sprintf('%s was not found in request data.', '_Token'));
<ide> }
<ide>
<ide> if ($this->session->check('_Token')) {
<ide> $tData = $this->session->read('_Token');
<ide>
<ide> if (!empty($tData['allowedControllers']) &&
<del> !in_array($this->request->params['controller'], $tData['allowedControllers']) ||
<del> !empty($tData['allowedActions']) &&
<add> !in_array($this->request->params['controller'], $tData['allowedControllers'])) {
<add> throw new AuthSecurityException(
<add> sprintf('Controller %s was not found in allowed controllers: %s.',
<add> $this->request->params['controller'],
<add> implode(', ', (array)$tData['allowedControllers'])
<add> )
<add> );
<add> }
<add> if (!empty($tData['allowedActions']) &&
<ide> !in_array($this->request->params['action'], $tData['allowedActions'])
<ide> ) {
<del> if (!$this->blackHole($controller, 'auth')) {
<del> return false;
<del> }
<add> throw new AuthSecurityException(
<add> sprintf('Controller %s was not found in allowed controllers: %s.',
<add> $this->request->params['action'],
<add> implode(', ', (array)$tData['allowedActions'])
<add> )
<add> );
<ide> }
<ide> } else {
<del> if (!$this->blackHole($controller, 'auth')) {
<del> return false;
<del> }
<add> throw new AuthSecurityException(sprintf('%s was not found in session.', '_Token'));
<ide> }
<ide> }
<ide> }
<ide> protected function _validatePost(Controller $controller)
<ide>
<ide> $message = '%s was not found in request data.';
<ide> if (!isset($check['_Token'])) {
<del> throw new SecurityException(sprintf($message, '_Token'));
<add> throw new AuthSecurityException(sprintf($message, '_Token'));
<ide> }
<ide> if (!isset($check['_Token']['fields'])) {
<del> throw new SecurityException(sprintf($message, '_Token.fields'));
<add> throw new AuthSecurityException(sprintf($message, '_Token.fields'));
<ide> }
<ide> if (!isset($check['_Token']['unlocked'])) {
<del> throw new SecurityException(sprintf($message, '_Token.unlocked'));
<add> throw new AuthSecurityException(sprintf($message, '_Token.unlocked'));
<ide> }
<ide>
<ide> $locked = '';
<ide> protected function _validatePost(Controller $controller)
<ide>
<ide> $check = Security::hash(implode('', $hashParts), 'sha1');
<ide> if ($token !== $check) {
<del> throw new SecurityException(sprintf(
<add> throw new AuthSecurityException(sprintf(
<ide> 'Security hash check was not valid for url: "%s", fields: "%s", secured fields: "%s", unlocked fields: "%s"',
<ide> $controller->request->here(),
<ide> implode(', ', (array)$fieldKeys),
<ide><path>src/Controller/Exception/SecurityException.php
<ide> */
<ide> class SecurityException extends BadRequestException
<ide> {
<add> /**
<add> * Security Exception type
<add> * @var string
<add> */
<add> protected $_type = 'secure';
<add>
<add> /**
<add> * Getter for type
<add> * @return string
<add> */
<add> public function getType() {
<add> return $this->_type;
<add> }
<ide> } | 2 |
PHP | PHP | avoid possible typeerror exception | 2c40d89dd501de2bae4cfce8b065c4f916cd8d7f | <ide><path>src/Cache/Engine/FileEngine.php
<ide> public function delete($key): bool
<ide> /** @psalm-suppress PossiblyNullReference */
<ide> $path = $this->_File->getRealPath();
<ide> $this->_File = null;
<add>
<add> if ($path === false) {
<add> return false;
<add> }
<ide>
<ide> // phpcs:disable
<ide> return @unlink($path); | 1 |
Javascript | Javascript | use fixtures.readkey in https-timeout-server | cfea677a5ee82db7c031adf0cdd828e56a5ccd0b | <ide><path>test/parallel/test-https-timeout-server.js
<ide> const common = require('../common');
<ide> if (!common.hasCrypto)
<ide> common.skip('missing crypto');
<ide>
<add>const fixtures = require('../common/fixtures');
<add>
<ide> const assert = require('assert');
<ide> const https = require('https');
<ide>
<ide> const net = require('net');
<del>const fs = require('fs');
<ide>
<ide> const options = {
<del> key: fs.readFileSync(`${common.fixturesDir}/keys/agent1-key.pem`),
<del> cert: fs.readFileSync(`${common.fixturesDir}/keys/agent1-cert.pem`),
<add> key: fixtures.readKey('agent1-key.pem'),
<add> cert: fixtures.readKey('agent1-cert.pem'),
<ide> handshakeTimeout: 50
<ide> };
<ide> | 1 |
Java | Java | add equals/hashcode to responseentity | a49851d5ebcb5d3c65e324f279f3a8c33f4db16f | <ide><path>spring-web/src/main/java/org/springframework/http/HttpEntity.java
<ide> /*
<del> * Copyright 2002-2011 the original author or authors.
<add> * Copyright 2002-2012 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> package org.springframework.http;
<ide>
<ide> import org.springframework.util.MultiValueMap;
<add>import org.springframework.util.ObjectUtils;
<ide>
<ide> /**
<ide> * Represents an HTTP request or response entity, consisting of headers and body.
<ide> public boolean hasBody() {
<ide> return (this.body != null);
<ide> }
<ide>
<add> @Override
<add> public boolean equals(Object other) {
<add> if (this == other) {
<add> return true;
<add> }
<add> if (!(other instanceof HttpEntity)) {
<add> return false;
<add> }
<add> HttpEntity<?> otherEntity = (HttpEntity<?>) other;
<add> return (ObjectUtils.nullSafeEquals(this.headers, otherEntity.headers) &&
<add> ObjectUtils.nullSafeEquals(this.body, otherEntity.body));
<add> }
<add>
<add> @Override
<add> public int hashCode() {
<add> return ObjectUtils.nullSafeHashCode(this.headers) * 29 + ObjectUtils.nullSafeHashCode(this.body);
<add> }
<add>
<ide> @Override
<ide> public String toString() {
<ide> StringBuilder builder = new StringBuilder("<");
<ide> public String toString() {
<ide> builder.append('>');
<ide> return builder.toString();
<ide> }
<add>
<ide> }
<ide><path>spring-web/src/main/java/org/springframework/http/ResponseEntity.java
<ide> /*
<del> * Copyright 2002-2011 the original author or authors.
<add> * Copyright 2002-2012 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> package org.springframework.http;
<ide>
<ide> import org.springframework.util.MultiValueMap;
<add>import org.springframework.util.ObjectUtils;
<ide>
<ide> /**
<ide> * Extension of {@link HttpEntity} that adds a {@link HttpStatus} status code.
<ide> public HttpStatus getStatusCode() {
<ide> return statusCode;
<ide> }
<ide>
<add> @Override
<add> public boolean equals(Object other) {
<add> if (this == other) {
<add> return true;
<add> }
<add> if (!(other instanceof ResponseEntity)) {
<add> return false;
<add> }
<add> ResponseEntity<?> otherEntity = (ResponseEntity<?>) other;
<add> return (ObjectUtils.nullSafeEquals(this.statusCode, otherEntity.statusCode) && super.equals(other));
<add> }
<add>
<add> @Override
<add> public int hashCode() {
<add> return super.hashCode() * 29 + ObjectUtils.nullSafeHashCode(this.statusCode);
<add> }
<add>
<ide> @Override
<ide> public String toString() {
<ide> StringBuilder builder = new StringBuilder("<");
<ide><path>spring-web/src/test/java/org/springframework/http/HttpEntityTests.java
<ide> /*
<del> * Copyright 2002-2010 the original author or authors.
<add> * Copyright 2002-2012 the original author or authors.
<ide> *
<ide> * Licensed under the Apache License, Version 2.0 (the "License");
<ide> * you may not use this file except in compliance with the License.
<ide> public void noHeaders() {
<ide> assertSame(body, entity.getBody());
<ide> assertTrue(entity.getHeaders().isEmpty());
<ide> }
<del>
<add>
<ide> @Test
<ide> public void httpHeaders() {
<ide> HttpHeaders headers = new HttpHeaders();
<ide> public void multiValueMap() {
<ide> assertEquals(MediaType.TEXT_PLAIN, entity.getHeaders().getContentType());
<ide> assertEquals("text/plain", entity.getHeaders().getFirst("Content-Type"));
<ide> }
<del>
<add>
<add> @Test
<add> public void testEquals() {
<add> MultiValueMap<String, String> map1 = new LinkedMultiValueMap<String, String>();
<add> map1.set("Content-Type", "text/plain");
<add>
<add> MultiValueMap<String, String> map2 = new LinkedMultiValueMap<String, String>();
<add> map2.set("Content-Type", "application/json");
<add>
<add> assertTrue(new HttpEntity<Object>().equals(new HttpEntity<Object>()));
<add> assertFalse(new HttpEntity<Object>(map1).equals(new HttpEntity<Object>()));
<add> assertFalse(new HttpEntity<Object>().equals(new HttpEntity<Object>(map2)));
<add>
<add> assertTrue(new HttpEntity<Object>(map1).equals(new HttpEntity<Object>(map1)));
<add> assertFalse(new HttpEntity<Object>(map1).equals(new HttpEntity<Object>(map2)));
<add>
<add> assertTrue(new HttpEntity<String>(null, null).equals(new HttpEntity<String>(null, null)));
<add> assertFalse(new HttpEntity<String>("foo", null).equals(new HttpEntity<String>(null, null)));
<add> assertFalse(new HttpEntity<String>(null, null).equals(new HttpEntity<String>("bar", null)));
<add>
<add> assertTrue(new HttpEntity<String>("foo", map1).equals(new HttpEntity<String>("foo", map1)));
<add> assertFalse(new HttpEntity<String>("foo", map1).equals(new HttpEntity<String>("bar", map1)));
<add> }
<add>
<ide> @Test
<ide> public void responseEntity() {
<ide> HttpHeaders headers = new HttpHeaders(); | 3 |
Go | Go | make testing helpers as such… | cb8db44395df70fa3044d2a9683d6d24438cfa74 | <ide><path>integration/internal/requirement/requirement.go
<ide> import (
<ide> // HasHubConnectivity checks to see if https://hub.docker.com is
<ide> // accessible from the present environment
<ide> func HasHubConnectivity(t *testing.T) bool {
<add> t.Helper()
<ide> // Set a timeout on the GET at 15s
<ide> var timeout = 15 * time.Second
<ide> var url = "https://hub.docker.com"
<ide><path>integration/internal/swarm/service.go
<ide> func ContainerPoll(config *poll.Settings) {
<ide>
<ide> // NewSwarm creates a swarm daemon for testing
<ide> func NewSwarm(t *testing.T, testEnv *environment.Execution, ops ...func(*daemon.Daemon)) *daemon.Daemon {
<add> t.Helper()
<ide> skip.IfCondition(t, testEnv.IsRemoteDaemon())
<ide> if testEnv.DaemonInfo.ExperimentalBuild {
<ide> ops = append(ops, daemon.WithExperimental)
<ide> type ServiceSpecOpt func(*swarmtypes.ServiceSpec)
<ide>
<ide> // CreateService creates a service on the passed in swarm daemon.
<ide> func CreateService(t *testing.T, d *daemon.Daemon, opts ...ServiceSpecOpt) string {
<add> t.Helper()
<ide> spec := defaultServiceSpec()
<ide> for _, o := range opts {
<ide> o(&spec)
<ide> func ServiceWithName(name string) ServiceSpecOpt {
<ide>
<ide> // GetRunningTasks gets the list of running tasks for a service
<ide> func GetRunningTasks(t *testing.T, d *daemon.Daemon, serviceID string) []swarmtypes.Task {
<add> t.Helper()
<ide> client := d.NewClientT(t)
<ide> defer client.Close()
<ide>
<ide> func GetRunningTasks(t *testing.T, d *daemon.Daemon, serviceID string) []swarmty
<ide>
<ide> // ExecTask runs the passed in exec config on the given task
<ide> func ExecTask(t *testing.T, d *daemon.Daemon, task swarmtypes.Task, config types.ExecConfig) types.HijackedResponse {
<add> t.Helper()
<ide> client := d.NewClientT(t)
<ide> defer client.Close()
<ide>
<ide><path>internal/test/daemon/config.go
<ide> import (
<ide>
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/swarm"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> type ConfigConstructor func(*swarm.Config)
<ide>
<ide> // CreateConfig creates a config given the specified spec
<ide> func (d *Daemon) CreateConfig(t assert.TestingT, configSpec swarm.ConfigSpec) string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) CreateConfig(t assert.TestingT, configSpec swarm.ConfigSpec) st
<ide>
<ide> // ListConfigs returns the list of the current swarm configs
<ide> func (d *Daemon) ListConfigs(t assert.TestingT) []swarm.Config {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) ListConfigs(t assert.TestingT) []swarm.Config {
<ide>
<ide> // GetConfig returns a swarm config identified by the specified id
<ide> func (d *Daemon) GetConfig(t assert.TestingT, id string) *swarm.Config {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) GetConfig(t assert.TestingT, id string) *swarm.Config {
<ide>
<ide> // DeleteConfig removes the swarm config identified by the specified id
<ide> func (d *Daemon) DeleteConfig(t assert.TestingT, id string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) DeleteConfig(t assert.TestingT, id string) {
<ide> // UpdateConfig updates the swarm config identified by the specified id
<ide> // Currently, only label update is supported.
<ide> func (d *Daemon) UpdateConfig(t assert.TestingT, id string, f ...ConfigConstructor) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide><path>internal/test/daemon/container.go
<ide> import (
<ide> "context"
<ide>
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> // ActiveContainers returns the list of ids of the currently running containers
<ide> func (d *Daemon) ActiveContainers(t assert.TestingT) []string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) ActiveContainers(t assert.TestingT) []string {
<ide>
<ide> // FindContainerIP returns the ip of the specified container
<ide> func (d *Daemon) FindContainerIP(t assert.TestingT, id string) string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide><path>internal/test/daemon/daemon.go
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/events"
<ide> "github.com/docker/docker/client"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/docker/docker/internal/test/request"
<ide> "github.com/docker/docker/opts"
<ide> "github.com/docker/docker/pkg/ioutils"
<ide> type Daemon struct {
<ide> // This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
<ide> // The daemon will not automatically start.
<ide> func New(t testingT, ops ...func(*Daemon)) *Daemon {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
<ide> if dest == "" {
<ide> dest = os.Getenv("DEST")
<ide> func (d *Daemon) NewClient() (*client.Client, error) {
<ide> // NewClientT creates new client based on daemon's socket path
<ide> // FIXME(vdemeester): replace NewClient with NewClientT
<ide> func (d *Daemon) NewClientT(t assert.TestingT) *client.Client {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> c, err := client.NewClientWithOpts(
<ide> client.FromEnv,
<ide> client.WithHost(d.Sock()))
<ide> func (d *Daemon) NewClientT(t assert.TestingT) *client.Client {
<ide>
<ide> // Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files
<ide> func (d *Daemon) Cleanup(t testingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> // Cleanup swarmkit wal files if present
<ide> cleanupRaftDir(t, d.Root)
<ide> cleanupNetworkNamespace(t, d.execRoot)
<ide> }
<ide>
<ide> // Start starts the daemon and return once it is ready to receive requests.
<ide> func (d *Daemon) Start(t testingT, args ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> if err := d.StartWithError(args...); err != nil {
<ide> t.Fatalf("Error starting daemon with arguments: %v", args)
<ide> }
<ide> func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
<ide> // StartWithBusybox will first start the daemon with Daemon.Start()
<ide> // then save the busybox image from the main daemon and load it into this Daemon instance.
<ide> func (d *Daemon) StartWithBusybox(t testingT, arg ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> d.Start(t, arg...)
<ide> d.LoadBusybox(t)
<ide> }
<ide> func (d *Daemon) DumpStackAndQuit() {
<ide> // instantiate a new one with NewDaemon.
<ide> // If an error occurs while starting the daemon, the test will fail.
<ide> func (d *Daemon) Stop(t testingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> err := d.StopWithError()
<ide> if err != nil {
<ide> if err != errDaemonNotStarted {
<ide> out2:
<ide> // Restart will restart the daemon by first stopping it and the starting it.
<ide> // If an error occurs while starting the daemon, the test will fail.
<ide> func (d *Daemon) Restart(t testingT, args ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> d.Stop(t)
<ide> d.Start(t, args...)
<ide> }
<ide> func (d *Daemon) ReloadConfig() error {
<ide>
<ide> // LoadBusybox image into the daemon
<ide> func (d *Daemon) LoadBusybox(t assert.TestingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> clientHost, err := client.NewEnvClient()
<ide> assert.NilError(t, err, "failed to create client")
<ide> defer clientHost.Close()
<ide> func (d *Daemon) queryRootDir() (string, error) {
<ide>
<ide> // Info returns the info struct for this daemon
<ide> func (d *Daemon) Info(t assert.TestingT) types.Info {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> apiclient, err := d.NewClient()
<ide> assert.NilError(t, err)
<ide> info, err := apiclient.Info(context.Background())
<ide> func (d *Daemon) Info(t assert.TestingT) types.Info {
<ide> }
<ide>
<ide> func cleanupRaftDir(t testingT, rootPath string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> walDir := filepath.Join(rootPath, "swarm/raft/wal")
<ide> if err := os.RemoveAll(walDir); err != nil {
<ide> t.Logf("error removing %v: %v", walDir, err)
<ide><path>internal/test/daemon/daemon_unix.go
<ide> import (
<ide> "os"
<ide> "path/filepath"
<ide>
<add> "github.com/docker/docker/internal/test"
<ide> "golang.org/x/sys/unix"
<ide> )
<ide>
<ide> func cleanupNetworkNamespace(t testingT, execRoot string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> // Cleanup network namespaces in the exec root of this
<ide> // daemon because this exec root is specific to this
<ide> // daemon instance and has no chance of getting
<ide><path>internal/test/daemon/node.go
<ide> import (
<ide>
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/swarm"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> type NodeConstructor func(*swarm.Node)
<ide>
<ide> // GetNode returns a swarm node identified by the specified id
<ide> func (d *Daemon) GetNode(t assert.TestingT, id string) *swarm.Node {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) GetNode(t assert.TestingT, id string) *swarm.Node {
<ide>
<ide> // RemoveNode removes the specified node
<ide> func (d *Daemon) RemoveNode(t assert.TestingT, id string, force bool) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) RemoveNode(t assert.TestingT, id string, force bool) {
<ide>
<ide> // UpdateNode updates a swarm node with the specified node constructor
<ide> func (d *Daemon) UpdateNode(t assert.TestingT, id string, f ...NodeConstructor) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) UpdateNode(t assert.TestingT, id string, f ...NodeConstructor)
<ide>
<ide> // ListNodes returns the list of the current swarm nodes
<ide> func (d *Daemon) ListNodes(t assert.TestingT) []swarm.Node {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide><path>internal/test/daemon/secret.go
<ide> import (
<ide>
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/swarm"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> type SecretConstructor func(*swarm.Secret)
<ide>
<ide> // CreateSecret creates a secret given the specified spec
<ide> func (d *Daemon) CreateSecret(t assert.TestingT, secretSpec swarm.SecretSpec) string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) CreateSecret(t assert.TestingT, secretSpec swarm.SecretSpec) st
<ide>
<ide> // ListSecrets returns the list of the current swarm secrets
<ide> func (d *Daemon) ListSecrets(t assert.TestingT) []swarm.Secret {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) ListSecrets(t assert.TestingT) []swarm.Secret {
<ide>
<ide> // GetSecret returns a swarm secret identified by the specified id
<ide> func (d *Daemon) GetSecret(t assert.TestingT, id string) *swarm.Secret {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) GetSecret(t assert.TestingT, id string) *swarm.Secret {
<ide>
<ide> // DeleteSecret removes the swarm secret identified by the specified id
<ide> func (d *Daemon) DeleteSecret(t assert.TestingT, id string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) DeleteSecret(t assert.TestingT, id string) {
<ide> // UpdateSecret updates the swarm secret identified by the specified id
<ide> // Currently, only label update is supported.
<ide> func (d *Daemon) UpdateSecret(t assert.TestingT, id string, f ...SecretConstructor) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide><path>internal/test/daemon/service.go
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/filters"
<ide> "github.com/docker/docker/api/types/swarm"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> // ServiceConstructor defines a swarm service constructor function
<ide> type ServiceConstructor func(*swarm.Service)
<ide>
<ide> func (d *Daemon) createServiceWithOptions(t assert.TestingT, opts types.ServiceCreateOptions, f ...ServiceConstructor) string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> var service swarm.Service
<ide> for _, fn := range f {
<ide> fn(&service)
<ide> func (d *Daemon) createServiceWithOptions(t assert.TestingT, opts types.ServiceC
<ide>
<ide> // CreateService creates a swarm service given the specified service constructor
<ide> func (d *Daemon) CreateService(t assert.TestingT, f ...ServiceConstructor) string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> return d.createServiceWithOptions(t, types.ServiceCreateOptions{}, f...)
<ide> }
<ide>
<ide> // GetService returns the swarm service corresponding to the specified id
<ide> func (d *Daemon) GetService(t assert.TestingT, id string) *swarm.Service {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) GetService(t assert.TestingT, id string) *swarm.Service {
<ide>
<ide> // GetServiceTasks returns the swarm tasks for the specified service
<ide> func (d *Daemon) GetServiceTasks(t assert.TestingT, service string) []swarm.Task {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) GetServiceTasks(t assert.TestingT, service string) []swarm.Task
<ide>
<ide> // UpdateService updates a swarm service with the specified service constructor
<ide> func (d *Daemon) UpdateService(t assert.TestingT, service *swarm.Service, f ...ServiceConstructor) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) UpdateService(t assert.TestingT, service *swarm.Service, f ...S
<ide>
<ide> // RemoveService removes the specified service
<ide> func (d *Daemon) RemoveService(t assert.TestingT, id string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) RemoveService(t assert.TestingT, id string) {
<ide>
<ide> // ListServices returns the list of the current swarm services
<ide> func (d *Daemon) ListServices(t assert.TestingT) []swarm.Service {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) ListServices(t assert.TestingT) []swarm.Service {
<ide>
<ide> // GetTask returns the swarm task identified by the specified id
<ide> func (d *Daemon) GetTask(t assert.TestingT, id string) swarm.Task {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide><path>internal/test/daemon/swarm.go
<ide> import (
<ide> "fmt"
<ide>
<ide> "github.com/docker/docker/api/types/swarm"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> "github.com/pkg/errors"
<ide> )
<ide> const (
<ide>
<ide> // StartAndSwarmInit starts the daemon (with busybox) and init the swarm
<ide> func (d *Daemon) StartAndSwarmInit(t testingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> // avoid networking conflicts
<ide> args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"}
<ide> d.StartWithBusybox(t, args...)
<ide> func (d *Daemon) StartAndSwarmInit(t testingT) {
<ide>
<ide> // StartAndSwarmJoin starts the daemon (with busybox) and join the specified swarm as worker or manager
<ide> func (d *Daemon) StartAndSwarmJoin(t testingT, leader *Daemon, manager bool) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> // avoid networking conflicts
<ide> args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"}
<ide> d.StartWithBusybox(t, args...)
<ide> func (d *Daemon) NodeID() string {
<ide>
<ide> // SwarmInit initializes a new swarm cluster.
<ide> func (d *Daemon) SwarmInit(t assert.TestingT, req swarm.InitRequest) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> if req.ListenAddr == "" {
<ide> req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort)
<ide> }
<ide> func (d *Daemon) SwarmInit(t assert.TestingT, req swarm.InitRequest) {
<ide>
<ide> // SwarmJoin joins a daemon to an existing cluster.
<ide> func (d *Daemon) SwarmJoin(t assert.TestingT, req swarm.JoinRequest) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> if req.ListenAddr == "" {
<ide> req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort)
<ide> }
<ide> func (d *Daemon) SwarmLeave(force bool) error {
<ide>
<ide> // SwarmInfo returns the swarm information of the daemon
<ide> func (d *Daemon) SwarmInfo(t assert.TestingT) swarm.Info {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> info, err := cli.Info(context.Background())
<ide> assert.NilError(t, err, "get swarm info")
<ide> func (d *Daemon) SwarmUnlock(req swarm.UnlockRequest) error {
<ide>
<ide> // GetSwarm returns the current swarm object
<ide> func (d *Daemon) GetSwarm(t assert.TestingT) swarm.Swarm {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) GetSwarm(t assert.TestingT) swarm.Swarm {
<ide>
<ide> // UpdateSwarm updates the current swarm object with the specified spec constructors
<ide> func (d *Daemon) UpdateSwarm(t assert.TestingT, f ...SpecConstructor) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) UpdateSwarm(t assert.TestingT, f ...SpecConstructor) {
<ide>
<ide> // RotateTokens update the swarm to rotate tokens
<ide> func (d *Daemon) RotateTokens(t assert.TestingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide> func (d *Daemon) RotateTokens(t assert.TestingT) {
<ide>
<ide> // JoinTokens returns the current swarm join tokens
<ide> func (d *Daemon) JoinTokens(t assert.TestingT) swarm.JoinTokens {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> cli := d.NewClientT(t)
<ide> defer cli.Close()
<ide>
<ide><path>internal/test/environment/clean.go
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/filters"
<ide> "github.com/docker/docker/client"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> "golang.org/x/net/context"
<ide> )
<ide> type logT interface {
<ide> // and removing everything else. It's meant to run after any tests so that they don't
<ide> // depend on each others.
<ide> func (e *Execution) Clean(t testingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> client := e.APIClient()
<ide>
<ide> platform := e.OSType
<ide> func (e *Execution) Clean(t testingT) {
<ide> }
<ide>
<ide> func unpauseAllContainers(t assert.TestingT, client client.ContainerAPIClient) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> ctx := context.Background()
<ide> containers := getPausedContainers(ctx, t, client)
<ide> if len(containers) > 0 {
<ide> func unpauseAllContainers(t assert.TestingT, client client.ContainerAPIClient) {
<ide> }
<ide>
<ide> func getPausedContainers(ctx context.Context, t assert.TestingT, client client.ContainerAPIClient) []types.Container {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> filter := filters.NewArgs()
<ide> filter.Add("status", "paused")
<ide> containers, err := client.ContainerList(ctx, types.ContainerListOptions{
<ide> func getPausedContainers(ctx context.Context, t assert.TestingT, client client.C
<ide> var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`)
<ide>
<ide> func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient, protectedContainers map[string]struct{}) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> ctx := context.Background()
<ide> containers := getAllContainers(ctx, t, apiclient)
<ide> if len(containers) == 0 {
<ide> func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient,
<ide> }
<ide>
<ide> func getAllContainers(ctx context.Context, t assert.TestingT, client client.ContainerAPIClient) []types.Container {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> containers, err := client.ContainerList(ctx, types.ContainerListOptions{
<ide> Quiet: true,
<ide> All: true,
<ide> func getAllContainers(ctx context.Context, t assert.TestingT, client client.Cont
<ide> }
<ide>
<ide> func deleteAllImages(t testingT, apiclient client.ImageAPIClient, protectedImages map[string]struct{}) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{})
<ide> assert.Check(t, err, "failed to list images")
<ide>
<ide> func deleteAllImages(t testingT, apiclient client.ImageAPIClient, protectedImage
<ide> }
<ide>
<ide> func removeImage(ctx context.Context, t assert.TestingT, apiclient client.ImageAPIClient, ref string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> _, err := apiclient.ImageRemove(ctx, ref, types.ImageRemoveOptions{
<ide> Force: true,
<ide> })
<ide> func removeImage(ctx context.Context, t assert.TestingT, apiclient client.ImageA
<ide> }
<ide>
<ide> func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> volumes, err := c.VolumeList(context.Background(), filters.Args{})
<ide> assert.Check(t, err, "failed to list volumes")
<ide>
<ide> func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolu
<ide> }
<ide>
<ide> func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{})
<ide> assert.Check(t, err, "failed to list networks")
<ide>
<ide> func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatf
<ide> }
<ide>
<ide> func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient, protectedPlugins map[string]struct{}) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> plugins, err := c.PluginList(context.Background(), filters.Args{})
<ide> // Docker EE does not allow cluster-wide plugin management.
<ide> if client.IsErrNotImplemented(err) {
<ide><path>internal/test/environment/protect.go
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/filters"
<ide> dclient "github.com/docker/docker/client"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> func newProtectedElements() protectedElements {
<ide> // volumes, and, on Linux, plugins) from being cleaned up at the end of test
<ide> // runs
<ide> func ProtectAll(t testingT, testEnv *Execution) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> ProtectContainers(t, testEnv)
<ide> ProtectImages(t, testEnv)
<ide> ProtectNetworks(t, testEnv)
<ide> func ProtectAll(t testingT, testEnv *Execution) {
<ide> // ProtectContainer adds the specified container(s) to be protected in case of
<ide> // clean
<ide> func (e *Execution) ProtectContainer(t testingT, containers ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> for _, container := range containers {
<ide> e.protectedElements.containers[container] = struct{}{}
<ide> }
<ide> func (e *Execution) ProtectContainer(t testingT, containers ...string) {
<ide> // ProtectContainers protects existing containers from being cleaned up at the
<ide> // end of test runs
<ide> func ProtectContainers(t testingT, testEnv *Execution) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> containers := getExistingContainers(t, testEnv)
<ide> testEnv.ProtectContainer(t, containers...)
<ide> }
<ide>
<ide> func getExistingContainers(t assert.TestingT, testEnv *Execution) []string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> client := testEnv.APIClient()
<ide> containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{
<ide> All: true,
<ide> func getExistingContainers(t assert.TestingT, testEnv *Execution) []string {
<ide>
<ide> // ProtectImage adds the specified image(s) to be protected in case of clean
<ide> func (e *Execution) ProtectImage(t testingT, images ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> for _, image := range images {
<ide> e.protectedElements.images[image] = struct{}{}
<ide> }
<ide> func (e *Execution) ProtectImage(t testingT, images ...string) {
<ide> // ProtectImages protects existing images and on linux frozen images from being
<ide> // cleaned up at the end of test runs
<ide> func ProtectImages(t testingT, testEnv *Execution) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> images := getExistingImages(t, testEnv)
<ide>
<ide> if testEnv.OSType == "linux" {
<ide> func ProtectImages(t testingT, testEnv *Execution) {
<ide> }
<ide>
<ide> func getExistingImages(t assert.TestingT, testEnv *Execution) []string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> client := testEnv.APIClient()
<ide> filter := filters.NewArgs()
<ide> filter.Add("dangling", "false")
<ide> func tagsFromImageSummary(image types.ImageSummary) []string {
<ide> // ProtectNetwork adds the specified network(s) to be protected in case of
<ide> // clean
<ide> func (e *Execution) ProtectNetwork(t testingT, networks ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> for _, network := range networks {
<ide> e.protectedElements.networks[network] = struct{}{}
<ide> }
<ide> func (e *Execution) ProtectNetwork(t testingT, networks ...string) {
<ide> // ProtectNetworks protects existing networks from being cleaned up at the end
<ide> // of test runs
<ide> func ProtectNetworks(t testingT, testEnv *Execution) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> networks := getExistingNetworks(t, testEnv)
<ide> testEnv.ProtectNetwork(t, networks...)
<ide> }
<ide>
<ide> func getExistingNetworks(t assert.TestingT, testEnv *Execution) []string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> client := testEnv.APIClient()
<ide> networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{})
<ide> assert.NilError(t, err, "failed to list networks")
<ide> func getExistingNetworks(t assert.TestingT, testEnv *Execution) []string {
<ide>
<ide> // ProtectPlugin adds the specified plugin(s) to be protected in case of clean
<ide> func (e *Execution) ProtectPlugin(t testingT, plugins ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> for _, plugin := range plugins {
<ide> e.protectedElements.plugins[plugin] = struct{}{}
<ide> }
<ide> func (e *Execution) ProtectPlugin(t testingT, plugins ...string) {
<ide> // ProtectPlugins protects existing plugins from being cleaned up at the end of
<ide> // test runs
<ide> func ProtectPlugins(t testingT, testEnv *Execution) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> plugins := getExistingPlugins(t, testEnv)
<ide> testEnv.ProtectPlugin(t, plugins...)
<ide> }
<ide>
<ide> func getExistingPlugins(t assert.TestingT, testEnv *Execution) []string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> client := testEnv.APIClient()
<ide> pluginList, err := client.PluginList(context.Background(), filters.Args{})
<ide> // Docker EE does not allow cluster-wide plugin management.
<ide> func getExistingPlugins(t assert.TestingT, testEnv *Execution) []string {
<ide>
<ide> // ProtectVolume adds the specified volume(s) to be protected in case of clean
<ide> func (e *Execution) ProtectVolume(t testingT, volumes ...string) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> for _, volume := range volumes {
<ide> e.protectedElements.volumes[volume] = struct{}{}
<ide> }
<ide> func (e *Execution) ProtectVolume(t testingT, volumes ...string) {
<ide> // ProtectVolumes protects existing volumes from being cleaned up at the end of
<ide> // test runs
<ide> func ProtectVolumes(t testingT, testEnv *Execution) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> volumes := getExistingVolumes(t, testEnv)
<ide> testEnv.ProtectVolume(t, volumes...)
<ide> }
<ide>
<ide> func getExistingVolumes(t assert.TestingT, testEnv *Execution) []string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> client := testEnv.APIClient()
<ide> volumeList, err := client.VolumeList(context.Background(), filters.Args{})
<ide> assert.NilError(t, err, "failed to list volumes")
<ide><path>internal/test/fakecontext/context.go
<ide> import (
<ide> "os"
<ide> "path/filepath"
<ide>
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/docker/docker/pkg/archive"
<ide> )
<ide>
<ide> type testingT interface {
<ide>
<ide> // New creates a fake build context
<ide> func New(t testingT, dir string, modifiers ...func(*Fake) error) *Fake {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> fakeContext := &Fake{Dir: dir}
<ide> if dir == "" {
<ide> if err := newDir(fakeContext); err != nil {
<ide> func (f *Fake) Close() error {
<ide>
<ide> // AsTarReader returns a ReadCloser with the contents of Dir as a tar archive.
<ide> func (f *Fake) AsTarReader(t testingT) io.ReadCloser {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> reader, err := archive.TarWithOptions(f.Dir, &archive.TarOptions{})
<ide> if err != nil {
<ide> t.Fatalf("Failed to create tar from %s: %s", f.Dir, err)
<ide><path>internal/test/fakegit/fakegit.go
<ide> import (
<ide> "os/exec"
<ide> "path/filepath"
<ide>
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/docker/docker/internal/test/fakecontext"
<ide> "github.com/docker/docker/internal/test/fakestorage"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> func (g *FakeGit) Close() {
<ide>
<ide> // New create a fake git server that can be used for git related tests
<ide> func New(c testingT, name string, files map[string]string, enforceLocalServer bool) *FakeGit {
<add> if ht, ok := c.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> ctx := fakecontext.New(c, "", fakecontext.WithFiles(files))
<ide> defer ctx.Close()
<ide> curdir, err := os.Getwd()
<ide><path>internal/test/fakestorage/fixtures.go
<ide> import (
<ide> "sync"
<ide>
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/docker/docker/pkg/archive"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> )
<ide>
<ide> var ensureHTTPServerOnce sync.Once
<ide>
<ide> func ensureHTTPServerImage(t testingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> var doIt bool
<ide> ensureHTTPServerOnce.Do(func() {
<ide> doIt = true
<ide><path>internal/test/fakestorage/storage.go
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> containertypes "github.com/docker/docker/api/types/container"
<ide> "github.com/docker/docker/client"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/docker/docker/internal/test/environment"
<ide> "github.com/docker/docker/internal/test/fakecontext"
<ide> "github.com/docker/docker/internal/test/request"
<ide> func SetTestEnvironment(env *environment.Execution) {
<ide>
<ide> // New returns a static file server that will be use as build context.
<ide> func New(t testingT, dir string, modifiers ...func(*fakecontext.Fake) error) Fake {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> if testEnv == nil {
<ide> t.Fatal("fakstorage package requires SetTestEnvironment() to be called before use.")
<ide> }
<ide><path>internal/test/helper.go
<add>package test
<add>
<add>// HelperT is a subset of testing.T that implements the Helper function
<add>type HelperT interface {
<add> Helper()
<add>}
<ide><path>internal/test/registry/registry.go
<ide> import (
<ide> "path/filepath"
<ide> "time"
<ide>
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/gotestyourself/gotestyourself/assert"
<ide> "github.com/opencontainers/go-digest"
<ide> )
<ide> type Config struct {
<ide>
<ide> // NewV2 creates a v2 registry server
<ide> func NewV2(t testingT, ops ...func(*Config)) *V2 {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> c := &Config{
<ide> registryURL: DefaultURL,
<ide> }
<ide> http:
<ide>
<ide> // WaitReady waits for the registry to be ready to serve requests (or fail after a while)
<ide> func (r *V2) WaitReady(t testingT) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> var err error
<ide> for i := 0; i != 50; i++ {
<ide> if err = r.Ping(); err == nil {
<ide> func (r *V2) getBlobFilename(blobDigest digest.Digest) string {
<ide>
<ide> // ReadBlobContents read the file corresponding to the specified digest
<ide> func (r *V2) ReadBlobContents(t assert.TestingT, blobDigest digest.Digest) []byte {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> // Load the target manifest blob.
<ide> manifestBlob, err := ioutil.ReadFile(r.getBlobFilename(blobDigest))
<ide> assert.NilError(t, err, "unable to read blob")
<ide> func (r *V2) ReadBlobContents(t assert.TestingT, blobDigest digest.Digest) []byt
<ide>
<ide> // WriteBlobContents write the file corresponding to the specified digest with the given content
<ide> func (r *V2) WriteBlobContents(t assert.TestingT, blobDigest digest.Digest, data []byte) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> err := ioutil.WriteFile(r.getBlobFilename(blobDigest), data, os.FileMode(0644))
<ide> assert.NilError(t, err, "unable to write malicious data blob")
<ide> }
<ide>
<ide> // TempMoveBlobData moves the existing data file aside, so that we can replace it with a
<ide> // malicious blob of data for example.
<ide> func (r *V2) TempMoveBlobData(t testingT, blobDigest digest.Digest) (undo func()) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> tempFile, err := ioutil.TempFile("", "registry-temp-blob-")
<ide> assert.NilError(t, err, "unable to get temporary blob file")
<ide> tempFile.Close()
<ide><path>internal/test/registry/registry_mock.go
<ide> import (
<ide> "regexp"
<ide> "strings"
<ide> "sync"
<add>
<add> "github.com/docker/docker/internal/test"
<ide> )
<ide>
<ide> type handlerFunc func(w http.ResponseWriter, r *http.Request)
<ide> func (tr *Mock) RegisterHandler(path string, h handlerFunc) {
<ide>
<ide> // NewMock creates a registry mock
<ide> func NewMock(t testingT) (*Mock, error) {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> testReg := &Mock{handlers: make(map[string]handlerFunc)}
<ide>
<ide> ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
<ide><path>internal/test/request/request.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/docker/docker/client"
<add> "github.com/docker/docker/internal/test"
<ide> "github.com/docker/docker/internal/test/environment"
<ide> "github.com/docker/docker/opts"
<ide> "github.com/docker/docker/pkg/ioutils"
<ide> import (
<ide>
<ide> // NewAPIClient returns a docker API client configured from environment variables
<ide> func NewAPIClient(t assert.TestingT, ops ...func(*client.Client) error) client.APIClient {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> ops = append([]func(*client.Client) error{client.FromEnv}, ops...)
<ide> clt, err := client.NewClientWithOpts(ops...)
<ide> assert.NilError(t, err)
<ide> func NewAPIClient(t assert.TestingT, ops ...func(*client.Client) error) client.A
<ide>
<ide> // DaemonTime provides the current time on the daemon host
<ide> func DaemonTime(ctx context.Context, t assert.TestingT, client client.APIClient, testEnv *environment.Execution) time.Time {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> if testEnv.IsLocalDaemon() {
<ide> return time.Now()
<ide> }
<ide> func DaemonTime(ctx context.Context, t assert.TestingT, client client.APIClient,
<ide> // DaemonUnixTime returns the current time on the daemon host with nanoseconds precision.
<ide> // It return the time formatted how the client sends timestamps to the server.
<ide> func DaemonUnixTime(ctx context.Context, t assert.TestingT, client client.APIClient, testEnv *environment.Execution) string {
<add> if ht, ok := t.(test.HelperT); ok {
<add> ht.Helper()
<add> }
<ide> dt := DaemonTime(ctx, t, client, testEnv)
<ide> return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond()))
<ide> } | 20 |
Python | Python | update structured array docs to reflect | c43e0e5c0f2e8dc52cbc1eed71bf93aa281df3d7 | <ide><path>numpy/doc/structured_arrays.py
<ide> Introduction
<ide> ============
<ide>
<del>NumPy provides powerful capabilities to create arrays of structured datatype.
<del>These arrays permit one to manipulate the data by named fields. A simple
<del>example will show what is meant.: ::
<add>Structured arrays are ndarrays whose datatype is a composition of simpler
<add>datatypes organized as a sequence of named :term:`fields <field>`. For example,
<add>::
<ide>
<del> >>> x = np.array([(1,2.,'Hello'), (2,3.,"World")],
<del> ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
<add> >>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
<add> ... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
<ide> >>> x
<del> array([(1, 2.0, 'Hello'), (2, 3.0, 'World')],
<del> dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
<add> array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
<add> dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
<ide>
<del>Here we have created a one-dimensional array of length 2. Each element of
<del>this array is a structure that contains three items, a 32-bit integer, a 32-bit
<del>float, and a string of length 10 or less. If we index this array at the second
<del>position we get the second structure: ::
<add>Here ``x`` is a one-dimensional array of length two whose datatype is a
<add>structure with three fields: 1. A string of length 10 or less named 'name', 2.
<add>a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
<add>
<add>If you index ``x`` at position 1 you get a structure::
<ide>
<ide> >>> x[1]
<del> (2,3.,"World")
<add> ('Fido', 3, 27.0)
<ide>
<del>Conveniently, one can access any field of the array by indexing using the
<del>string that names that field. ::
<add>You can access and modify individual fields of a structured array by indexing
<add>with the field name::
<ide>
<del> >>> y = x['bar']
<del> >>> y
<del> array([ 2., 3.], dtype=float32)
<del> >>> y[:] = 2*y
<del> >>> y
<del> array([ 4., 6.], dtype=float32)
<add> >>> x['age']
<add> array([9, 3], dtype=int32)
<add> >>> x['age'] = 5
<ide> >>> x
<del> array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
<del> dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
<add> array([('Rex', 5, 81.0), ('Fido', 5, 27.0)],
<add> dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
<add>
<add>Structured arrays are designed for low-level manipulation of structured data,
<add>for example, for interpreting binary blobs. Structured datatypes are
<add>designed to mimic 'structs' in the C language, making them also useful for
<add>interfacing with C code. For these purposes, numpy supports specialized
<add>features such as subarrays and nested datatypes, and allows manual control over
<add>the memory layout of the structure.
<add>
<add>For simple manipulation of tabular data other pydata projects, such as pandas,
<add>xarray, or DataArray, provide higher-level interfaces that may be more
<add>suitable. These projects may also give better performance for tabular data
<add>analysis because the C-struct-like memory layout of structured arrays can lead
<add>to poor cache behavior.
<add>
<add>.. _defining-structured-types:
<add>
<add>Structured Datatypes
<add>====================
<add>
<add>To use structured arrays one first needs to define a structured datatype.
<add>
<add>A structured datatype can be thought of as a sequence of bytes of a certain
<add>length (the structure's :term:`itemsize`) which is interpreted as a collection
<add>of fields. Each field has a name, a datatype, and a byte offset within the
<add>structure. The datatype of a field may be any numpy datatype including other
<add>structured datatypes, and it may also be a :term:`sub-array` which behaves like
<add>an ndarray of a specified shape. The offsets of the fields are arbitrary, and
<add>fields may even overlap. These offsets are usually determined automatically by
<add>numpy, but can also be manually specified.
<add>
<add>Structured Datatype Creation
<add>----------------------------
<add>
<add>Structured datatypes may be created using the function :func:`numpy.dtype`.
<add>There are 4 alternative forms of specification which vary in flexibility and
<add>conciseness. These are further documented in the
<add>:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
<add>summary they are:
<add>
<add>1. A list of tuples, one tuple per field
<add>
<add> Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
<add> optional. ``fieldname`` is a string (or tuple if titles are used, see
<add> :ref:`Field Titles <titles>` below), ``datatype`` may be any object
<add> convertible to a datatype, and shape (optional) is a tuple of integers
<add> specifying subarray shape.
<add>
<add> >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2,2))])
<add> dtype=[('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
<add>
<add> If ``fieldname`` is the empty string ``''``, the field will be given a
<add> default name of the form ``f#``, where ``#`` is the integer index of the
<add> field, counting from 0 from the left::
<add>
<add> >>> np.dtype([('x', 'f4'),('', 'i4'),('z', 'i8')])
<add> dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
<add>
<add> The byte offsets of the fields within the structure and the total
<add> structure itemsize are determined automatically.
<add>
<add>2. A string of comma-separated dtype specifications
<add>
<add> In this shorthand notation any of the :ref:`string dtype specifications
<add> <arrays.dtypes.constructing>` may be used in a string and separated by
<add> commas. The itemsize and byte offsets of the fields are determined
<add> automatically, and the field names are given the default names ``f0``,
<add> ``f1``, etc. ::
<add>
<add> >>> np.dtype('i8,f4,S3')
<add> dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
<add> >>> np.dtype('3int8, float32, (2,3)float64')
<add> dtype([('f0', 'i1', 3), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
<add>
<add>3. A dictionary of field parameter arrays
<add>
<add> This is the most flexible form of specification since it allows control
<add> over the byte-offsets of the fields and the itemsize of the structure.
<add>
<add> The dictionary has two required keys, 'names' and 'formats', and four
<add> optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
<add> for 'names' and 'formats' should respectively be a list of field names and
<add> a list of dtype specifications, of the same length. The optional 'offsets'
<add> value should be a list of integer byte-offsets, one for each field within
<add> the structure. If 'offsets' is not given the offsets are determined
<add> automatically. The optional 'itemsize' value should be an integer
<add> describing the total size in bytes of the dtype, which must be large
<add> enough to contain all the fields.
<add> ::
<add>
<add> >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4','f4']})
<add> dtype([('col1', '<i4'), ('col2', '<f4')])
<add> >>> np.dtype({'names': ['col1', 'col2'],
<add> ... 'formats': ['i4','f4'],
<add> ... 'offsets': [0, 4],
<add> ... 'itemsize': 12})
<add> dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
<add>
<add> Offsets may be chosen such that the fields overlap, though this will mean
<add> that assigning to one field may clobber any overlapping field's data. As
<add> an exception, fields of :class:`numpy.object` type .. (see
<add> :ref:`object arrays <arrays.object>`) cannot overlap with other fields,
<add> because of the risk of clobbering the internal object pointer and then
<add> dereferencing it.
<add>
<add> The optional 'aligned' value can be set to ``True`` to make the automatic
<add> offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
<add> as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
<add> True.
<add>
<add> The optional 'titles' value should be a list of titles of the same length
<add> as 'names', see :ref:`Field Titles <titles>` below.
<add>
<add>4. A dictionary of field names
<add>
<add> The use of this form of specification is discouraged, but documented here
<add> because older numpy code may use it. The keys of the dictionary are the
<add> field names and the values are tuples specifying type and offset::
<add>
<add> >>> np.dtype=({'col1': ('i1',0), 'col2': ('f4',1)})
<add> dtype([(('col1'), 'i1'), (('col2'), '>f4')])
<add>
<add> This form is discouraged because Python dictionaries do not preserve order
<add> in Python versions before Python 3.6, and the order of the fields in a
<add> structured dtype has meaning. :ref:`Field Titles <titles>` may be
<add> specified by using a 3-tuple, see below.
<add>
<add>Manipulating and Displaying Structured Datatypes
<add>------------------------------------------------
<add>
<add>The list of field names of a structured datatype can be found in the ``names``
<add>attribute of the dtype object::
<add>
<add> >>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
<add> >>> d.names
<add> ('x', 'y')
<add>
<add>The field names may be modified by assigning to the ``names`` attribute using a
<add>sequence of strings of the same length.
<add>
<add>The dtype object also has a dictionary-like attribute, ``fields``, whose keys
<add>are the field names (and :ref:`Field Titles <titles>`, see below) and whose
<add>values are tuples containing the dtype and byte offset of each field. ::
<add>
<add> >>> d.fields
<add> mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
<add>
<add>Both the ``names`` and ``fields`` attributes will equal ``None`` for
<add>unstructured arrays.
<add>
<add>The string representation of a structured datatype is shown in the "list of
<add>tuples" form if possible, otherwise numpy falls back to using the more general
<add>dictionary form.
<add>
<add>.. _offsets-and-alignment:
<add>
<add>Automatic Byte Offsets and Alignment
<add>------------------------------------
<add>
<add>Numpy uses one of two methods to automatically determine the field byte offsets
<add>and the overall itemsize of a structured datatype, depending on whether
<add>``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
<add>
<add>By default (with ``align=False``), numpy will pack the fields together tightly
<add>such that each field starts at the byte offset the previous field ended, and the
<add>fields are contiguous in memory. ::
<add>
<add> >>> def print_offsets(d):
<add> ... print("offsets:", [d.fields[name][1] for name in d.names])
<add> ... print("itemsize:", d.itemsize)
<add> >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2'))
<add> offsets: [0, 1, 2, 6, 7, 15]
<add> itemsize: 17
<add>
<add>If ``align=True`` is set, numpy will pad the structure in the same way many C
<add>compilers would pad a C-struct. Aligned structures can give a performance
<add>improvement in some cases, at the cost of increased datatype size. Padding
<add>bytes are inserted between fields such that each field's byte offset will be a
<add>multiple of that field's alignment (usually equal to the field's size in bytes
<add>for simple datatypes, see :c:member:`PyArray_Descr.alignment`).
<add>The structure will also have trailing padding added so that its itemsize is a
<add>multiple of the largest field's alignment. ::
<add>
<add> >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2', align=True))
<add> offsets: [0, 1, 4, 8, 16, 24]
<add> itemsize: 32
<add>
<add>Note that although almost all modern C compilers pad in this way by default,
<add>padding in C structs is C-implementation-dependent so this memory layout is not
<add>guaranteed to exactly match that of a corresponding struct in a C program. Some
<add>massaging may be needed either on the numpy side or the C side to obtain exact
<add>correspondence.
<add>
<add>If offsets were specified manually using the optional ``offsets`` key in the
<add>dictionary-based dtype specification, setting ``align=True`` will check that
<add>each field's offset is a multiple of its size and that the itemsize is a
<add>multiple of the largest field size, and raise an exception if not.
<ide>
<del>In these examples, y is a simple float array consisting of the 2nd field
<del>in the structured type. But, rather than being a copy of the data in the structured
<del>array, it is a view, i.e., it shares exactly the same memory locations.
<del>Thus, when we updated this array by doubling its values, the structured
<del>array shows the corresponding values as doubled as well. Likewise, if one
<del>changes the structured array, the field view also changes: ::
<add>If the offsets of the fields and itemsize of a structured array satisfy the
<add>alignment conditions, the array will have the ``ALIGNED`` :ref:`flag
<add><numpy.ndarray.flags>` set.
<ide>
<del> >>> x[1] = (-1,-1.,"Master")
<del> >>> x
<del> array([(1, 4.0, 'Hello'), (-1, -1.0, 'Master')],
<del> dtype=[('foo', '>i4'), ('bar', '>f4'), ('baz', '|S10')])
<del> >>> y
<del> array([ 4., -1.], dtype=float32)
<del>
<del>Defining Structured Arrays
<del>==========================
<del>
<del>One defines a structured array through the dtype object. There are
<del>**several** alternative ways to define the fields of a record. Some of
<del>these variants provide backward compatibility with Numeric, numarray, or
<del>another module, and should not be used except for such purposes. These
<del>will be so noted. One specifies record structure in
<del>one of four alternative ways, using an argument (as supplied to a dtype
<del>function keyword or a dtype object constructor itself). This
<del>argument must be one of the following: 1) string, 2) tuple, 3) list, or
<del>4) dictionary. Each of these is briefly described below.
<del>
<del>1) String argument.
<del>In this case, the constructor expects a comma-separated list of type
<del>specifiers, optionally with extra shape information. The fields are
<del>given the default names 'f0', 'f1', 'f2' and so on.
<del>The type specifiers can take 4 different forms: ::
<del>
<del> a) b1, i1, i2, i4, i8, u1, u2, u4, u8, f2, f4, f8, c8, c16, a<n>
<del> (representing bytes, ints, unsigned ints, floats, complex and
<del> fixed length strings of specified byte lengths)
<del> b) int8,...,uint8,...,float16, float32, float64, complex64, complex128
<del> (this time with bit sizes)
<del> c) older Numeric/numarray type specifications (e.g. Float32).
<del> Don't use these in new code!
<del> d) Single character type specifiers (e.g H for unsigned short ints).
<del> Avoid using these unless you must. Details can be found in the
<del> NumPy book
<del>
<del>These different styles can be mixed within the same string (but why would you
<del>want to do that?). Furthermore, each type specifier can be prefixed
<del>with a repetition number, or a shape. In these cases an array
<del>element is created, i.e., an array within a record. That array
<del>is still referred to as a single field. An example: ::
<del>
<del> >>> x = np.zeros(3, dtype='3int8, float32, (2,3)float64')
<del> >>> x
<del> array([([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
<del> ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
<del> ([0, 0, 0], 0.0, [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])],
<del> dtype=[('f0', '|i1', 3), ('f1', '>f4'), ('f2', '>f8', (2, 3))])
<del>
<del>By using strings to define the record structure, it precludes being
<del>able to name the fields in the original definition. The names can
<del>be changed as shown later, however.
<del>
<del>2) Tuple argument: The only relevant tuple case that applies to record
<del>structures is when a structure is mapped to an existing data type. This
<del>is done by pairing in a tuple, the existing data type with a matching
<del>dtype definition (using any of the variants being described here). As
<del>an example (using a definition using a list, so see 3) for further
<del>details): ::
<del>
<del> >>> x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
<del> >>> x
<del> array([0, 0, 0])
<del> >>> x['r']
<del> array([0, 0, 0], dtype=uint8)
<add>.. _titles:
<ide>
<del>In this case, an array is produced that looks and acts like a simple int32 array,
<del>but also has definitions for fields that use only one byte of the int32 (a bit
<del>like Fortran equivalencing).
<add>Field Titles
<add>------------
<ide>
<del>3) List argument: In this case the record structure is defined with a list of
<del>tuples. Each tuple has 2 or 3 elements specifying: 1) The name of the field
<del>('' is permitted), 2) the type of the field, and 3) the shape (optional).
<del>For example::
<add>In addition to field names, fields may also have an associated :term:`title`,
<add>an alternate name, which is sometimes used as an additional description or
<add>mnemonic for the field. The title may be used to index an array, just like a
<add>fieldname.
<ide>
<del> >>> x = np.zeros(3, dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
<del> >>> x
<del> array([(0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
<del> (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]]),
<del> (0.0, 0.0, [[0.0, 0.0], [0.0, 0.0]])],
<del> dtype=[('x', '>f4'), ('y', '>f4'), ('value', '>f4', (2, 2))])
<del>
<del>4) Dictionary argument: two different forms are permitted. The first consists
<del>of a dictionary with two required keys ('names' and 'formats'), each having an
<del>equal sized list of values. The format list contains any type/shape specifier
<del>allowed in other contexts. The names must be strings. There are two optional
<del>keys: 'offsets' and 'titles'. Each must be a correspondingly matching list to
<del>the required two where offsets contain integer offsets for each field, and
<del>titles are objects containing metadata for each field (these do not have
<del>to be strings), where the value of None is permitted. As an example: ::
<del>
<del> >>> x = np.zeros(3, dtype={'names':['col1', 'col2'], 'formats':['i4','f4']})
<del> >>> x
<del> array([(0, 0.0), (0, 0.0), (0, 0.0)],
<del> dtype=[('col1', '>i4'), ('col2', '>f4')])
<add>To add titles when using the list-of-tuples form of dtype specification, the
<add>fieldname may be specified as a tuple of two strings (instead of a single
<add>string), which will be the field's title and field name respectively. For
<add>example::
<add>
<add> >>> np.dtype([(('my title', 'name'), 'f4')])
<add>
<add>When using the first form of dictionary-based specification, the titles may be
<add>supplied as an extra ``'titles'`` key as described above. When using the second
<add>(discouraged) dictionary-based specification, the title can be supplied by
<add>providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
<add>2-element tuple::
<add>
<add> >>> np.dtype({'name': ('i4', 0, 'my title')})
<ide>
<del>The other dictionary form permitted is a dictionary of name keys with tuple
<del>values specifying type, offset, and an optional title. ::
<add>The ``dtype.fields`` dictionary will contain :term:`titles` as keys, if any
<add>titles are used. This means effectively that a field with a title will be
<add>represented twice in the fields dictionary. The tuple values for these fields
<add>will also have a third element, the field title.
<add>
<add>Because of this, and because the ``names`` attribute preserves the field order
<add>while the ``fields`` attribute may not, it is recommended to iterate through
<add>the fields of a dtype using the ``names`` attribute of the dtype (which will
<add>not list titles), as in::
<ide>
<del> >>> x = np.zeros(3, dtype={'col1':('i1',0,'title 1'), 'col2':('f4',1,'title 2')})
<add> >>> for name in d.names:
<add> ... print(d.fields[name][:2])
<add>
<add>Union types
<add>-----------
<add>
<add>Structured datatypes are implemented in numpy to have base type
<add>:class:`numpy.void` by default, but it is possible to interpret other numpy
<add>types as structured types using the ``(base_dtype, dtype)`` form of dtype
<add>specification described in
<add>:ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is
<add>the desired underlying dtype, and fields and flags will be copied from
<add>``dtype``. This dtype is similar to a 'union' in C.
<add>
<add>Indexing and Assignment to Structured arrays
<add>=============================================
<add>
<add>Assigning data to a Structured Array
<add>------------------------------------
<add>
<add>There are a number of ways to assign values to a structured array: Using python
<add>tuples, using scalar values, or using other structured arrays.
<add>
<add>Assignment from Python Native Types (Tuples)
<add>```````````````````````````````````````````
<add>
<add>The simplest way to assign values to a structured array is using python
<add>tuples. Each assigned value should be a tuple (and not a list or array, as
<add>these will trigger numpy's broadcasting rules) of length equal to the number of
<add>fields in the array. The tuple's elements are assigned to the successive fields
<add>of the array, from left to right::
<add>
<add> >>> x = np.array([(1,2,3),(4,5,6)], dtype='i8,f4,f8')
<add> >>> x[1] = (7,8,9)
<ide> >>> x
<del> array([(0, 0.0), (0, 0.0), (0, 0.0)],
<del> dtype=[(('title 1', 'col1'), '|i1'), (('title 2', 'col2'), '>f4')])
<add> array([(1, 2., 3.), (7, 8., 9.)],
<add> dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
<ide>
<del>Accessing and modifying field names
<del>===================================
<add>Assignment from Scalars
<add>```````````````````````
<ide>
<del>The field names are an attribute of the dtype object defining the structure.
<del>For the last example: ::
<add>A scalar assigned to a structured element will be assigned to all fields. This
<add>happens when a scalar is assigned to a structured array, or when a scalar array
<add>is assigned to a structured array::
<ide>
<del> >>> x.dtype.names
<del> ('col1', 'col2')
<del> >>> x.dtype.names = ('x', 'y')
<add> >>> x = np.zeros(2, dtype='i8,f4,?,S1')
<add> >>> x[:] = 3
<ide> >>> x
<del> array([(0, 0.0), (0, 0.0), (0, 0.0)],
<del> dtype=[(('title 1', 'x'), '|i1'), (('title 2', 'y'), '>f4')])
<del> >>> x.dtype.names = ('x', 'y', 'z') # wrong number of names
<del> <type 'exceptions.ValueError'>: must replace all names at once with a sequence of length 2
<add> array([(3, 3.0, True, b'3'), (3, 3.0, True, b'3')],
<add> dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
<add> >>> x[:] = np.arange(2)
<add> >>> x
<add> array([(0, 0.0, False, b'0'), (1, 1.0, True, b'1')],
<add> dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
<add>
<add>Structured arrays can also be assigned to scalar arrays, but only if the
<add>structured datatype has just a single field::
<add>
<add> >>> x = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
<add> >>> y = np.zeros(2, dtype=[('A', 'i4')])
<add> >>> a = np.zeros(2, dtype='i4')
<add> >>> a[:] = x
<add> ValueError: Can't cast from structure to non-structure, except if the structure only has a single field.
<add> >>> a[:] = y
<add> >>> a
<add> array([0, 0], dtype=int32)
<add>
<add>Assignment from other Structured Arrays
<add>```````````````````````````````````````
<add>
<add>Assignment between two structured arrays occurs as if the source elements had
<add>been converted to tuples and then assigned to the destination elements. That
<add>is, the first field of the source array is assigned to the first field of the
<add>destination array, and the second field likewise, and so on, regardless of
<add>field names. Structured arrays with a different number of fields cannot be
<add>assigned to each other. Bytes of the destination structure which are not
<add>included in any of the fields are unaffected. ::
<add>
<add> >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
<add> >>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
<add> >>> b[:] = a
<add> >>> b
<add> array([(0.0, b'0.0', b''), (0.0, b'0.0', b''), (0.0, b'0.0', b'')],
<add> dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
<add>
<add>
<add>Assignment involving subarrays
<add>``````````````````````````````
<add>
<add>When assigning to fields which are subarrays, the assigned value will first be
<add>broadcast to the shape of the subarray.
<add>
<add>Indexing Structured Arrays
<add>--------------------------
<add>
<add>Accessing Individual Fields
<add>```````````````````````````
<add>
<add>Individual fields of a structured array may be accessed and modified by indexing
<add>the array with the field name. ::
<add>
<add> >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
<add> >>> x['foo']
<add> array([1, 3])
<add> >>> x['foo'] = 10
<add> >>> x
<add> array([(10, 2.), (10, 4.)],
<add> dtype=[('foo', '<i8'), ('bar', '<f4')])
<add>
<add>The resulting array is a view into the original array. It shares the same
<add>memory locations and writing to the view will modify the original array. ::
<add>
<add> >>> y = x['bar']
<add> >>> y[:] = 10
<add> >>> x
<add>    array([(10, 10.), (10, 10.)],
<add> dtype=[('foo', '<i8'), ('bar', '<f4')])
<add>
<add>This view has the same dtype and itemsize as the indexed field, so it is
<add>typically a non-structured array (except in the case of nested structures).
<ide>
<del>Accessing field titles
<del>====================================
<add> >>> y.dtype, y.shape, y.strides
<add> (dtype('float32'), (2,), (12,))
<ide>
<del>The field titles provide a standard place to put associated info for fields.
<del>They do not have to be strings. ::
<add>Accessing Multiple Fields
<add>```````````````````````````
<ide>
<del> >>> x.dtype.fields['x'][2]
<del> 'title 1'
<add>One can index a structured array with a multi-field index, where the index is a
<add>list of field names::
<ide>
<del>Accessing multiple fields at once
<del>====================================
<add> >>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'i4'), ('c', 'f8')])
<add> >>> a[['a', 'c']]
<add> array([(0, 0.0), (0, 0.0), (0, 0.0)],
<add>         dtype={'names':['a','c'], 'formats':['<i8','<f8'], 'offsets':[0,12], 'itemsize':20})
<add> >>> a[['a', 'c']] = (2, 3)
<add> >>> a
<add> array([(2, 0, 3.0), (2, 0, 3.0), (2, 0, 3.0)],
<add> dtype=[('a', '<i8'), ('b', '<i4'), ('c', '<f8')])
<add>
<add>The resulting array is a view into the original array, such that assignment to
<add>the view modifies the original array. This view's fields will be in the order
<add>they were indexed. Note that unlike for single-field indexing, the view's dtype
<add>has the same itemsize as the original array and has fields at the same offsets
<add>as in the original array, and unindexed fields are merely missing.
<add>
<add>Since this view is a structured array itself, it obeys the assignment rules
<add>described above. For example, this means that one can swap the values of two
<add>fields using appropriate multi-field indexes::
<add>
<add> >>> a[['a', 'c']] = a[['c', 'a']]
<add>
<add>Indexing with an Integer to get a Structured Scalar
<add>```````````````````````````````````````````````````
<add>
<add>Indexing a single element of a structured array (with an integer index) returns
<add>a structured scalar::
<add>
<add> >>> x = np.array([(1, 2., 3.)], dtype='i,f,f')
<add> >>> scalar = x[0]
<add> >>> scalar
<add> (1, 2., 3.)
<add> >>> type(scalar)
<add> numpy.void
<add>
<add>Importantly, unlike other numpy scalars, structured scalars are mutable and act
<add>like views into the original array, such that modifying the scalar will modify
<add>the original array. Structured scalars also support access and assignment by
<add>field name::
<add>
<add> >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
<add> >>> s = x[0]
<add> >>> s['bar'] = 100
<add> >>> x
<add> array([(1, 100.), (3, 4.)],
<add> dtype=[('foo', '<i8'), ('bar', '<f4')])
<ide>
<del>You can access multiple fields at once using a list of field names: ::
<add>Similarly to tuples, structured scalars can also be indexed with an integer::
<ide>
<del> >>> x = np.array([(1.5,2.5,(1.0,2.0)),(3.,4.,(4.,5.)),(1.,3.,(2.,6.))],
<del> dtype=[('x','f4'),('y',np.float32),('value','f4',(2,2))])
<add> >>> scalar = np.array([(1, 2., 3.)], dtype='i,f,f')[0]
<add> >>> scalar[0]
<add> 1
<add> >>> scalar[1] = 4
<ide>
<del>Notice that `x` is created with a list of tuples. ::
<add>Thus, tuples might be thought of as the native Python equivalent to numpy's
<add>structured types, much like native python integers are the equivalent to
<add>numpy's integer types. Structured scalars may be converted to a tuple by
<add>calling :func:`ndarray.item`::
<ide>
<del> >>> x[['x','y']]
<del> array([(1.5, 2.5), (3.0, 4.0), (1.0, 3.0)],
<del> dtype=[('x', '<f4'), ('y', '<f4')])
<del> >>> x[['x','value']]
<del> array([(1.5, [[1.0, 2.0], [1.0, 2.0]]), (3.0, [[4.0, 5.0], [4.0, 5.0]]),
<del> (1.0, [[2.0, 6.0], [2.0, 6.0]])],
<del> dtype=[('x', '<f4'), ('value', '<f4', (2, 2))])
<add> >>> scalar.item(), type(scalar.item())
<add> ((1, 2.0, 3.0), tuple)
<ide>
<del>The fields are returned in the order they are asked for.::
<add>Viewing Structured Arrays Containing Objects
<add>--------------------------------------------
<ide>
<del> >>> x[['y','x']]
<del> array([(2.5, 1.5), (4.0, 3.0), (3.0, 1.0)],
<del> dtype=[('y', '<f4'), ('x', '<f4')])
<add>In order to prevent clobbering of object pointers in fields of
<add>:class:`numpy.object` type, numpy currently does not allow views of structured
<add>arrays containing objects.
<ide>
<del>Filling structured arrays
<del>=========================
<add>Structure Comparison
<add>--------------------
<ide>
<del>Structured arrays can be filled by field or row by row. ::
<add>If the dtypes of two structured arrays are equivalent, testing the equality of
<add>the arrays will result in a boolean array with the dimension of the original
<add>arrays, with elements set to True where all fields of the corresponding
<add>structures are equal. Structured dtypes are equivalent if the field names,
<add>dtypes and titles are the same, ignoring endianness, and the fields are in
<add>the same order::
<ide>
<del> >>> arr = np.zeros((5,), dtype=[('var1','f8'),('var2','f8')])
<del> >>> arr['var1'] = np.arange(5)
<add> >>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
<add> >>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')])
<add> >>> a == b
<add> array([False, False], dtype=bool)
<ide>
<del>If you fill it in row by row, it takes a take a tuple
<del>(but not a list or array!)::
<add>Currently, if the dtypes of two arrays are not equivalent all comparisons will
<add>return ``False``. This behavior is deprecated as of numpy 1.10 and may change
<add>in the future.
<ide>
<del> >>> arr[0] = (10,20)
<del> >>> arr
<del> array([(10.0, 20.0), (1.0, 0.0), (2.0, 0.0), (3.0, 0.0), (4.0, 0.0)],
<del> dtype=[('var1', '<f8'), ('var2', '<f8')])
<add>Currently, the ``<`` and ``>`` operators will always return ``False`` when
<add>comparing structured arrays. Many other pairwise operators are not supported.
<ide>
<ide> Record Arrays
<ide> =============
<ide>
<del>For convenience, numpy provides "record arrays" which allow one to access
<del>fields of structured arrays by attribute rather than by index. Record arrays
<del>are structured arrays wrapped using a subclass of ndarray,
<del>:class:`numpy.recarray`, which allows field access by attribute on the array
<del>object, and record arrays also use a special datatype, :class:`numpy.record`,
<del>which allows field access by attribute on the individual elements of the array.
<add>As an optional convenience numpy provides an ndarray subclass,
<add>:class:`numpy.recarray`, and associated helper functions in the
<add>:mod:`numpy.rec` submodule, which allows access to fields of structured arrays
<add>by attribute, instead of only by index. Record arrays also use a special
<add>datatype, :class:`numpy.record`, which allows field access by attribute on the
<add>structured scalars obtained from the array.
<ide>
<del>The simplest way to create a record array is with :func:`numpy.rec.array`: ::
<add>The simplest way to create a record array is with :func:`numpy.rec.array`::
<ide>
<del> >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
<add> >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
<ide> ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
<ide> >>> recordarr.bar
<ide> array([ 2., 3.], dtype=float32)
<ide> >>> recordarr[1:2]
<del> rec.array([(2, 3.0, 'World')],
<add> rec.array([(2, 3.0, 'World')],
<ide> dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
<ide> >>> recordarr[1:2].foo
<ide> array([2], dtype=int32)
<ide> >>> recordarr[1].baz
<ide> 'World'
<ide>
<del>numpy.rec.array can convert a wide variety of arguments into record arrays,
<del>including normal structured arrays: ::
<add>:func:`numpy.rec.array` can convert a wide variety of arguments into record
<add>arrays, including structured arrays::
<ide>
<del> >>> arr = array([(1,2.,'Hello'),(2,3.,"World")],
<add> >>> arr = array([(1,2.,'Hello'),(2,3.,"World")],
<ide> ... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
<ide> >>> recordarr = np.rec.array(arr)
<ide>
<del>The numpy.rec module provides a number of other convenience functions for
<add>The :mod:`numpy.rec` module provides a number of other convenience functions for
<ide> creating record arrays, see :ref:`record array creation routines
<ide> <routines.array-creation.rec>`.
<ide>
<ide> A record array representation of a structured array can be obtained using the
<del>appropriate :ref:`view`: ::
<add>appropriate :ref:`view`::
<ide>
<del> >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
<add> >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
<ide> ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
<del> >>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)),
<add> >>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)),
<ide> ... type=np.recarray)
<ide>
<del>For convenience, viewing an ndarray as type `np.recarray` will automatically
<del>convert to `np.record` datatype, so the dtype can be left out of the view: ::
<add>For convenience, viewing an ndarray as type :class:`np.recarray` will
<add>automatically convert to :class:`np.record` datatype, so the dtype can be left
<add>out of the view::
<ide>
<ide> >>> recordarr = arr.view(np.recarray)
<ide> >>> recordarr.dtype
<ide> dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
<ide>
<ide> To get back to a plain ndarray both the dtype and type must be reset. The
<ide> following view does so, taking into account the unusual case that the
<del>recordarr was not a structured type: ::
<add>recordarr was not a structured type::
<ide>
<ide> >>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
<ide>
<ide> Record array fields accessed by index or by attribute are returned as a record
<ide> array if the field has a structured type but as a plain ndarray otherwise. ::
<ide>
<del> >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
<add> >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
<ide> ... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
<ide> >>> type(recordarr.foo)
<ide> <type 'numpy.ndarray'>
<ide>
<ide> Note that if a field has the same name as an ndarray attribute, the ndarray
<ide> attribute takes precedence. Such fields will be inaccessible by attribute but
<del>may still be accessed by index.
<del>
<add>will still be accessible by index.
<ide>
<ide> """
<ide> from __future__ import division, absolute_import, print_function | 1 |
Python | Python | add support for new xlm-roberta model | 69f4f058fa5ecc6fea8c65ae59694442bba795e6 | <ide><path>transformers/modeling_xlm_roberta.py
<add># coding=utf-8
<add># Copyright 2019 Facebook AI Research and the HuggingFace Inc. team.
<add># Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>"""PyTorch XLM-RoBERTa model. """
<add>
<add>from __future__ import (absolute_import, division, print_function,
<add> unicode_literals)
<add>
<add>import logging
<add>
<add>from .modeling_roberta import RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification, RobertaForMultipleChoice, RobertaForTokenClassification
<add>from .configuration_xlm_roberta import XLMRobertaConfig
<add>from .file_utils import add_start_docstrings
<add>
<add>logger = logging.getLogger(__name__)
<add>
<add>XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
<add> 'xlm-roberta-large': "https://schweter.eu/cloud/transformers/xlm-roberta-large-pytorch_model.bin",
<add>}
<add>
<add>
<add>XLM_ROBERTA_START_DOCSTRING = r""" The XLM-RoBERTa model was proposed in
<add> `Unsupervised Cross-lingual Representation Learning at Scale`_
<add> by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook's RoBERTa model released in 2019.
<add>
<add> It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data.
<add>
<add> This implementation is the same as RoBERTa.
<add>
<add> This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
<add> refer to the PyTorch documentation for all matter related to general usage and behavior.
<add>
<add> .. _`Unsupervised Cross-lingual Representation Learning at Scale`:
<add> https://arxiv.org/abs/1911.02116
<add>
<add> .. _`torch.nn.Module`:
<add> https://pytorch.org/docs/stable/nn.html#module
<add>
<add> Parameters:
<add> config (:class:`~transformers.XLMRobertaConfig`): Model configuration class with all the parameters of the
<add> model. Initializing with a config file does not load the weights associated with the model, only the configuration.
<add> Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
<add>"""
<add>
<add>XLM_ROBERTA_INPUTS_DOCSTRING = r"""
<add> Inputs:
<add> **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add> Indices of input sequence tokens in the vocabulary.
<add> To match pre-training, XLM-RoBERTa input sequence should be formatted with <s> and </s> tokens as follows:
<add>
<add> (a) For sequence pairs:
<add>
<add> ``tokens: <s> Is this Jacksonville ? </s> </s> No it is not . </s>``
<add>
<add> (b) For single sequences:
<add>
<add> ``tokens: <s> the dog is hairy . </s>``
<add>
<add> Fully encoded sequences or sequence pairs can be obtained using the XLMRobertaTokenizer.encode function with
<add> the ``add_special_tokens`` parameter set to ``True``.
<add>
<add> XLM-RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on
<add> the right rather than the left.
<add>
<add> See :func:`transformers.PreTrainedTokenizer.encode` and
<add> :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
<add> **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
<add> Mask to avoid performing attention on padding token indices.
<add> Mask values selected in ``[0, 1]``:
<add> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
<add> **token_type_ids**: (`optional` need to be trained) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add> Optional segment token indices to indicate first and second portions of the inputs.
<add> This embedding matrice is not trained (not pretrained during XLM-RoBERTa pretraining), you will have to train it
<add> during finetuning.
<add> Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
<add> corresponds to a `sentence B` token
<add> (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
<add> **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add> Indices of positions of each input sequence tokens in the position embeddings.
<add> Selected in the range ``[0, config.max_position_embeddings - 1[``.
<add> **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
<add> Mask to nullify selected heads of the self-attention modules.
<add> Mask values selected in ``[0, 1]``:
<add> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
<add> **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
<add> Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
<add> This is useful if you want more control over how to convert `input_ids` indices into associated vectors
<add> than the model's internal embedding lookup matrix.
<add>"""
<add>
<add>@add_start_docstrings("The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
<add> XLM_ROBERTA_START_DOCSTRING, XLM_ROBERTA_INPUTS_DOCSTRING)
<add>class XLMRobertaModel(RobertaModel):
<add> r"""
<add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
<add> **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
<add> Sequence of hidden-states at the output of the last layer of the model.
<add> **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
<add> Last layer hidden-state of the first token of the sequence (classification token)
<add> further processed by a Linear layer and a Tanh activation function. The Linear
<add> layer weights are trained from the next sentence prediction (classification)
<add>            To match pre-training, XLM-RoBERTa input sequence should be formatted with [CLS] and [SEP] tokens as follows:
<add>
<add> (a) For sequence pairs:
<add>
<add> ``tokens: [CLS] is this jack ##son ##ville ? [SEP] [SEP] no it is not . [SEP]``
<add>
<add> ``token_type_ids: 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
<add>
<add> (b) For single sequences:
<add>
<add> ``tokens: [CLS] the dog is hairy . [SEP]``
<add>
<add> ``token_type_ids: 0 0 0 0 0 0 0``
<add>
<add> objective during Bert pretraining. This output is usually *not* a good summary
<add> of the semantic content of the input, you're often better with averaging or pooling
<add> the sequence of hidden-states for the whole input sequence.
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
<add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
<add> of shape ``(batch_size, sequence_length, hidden_size)``:
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> **attentions**: (`optional`, returned when ``config.output_attentions=True``)
<add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<add>
<add> Examples::
<add>
<add> tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
<add> model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
<add> input_ids = torch.tensor(tokenizer.encode("Schloß Nymphenburg ist sehr schön .")).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids)
<add> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
<add>
<add> """
<add> config_class = XLMRobertaConfig
<add> pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
<add>
<add>
<add>@add_start_docstrings("""XLM-RoBERTa Model with a `language modeling` head on top. """,
<add> XLM_ROBERTA_START_DOCSTRING, XLM_ROBERTA_INPUTS_DOCSTRING)
<add>class XLMRobertaForMaskedLM(RobertaForMaskedLM):
<add> r"""
<add> **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add> Labels for computing the masked language modeling loss.
<add> Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
<add> Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
<add> in ``[0, ..., config.vocab_size]``
<add>
<add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
<add> **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<add> Masked language modeling loss.
<add> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
<add> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
<add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
<add> of shape ``(batch_size, sequence_length, hidden_size)``:
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> **attentions**: (`optional`, returned when ``config.output_attentions=True``)
<add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<add>
<add> Examples::
<add>
<add> tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
<add> model = XLMRobertaForMaskedLM.from_pretrained('xlm-roberta-large')
<add> input_ids = torch.tensor(tokenizer.encode("Schloß Nymphenburg ist sehr schön .")).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids, masked_lm_labels=input_ids)
<add> loss, prediction_scores = outputs[:2]
<add>
<add> """
<add> config_class = XLMRobertaConfig
<add> pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
<add>
<add>
<add>@add_start_docstrings("""XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
<add> on top of the pooled output) e.g. for GLUE tasks. """,
<add> XLM_ROBERTA_START_DOCSTRING, XLM_ROBERTA_INPUTS_DOCSTRING)
<add>class XLMRobertaForSequenceClassification(RobertaForSequenceClassification):
<add> r"""
<add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
<add> Labels for computing the sequence classification/regression loss.
<add> Indices should be in ``[0, ..., config.num_labels]``.
<add> If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
<add> If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
<add>
<add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
<add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<add> Classification (or regression if config.num_labels==1) loss.
<add> **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
<add> Classification (or regression if config.num_labels==1) scores (before SoftMax).
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
<add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
<add> of shape ``(batch_size, sequence_length, hidden_size)``:
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> **attentions**: (`optional`, returned when ``config.output_attentions=True``)
<add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<add>
<add> Examples::
<add>
<add> tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
<add> model = XLMRobertaForSequenceClassification.from_pretrained('xlm-roberta-large')
<add> input_ids = torch.tensor(tokenizer.encode("Schloß Nymphenburg ist sehr schön .")).unsqueeze(0) # Batch size 1
<add> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids, labels=labels)
<add> loss, logits = outputs[:2]
<add>
<add> """
<add> config_class = XLMRobertaConfig
<add> pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
<add>
<add>
<add>@add_start_docstrings("""XLM-RoBERTa Model with a multiple choice classification head on top (a linear layer on top of
<add> the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
<add> XLM_ROBERTA_START_DOCSTRING, XLM_ROBERTA_INPUTS_DOCSTRING)
<add>class XLMRobertaForMultipleChoice(RobertaForMultipleChoice):
<add> r"""
<add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
<add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<add> Classification loss.
<add> **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
<add> of the input tensors. (see `input_ids` above).
<add> Classification scores (before SoftMax).
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
<add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
<add> of shape ``(batch_size, sequence_length, hidden_size)``:
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> **attentions**: (`optional`, returned when ``config.output_attentions=True``)
<add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<add>
<add> Examples::
<add>
<add> tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
<add> model = XLMRobertaForMultipleChoice.from_pretrained('xlm-roberta-large')
<add> choices = ["Schloß Nymphenburg ist sehr schön .", "Der Schloßkanal auch !"]
<add> input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
<add> labels = torch.tensor(1).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids, labels=labels)
<add> loss, classification_scores = outputs[:2]
<add>
<add> """
<add> config_class = XLMRobertaConfig
<add> pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
<add>
<add>
<add>@add_start_docstrings("""XLM-RoBERTa Model with a token classification head on top (a linear layer on top of
<add> the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
<add> XLM_ROBERTA_START_DOCSTRING, XLM_ROBERTA_INPUTS_DOCSTRING)
<add>class XLMRobertaForTokenClassification(RobertaForTokenClassification):
<add> r"""
<add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add> Labels for computing the token classification loss.
<add> Indices should be in ``[0, ..., config.num_labels - 1]``.
<add>
<add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
<add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<add> Classification loss.
<add> **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
<add> Classification scores (before SoftMax).
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
<add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
<add> of shape ``(batch_size, sequence_length, hidden_size)``:
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> **attentions**: (`optional`, returned when ``config.output_attentions=True``)
<add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<add>
<add> Examples::
<add>
<add> tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large')
<add> model = XLMRobertaForTokenClassification.from_pretrained('xlm-roberta-large')
<add> input_ids = torch.tensor(tokenizer.encode("Schloß Nymphenburg ist sehr schön .", add_special_tokens=True)).unsqueeze(0) # Batch size 1
<add> labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
<add> outputs = model(input_ids, labels=labels)
<add> loss, scores = outputs[:2]
<add>
<add> """
<add> config_class = XLMRobertaConfig
<add> pretrained_model_archive_map = XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP | 1 |
Ruby | Ruby | replace puts with ohai | fe71d891538e59df3227fd0b4a5de08dfa80c598 | <ide><path>Library/Homebrew/dev-cmd/bump.rb
<ide>
<ide> require "cli/parser"
<ide> require "utils/popen"
<add>require "utils/repology"
<ide>
<ide> module Homebrew
<ide> module_function
<ide> def bump
<ide> end
<ide>
<ide> def display(outdated_packages)
<del> ohai "Outdated Formulae"
<add> ohai "Outdated Formulae\n"
<ide>
<ide> outdated_packages.each do |formula, package_details|
<del> puts ""
<del> puts "Formula: #{formula}"
<del> puts "Current formula version: #{package_details["current_formula_version"]}"
<del> puts "Latest repology version: #{package_details["repology_latest_version"]}"
<del> puts "Latest livecheck version: #{package_details["livecheck_latest_version"]}"
<del> puts "Open pull requests: #{package_details["open_pull_requests"]}"
<add> ohai formula
<add> ohai "Current formula version: #{package_details["current_formula_version"]}"
<add> ohai "Latest repology version: #{package_details["repology_latest_version"]}"
<add> ohai "Latest livecheck version: #{package_details["livecheck_latest_version"]}"
<add> ohai "Open pull requests: #{package_details["open_pull_requests"]}"
<ide> end
<ide> end
<ide> end | 1 |
Text | Text | remove references to atom-docs | 8cca42a6b89574dbb7f187e59dfda84f7a2435cc | <ide><path>README.md
<ide>
<ide> 
<ide>
<del>Check out our [guides](https://atom-docs.githubapp.com/v26.0/index.html) and [API documentation](https://atom-docs.githubapp.com/v26.0/api/index.html).
<add>Check out our [guides](https://www.atom.io/docs/latest/) and [API documentation](https://www.atom.io/docs/api/v34.0.0/api/)
<ide>
<ide> ## Installing
<ide> | 1 |
Ruby | Ruby | compute ext in initialize, and use an attr_reader | 2dbb73bdda3b81947fd112486ac4285fb1a6e3a9 | <ide><path>actionpack/lib/action_dispatch/middleware/static.rb
<ide>
<ide> module ActionDispatch
<ide> class FileHandler
<add> attr_reader :ext
<add>
<ide> def initialize(at, root)
<ide> @at, @root = at.chomp('/'), root.chomp('/')
<ide> @compiled_at = (Regexp.compile(/^#{Regexp.escape(at)}/) unless @at.blank?)
<ide> @compiled_root = Regexp.compile(/^#{Regexp.escape(root)}/)
<ide> @file_server = ::Rack::File.new(@root)
<add>
<add> ext = ::ActionController::Base.page_cache_extension
<add> @ext = "{,#{ext},/index#{ext}}"
<ide> end
<ide>
<ide> def match?(path)
<ide> def match?(path)
<ide> def call(env)
<ide> @file_server.call(env)
<ide> end
<del>
<del> def ext
<del> @ext ||= begin
<del> ext = ::ActionController::Base.page_cache_extension
<del> "{,#{ext},/index#{ext}}"
<del> end
<del> end
<ide> end
<ide>
<ide> class Static | 1 |
Go | Go | replace uses of fmt.sprintf() | f586a473cf8dc9ac1edf893f70ccf37c2e217035 | <ide><path>pkg/namesgenerator/names-generator.go
<ide> package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator"
<ide>
<ide> import (
<del> "fmt"
<ide> "math/rand"
<add> "strconv"
<ide> )
<ide>
<ide> var (
<ide> var (
<ide> // integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3`
<ide> func GetRandomName(retry int) string {
<ide> begin:
<del> name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
<add> name := left[rand.Intn(len(left))] + "_" + right[rand.Intn(len(right))] //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
<ide> if name == "boring_wozniak" /* Steve Wozniak is not boring */ {
<ide> goto begin
<ide> }
<ide>
<ide> if retry > 0 {
<del> name = fmt.Sprintf("%s%d", name, rand.Intn(10)) //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
<add> name += strconv.Itoa(rand.Intn(10)) //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand)
<ide> }
<ide> return name
<ide> }
<ide><path>pkg/namesgenerator/names-generator_test.go
<ide> func TestNameRetries(t *testing.T) {
<ide> }
<ide>
<ide> }
<add>
<add>func BenchmarkGetRandomName(b *testing.B) {
<add> b.ReportAllocs()
<add> var out string
<add> for n := 0; n < b.N; n++ {
<add> out = GetRandomName(5)
<add> }
<add> b.Log("Last result:", out)
<add>} | 2 |
Text | Text | add process for handling premature disclosure | 9cf23414d5883b1aabcfe5bddeb8a23396064f2e | <ide><path>doc/guides/collaborator-guide.md
<ide> * [Closing issues and pull requests](#closing-issues-and-pull-requests)
<ide> * [Author ready pull requests](#author-ready-pull-requests)
<ide> * [Handling own pull requests](#handling-own-pull-requests)
<add> * [Security issues](#managing-security-issues)
<ide> * [Accepting modifications](#accepting-modifications)
<ide> * [Code reviews](#code-reviews)
<ide> * [Consensus seeking](#consensus-seeking)
<ide> to land but is [author ready](#author-ready-pull-requests), add the
<ide> `author ready` label. If you wish to land the pull request yourself, use the
<ide> "assign yourself" link to self-assign it.
<ide>
<add>### Managing security issues
<add>
<add>Security issues should ideally be reported through the processes outlined in
<add>[SECURITY.md][security reporting]. This allows the collaborators to
<add>appropriately triage the report and address vulnerabilities in a planned
<add>security release. If an issue is opened in the public repo
<add>which describes a security issue, or if an issue is later identified to be
<add>describing a security issue, take the following steps:
<add>
<add>* Ask the originator to submit a report through Hacker one as outlined in
<add> [SECURITY.md][security reporting].
<add>* Move the issue to the private repo called
<add> [premature-disclosures](https://github.com/nodejs/premature-disclosures).
<add>* For any related pull requests create an associated issue in the
<add> `premature-disclosures` repo and add a copy of the patch for the
<add> pull request, and screenshots of discussion on the PR to the issue.
<add>* Open a ticket with GitHub asking that the PRs be deleted through
<add> [GitHub suppport](https://support.github.com/contact)
<add> using Node.js(team) as the account organization.
<add>* Open a new issue in the repository in which the issue was originally
<add> reported with a brief FYI to the originator. `FYI @xxxx we asked github
<add> to delete your PR while we work on releases in private.` with the title
<add> `FYI - PR deleted #YYYY`.
<add>* Email `tsc@iojs.org` with the link to the issues in the
<add> `premature-disclosures` repo so that the TSC is aware that they
<add> may need to expedite handling of the issue due to premature
<add> disclosure.
<add>
<ide> ## Accepting modifications
<ide>
<ide> Contributors propose modifications to Node.js using GitHub pull requests. This
<ide> If you cannot find who to cc for a file, `git shortlog -n -s <file>` can help.
<ide> [git-username]: https://help.github.com/articles/setting-your-username-in-git/
<ide> [node-core-utils-credentials]: https://github.com/nodejs/node-core-utils#setting-up-credentials
<ide> [node-core-utils-issues]: https://github.com/nodejs/node-core-utils/issues
<add>[security reporting]: https://github.com/nodejs/node/blob/HEAD/SECURITY.md
<ide> [unreliable tests]: https://github.com/nodejs/node/issues?q=is%3Aopen+is%3Aissue+label%3A%22CI+%2F+flaky+test%22 | 1 |
Python | Python | create bitonicsort.py (#386) | a0d5c9aaf0f573ff11beacb6a30a91f90312dd08 | <ide><path>sorts/BitonicSort.py
<add># Python program for Bitonic Sort. Note that this program
<add># works only when size of input is a power of 2.
<add>
<add># The parameter dir indicates the sorting direction, ASCENDING
<add># or DESCENDING; if (a[i] > a[j]) agrees with the direction,
<add># then a[i] and a[j] are interchanged.*/
<add>def compAndSwap(a, i, j, dire):
<add> if (dire == 1 and a[i] > a[j]) or (dire == 0 and a[i] < a[j]):
<add> a[i], a[j] = a[j], a[i]
<add>
<add> # It recursively sorts a bitonic sequence in ascending order,
<add>
<add>
<add># if dir = 1, and in descending order otherwise (means dir=0).
<add># The sequence to be sorted starts at index position low,
<add># the parameter cnt is the number of elements to be sorted.
<add>def bitonicMerge(a, low, cnt, dire):
<add> if cnt > 1:
<add> k = int(cnt / 2)
<add> for i in range(low, low + k):
<add> compAndSwap(a, i, i + k, dire)
<add> bitonicMerge(a, low, k, dire)
<add> bitonicMerge(a, low + k, k, dire)
<add>
<add> # This funcion first produces a bitonic sequence by recursively
<add>
<add>
<add># sorting its two halves in opposite sorting orders, and then
<add># calls bitonicMerge to make them in the same order
<add>def bitonicSort(a, low, cnt, dire):
<add> if cnt > 1:
<add> k = int(cnt / 2)
<add> bitonicSort(a, low, k, 1)
<add> bitonicSort(a, low + k, k, 0)
<add> bitonicMerge(a, low, cnt, dire)
<add>
<add> # Caller of bitonicSort for sorting the entire array of length N
<add>
<add>
<add># in ASCENDING order
<add>def sort(a, N, up):
<add> bitonicSort(a, 0, N, up)
<add>
<add>
<add># Driver code to test above
<add>a = []
<add>
<add>n = int(input())
<add>for i in range(n):
<add> a.append(int(input()))
<add>up = 1
<add>
<add>sort(a, n, up)
<add>print("\n\nSorted array is")
<add>for i in range(n):
<add> print("%d" % a[i]) | 1 |
Javascript | Javascript | fix typing for progressbarandroid | bc4825ee9d49d7c8044bd08ec3ca49050435e17c | <ide><path>Libraries/Components/ActivityIndicator/ActivityIndicator.js
<ide> const ActivityIndicator = (props: Props, forwardedRef?: any) => {
<ide> // $FlowFixMe Flow doesn't know when this is the android component
<ide> <PlatformActivityIndicator {...nativeProps} {...androidProps} />
<ide> ) : (
<add> // $FlowFixMe Flow doesn't know when this is the iOS component
<ide> <PlatformActivityIndicator {...nativeProps} />
<ide> )}
<ide> </View>
<ide><path>Libraries/Components/ProgressBarAndroid/ProgressBarAndroid.android.js
<ide> ProgressBarAndroidToExport.defaultProps = {
<ide> /* $FlowFixMe(>=0.89.0 site=react_native_android_fb) This comment suppresses an
<ide> * error found when Flow v0.89 was deployed. To see the error, delete this
<ide> * comment and run Flow. */
<del>module.exports = (ProgressBarAndroidToExport: ProgressBarAndroidNativeComponent);
<add>module.exports = (ProgressBarAndroidToExport: typeof ProgressBarAndroidNativeComponent);
<ide><path>Libraries/Components/ProgressBarAndroid/__tests__/ProgressBarAndroid-test.js
<ide> const render = require('../../../../jest/renderer');
<ide> describe('<ProgressBarAndroid />', () => {
<ide> it('should render as <ProgressBarAndroid> when mocked', () => {
<ide> const instance = render.create(
<del> <ProgressBarAndroid styleAttr="Horizontal" />,
<add> <ProgressBarAndroid styleAttr="Horizontal" indeterminate={true} />,
<ide> );
<ide> expect(instance).toMatchSnapshot();
<ide> });
<ide>
<ide> it('should shallow render as <ForwardRef(ProgressBarAndroid)> when mocked', () => {
<ide> const output = render.shallow(
<del> <ProgressBarAndroid styleAttr="Horizontal" />,
<add> <ProgressBarAndroid styleAttr="Horizontal" indeterminate={true} />,
<ide> );
<ide> expect(output).toMatchSnapshot();
<ide> });
<ide> describe('<ProgressBarAndroid />', () => {
<ide> jest.dontMock('../ProgressBarAndroid');
<ide>
<ide> const output = render.shallow(
<del> <ProgressBarAndroid styleAttr="Horizontal" />,
<add> <ProgressBarAndroid styleAttr="Horizontal" indeterminate={true} />,
<ide> );
<ide> expect(output).toMatchSnapshot();
<ide> });
<ide> describe('<ProgressBarAndroid />', () => {
<ide> jest.dontMock('../ProgressBarAndroid');
<ide>
<ide> const instance = render.create(
<del> <ProgressBarAndroid styleAttr="Horizontal" />,
<add> <ProgressBarAndroid styleAttr="Horizontal" indeterminate={true} />,
<ide> );
<ide> expect(instance).toMatchSnapshot();
<ide> }); | 3 |
Text | Text | fix missing closing tag in challenge description | 996b466d87b618851a4d1d5aa160ab07946fb074 | <ide><path>curriculum/challenges/english/06-quality-assurance/quality-assurance-and-testing-with-chai/compare-the-properties-of-two-elements.md
<ide> dashedName: compare-the-properties-of-two-elements
<ide>
<ide> # --description--
<ide>
<del>As a reminder, this project is being built upon the following starter project on <a href="https://replit.com/github/freeCodeCamp/boilerplate-mochachai" target="_blank" rel="noopener noreferrer nofollow">Replit</a>, or cloned from <a href="https://github.com/freeCodeCamp/boilerplate-mochachai/" target="_blank" rel="noopener noreferrer nofollow">GitHub<a>.
<add>As a reminder, this project is being built upon the following starter project on <a href="https://replit.com/github/freeCodeCamp/boilerplate-mochachai" target="_blank" rel="noopener noreferrer nofollow">Replit</a>, or cloned from <a href="https://github.com/freeCodeCamp/boilerplate-mochachai/" target="_blank" rel="noopener noreferrer nofollow">GitHub</a>.
<ide>
<ide> # --instructions--
<ide> | 1 |
Javascript | Javascript | allow filtering of tests in statscases | 5b30edebfcbb12ec78168061502cb2d618bf01ae | <ide><path>test/StatsTestCases.test.js
<ide> const tests = fs
<ide> testName =>
<ide> fs.existsSync(path.join(base, testName, "index.js")) ||
<ide> fs.existsSync(path.join(base, testName, "webpack.config.js"))
<del> );
<add> )
<add> .filter(testName => {
<add> const testDirectory = path.join(base, testName);
<add> const filterPath = path.join(testDirectory, "test.filter.js");
<add> if (fs.existsSync(filterPath) && !require(filterPath)()) {
<add> describe.skip(testName, () => it("filtered"));
<add> return false;
<add> }
<add> return true;
<add> });
<ide>
<ide> describe("StatsTestCases", () => {
<ide> tests.forEach(testName => { | 1 |
Text | Text | add overlay networking security model node | cc5debcb2e8621358721eb860c07f33f8b83d684 | <ide><path>docs/reference/commandline/network_connect.md
<ide> You can connect a container to one or more networks. The networks need not be th
<ide> * [network disconnect](network_disconnect.md)
<ide> * [network ls](network_ls.md)
<ide> * [network rm](network_rm.md)
<del>* [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
<add>* [Understand Docker container networks](../../userguide/networking/index.md)
<ide> * [Work with networks](../../userguide/networking/work-with-networks.md)
<ide><path>docs/reference/commandline/network_create.md
<ide> to create an externally isolated `overlay` network, you can specify the
<ide> * [network disconnect](network_disconnect.md)
<ide> * [network ls](network_ls.md)
<ide> * [network rm](network_rm.md)
<del>* [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
<add>* [Understand Docker container networks](../../userguide/networking/index.md)
<ide><path>docs/reference/commandline/network_disconnect.md
<ide> Disconnects a container from a network. The container must be running to disconn
<ide> * [network create](network_create.md)
<ide> * [network ls](network_ls.md)
<ide> * [network rm](network_rm.md)
<del>* [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
<add>* [Understand Docker container networks](../../userguide/networking/index.md)
<ide><path>docs/reference/commandline/network_inspect.md
<ide> $ docker network inspect simple-network
<ide> * [network create](network_create.md)
<ide> * [network ls](network_ls.md)
<ide> * [network rm](network_rm.md)
<del>* [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
<add>* [Understand Docker container networks](../../userguide/networking/index.md)
<ide><path>docs/reference/commandline/network_ls.md
<ide> d1584f8dc718: host
<ide> * [network create](network_create.md)
<ide> * [network inspect](network_inspect.md)
<ide> * [network rm](network_rm.md)
<del>* [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
<add>* [Understand Docker container networks](../../userguide/networking/index.md)
<ide><path>docs/reference/commandline/network_rm.md
<ide> deletion.
<ide> * [network create](network_create.md)
<ide> * [network ls](network_ls.md)
<ide> * [network inspect](network_inspect.md)
<del>* [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
<add>* [Understand Docker container networks](../../userguide/networking/index.md)
<ide><path>docs/security/security.md
<ide> certificates](https.md).
<ide>
<ide> The daemon is also potentially vulnerable to other inputs, such as image
<ide> loading from either disk with 'docker load', or from the network with
<del>'docker pull'. As of Docker 1.3.2, images are now extracted in a chrooted
<del>subprocess on Linux/Unix platforms, being the first-step in a wider effort
<del>toward privilege separation. As of Docker 1.10.0, all images are stored and
<del>accessed by the cryptographic checksums of their contents, limiting the
<add>'docker pull'. As of Docker 1.3.2, images are now extracted in a chrooted
<add>subprocess on Linux/Unix platforms, being the first-step in a wider effort
<add>toward privilege separation. As of Docker 1.10.0, all images are stored and
<add>accessed by the cryptographic checksums of their contents, limiting the
<ide> possibility of an attacker causing a collision with an existing image.
<ide>
<ide> Eventually, it is expected that the Docker daemon will run restricted
<ide> pull requests, and communicate via the mailing list.
<ide> * [Seccomp security profiles for Docker](../security/seccomp.md)
<ide> * [AppArmor security profiles for Docker](../security/apparmor.md)
<ide> * [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e)
<add>* [Docker swarm mode overlay network security model](../userguide/networking/overlay-security-model.md)
<ide><path>docs/userguide/index.md
<ide> This guide helps users learn how to use Docker Engine.
<ide>
<ide> ## Configure networks
<ide>
<del>- [Understand Docker container networks](networking/dockernetworks.md)
<add>- [Understand Docker container networks](networking/index.md)
<ide> - [Embedded DNS server in user-defined networks](networking/configure-dns.md)
<ide> - [Get started with multi-host networking](networking/get-started-overlay.md)
<ide> - [Work with network commands](networking/work-with-networks.md)
<ide> This guide helps users learn how to use Docker Engine.
<ide> - [Binding container ports to the host](networking/default_network/binding.md)
<ide> - [Build your own bridge](networking/default_network/build-bridges.md)
<ide> - [Configure container DNS](networking/default_network/configure-dns.md)
<del>- [Customize the docker0 bridge](networking/default_network/custom-docker0.md)
<del>- [IPv6 with Docker](networking/default_network/ipv6.md)
<add>- [Customize the docker0 bridge](networking/default_network/custom-docker0.md)
<add>- [IPv6 with Docker](networking/default_network/ipv6.md)
<ide>
<ide> ## Misc
<ide>
<ide><path>docs/userguide/networking/default_network/binding.md
<ide> parent = "smn_networking_def"
<ide>
<ide> The information in this section explains binding container ports within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker.
<ide>
<del>> **Note**: The [Docker networks feature](../dockernetworks.md) allows you to
<add>> **Note**: The [Docker networks feature](../index.md) allows you to
<ide> create user-defined networks in addition to the default bridge network.
<ide>
<ide> By default Docker containers can make connections to the outside world, but the
<ide> address: this alternative is preferred for performance reasons.
<ide>
<ide> ## Related information
<ide>
<del>- [Understand Docker container networks](../dockernetworks.md)
<add>- [Understand Docker container networks](../index.md)
<ide> - [Work with network commands](../work-with-networks.md)
<ide> - [Legacy container links](dockerlinks.md)
<ide><path>docs/userguide/networking/default_network/build-bridges.md
<ide> This section explains how to build your own bridge to replace the Docker default
<ide> bridge. This is a `bridge` network named `bridge` created automatically when you
<ide> install Docker.
<ide>
<del>> **Note**: The [Docker networks feature](../dockernetworks.md) allows you to
<add>> **Note**: The [Docker networks feature](../index.md) allows you to
<ide> create user-defined networks in addition to the default bridge network.
<ide>
<ide> You can set up your own bridge before starting Docker and use `-b BRIDGE` or
<ide><path>docs/userguide/networking/default_network/configure-dns.md
<ide> The information in this section explains configuring container DNS within
<ide> the Docker default bridge. This is a `bridge` network named `bridge` created
<ide> automatically when you install Docker.
<ide>
<del>> **Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network. Please refer to the [Docker Embedded DNS](../configure-dns.md) section for more information on DNS configurations in user-defined networks.
<add>> **Note**: The [Docker networks feature](../index.md) allows you to create user-defined networks in addition to the default bridge network. Please refer to the [Docker Embedded DNS](../configure-dns.md) section for more information on DNS configurations in user-defined networks.
<ide>
<ide> How can Docker supply each container with a hostname and DNS configuration, without having to build a custom image with the hostname written inside? Its trick is to overlay three crucial `/etc` files inside the container with virtual files where it can write fresh information. You can see this by running `mount` inside a container:
<ide>
<ide><path>docs/userguide/networking/default_network/container-communication.md
<ide> The information in this section explains container communication within the
<ide> Docker default bridge. This is a `bridge` network named `bridge` created
<ide> automatically when you install Docker.
<ide>
<del>**Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network.
<add>**Note**: The [Docker networks feature](../index.md) allows you to create user-defined networks in addition to the default bridge network.
<ide>
<ide> ## Communicating to the outside world
<ide>
<ide><path>docs/userguide/networking/default_network/custom-docker0.md
<ide> parent = "smn_networking_def"
<ide>
<ide> The information in this section explains how to customize the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker.
<ide>
<del>**Note**: The [Docker networks feature](../dockernetworks.md) allows you to create user-defined networks in addition to the default bridge network.
<add>**Note**: The [Docker networks feature](../index.md) allows you to create user-defined networks in addition to the default bridge network.
<ide>
<ide> By default, the Docker server creates and configures the host system's `docker0` interface as an _Ethernet bridge_ inside the Linux kernel that can pass packets back and forth between other physical or virtual network interfaces so that they behave as a single Ethernet network.
<ide>
<ide><path>docs/userguide/networking/default_network/dockerlinks.md
<ide> weight=-2
<ide>
<ide> The information in this section explains legacy container links within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker.
<ide>
<del>Before the [Docker networks feature](../dockernetworks.md), you could use the
<add>Before the [Docker networks feature](../index.md), you could use the
<ide> Docker link feature to allow containers to discover each other and securely
<ide> transfer information about one container to another container. With the
<ide> introduction of the Docker networks feature, you can still create links but they
<ide><path>docs/userguide/networking/get-started-overlay.md
<ide> weight=-3
<ide> This article uses an example to explain the basics of creating a multi-host
<ide> network. Docker Engine supports multi-host networking out-of-the-box through the
<ide> `overlay` network driver. Unlike `bridge` networks, overlay networks require
<del>some pre-existing conditions before you can create one. These conditions are:
<add>some pre-existing conditions before you can create one:
<ide>
<del>* Access to a key-value store. Docker supports Consul, Etcd, and ZooKeeper (Distributed store) key-value stores.
<add>* [Docker Engine running in swarm mode](#overlay-networking-and-swarm-mode)
<add>
<add>OR
<add>
<add>* [A cluster of hosts using a key value store](#overlay-networking-with-an-external-key-value-store)
<add>
<add>## Overlay networking and swarm mode
<add>
<add>Using docker engine running in [swarm mode](../../swarm/swarm-mode.md), you can create an overlay network on a manager node.
<add>
<add>The swarm makes the overlay network available only to nodes in the swarm that
<add>require it for a service. When you create a service that uses an overlay
<add>network, the manager node automatically extends the overlay network to nodes
<add>that run service tasks.
<add>
<add>To learn more about running Docker Engine in swarm mode, refer to the
<add>[Swarm mode overview](../../swarm/index.md).
<add>
<add>The example below shows how to create a network and use it for a service from a manager node in the swarm:
<add>
<add>```bash
<add># Create an overlay network `my-multi-host-network`.
<add>$ docker network create \
<add> --driver overlay \
<add> --subnet 10.0.9.0/24 \
<add> my-multi-host-network
<add>
<add>400g6bwzd68jizzdx5pgyoe95
<add>
<add># Create an nginx service and extend the my-multi-host-network to nodes where
<add># the service's tasks run.
<add>$ docker service create --replicas 2 --network my-multi-host-network --name my-web nginx
<add>
<add>716thylsndqma81j6kkkb5aus
<add>```
<add>
<add>Overlay networks for a swarm are not available to unmanaged containers. For more information refer to [Docker swarm mode overlay network security model](overlay-security-model.md).
<add>
<add>
<add>## Overlay networking with an external key-value store
<add>
<add>To use a Docker Engine with an external key-value store, you need the
<add>following:
<add>
<add>* Access to the key-value store. Docker supports Consul, Etcd, and ZooKeeper
<add>(Distributed store) key-value stores.
<ide> * A cluster of hosts with connectivity to the key-value store.
<ide> * A properly configured Engine `daemon` on each host in the cluster.
<del>* Hosts within the cluster must have unique hostnames because the key-value store uses the hostnames to identify cluster members.
<add>* Hosts within the cluster must have unique hostnames because the key-value
<add>store uses the hostnames to identify cluster members.
<ide>
<ide> Though Docker Machine and Docker Swarm are not mandatory to experience Docker
<del>multi-host networking, this example uses them to illustrate how they are
<del>integrated. You'll use Machine to create both the key-value store
<del>server and the host cluster. This example creates a Swarm cluster.
<add>multi-host networking with a key-value store, this example uses them to
<add>illustrate how they are integrated. You'll use Machine to create both the
<add>key-value store server and the host cluster. This example creates a Swarm
<add>cluster.
<add>
<add>>**Note:** Docker Engine running in swarm mode is not compatible with networking
<add>with an external key-value store.
<ide>
<del>## Prerequisites
<add>### Prerequisites
<ide>
<ide> Before you begin, make sure you have a system on your network with the latest
<ide> version of Docker Engine and Docker Machine installed. The example also relies
<ide> If you have not already done so, make sure you upgrade Docker Engine and Docker
<ide> Machine to the latest versions.
<ide>
<ide>
<del>## Step 1: Set up a key-value store
<add>### Set up a key-value store
<ide>
<ide> An overlay network requires a key-value store. The key-value store holds
<ide> information about the network state which includes discovery, networks,
<ide> key-value stores. This example uses Consul.
<ide> Keep your terminal open and move onto the next step.
<ide>
<ide>
<del>## Step 2: Create a Swarm cluster
<add>### Create a Swarm cluster
<ide>
<ide> In this step, you use `docker-machine` to provision the hosts for your network.
<ide> At this point, you won't actually create the network. You'll create several
<ide> At this point you have a set of hosts running on your network. You are ready to
<ide>
<ide> Leave your terminal open and go onto the next step.
<ide>
<del>## Step 3: Create the overlay Network
<add>### Create the overlay Network
<ide>
<ide> To create an overlay network
<ide>
<ide> To create an overlay network
<ide> Both agents report they have the `my-net` network with the `6b07d0be843f` ID.
<ide> You now have a multi-host container network running!
<ide>
<del>## Step 4: Run an application on your Network
<add>### Run an application on your Network
<ide>
<ide> Once your network is created, you can start a container on any of the hosts and it automatically is part of the network.
<ide>
<ide> Once your network is created, you can start a container on any of the hosts and
<ide> </html>
<ide> - 100% |*******************************| 612 0:00:00 ETA
<ide>
<del>## Step 5: Check external connectivity
<add>### Check external connectivity
<ide>
<ide> As you've seen, Docker's built-in overlay network driver provides out-of-the-box
<ide> connectivity between the containers on multiple hosts within the same network.
<ide> to have external connectivity outside of their cluster.
<ide> the `my-net` overlay network. While the `eth1` interface represents the
<ide> container interface that is connected to the `docker_gwbridge` network.
<ide>
<del>## Step 6: Extra Credit with Docker Compose
<add>### Extra Credit with Docker Compose
<ide>
<ide> Please refer to the Networking feature introduced in [Compose V2 format]
<ide> (https://docs.docker.com/compose/networking/) and execute the
<ide> multi-host networking scenario in the Swarm cluster used above.
<ide>
<ide> ## Related information
<ide>
<del>* [Understand Docker container networks](dockernetworks.md)
<add>* [Understand Docker container networks](index.md)
<ide> * [Work with network commands](work-with-networks.md)
<ide> * [Docker Swarm overview](https://docs.docker.com/swarm)
<ide> * [Docker Machine overview](https://docs.docker.com/machine)
<ide><path>docs/userguide/networking/index.md
<ide> <!--[metadata]>
<ide> +++
<del>title = "Network configuration"
<del>description = "Docker networking feature is introduced"
<del>keywords = ["network, networking, bridge, docker, documentation"]
<add>aliases=[
<add>"/engine/userguide/networking/dockernetworks/"
<add>]
<add>title = "Docker container networking"
<add>description = "How do we connect docker containers within and across hosts ?"
<add>keywords = ["Examples, Usage, network, docker, documentation, user guide, multihost, cluster"]
<ide> [menu.main]
<del>identifier="smn_networking"
<del>parent= "engine_guide"
<del>weight=7
<add>identifier="networking_index"
<add>parent = "smn_networking"
<add>weight = -5
<ide> +++
<ide> <![end-metadata]-->
<ide>
<del># Docker networks feature overview
<add># Understand Docker container networks
<ide>
<del>This sections explains how to use the Docker networks feature. This feature allows users to define their own networks and connect containers to them. Using this feature you can create a network on a single host or a network that spans across multiple hosts.
<add>This section provides an overview of the default networking behavior that Docker
<add>Engine delivers natively. It describes the type of networks created by default
<add>and how to create your own, user-defined networks. It also describes the
<add>resources required to create networks on a single host or across a cluster of
<add>hosts.
<add>
<add>## Default Networks
<add>
<add>When you install Docker, it creates three networks automatically. You can list
<add>these networks using the `docker network ls` command:
<add>
<add>```
<add>$ docker network ls
<add>
<add>NETWORK ID NAME DRIVER
<add>7fca4eb8c647 bridge bridge
<add>9f904ee27bf5 none null
<add>cf03ee007fb4 host host
<add>```
<add>
<add>Historically, these three networks are part of Docker's implementation. When
<add>you run a container you can use the `--network` flag to specify which network you
<add>want to run a container on. These three networks are still available to you.
<add>
<add>The `bridge` network represents the `docker0` network present in all Docker
<add>installations. Unless you specify otherwise with the `docker run
<add>--network=<NETWORK>` option, the Docker daemon connects containers to this network
<add>by default. You can see this bridge as part of a host's network stack by using
<add>the `ifconfig` command on the host.
<add>
<add>```
<add>$ ifconfig
<add>
<add>docker0 Link encap:Ethernet HWaddr 02:42:47:bc:3a:eb
<add> inet addr:172.17.0.1 Bcast:0.0.0.0 Mask:255.255.0.0
<add> inet6 addr: fe80::42:47ff:febc:3aeb/64 Scope:Link
<add> UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1
<add> RX packets:17 errors:0 dropped:0 overruns:0 frame:0
<add> TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
<add> collisions:0 txqueuelen:0
<add> RX bytes:1100 (1.1 KB) TX bytes:648 (648.0 B)
<add>```
<add>
<add>The `none` network adds a container to a container-specific network stack. That container lacks a network interface. Attaching to such a container and looking at its stack you see this:
<add>
<add>```
<add>$ docker attach nonenetcontainer
<add>
<add>root@0cb243cd1293:/# cat /etc/hosts
<add>127.0.0.1 localhost
<add>::1 localhost ip6-localhost ip6-loopback
<add>fe00::0 ip6-localnet
<add>ff00::0 ip6-mcastprefix
<add>ff02::1 ip6-allnodes
<add>ff02::2 ip6-allrouters
<add>root@0cb243cd1293:/# ifconfig
<add>lo Link encap:Local Loopback
<add> inet addr:127.0.0.1 Mask:255.0.0.0
<add> inet6 addr: ::1/128 Scope:Host
<add> UP LOOPBACK RUNNING MTU:65536 Metric:1
<add> RX packets:0 errors:0 dropped:0 overruns:0 frame:0
<add> TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
<add> collisions:0 txqueuelen:0
<add> RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
<add>
<add>root@0cb243cd1293:/#
<add>```
<add>>**Note**: You can detach from the container and leave it running with `CTRL-p CTRL-q`.
<add>
<add>The `host` network adds a container on the hosts network stack. You'll find the
<add>network configuration inside the container is identical to the host.
<add>
<add>With the exception of the `bridge` network, you really don't need to
<add>interact with these default networks. While you can list and inspect them, you
<add>cannot remove them. They are required by your Docker installation. However, you
<add>can add your own user-defined networks and these you can remove when you no
<add>longer need them. Before you learn more about creating your own networks, it is
<add>worth looking at the default `bridge` network a bit.
<add>
<add>
<add>### The default bridge network in detail
<add>The default `bridge` network is present on all Docker hosts. The `docker network inspect`
<add>command returns information about a network:
<add>
<add>```
<add>$ docker network inspect bridge
<add>
<add>[
<add> {
<add> "Name": "bridge",
<add> "Id": "f7ab26d71dbd6f557852c7156ae0574bbf62c42f539b50c8ebde0f728a253b6f",
<add> "Scope": "local",
<add> "Driver": "bridge",
<add> "IPAM": {
<add> "Driver": "default",
<add> "Config": [
<add> {
<add> "Subnet": "172.17.0.1/16",
<add> "Gateway": "172.17.0.1"
<add> }
<add> ]
<add> },
<add> "Containers": {},
<add> "Options": {
<add> "com.docker.network.bridge.default_bridge": "true",
<add> "com.docker.network.bridge.enable_icc": "true",
<add> "com.docker.network.bridge.enable_ip_masquerade": "true",
<add> "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
<add> "com.docker.network.bridge.name": "docker0",
<add> "com.docker.network.driver.mtu": "9001"
<add> }
<add> }
<add>]
<add>```
<add>The Engine automatically creates a `Subnet` and `Gateway` for the network.
<add>The `docker run` command automatically adds new containers to this network.
<add>
<add>```
<add>$ docker run -itd --name=container1 busybox
<add>
<add>3386a527aa08b37ea9232cbcace2d2458d49f44bb05a6b775fba7ddd40d8f92c
<add>
<add>$ docker run -itd --name=container2 busybox
<add>
<add>94447ca479852d29aeddca75c28f7104df3c3196d7b6d83061879e339946805c
<add>```
<add>
<add>Inspecting the `bridge` network again after starting two containers shows both newly launched containers in the network. Their ids show up in the "Containers" section of `docker network inspect`:
<add>
<add>```
<add>$ docker network inspect bridge
<add>
<add>{[
<add> {
<add> "Name": "bridge",
<add> "Id": "f7ab26d71dbd6f557852c7156ae0574bbf62c42f539b50c8ebde0f728a253b6f",
<add> "Scope": "local",
<add> "Driver": "bridge",
<add> "IPAM": {
<add> "Driver": "default",
<add> "Config": [
<add> {
<add> "Subnet": "172.17.0.1/16",
<add> "Gateway": "172.17.0.1"
<add> }
<add> ]
<add> },
<add> "Containers": {
<add> "3386a527aa08b37ea9232cbcace2d2458d49f44bb05a6b775fba7ddd40d8f92c": {
<add> "EndpointID": "647c12443e91faf0fd508b6edfe59c30b642abb60dfab890b4bdccee38750bc1",
<add> "MacAddress": "02:42:ac:11:00:02",
<add> "IPv4Address": "172.17.0.2/16",
<add> "IPv6Address": ""
<add> },
<add> "94447ca479852d29aeddca75c28f7104df3c3196d7b6d83061879e339946805c": {
<add> "EndpointID": "b047d090f446ac49747d3c37d63e4307be745876db7f0ceef7b311cbba615f48",
<add> "MacAddress": "02:42:ac:11:00:03",
<add> "IPv4Address": "172.17.0.3/16",
<add> "IPv6Address": ""
<add> }
<add> },
<add> "Options": {
<add> "com.docker.network.bridge.default_bridge": "true",
<add> "com.docker.network.bridge.enable_icc": "true",
<add> "com.docker.network.bridge.enable_ip_masquerade": "true",
<add> "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
<add> "com.docker.network.bridge.name": "docker0",
<add> "com.docker.network.driver.mtu": "9001"
<add> }
<add> }
<add>]
<add>```
<add>
<add>The `docker network inspect` command above shows all the connected containers and their network resources on a given network. Containers in this default network are able to communicate with each other using IP addresses. Docker does not support automatic service discovery on the default bridge network. If you want to communicate with container names in this default bridge network, you must connect the containers via the legacy `docker run --link` option.
<add>
<add>You can `attach` to a running `container` and investigate its configuration:
<add>
<add>```
<add>$ docker attach container1
<add>
<add>root@0cb243cd1293:/# ifconfig
<add>ifconfig
<add>eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:02
<add> inet addr:172.17.0.2 Bcast:0.0.0.0 Mask:255.255.0.0
<add> inet6 addr: fe80::42:acff:fe11:2/64 Scope:Link
<add> UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1
<add> RX packets:16 errors:0 dropped:0 overruns:0 frame:0
<add> TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
<add> collisions:0 txqueuelen:0
<add> RX bytes:1296 (1.2 KiB) TX bytes:648 (648.0 B)
<add>
<add>lo Link encap:Local Loopback
<add> inet addr:127.0.0.1 Mask:255.0.0.0
<add> inet6 addr: ::1/128 Scope:Host
<add> UP LOOPBACK RUNNING MTU:65536 Metric:1
<add> RX packets:0 errors:0 dropped:0 overruns:0 frame:0
<add> TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
<add> collisions:0 txqueuelen:0
<add> RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
<add>```
<add>
<add>Then use `ping` to send three ICMP requests and test the connectivity of the
<add>containers on this `bridge` network.
<add>
<add>```
<add>root@0cb243cd1293:/# ping -w3 172.17.0.3
<add>
<add>PING 172.17.0.3 (172.17.0.3): 56 data bytes
<add>64 bytes from 172.17.0.3: seq=0 ttl=64 time=0.096 ms
<add>64 bytes from 172.17.0.3: seq=1 ttl=64 time=0.080 ms
<add>64 bytes from 172.17.0.3: seq=2 ttl=64 time=0.074 ms
<add>
<add>--- 172.17.0.3 ping statistics ---
<add>3 packets transmitted, 3 packets received, 0% packet loss
<add>round-trip min/avg/max = 0.074/0.083/0.096 ms
<add>```
<add>
<add>Finally, use the `cat` command to check the `container1` network configuration:
<add>
<add>```
<add>root@0cb243cd1293:/# cat /etc/hosts
<add>
<add>172.17.0.2 3386a527aa08
<add>127.0.0.1 localhost
<add>::1 localhost ip6-localhost ip6-loopback
<add>fe00::0 ip6-localnet
<add>ff00::0 ip6-mcastprefix
<add>ff02::1 ip6-allnodes
<add>ff02::2 ip6-allrouters
<add>```
<add>To detach from `container1` and leave it running, use `CTRL-p CTRL-q`. Then, attach to `container2` and repeat these three commands.
<add>
<add>```
<add>$ docker attach container2
<add>
<add>root@0cb243cd1293:/# ifconfig
<add>
<add>eth0 Link encap:Ethernet HWaddr 02:42:AC:11:00:03
<add> inet addr:172.17.0.3 Bcast:0.0.0.0 Mask:255.255.0.0
<add> inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link
<add> UP BROADCAST RUNNING MULTICAST MTU:9001 Metric:1
<add> RX packets:15 errors:0 dropped:0 overruns:0 frame:0
<add> TX packets:13 errors:0 dropped:0 overruns:0 carrier:0
<add> collisions:0 txqueuelen:0
<add> RX bytes:1166 (1.1 KiB) TX bytes:1026 (1.0 KiB)
<add>
<add>lo Link encap:Local Loopback
<add> inet addr:127.0.0.1 Mask:255.0.0.0
<add> inet6 addr: ::1/128 Scope:Host
<add> UP LOOPBACK RUNNING MTU:65536 Metric:1
<add> RX packets:0 errors:0 dropped:0 overruns:0 frame:0
<add> TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
<add> collisions:0 txqueuelen:0
<add> RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)
<add>
<add>root@0cb243cd1293:/# ping -w3 172.17.0.2
<add>
<add>PING 172.17.0.2 (172.17.0.2): 56 data bytes
<add>64 bytes from 172.17.0.2: seq=0 ttl=64 time=0.067 ms
<add>64 bytes from 172.17.0.2: seq=1 ttl=64 time=0.075 ms
<add>64 bytes from 172.17.0.2: seq=2 ttl=64 time=0.072 ms
<add>
<add>--- 172.17.0.2 ping statistics ---
<add>3 packets transmitted, 3 packets received, 0% packet loss
<add>round-trip min/avg/max = 0.067/0.071/0.075 ms
<add>/ # cat /etc/hosts
<add>172.17.0.3 94447ca47985
<add>127.0.0.1 localhost
<add>::1 localhost ip6-localhost ip6-loopback
<add>fe00::0 ip6-localnet
<add>ff00::0 ip6-mcastprefix
<add>ff02::1 ip6-allnodes
<add>ff02::2 ip6-allrouters
<add>```
<add>
<add>The default `docker0` bridge network supports the use of port mapping and `docker run --link` to allow communications between containers in the `docker0` network. These techniques are cumbersome to set up and prone to error. While they are still available to you as techniques, it is better to avoid them and define your own bridge networks instead.
<add>
<add>## User-defined networks
<add>
<add>You can create your own user-defined networks that better isolate containers.
<add>Docker provides some default **network drivers** for creating these networks.
<add>You can create a new **bridge network**, **overlay network** or **MACVLAN
<add>network**. You can also create a **network plugin** or **remote network**
<add>written to your own specifications.
<add>
<add>You can create multiple networks. You can add containers to more than one
<add>network. Containers can communicate within networks but not across
<add>networks. A container attached to two networks can communicate with member
<add>containers in either network. When a container is connected to multiple
<add>networks, its external connectivity is provided via the first non-internal
<add>network, in lexical order.
<add>
<add>The next few sections describe each of Docker's built-in network drivers in
<add>greater detail.
<add>
<add>### A bridge network
<add>
<add>The easiest user-defined network to create is a `bridge` network. This network
<add>is similar to the historical, default `docker0` network. There are some added
<add>features and some old features that aren't available.
<add>
<add>```
<add>$ docker network create --driver bridge isolated_nw
<add>1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b
<add>
<add>$ docker network inspect isolated_nw
<add>
<add>[
<add> {
<add> "Name": "isolated_nw",
<add> "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
<add> "Scope": "local",
<add> "Driver": "bridge",
<add> "IPAM": {
<add> "Driver": "default",
<add> "Config": [
<add> {
<add> "Subnet": "172.21.0.0/16",
<add> "Gateway": "172.21.0.1/16"
<add> }
<add> ]
<add> },
<add> "Containers": {},
<add> "Options": {}
<add> }
<add>]
<add>
<add>$ docker network ls
<add>
<add>NETWORK ID NAME DRIVER
<add>9f904ee27bf5 none null
<add>cf03ee007fb4 host host
<add>7fca4eb8c647 bridge bridge
<add>c5ee82f76de3 isolated_nw bridge
<add>
<add>```
<add>
<add>After you create the network, you can launch containers on it using the `docker run --network=<NETWORK>` option.
<add>
<add>```
<add>$ docker run --network=isolated_nw -itd --name=container3 busybox
<add>
<add>8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c
<add>
<add>$ docker network inspect isolated_nw
<add>[
<add> {
<add> "Name": "isolated_nw",
<add> "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
<add> "Scope": "local",
<add> "Driver": "bridge",
<add> "IPAM": {
<add> "Driver": "default",
<add> "Config": [
<add> {}
<add> ]
<add> },
<add> "Containers": {
<add> "8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c": {
<add> "EndpointID": "93b2db4a9b9a997beb912d28bcfc117f7b0eb924ff91d48cfa251d473e6a9b08",
<add> "MacAddress": "02:42:ac:15:00:02",
<add> "IPv4Address": "172.21.0.2/16",
<add> "IPv6Address": ""
<add> }
<add> },
<add> "Options": {}
<add> }
<add>]
<add>```
<add>
<add>The containers you launch into this network must reside on the same Docker host.
<add>Each container in the network can immediately communicate with other containers
<add>in the network. Though, the network itself isolates the containers from external
<add>networks.
<add>
<add>
<add>
<add>Within a user-defined bridge network, linking is not supported. You can
<add>expose and publish container ports on containers in this network. This is useful
<add>if you want to make a portion of the `bridge` network available to an outside
<add>network.
<add>
<add>
<add>
<add>A bridge network is useful in cases where you want to run a relatively small
<add>network on a single host. You can, however, create significantly larger networks
<add>by creating an `overlay` network.
<add>
<add>
<add>### An overlay network with Docker Engine swarm mode
<add>
<add>You can create an overlay network on a manager node running in swarm mode
<add>without an external key-value store. The swarm makes the overlay network
<add>available only to nodes in the swarm that require it for a service. When you
<add>create a service that uses the overlay network, the manager node automatically
<add>extends the overlay network to nodes that run service tasks.
<add>
<add>To learn more about running Docker Engine in swarm mode, refer to the
<add>[Swarm mode overview](../../swarm/index.md).
<add>
<add>The example below shows how to create a network and use it for a service from a manager node in the swarm:
<add>
<add>```bash
<add># Create an overlay network `my-multi-host-network`.
<add>$ docker network create \
<add> --driver overlay \
<add> --subnet 10.0.9.0/24 \
<add> my-multi-host-network
<add>
<add>400g6bwzd68jizzdx5pgyoe95
<add>
<add># Create an nginx service and extend the my-multi-host-network to nodes where
<add># the service's tasks run.
<add>$ docker service create --replicas 2 --network my-multi-host-network --name my-web nginx
<add>
<add>716thylsndqma81j6kkkb5aus
<add>```
<add>
<add>Overlay networks for a swarm are not available to containers started with
<add>`docker run` that don't run as part of a swarm mode service. For more
<add>information refer to [Docker swarm mode overlay network security model](overlay-security-model.md).
<add>
<add>### An overlay network with an external key-value store
<add>
<add>If you are not using Docker Engine in swarm mode, the `overlay` network requires
<add>a valid key-value store service. Supported key-value stores include Consul,
<add>Etcd, and ZooKeeper (Distributed store). Before creating a network on this
<add>version of the Engine, you must install and configure your chosen key-value
<add>store service. The Docker hosts that you intend to network and the service must
<add>be able to communicate.
<add>
<add>>**Note:** Docker Engine running in swarm mode is not compatible with networking
<add>with an external key-value store.
<add>
<add>
<add>
<add>Each host in the network must run a Docker Engine instance. The easiest way to
<add>provision the hosts is with Docker Machine.
<add>
<add>
<add>
<add>You should open the following ports between each of your hosts.
<add>
<add>| Protocol | Port | Description |
<add>|----------|------|-----------------------|
<add>| udp | 4789 | Data plane (VXLAN) |
<add>| tcp/udp | 7946 | Control plane |
<add>
<add>Your key-value store service may require additional ports.
<add>Check your vendor's documentation and open any required ports.
<add>
<add>Once you have several machines provisioned, you can use Docker Swarm to quickly
<add>form them into a swarm which includes a discovery service as well.
<add>
<add>To create an overlay network, you configure options on the `daemon` on each
<add>Docker Engine for use with `overlay` network. There are three options to set:
<add>
<add><table>
<add> <thead>
<add> <tr>
<add> <th>Option</th>
<add> <th>Description</th>
<add> </tr>
<add> </thead>
<add> <tbody>
<add> <tr>
<add> <td><pre>--cluster-store=PROVIDER://URL</pre></td>
<add> <td>Describes the location of the KV service.</td>
<add> </tr>
<add> <tr>
<add> <td><pre>--cluster-advertise=HOST_IP|HOST_IFACE:PORT</pre></td>
<add> <td>The IP address or interface of the HOST used for clustering.</td>
<add> </tr>
<add> <tr>
<add> <td><pre>--cluster-store-opt=KEY-VALUE OPTIONS</pre></td>
<add> <td>Options such as TLS certificate or tuning discovery Timers</td>
<add> </tr>
<add> </tbody>
<add></table>
<add>
<add>Create an `overlay` network on one of the machines in the Swarm.
<add>
<add> $ docker network create --driver overlay my-multi-host-network
<add>
<add>This results in a single network spanning multiple hosts. An `overlay` network
<add>provides complete isolation for the containers.
<add>
<add>
<add>
<add>Then, on each host, launch containers making sure to specify the network name.
<add>
<add> $ docker run -itd --network=my-multi-host-network busybox
<add>
<add>Once connected, each container has access to all the containers in the network
<add>regardless of which Docker host the container was launched on.
<add>
<add>
<add>
<add>If you would like to try this for yourself, see the [Getting started for
<add>overlay](get-started-overlay.md).
<add>
<add>### Custom network plugin
<add>
<add>If you like, you can write your own network driver plugin. A network
<add>driver plugin makes use of Docker's plugin infrastructure. In this
<add>infrastructure, a plugin is a process running on the same Docker host as the
<add>Docker `daemon`.
<add>
<add>Network plugins follow the same restrictions and installation rules as other
<add>plugins. All plugins make use of the plugin API. They have a lifecycle that
<add>encompasses installation, starting, stopping and activation.
<add>
<add>Once you have created and installed a custom network driver, you use it like the
<add>built-in network drivers. For example:
<add>
<add> $ docker network create --driver weave mynet
<add>
<add>You can inspect it, add containers to and from it, and so forth. Of course,
<add>different plugins may make use of different technologies or frameworks. Custom
<add>networks can include features not present in Docker's default networks. For more
<add>information on writing plugins, see [Extending Docker](../../extend/index.md) and
<add>[Writing a network driver plugin](../../extend/plugins_network.md).
<add>
<add>### Docker embedded DNS server
<add>
<add>Docker daemon runs an embedded DNS server to provide automatic service discovery
<add>for containers connected to user defined networks. Name resolution requests from
<add>the containers are handled first by the embedded DNS server. If the embedded DNS
<add>server is unable to resolve the request it will be forwarded to any external DNS
<add>servers configured for the container. To facilitate this when the container is
<add>created, only the embedded DNS server reachable at `127.0.0.11` will be listed
<add>in the container's `resolv.conf` file. More information on the embedded DNS server in
<add>user-defined networks can be found in
<add>[embedded DNS server in user-defined networks](configure-dns.md).
<add>
<add>## Links
<add>
<add>Before the Docker network feature, you could use the Docker link feature to
<add>allow containers to discover each other. With the introduction of Docker networks,
<add>containers can be discovered by name automatically. You can still create
<add>links, but they behave differently when used in the default `docker0` bridge network
<add>compared to user-defined networks. For more information, please refer to
<add>[Legacy Links](default_network/dockerlinks.md) for link feature in default `bridge` network
<add>and the [linking containers in user-defined networks](work-with-networks.md#linking-containers-in-user-defined-networks) for links
<add>functionality in user-defined networks.
<add>
<add>## Related information
<ide>
<del>- [Understand Docker container networks](dockernetworks.md)
<ide> - [Work with network commands](work-with-networks.md)
<ide> - [Get started with multi-host networking](get-started-overlay.md)
<del>
<del>If you are already familiar with Docker's default bridge network, `docker0` that network continues to be supported. It is created automatically in every installation. The default bridge network is also named `bridge`. To see a list of topics related to that network, read the articles listed in the [Docker default bridge network](default_network/index.md).
<add>- [Managing Data in Containers](../../tutorials/dockervolumes.md)
<add>- [Docker Machine overview](https://docs.docker.com/machine)
<add>- [Docker Swarm overview](https://docs.docker.com/swarm)
<add>- [Investigate the LibNetwork project](https://github.com/docker/libnetwork)
<ide><path>docs/userguide/networking/menu.md
<add><!--[metadata]>
<add>+++
<add>title = "Network configuration"
<add>description = "Docker networking feature is introduced"
<add>keywords = ["network, networking, bridge, docker, documentation"]
<add>type="menu"
<add>[menu.main]
<add>identifier="smn_networking"
<add>parent= "engine_guide"
<add>weight=7
<add>+++
<add><![end-metadata]-->
<add>
<add># Docker networks feature overview
<add>
<add>This section explains how to use the Docker networks feature. This feature allows users to define their own networks and connect containers to them. Using this feature you can create a network on a single host or a network that spans across multiple hosts.
<add>
<add>- [Understand Docker container networks](index.md)
<add>- [Work with network commands](work-with-networks.md)
<add>- [Get started with multi-host networking](get-started-overlay.md)
<add>
<add>If you are already familiar with Docker's default bridge network, `docker0` that network continues to be supported. It is created automatically in every installation. The default bridge network is also named `bridge`. To see a list of topics related to that network, read the articles listed in the [Docker default bridge network](default_network/index.md).
<ide><path>docs/userguide/networking/overlay-security-model.md
<add><!--[metadata]>
<add>+++
<add>title = "Swarm mode overlay network security model"
<add>description = "Docker swarm mode overlay network security model"
<add>keywords = ["network, docker, documentation, user guide, multihost, swarm mode", "overlay"]
<add>[menu.main]
<add>parent = "smn_networking"
<add>weight=-2
<add>+++
<add><![end-metadata]-->
<add>
<add># Docker swarm mode overlay network security model
<add>
<add>Overlay networking for Docker Engine swarm mode comes secure out of the box. The
<add>swarm nodes exchange overlay network information using a gossip protocol. By
<add>default the nodes encrypt and authenticate information they exchange via gossip
<add>using the [AES algorithm](https://en.wikipedia.org/wiki/Galois/Counter_Mode) in
<add>GCM mode. Manager nodes in the swarm rotate the key used to encrypt gossip data
<add>every 12 hours.
<add>
<add>You can also encrypt data exchanged between containers on different nodes on the
<add>overlay network. To enable encryption, when you create an overlay network pass
<add>the `--opt encrypted` flag:
<add>
<add>```bash
<add>$ docker network create --opt encrypted --driver overlay my-multi-host-network
<add>
<add>dt0zvqn0saezzinc8a5g4worx
<add>```
<add>
<add>When you enable overlay encryption, Docker creates IPSEC tunnels between all the
<add>nodes where tasks are scheduled for services attached to the overlay network.
<add>These tunnels also use the AES algorithm in GCM mode and manager nodes
<add>automatically rotate the keys every 12 hours.
<add>
<add>## Swarm mode overlay networks and unmanaged containers
<add>
<add>Because the overlay networks for swarm mode use encryption keys from the manager
<add>nodes to encrypt the gossip communications, only containers running as tasks in
<add>the swarm have access to the keys. Consequently, containers started outside of
<add>swarm mode using `docker run` (unmanaged containers) cannot attach to the
<add>overlay network.
<add>
<add>For example:
<add>
<add>```bash
<add>$ docker run --network my-multi-host-network nginx
<add>
<add>docker: Error response from daemon: swarm-scoped network
<add>(my-multi-host-network) is not compatible with `docker create` or `docker
<add>run`. This network can only be used by a docker service.
<add>```
<add>
<add>To work around this situation, migrate the unmanaged containers to managed
<add>services. For instance:
<add>
<add>```bash
<add>$ docker service create --network my-multi-host-network my-image
<add>```
<add>
<add>Because [swarm mode](../../swarm/index.md) is an optional feature, the Docker
<add>Engine preserves backward compatibility. You can continue to rely on a
<add>third-party key-value store to support overlay networking if you wish.
<add>However, switching to swarm-mode is strongly encouraged. In addition to the
<add>security benefits described in this article, swarm mode enables you to leverage
<add>the substantially greater scalability provided by the new services API.
<ide><path>docs/userguide/networking/work-with-networks.md
<ide> available through the Docker Engine CLI. These commands are:
<ide> * `docker network inspect`
<ide>
<ide> While not required, it is a good idea to read [Understanding Docker
<del>network](dockernetworks.md) before trying the examples in this section. The
<add>network](index.md) before trying the examples in this section. The
<ide> examples for the rely on a `bridge` network so that you can try them
<ide> immediately. If you would prefer to experiment with an `overlay` network see
<ide> the [Getting started with multi-host networks](get-started-overlay.md) instead. | 19 |
PHP | PHP | use isset instead of double boolean cast | bc5dcbe9b9ef163d3610178d61a6a375d80b6c4f | <ide><path>src/Database/Statement/StatementDecorator.php
<ide> public function lastInsertId($table = null, $column = null) {
<ide> if ($column && $this->columnCount()) {
<ide> $row = $this->fetch('assoc');
<ide> }
<del> if ($column && $row) {
<add> if (isset($row[$column])) {
<ide> return $row[$column];
<ide> }
<ide> return $this->_driver->lastInsertId($table, $column); | 1 |
Text | Text | add naming convention for `-ui` themes | 739d1714625a82b55902f1b400a72dfa8b1cf44e | <ide><path>docs/creating-a-theme.md
<ide> Let's create your first theme.
<ide> To get started, hit `cmd-shift-P`, and start typing "Generate Syntax Theme" to
<ide> generate a new theme package. Select "Generate Syntax Theme," and you'll be
<ide> asked for the path where your theme will be created. Let's call ours
<del>_motif-syntax_. Please suffix syntax themes with _-syntax_ and ui themes with _-ui_.
<add>_motif-syntax_. __Tip:__ suffix syntax themes with _-syntax_!
<ide>
<ide> Atom will pop open a new window, showing the _motif-syntax_ theme, with a default set
<ide> of folders and files created for us. If you open the settings view (`cmd-,`)
<ide> To create an interface UI theme, do the following:
<ide> 2. Clone the forked repository to the local filesystem
<ide> 3. Open a terminal in the forked theme's directory
<ide> 4. Open your new theme in a Dev Mode Atom window run `atom --dev .` in the
<del> terminal or use the _View > Developer > Open in Dev Mode_ menu)
<add> terminal or use the _View > Developer > Open in Dev Mode_ menu
<ide> 5. Change the name of the theme in the theme's `package.json` file
<del>6. Run `apm link` to symlink your repository to `~/.atom/packages`
<del>7. Reload Atom using `cmd-alt-ctrl-L`
<del>8. Enable the theme via _UI Theme_ drop-down in the _Themes_ section of the
<add>6. Name your theme with a `-ui` suffix. i.e. `super-white-ui`
<add>7. Run `apm link` to symlink your repository to `~/.atom/packages`
<add>8. Reload Atom using `cmd-alt-ctrl-L`
<add>9. Enable the theme via _UI Theme_ drop-down in the _Themes_ section of the
<ide> settings view
<del>9. Make changes! Since you opened the theme in a Dev Mode window, changes will
<add>10. Make changes! Since you opened the theme in a Dev Mode window, changes will
<ide> be instantly reflected in the editor without having to reload.
<ide>
<ide> ## Development workflow | 1 |
Javascript | Javascript | emit error on stream object, not global | 90368770e6acc224460376f3086ad47f54d363d3 | <ide><path>lib/_stream_transform.js
<ide> function afterTransform(stream, er, data) {
<ide> var cb = ts.writecb;
<ide>
<ide> if (!cb)
<del> return this.emit('error', new Error('no writecb in Transform class'));
<add> return stream.emit('error', new Error('no writecb in Transform class'));
<ide>
<ide> ts.writechunk = null;
<ide> ts.writecb = null; | 1 |
Javascript | Javascript | check _events before _events.error | fbb963b5d520a70d9c3f2f9ec116d79a0c676f80 | <ide><path>lib/_stream_readable.js
<ide> Readable.prototype.pipe = function(dest, pipeOpts) {
<ide> }
<ide> // This is a brutally ugly hack to make sure that our error handler
<ide> // is attached before any userland ones. NEVER DO THIS.
<del> if (!dest._events.error)
<add> if (!dest._events || !dest._events.error)
<ide> dest.on('error', onerror);
<ide> else if (Array.isArray(dest._events.error))
<ide> dest._events.error.unshift(onerror); | 1 |
Text | Text | fix typos in readme | edc892680162ff6d41ba80c553c8f25539722912 | <ide><path>README.md
<ide> you then have to manually deserialize. Before, jobs would look like this:
<ide>
<ide> ```ruby
<ide> class TrashableCleanupJob
<del> def perfom(trashable_class, trashable_id, depth)
<add> def perform(trashable_class, trashable_id, depth)
<ide> trashable = trashable_class.constantize.find(trashable_id)
<ide> trashable.cleanup(depth)
<ide> end
<ide> Now you can simply do:
<ide>
<ide> ```ruby
<ide> class TrashableCleanupJob
<del> def perfom(trashable, depth)
<add> def perform(trashable, depth)
<ide> trashable.cleanup(depth)
<ide> end
<ide> end | 1 |
Javascript | Javascript | fix its vs it's typos | def5b57de872058874c05e214cc4007ae48aca0f | <ide><path>src/ngAnimate/animate.js
<ide> angular.module('ngAnimate', ['ng'])
<ide> * | 8. The animation ends and all generated CSS classes are removed from the element | class="my-animation" |
<ide> * | 9. The doneCallback() callback is fired (if provided) | class="my-animation" |
<ide> *
<del> * @param {DOMElement} element the element which will it's CSS classes changed
<add> * @param {DOMElement} element the element which will its CSS classes changed
<ide> * removed from it
<ide> * @param {string} add the CSS classes which will be added to the element
<ide> * @param {string} remove the CSS class which will be removed from the element
<ide><path>src/ngScenario/Future.js
<ide> angular.scenario.Future.prototype.execute = function(doneFn) {
<ide> };
<ide>
<ide> /**
<del> * Configures the future to convert it's final with a function fn(value)
<add> * Configures the future to convert its final with a function fn(value)
<ide> *
<ide> * @param {function()} fn function(value) that returns the parsed value
<ide> */
<ide> angular.scenario.Future.prototype.parsedWith = function(fn) {
<ide> };
<ide>
<ide> /**
<del> * Configures the future to parse it's final value from JSON
<add> * Configures the future to parse its final value from JSON
<ide> * into objects.
<ide> */
<ide> angular.scenario.Future.prototype.fromJson = function() {
<ide> return this.parsedWith(angular.fromJson);
<ide> };
<ide>
<ide> /**
<del> * Configures the future to convert it's final value from objects
<add> * Configures the future to convert its final value from objects
<ide> * into JSON.
<ide> */
<ide> angular.scenario.Future.prototype.toJson = function() {
<ide><path>test/ngRoute/directive/ngViewSpec.js
<ide> describe('ngView and transcludes', function() {
<ide> });
<ide> });
<ide>
<del> it("should compile it's content correctly (although we remove it later)", function() {
<add> it("should compile its content correctly (although we remove it later)", function() {
<ide> var testElement;
<ide> module(function($compileProvider, $routeProvider) {
<ide> $routeProvider.when('/view', {template: ' '}); | 3 |
Python | Python | fix platform detection on freebsd, sunos | e5649d4b3d0271c6fc248ac12ff73b6b253a91b7 | <ide><path>tools/gyp/pylib/gyp/common.py
<ide> def GetFlavor(params):
<ide> 'cygwin': 'win',
<ide> 'win32': 'win',
<ide> 'darwin': 'mac',
<del> 'sunos5': 'solaris',
<del> 'freebsd7': 'freebsd',
<del> 'freebsd8': 'freebsd',
<del> 'freebsd9': 'freebsd',
<ide> }
<del> flavor = flavors.get(sys.platform, 'linux')
<del> return params.get('flavor', flavor)
<add>
<add> if 'flavor' in params:
<add> return params['flavor']
<add> if sys.platform in flavors:
<add> return flavors[sys.platform]
<add> if sys.platform.startswith('sunos'):
<add> return 'solaris'
<add> if sys.platform.startswith('freebsd'):
<add> return 'freebsd'
<add>
<add> return 'linux'
<ide>
<ide>
<ide> def CopyTool(flavor, out_path): | 1 |
PHP | PHP | fix issue with identifier quoting | c6a93d9db34c7712d5c7c3827a00fb6d049e2faf | <ide><path>src/Database/Expression/ValuesExpression.php
<ide> public function columns($cols = null)
<ide> return $this;
<ide> }
<ide>
<add> /**
<add> * Get the bare column names.
<add> *
<add> * Because column names could be identifier quoted, we
<add> * need to strip the identifiers off of the columns.
<add> *
<add> * @return array
<add> */
<add> protected function _columnNames()
<add> {
<add> $columns = [];
<add> foreach ($this->_columns as $col) {
<add> $columns[] = trim($col, '`[]"');
<add> }
<add> return $columns;
<add> }
<add>
<ide> /**
<ide> * Sets the values to be inserted. If no params are passed, then it returns
<ide> * the currently stored values
<ide> public function sql(ValueBinder $generator)
<ide> $i = 0;
<ide> $columns = [];
<ide>
<del> // Remove identifier quoting so column names match keys.
<del> foreach ($this->_columns as $col) {
<del> $columns[] = trim($col, '`[]"');
<del> }
<add> $columns = $this->_columnNames();
<ide> $defaults = array_fill_keys($columns, null);
<ide> $placeholders = [];
<ide>
<ide> protected function _processExpressions()
<ide> $types = [];
<ide> $typeMap = $this->typeMap();
<ide>
<del> foreach ($this->_columns as $c) {
<add> $columns = $this->_columnNames();
<add> foreach ($columns as $c) {
<ide> if (!is_scalar($c)) {
<ide> continue;
<ide> } | 1 |
Ruby | Ruby | remove workaround for old debugger | 1abcb0b348c0d0b07ee92ed274c5d97550b39ee4 | <ide><path>Library/Homebrew/formula.rb
<ide> def system cmd, *args
<ide> raise BuildError.new(self, cmd, args, ENV.to_hash)
<ide> end
<ide> ensure
<del> log.close unless log.closed?
<add> log.close
<ide> end
<ide> end
<ide> | 1 |
Python | Python | remove unused numpy/f2py/info.py file | 7960edee9ccf1a6e8b48d269a13e54f658e560b8 | <ide><path>numpy/f2py/info.py
<del>"""Fortran to Python Interface Generator.
<del>
<del>"""
<del>from __future__ import division, absolute_import, print_function
<del>
<del>postpone_import = True
<ide><path>numpy/tests/test_public_api.py
<ide> def test_NPY_NO_EXPORT():
<ide> "f2py.f2py_testing",
<ide> "f2py.f90mod_rules",
<ide> "f2py.func2subr",
<del> "f2py.info",
<ide> "f2py.rules",
<ide> "f2py.use_rules",
<ide> "fft.helper", | 2 |
PHP | PHP | fix failing test when directories contain spaces | ac33e82737c3db242262d53f679ba710478952a3 | <ide><path>lib/Cake/Test/TestCase/Utility/XmlTest.php
<ide> public function testAmpInText() {
<ide> * @return void
<ide> */
<ide> public function testNoEntityLoading() {
<del> $file = CAKE . 'VERSION.txt';
<add> $file = str_replace(' ' , '%20' , CAKE . 'VERSION.txt');
<ide> $xml = <<<XML
<ide> <!DOCTYPE cakephp [
<ide> <!ENTITY payload SYSTEM "file://$file" >]> | 1 |
Python | Python | fix function serialization | 45e781c305edc0ee23af8a60bfa73a6e2e19839a | <ide><path>keras/utils/generic_utils.py
<ide> import marshal
<ide> import types as python_types
<ide> import inspect
<add>import codecs
<ide>
<ide> _GLOBAL_CUSTOM_OBJECTS = {}
<ide>
<ide> def func_dump(func):
<ide> # Returns
<ide> A tuple `(code, defaults, closure)`.
<ide> """
<del> code = marshal.dumps(func.__code__).decode('raw_unicode_escape')
<add> raw_code = marshal.dumps(func.__code__)
<add> code = codecs.encode(raw_code, 'base64').decode('ascii')
<ide> defaults = func.__defaults__
<ide> if func.__closure__:
<ide> closure = tuple(c.cell_contents for c in func.__closure__)
<ide> def func_load(code, defaults=None, closure=None, globs=None):
<ide> code, defaults, closure = code
<ide> if isinstance(defaults, list):
<ide> defaults = tuple(defaults)
<del> code = marshal.loads(code.encode('raw_unicode_escape'))
<add> raw_code = codecs.decode(code.encode('ascii'), 'base64')
<add> code = marshal.loads(raw_code)
<ide> if globs is None:
<ide> globs = globals()
<ide> return python_types.FunctionType(code, globs,
<ide><path>tests/keras/utils/generic_utils_test.py
<ide> from keras.utils.generic_utils import custom_object_scope
<ide> from keras.utils.generic_utils import has_arg
<ide> from keras.utils.generic_utils import Progbar
<add>from keras.utils.generic_utils import func_dump
<add>from keras.utils.generic_utils import func_load
<ide> from keras.utils.test_utils import keras_test
<ide> from keras import activations
<ide> from keras import regularizers
<ide> def test_has_arg_positional_only():
<ide> assert has_arg(pow, 'x') is False
<ide>
<ide>
<add>def test_func_dump_and_load():
<add> def test_func():
<add> return r'\u'
<add> serialized = func_dump(test_func)
<add> deserialized = func_load(serialized)
<add> assert deserialized.__code__ == test_func.__code__
<add> assert deserialized.__defaults__ == test_func.__defaults__
<add> assert deserialized.__closure__ == test_func.__closure__
<add>
<add>
<ide> if __name__ == '__main__':
<ide> pytest.main([__file__]) | 2 |
Javascript | Javascript | add defaults for deterministic id plugins | ca46218787e56196bea3d06418c6d8ea383cc3e7 | <ide><path>lib/WebpackOptionsApply.js
<ide> class WebpackOptionsApply extends OptionsApply {
<ide> new HashedModuleIdsPlugin().apply(compiler);
<ide> break;
<ide> case "deterministic":
<del> new DeterministicModuleIdsPlugin({
<del> maxLength: 3
<del> }).apply(compiler);
<add> new DeterministicModuleIdsPlugin().apply(compiler);
<ide> break;
<ide> case "size":
<ide> new OccurrenceModuleIdsPlugin({
<ide> class WebpackOptionsApply extends OptionsApply {
<ide> new NamedChunkIdsPlugin().apply(compiler);
<ide> break;
<ide> case "deterministic":
<del> new DeterministicChunkIdsPlugin({
<del> maxLength: 3
<del> }).apply(compiler);
<add> new DeterministicChunkIdsPlugin().apply(compiler);
<ide> break;
<ide> case "size":
<ide> new OccurrenceChunkIdsPlugin({
<ide><path>lib/ids/DeterministicChunkIdsPlugin.js
<ide> class DeterministicChunkIdsPlugin {
<ide> }),
<ide> chunk => getFullChunkName(chunk, chunkGraph, requestShortener),
<ide> compareNatural,
<del> this.options.maxLength,
<add> this.options.maxLength || 3,
<ide> getUsedChunkIds(compilation),
<ide> (chunk, id) => {
<ide> chunk.id = id;
<ide><path>lib/ids/DeterministicModuleIdsPlugin.js
<ide> class DeterministicModuleIdsPlugin {
<ide> compareModulesByPreOrderIndexOrIdentifier(
<ide> compilation.moduleGraph
<ide> ),
<del> this.options.maxLength,
<add> this.options.maxLength || 3,
<ide> getUsedModuleIds(compilation),
<ide> (module, id) => {
<ide> chunkGraph.setModuleId(module, id); | 3 |
Ruby | Ruby | remove `macho` check for `executable` | d8624f5fe754b3d3bcbb0836ecd9fdbb974b9ea1 | <ide><path>Library/Homebrew/unpack_strategy/executable.rb
<ide> require_relative "uncompressed"
<ide>
<del>require "vendor/macho/macho"
<del>
<ide> module UnpackStrategy
<ide> class Executable < Uncompressed
<ide> def self.can_extract?(path:, magic_number:)
<del> return true if magic_number.match?(/\A#!\s*\S+/n)
<del>
<del> begin
<del> path.file? && MachO.open(path).header.executable?
<del> rescue MachO::NotAMachOError
<del> false
<del> end
<add> magic_number.match?(/\A#!\s*\S+/n)
<ide> end
<ide> end
<ide> end | 1 |
Javascript | Javascript | fix redirect caching in httpuriplugin | c128f4fe44111206838faf3db6b21f531d5c6b3f | <ide><path>lib/schemes/HttpUriPlugin.js
<ide> class HttpUriPlugin {
<ide>
<ide> /**
<ide> * @param {string} url URL
<del> * @param {FetchResult} cachedResult result from cache
<add> * @param {FetchResult | RedirectFetchResult} cachedResult result from cache
<ide> * @param {function((Error | null)=, FetchResult=): void} callback callback
<ide> * @returns {void}
<ide> */
<ide> class HttpUriPlugin {
<ide> logger.debug(
<ide> `GET ${url} [${res.statusCode}] -> ${partialResult.location}`
<ide> );
<del> // we should follow redirect and not store partial result
<del> return callback(null, {
<del> ...partialResult,
<del> storeLock,
<del> storeCache,
<del> fresh: true,
<del> etag: undefined,
<del> validUntil: undefined
<del> });
<ide> } else {
<ide> logger.debug(
<ide> `GET ${url} [${res.statusCode}] ${Math.ceil(
<ide> class HttpUriPlugin {
<ide> res.statusCode >= 301 &&
<ide> res.statusCode <= 308
<ide> ) {
<del> return finishWith({
<add> const result = {
<ide> location: new URL(location, url).href
<del> });
<add> };
<add> if (
<add> !cachedResult ||
<add> !("location" in cachedResult) ||
<add> cachedResult.location !== result.location ||
<add> cachedResult.validUntil < validUntil ||
<add> cachedResult.storeLock !== storeLock ||
<add> cachedResult.storeCache !== storeCache ||
<add> cachedResult.etag !== etag
<add> ) {
<add> return finishWith(result);
<add> } else {
<add> logger.debug(`GET ${url} [${res.statusCode}] (unchanged)`);
<add> return callback(null, {
<add> ...result,
<add> fresh: true,
<add> storeLock,
<add> storeCache,
<add> validUntil,
<add> etag
<add> });
<add> }
<ide> }
<ide> const contentType = res.headers["content-type"] || "";
<ide> const bufferArr = []; | 1 |
Ruby | Ruby | remove bundler warnings from bin/setup output | b59634603862918b379e02b2594bc1bb8a5728df | <ide><path>railties/test/application/bin_setup_test.rb
<ide> def test_bin_setup_output
<ide>
<ide> # Ignore line that's only output by Bundler < 1.14
<ide> output.sub!(/^Resolving dependencies\.\.\.\n/, "")
<add> # Suppress Bundler platform warnings from output
<add> output.gsub!(/^The dependency .* will be unused .*\.\n/, "")
<ide>
<del> assert_equal(<<-OUTPUT, output)
<del>== Installing dependencies ==
<del>The Gemfile's dependencies are satisfied
<add> assert_equal(<<~OUTPUT, output)
<add> == Installing dependencies ==
<add> The Gemfile's dependencies are satisfied
<ide>
<del>== Preparing database ==
<del>Created database 'db/development.sqlite3'
<del>Created database 'db/test.sqlite3'
<add> == Preparing database ==
<add> Created database 'db/development.sqlite3'
<add> Created database 'db/test.sqlite3'
<ide>
<del>== Removing old logs and tempfiles ==
<add> == Removing old logs and tempfiles ==
<ide>
<del>== Restarting application server ==
<add> == Restarting application server ==
<ide> OUTPUT
<ide> end
<ide> end | 1 |
Python | Python | use e.value to get to the exceptioninfo value | f420aa1138f52c732102b6ad00825bab797792ec | <ide><path>spacy/tests/test_language.py
<ide> def test_language_init_invalid_vocab(value):
<ide> err_fragment = "invalid value"
<ide> with pytest.raises(ValueError) as e:
<ide> Language(value)
<del> assert err_fragment in str(e)
<add> assert err_fragment in str(e.value) | 1 |
Python | Python | add ٪ as punctuation | 42349471bc815ffa76a83b7c1c7174666d71edfe | <ide><path>spacy/lang/char_classes.py
<ide>
<ide> _units = ('km km² km³ m m² m³ dm dm² dm³ cm cm² cm³ mm mm² mm³ ha µm nm yd in ft '
<ide> 'kg g mg µg t lb oz m/s km/h kmh mph hPa Pa mbar mb MB kb KB gb GB tb '
<del> 'TB T G M K % ٪ км км² км³ м м² м³ дм дм² дм³ см см² см³ мм мм² мм³ нм '
<add> 'TB T G M K % км км² км³ м м² м³ дм дм² дм³ см см² см³ мм мм² мм³ нм '
<ide> 'кг г мг м/с км/ч кПа Па мбар Кб КБ кб Мб МБ мб Гб ГБ гб Тб ТБ тб')
<ide> _currency = r'\$ £ € ¥ ฿ US\$ C\$ A\$ ₽ ﷼'
<ide>
<ide> # These expressions contain various unicode variations, including characters
<ide> # used in Chinese (see #1333, #1340, #1351) – unless there are cross-language
<ide> # conflicts, spaCy's base tokenizer should handle all of those by default
<del>_punct = r'… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 ? ! , 、 ; : ~ · । ، ؛'
<add>_punct = r'… …… , : ; \! \? ¿ ؟ ¡ \( \) \[ \] \{ \} < > _ # \* & 。 ? ! , 、 ; : ~ · । ، ؛ ٪'
<ide> _quotes = r'\' \'\' " ” “ `` ` ‘ ´ ‘‘ ’’ ‚ , „ » « 「 」 『 』 ( ) 〔 〕 【 】 《 》 〈 〉'
<ide> _hyphens = '- – — -- --- —— ~'
<ide> | 1 |
Text | Text | fix horizontal scrolling in docs | 6facb85f853365125b10e9f0843d013184f88d2e | <ide><path>docs/docs/optimizing-performance.md
<ide> Remember that you only need to do this for production builds. You shouldn't appl
<ide>
<ide> In the **development** mode, you can visualize how components mount, update, and unmount, using the performance tools in supported browsers. For example:
<ide>
<del><center><img src="/react/img/blog/react-perf-chrome-timeline.png" width="651" height="228" alt="React components in Chrome timeline" /></center>
<add><center><img src="/react/img/blog/react-perf-chrome-timeline.png" style="max-width:100%" alt="React components in Chrome timeline" /></center>
<ide>
<ide> To do this in Chrome:
<ide>
<ide> If you know that in some situations your component doesn't need to update, you c
<ide>
<ide> Here's a subtree of components. For each one, `SCU` indicates what `shouldComponentUpdate` returned, and `vDOMEq` indicates whether the rendered React elements were equivalent. Finally, the circle's color indicates whether the component had to be reconciled or not.
<ide>
<del><figure><img src="/react/img/docs/should-component-update.png" /></figure>
<add><figure><img src="/react/img/docs/should-component-update.png" style="max-width:100%" /></figure>
<ide>
<ide> Since `shouldComponentUpdate` returned `false` for the subtree rooted at C2, React did not attempt to render C2, and thus didn't even have to invoke `shouldComponentUpdate` on C4 and C5.
<ide> | 1 |
Go | Go | fix warning in build | ec488fa1231a9a5240dc0cd38c99ca3241fcaf7c | <ide><path>runtime.go
<ide> func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
<ide> return nil, nil, err
<ide> }
<ide>
<del> if img.Config != nil {
<del> if err := MergeConfig(config, img.Config); err != nil {
<del> return nil, nil, err
<add> checkDeprecatedExpose := func(config *Config) bool {
<add> if config != nil {
<add> if config.PortSpecs != nil {
<add> for _, p := range config.PortSpecs {
<add> if strings.Contains(p, ":") {
<add> return true
<add> }
<add> }
<add> }
<ide> }
<add> return false
<ide> }
<add>
<ide> warnings := []string{}
<del> if config.PortSpecs != nil {
<del> for _, p := range config.PortSpecs {
<del> if strings.Contains(p, ":") {
<del> warnings = append(warnings, "The mapping to public ports on your host has been deprecated. Use -p to publish the ports.")
<del> break
<del> }
<add> if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) {
<add> warnings = append(warnings, "The mapping to public ports on your host has been deprecated. Use -p to publish the ports.")
<add> }
<add>
<add> if img.Config != nil {
<add> if err := MergeConfig(config, img.Config); err != nil {
<add> return nil, nil, err
<ide> }
<ide> }
<ide> | 1 |
Python | Python | remove stray pprint | 83112cf9db6c5bf993865507bcfdbd7985d954fd | <ide><path>libcloud/test/dns/test_godaddy.py
<ide>
<ide> import sys
<ide> import unittest
<del>from pprint import pprint
<add>
<ide> from libcloud.utils.py3 import httplib
<ide> from libcloud.dns.drivers.godaddy import GoDaddyDNSDriver
<ide> from libcloud.test import MockHttp | 1 |
Python | Python | add lm_labels for the lm cross-entropy | dc580dd4c720c5daefe7411f604b6908da99681e | <ide><path>transformers/modeling_bert.py
<ide> def tie_weights(self):
<ide> self.bert.embeddings.word_embeddings)
<ide>
<ide> def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
<del> masked_lm_labels=None, encoder_hidden_states=None, encoder_attention_mask=None):
<add> masked_lm_labels=None, lm_labels=None, encoder_hidden_states=None, encoder_attention_mask=None):
<ide>
<ide> outputs = self.bert(input_ids,
<ide> attention_mask=attention_mask,
<ide> def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_
<ide> # of predictions for masked words.
<ide> # 2. If encoder hidden states are provided we are in a causal situation where we
<ide> # try to predict the next word for each input in the encoder.
<del> if masked_lm_labels is not None and encoder_hidden_states is not None:
<add> if masked_lm_labels is not None and lm_labels is not None:
<ide> raise AttributeError("Masked LM training with an encoder-decoder is not supported.")
<ide>
<ide> if masked_lm_labels is not None:
<ide> loss_fct = CrossEntropyLoss(ignore_index=-1) # -1 index = padding token
<ide> masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
<ide> outputs = (masked_lm_loss,) + outputs
<ide>
<del> if encoder_hidden_states is not None:
<add> if lm_labels is not None:
<ide> # we are doing next-token prediction; shift prediction scores and input ids by one
<ide> prediction_scores = prediction_scores[:, :-1, :]
<del> input_ids = input_ids[:, 1:, :]
<add> lm_labels = lm_labels[:, 1:, :]
<ide> loss_fct = CrossEntropyLoss(ignore_index=-1)
<del> seq2seq_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), input_ids.view(-1))
<add> seq2seq_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), lm_labels.view(-1))
<ide> outputs = (seq2seq_loss,) + outputs
<ide>
<ide> return outputs # (mlm_or_seq2seq_loss), prediction_scores, (hidden_states), (attentions) | 1 |
Javascript | Javascript | fix utf8stream references | 2663c69f8d8ea4df9fbc40b03186259186011e97 | <ide><path>lib/http.js
<ide> function debug (x) {
<ide>
<ide> var sys = require('sys');
<ide> var net = require('net');
<del>var Utf8Stream = require('utf8_stream').Utf8Stream;
<add>var Utf8Decoder = require('utf8decoder').Utf8Decoder;
<ide> var events = require('events');
<ide> var Buffer = require('buffer').Buffer;
<ide>
<ide> IncomingMessage.prototype.setEncoding = function (enc) {
<ide> // TODO check values, error out on bad, and deprecation message?
<ide> this._encoding = enc.toLowerCase();
<ide> if (this._encoding == 'utf-8' || this._encoding == 'utf8') {
<del> this._decoder = new Utf8Stream();
<add> this._decoder = new Utf8Decoder();
<ide> this._decoder.onString = function(str) {
<ide> this.emit('data', str);
<ide> }; | 1 |
PHP | PHP | apply fixes from styleci | bb91bf7c0883707dfef34a20a87e95a4f75e6c29 | <ide><path>src/Illuminate/View/Compilers/ComponentTagCompiler.php
<ide> protected function parseAttributeBag(string $attributeString)
<ide> */
<ide> protected function parseComponentTagClassStatements(string $attributeString)
<ide> {
<del> return preg_replace_callback(
<add> return preg_replace_callback(
<ide> '/@(class)(\( ( (?>[^()]+) | (?2) )* \))/x', function ($match) {
<ide> if ($match[1] === 'class') {
<ide> $match[2] = str_replace('"', "'", $match[2]); | 1 |
PHP | PHP | make stringable tappable | 7f1138e362c6e124f4e1b327fc14d83eb80fffee | <ide><path>src/Illuminate/Support/Stringable.php
<ide>
<ide> use Closure;
<ide> use Illuminate\Support\Traits\Macroable;
<add>use Illuminate\Support\Traits\Tappable;
<ide> use Symfony\Component\VarDumper\VarDumper;
<ide>
<ide> class Stringable
<ide> {
<del> use Macroable;
<add> use Tappable, Macroable;
<ide>
<ide> /**
<ide> * The underlying string value. | 1 |
Text | Text | fix a couple of typos in the js guide [ci skip] | 676ee7887e3689257811c8b00bd699f89a838910 | <ide><path>guides/source/working_with_javascript.md
<ide> ease! We will cover the following topics:
<ide> An introduction to AJAX
<ide> ------------------------
<ide>
<del>In order to understand AJAX, you must first understand what a web broswer does
<add>In order to understand AJAX, you must first understand what a web browser does
<ide> normally.
<ide>
<ide> When you type `http://localhost:3000` into your browser's address bar and hit
<ide> attributes, and attaches appropriate handlers.
<ide> ### form_for
<ide>
<ide> [`form_for`](http://api.rubyonrails.org/classes/ActionView/Helpers/FormHelper.html#method-i-form_for)
<del>is a helper that assists with writing `<form>`s. `form_for` takes a `:remote`
<add>is a helper that assists with writing forms. `form_for` takes a `:remote`
<ide> option. It works like this:
<ide>
<ide> ```
<ide> details.
<ide> ### link_to
<ide>
<ide> [`link_to`](http://api.rubyonrails.org/classes/ActionView/Helpers/UrlHelper.html#method-i-link_to)
<del>is a helper that assits with generating links. It has a `:remote` option you
<add>is a helper that assists with generating links. It has a `:remote` option you
<ide> can use like this:
<ide>
<ide> ```
<ide> $(document).ready ->
<ide> alert "fib of #{count} is: #{data}."
<ide> ```
<ide>
<del>Easy!
<del>
<ide> ### button_to
<ide>
<ide> [`button_to`](http://api.rubyonrails.org/classes/ActionView/Helpers/UrlHelper.html#method-i-button_to) is a helper that helps you create buttons. It has a `:remote` option that you can call like this: | 1 |
PHP | PHP | remove additional wrong ds | 3708f80b3d66155c07b343440d1963a3ea6ee05a | <ide><path>src/View/View.php
<ide> protected function _getTemplateFileName(?string $name = null): string
<ide> } elseif (!$plugin || $this->templatePath !== $this->name) {
<ide> $name = $templatePath . $subDir . $name;
<ide> } else {
<del> $name = DIRECTORY_SEPARATOR . $subDir . $name;
<add> $name = $subDir . $name;
<ide> }
<ide> }
<ide> | 1 |
PHP | PHP | use correct type hint for redirectto | 195529f6256464ec5df7b87d9a3ecfea5121e984 | <ide><path>src/Illuminate/Auth/AuthenticationException.php
<ide> class AuthenticationException extends Exception
<ide> /**
<ide> * The path the user should be redirected to.
<ide> *
<del> * @var string
<add> * @var string|null
<ide> */
<ide> protected $redirectTo;
<ide>
<ide> public function guards()
<ide> /**
<ide> * Get the path the user should be redirected to.
<ide> *
<del> * @return string
<add> * @return string|null
<ide> */
<ide> public function redirectTo()
<ide> { | 1 |
Javascript | Javascript | update rawmodule to es2015 syntax | 24594b345a2e9f64f34befe001745ae6c6e31b87 | <ide><path>lib/RawModule.js
<ide> MIT License http://www.opensource.org/licenses/mit-license.php
<ide> Author Tobias Koppers @sokra
<ide> */
<del>var Module = require("./Module");
<del>var OriginalSource = require("webpack-sources").OriginalSource;
<del>var RawSource = require("webpack-sources").RawSource;
<del>
<del>function RawModule(source, identifier, readableIdentifier) {
<del> Module.call(this);
<del> this.sourceStr = source;
<del> this.identifierStr = identifier || this.sourceStr;
<del> this.readableIdentifierStr = readableIdentifier || this.identifierStr;
<del> this.cacheable = true;
<del> this.built = false;
<add>"use strict";
<add>
<add>const Module = require("./Module");
<add>const OriginalSource = require("webpack-sources").OriginalSource;
<add>const RawSource = require("webpack-sources").RawSource;
<add>
<add>module.exports = class RawModule extends Module {
<add>
<add> constructor(source, identifier, readableIdentifier) {
<add> super()
<add> this.sourceStr = source;
<add> this.identifierStr = identifier || this.sourceStr;
<add> this.readableIdentifierStr = readableIdentifier || this.identifierStr;
<add> this.cacheable = true;
<add> this.built = false;
<add> }
<add>
<add> identifier() {
<add> return this.identifierStr;
<add> }
<add>
<add> size() {
<add> return this.sourceStr.length;
<add> }
<add>
<add> readableIdentifier(requestShortener) {
<add> return requestShortener.shorten(this.readableIdentifierStr);
<add> }
<add>
<add> needRebuild() {
<add> return false;
<add> }
<add>
<add> build(options, compilations, resolver, fs, callback) {
<add> this.builtTime = new Date().getTime();
<add> callback();
<add> }
<add>
<add> source() {
<add> if(this.useSourceMap)
<add> return new OriginalSource(this.sourceStr, this.identifier());
<add> else
<add> return new RawSource(this.sourceStr);
<add> }
<add>
<ide> }
<del>module.exports = RawModule;
<del>
<del>RawModule.prototype = Object.create(Module.prototype);
<del>RawModule.prototype.constructor = RawModule;
<del>
<del>RawModule.prototype.identifier = function() {
<del> return this.identifierStr;
<del>};
<del>
<del>RawModule.prototype.readableIdentifier = function(requestShortener) {
<del> return requestShortener.shorten(this.readableIdentifierStr);
<del>};
<del>
<del>RawModule.prototype.needRebuild = function() {
<del> return false;
<del>};
<del>
<del>RawModule.prototype.build = function(options, compilation, resolver, fs, callback) {
<del> this.builtTime = new Date().getTime();
<del> callback();
<del>};
<del>
<del>RawModule.prototype.source = function() {
<del> if(this.useSourceMap)
<del> return new OriginalSource(this.sourceStr, this.identifier());
<del> else
<del> return new RawSource(this.sourceStr);
<del>};
<del>
<del>RawModule.prototype.size = function() {
<del> return this.sourceStr.length;
<del>};
<ide><path>test/RawModule.test.js
<add>var RawModule = require("../lib/RawModule");
<add>var OriginalSource = require("webpack-sources").OriginalSource;
<add>var RawSource = require("webpack-sources").RawSource;
<add>var RequestShortener = require("../lib/RequestShortener");
<add>var should = require("should");
<add>var path = require("path")
<add>
<add>describe("RawModule", function() {
<add> var myRawModule;
<add>
<add> before(function() {
<add> var source = 'sourceStr attribute';
<add> var identifier = 'identifierStr attribute';
<add> var readableIdentifier = 'readableIdentifierStr attribute';
<add> myRawModule = new RawModule(source, identifier, readableIdentifier);
<add> });
<add>
<add> describe('identifier', function() {
<add> it('returns value for identifierStr attribute', function() {
<add> should(myRawModule.identifier()).be.exactly('identifierStr attribute');
<add> });
<add> });
<add>
<add> describe('size', function() {
<add> it('returns value for sourceStr attribute\'s length property', function() {
<add> var sourceStrLength = myRawModule.sourceStr.length;
<add> should(myRawModule.size()).be.exactly(sourceStrLength);
<add> });
<add> });
<add>
<add> describe('readableIdentifier', function() {
<add> it('returns result of calling provided requestShortener\'s shorten method\
<add> on readableIdentifierStr attribute', function() {
<add> var requestShortener = new RequestShortener(path.resolve());
<add> should.exist(myRawModule.readableIdentifier(requestShortener));
<add> });
<add> });
<add>
<add> describe('needRebuild', function() {
<add> it('returns false', function() {
<add> should(myRawModule.needRebuild()).be.false();
<add> });
<add> });
<add>
<add> describe('build', function() {
<add> it('sets builtTime attribute to current time value in milliseconds', function() {
<add> myRawModule.build('', '', '', '', () => {
<add> undefined
<add> })
<add> var currentTime = new Date().getTime();
<add> myRawModule.builtTime.should.be.exactly(currentTime);
<add> });
<add> });
<add>
<add> describe('source', function() {
<add> it('returns a new OriginalSource instance with sourceStr attribute and\
<add> return value of identifier() function provided as constructor arguments',
<add> function() {
<add> var originalSource = new OriginalSource(myRawModule.sourceStr, myRawModule.identifier());
<add> myRawModule.useSourceMap = true;
<add> myRawModule.source().should.match(originalSource);
<add> });
<add>
<add> it('returns a new RawSource instance with sourceStr attribute provided\
<add> as constructor argument if useSourceMap is falsey', function() {
<add> var rawSource = new RawSource(myRawModule.sourceStr);
<add> myRawModule.useSourceMap = false;
<add> myRawModule.source().should.match(rawSource);
<add> });
<add> });
<add>}); | 2 |
Python | Python | add diagflat. begin to add numarray compatibility | a1e9a90654afd97b7bf45d318019966684935c51 | <ide><path>numpy/lib/twodim_base.py
<ide>
<ide> """
<ide>
<del>__all__ = ['diag','eye','fliplr','flipud','rot90','tri','triu','tril',
<add>__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu','tril',
<ide> 'vander','histogram2d']
<ide>
<ide> from numpy.core.numeric import asanyarray, int_, equal, subtract, arange, \
<ide> def diag(v, k=0):
<ide> else:
<ide> raise ValueError, "Input must be 1- or 2-d."
<ide>
<del>
<add>def diagflat(v,k=0):
<add> try:
<add> wrap = v.__array_wrap__
<add> except AttributeError:
<add> wrap = None
<add> v = asarray(v).ravel()
<add> s = len(v)
<add> n = s + abs(k)
<add> res = zeros((n,n), v.dtype)
<add> if (k>=0):
<add> i = arange(0,n-k)
<add> fi = i+k+i*n
<add> else:
<add> i = arange(0,n+k)
<add> fi = i+(i-k)*n
<add> res.flat[fi] = v
<add> if not wrap:
<add> return res
<add> return wrap(res)
<add>
<ide> def tri(N, M=None, k=0, dtype=float):
<ide> """ returns a N-by-M array where all the diagonals starting from
<ide> lower left corner up to the k-th are all ones.
<ide><path>numpy/matlib.py
<ide> def randn(*args):
<ide> if isinstance(args[0], tuple):
<ide> args = args[0]
<ide> return asmatrix(N.random.rand(*args))
<add>
<add>
<ide><path>numpy/numarray/__init__.py
<ide> from util import *
<add>from numclass import *
<add>
<ide> __all__ = util.__all__
<add>__all__ += numclass.__all__
<ide><path>numpy/numarray/numclass.py
<add>from numpy.core.multiarray import ndarray
<add>import numerictypes as _nt
<add>import numpy as N
<add>import sys as _sys
<add>
<add>__all__ = ['NumArray']
<add>
<add>class NumArray(ndarray):
<add> def __new__(klass, shape=None, type=None, buffer=None,
<add> byteoffset=0, bytestride=None, byteorder=_sys.byteorder,
<add> aligned=1, real=None, imag=None):
<add>
<add> type = _nt.getType(type)
<add> dtype = N.dtype(type._dtype)
<add> if byteorder in ['little', 'big']:
<add> if byteorder is not _sys.byteorder:
<add> dtype = dtype.newbyteorder()
<add> else:
<add> raise ValueError("byteorder must be 'little' or 'big'")
<add>
<add> if buffer is None:
<add> self = ndarray.__new__(klass, shape, dtype)
<add> else:
<add> self = ndarray.__new__(klass, shape, dtype, buffer=buffer,
<add> offset=byteoffset, strides=bytestride)
<add>
<add> self._type = type
<add>
<add> if real is not None:
<add> self.real = real
<add>
<add> if imag is not None:
<add> self.imag = imag
<add>
<add> self._byteorder = byteorder
<add>
<add> return self
<add>
<add> def argmax(self, axis=-1):
<add> return ndarray.argmax(self, axis)
<add>
<add> def argmin(self, axis=-1):
<add> return ndarray.argmax(self, axis)
<add>
<add> def argsort(self, axis=-1, kind='quicksort'):
<add> return ndarray.argmax(self, axis, kind)
<add>
<add> def astype(self, type=None):
<add> return self.astype(_getdtype(type))
<add>
<add> def byteswap(self):
<add> ndarray.byteswap(self, True)
<add>
<add> def byteswapped(self):
<add> return ndarray.byteswap(self, False)
<add>
<add> def getdtypechar(self):
<add> return self.dtype.char
<add>
<add> def getimag(self):
<add> return self.imag
<add>
<add> getimaginary = getimag
<add>
<add> imaginary = property(getimaginary, None, "")
<add>
<add> def getreal(self):
<add> return self.real
<add>
<add> def is_c_array(self):
<add> return self.dtype.isnative and self.flags.carray
<add>
<add> def is_f_array(self):
<add> return self.dtype.isnative and self.flags.farray
<add>
<add> def is_fortran_contiguous(self):
<add> return self.flags.contiguous
<add>
<add> def new(self, type=None):
<add> if type is not None:
<add> dtype = _getdtype(type)
<add> return N.empty(self.shape, dtype)
<add> else:
<add> return N.empty_like(self)
<add>
<add> def setimag(self, value):
<add> self.imag = value
<add>
<add> setimaginary = setimag
<add>
<add> def setreal(self, value):
<add> self.real = value
<add>
<add> def sinfo(self):
<add> self.info()
<add>
<add> def sort(self, axis=-1, kind='quicksort'):
<add> ndarray.sort(self, axis, kind)
<add>
<add> def spacesaver(self):
<add> return False
<add>
<add> def stddev(self):
<add> return self.std()
<add>
<add> def sum(self, type=None):
<add> dtype = _getdtype(type)
<add> return ndarray.sum(self, dtype=dtype)
<add>
<add> def togglebyteorder(self):
<add> self.dtype = self.dtype.newbyteorder()
<add>
<add> def type(self):
<add> return self._type
<add>
<add> def typecode(self):
<add> return _numtypecode[self.dtype.char]
<add>
<add> dtypechar = property(getdtypechar, None, "")
<add>
<add> def info(self):
<add> print "class: ", self.__class__
<add> print "shape: ", self.shape
<add> print "strides: ", self.strides
<add> print "byteoffset: 0"
<add> print "bytestride: ", self.strides[0]
<add> print "itemsize: ", self.itemsize
<add> print "aligned: ", self.flags.isaligned
<add> print "contiguous: ", self.flags.contiguous
<add> print "buffer: ", self.data
<add> print "data pointer:", self._as_paramater_
<add> print "byteorder: ", self._byteorder
<add> print "byteswap: ", not self.dtype.isnative
<ide><path>numpy/numarray/numerictypes.py
<add>"""numerictypes: Define the numeric type objects
<add>
<add>This module is designed so 'from numerictypes import *' is safe.
<add>Exported symbols include:
<add>
<add> Dictionary with all registered number types (including aliases):
<add> typeDict
<add>
<add> Numeric type objects:
<add> Bool
<add> Int8 Int16 Int32 Int64
<add> UInt8 UInt16 UInt32 UInt64
<add> Float32 Double64
<add> Complex32 Complex64
<add>
<add> Numeric type classes:
<add> NumericType
<add> BooleanType
<add> SignedType
<add> UnsignedType
<add> IntegralType
<add> SignedIntegralType
<add> UnsignedIntegralType
<add> FloatingType
<add> ComplexType
<add>
<add>$Id: numerictypes.py,v 1.55 2005/12/01 16:22:03 jaytmiller Exp $
<add>"""
<add>
<add>MAX_ALIGN = 8
<add>MAX_INT_SIZE = 8
<add>
<add>import numpy
<add>LP64 = numpy.intp(0).itemsize == 8
<add>
<add>HasUInt64 = 0
<add>try:
<add> numpy.int64(0)
<add>except:
<add> HasUInt64 = 0
<add>
<add>#from typeconv import typeConverters as _typeConverters
<add>#import numinclude
<add>#from _numerictype import _numerictype, typeDict
<add>import types as _types
<add>import copy as _copy
<add>import sys as _sys
<add>
<add># Enumeration of numarray type codes
<add>typeDict = {}
<add>
<add>_tAny = 0
<add>_tBool = 1
<add>_tInt8 = 2
<add>_tUInt8 = 3
<add>_tInt16 = 4
<add>_tUInt16 = 5
<add>_tInt32 = 6
<add>_tUInt32 = 7
<add>_tInt64 = 8
<add>_tUInt64 = 9
<add>_tFloat32 = 10
<add>_tFloat64 = 11
<add>_tComplex32 = 12
<add>_tComplex64 = 13
<add>_tObject = 14
<add>
<add>def IsType(rep):
<add> """Determines whether the given object or string, 'rep', represents
<add> a numarray type."""
<add> return isinstance(rep, NumericType) or typeDict.has_key(rep)
<add>
<add>def _register(name, type, force=0):
<add> """Register the type object. Raise an exception if it is already registered
<add> unless force is true.
<add> """
<add> if typeDict.has_key(name) and not force:
<add> raise ValueError("Type %s has already been registered" % name)
<add> typeDict[name] = type
<add> return type
<add>
<add>
<add>class NumericType(object):
<add> """Numeric type class
<add>
<add> Used both as a type identification and the repository of
<add> characteristics and conversion functions.
<add> """
<add> def __new__(type, name, bytes, default, typeno):
<add> """__new__() implements a 'quasi-singleton pattern because attempts
<add> to create duplicate types return the first created instance of that
<add> particular type parameterization, i.e. the second time you try to
<add> create "Int32", you get the original Int32, not a new one.
<add> """
<add> if typeDict.has_key(name):
<add> self = typeDict[name]
<add> if self.bytes != bytes or self.default != default or \
<add> self.typeno != typeno:
<add> raise ValueError("Redeclaration of existing NumericType "\
<add> "with different parameters.")
<add> return self
<add> else:
<add> self = object.__new__(type)
<add> self.name = "no name"
<add> self.bytes = None
<add> self.default = None
<add> self.typeno = -1
<add> return self
<add>
<add> def __init__(self, name, bytes, default, typeno):
<add> if not isinstance(name, str):
<add> raise TypeError("name must be a string")
<add> self.name = name
<add> self.bytes = bytes
<add> self.default = default
<add> self.typeno = typeno
<add> self._conv = None
<add> _register(self.name, self)
<add>
<add> def __getnewargs__(self):
<add> """support the pickling protocol."""
<add> return (self.name, self.bytes, self.default, self.typeno)
<add>
<add> def __getstate__(self):
<add> """support pickling protocol... no __setstate__ required."""
<add> False
<add>
<add>class BooleanType(NumericType):
<add> pass
<add>
<add>class SignedType:
<add> """Marker class used for signed type check"""
<add> pass
<add>
<add>class UnsignedType:
<add> """Marker class used for unsigned type check"""
<add> pass
<add>
<add>class IntegralType(NumericType):
<add> pass
<add>
<add>class SignedIntegralType(IntegralType, SignedType):
<add> pass
<add>
<add>class UnsignedIntegralType(IntegralType, UnsignedType):
<add> pass
<add>
<add>class FloatingType(NumericType):
<add> pass
<add>
<add>class ComplexType(NumericType):
<add> pass
<add>
<add>class AnyType(NumericType):
<add> pass
<add>
<add>class ObjectType(NumericType):
<add> pass
<add>
<add># C-API Type Any
<add>
<add>Any = AnyType("Any", None, None, _tAny)
<add>
<add>Object = ObjectType("Object", None, None, _tObject)
<add>
<add># Numeric Types:
<add>
<add>Bool = BooleanType("Bool", 1, 0, _tBool)
<add>Int8 = SignedIntegralType( "Int8", 1, 0, _tInt8)
<add>Int16 = SignedIntegralType("Int16", 2, 0, _tInt16)
<add>Int32 = SignedIntegralType("Int32", 4, 0, _tInt32)
<add>Int64 = SignedIntegralType("Int64", 8, 0, _tInt64)
<add>
<add>Float32 = FloatingType("Float32", 4, 0.0, _tFloat32)
<add>Float64 = FloatingType("Float64", 8, 0.0, _tFloat64)
<add>
<add>UInt8 = UnsignedIntegralType( "UInt8", 1, 0, _tUInt8)
<add>UInt16 = UnsignedIntegralType("UInt16", 2, 0, _tUInt16)
<add>UInt32 = UnsignedIntegralType("UInt32", 4, 0, _tUInt32)
<add>UInt64 = UnsignedIntegralType("UInt64", 8, 0, _tUInt64)
<add>
<add>Complex32 = ComplexType("Complex32", 8, complex(0.0), _tComplex32)
<add>Complex64 = ComplexType("Complex64", 16, complex(0.0), _tComplex64)
<add>
<add># Aliases
<add>
<add>Byte = _register("Byte", Int8)
<add>Short = _register("Short", Int16)
<add>Int = _register("Int", Int32)
<add>if LP64:
<add> Long = _register("Long", Int64)
<add> if HasUInt64:
<add> _register("ULong", UInt64)
<add> MaybeLong = _register("MaybeLong", Int64)
<add>else:
<add> Long = _register("Long", Int32)
<add> _register("ULong", UInt32)
<add> MaybeLong = _register("MaybeLong", Int32)
<add>
<add>
<add>_register("UByte", UInt8)
<add>_register("UShort", UInt16)
<add>_register("UInt", UInt32)
<add>Float = _register("Float", Float64)
<add>Complex = _register("Complex", Complex64)
<add>
<add># short forms
<add>
<add>_register("b1", Bool)
<add>_register("u1", UInt8)
<add>_register("u2", UInt16)
<add>_register("u4", UInt32)
<add>_register("i1", Int8)
<add>_register("i2", Int16)
<add>_register("i4", Int32)
<add>
<add>_register("i8", Int64)
<add>if HasUInt64:
<add> _register("u8", UInt64)
<add>
<add>_register("f4", Float32)
<add>_register("f8", Float64)
<add>_register("c8", Complex32)
<add>_register("c16", Complex64)
<add>
<add># NumPy forms
<add>
<add>_register("1", Int8)
<add>_register("B", Bool)
<add>_register("c", Int8)
<add>_register("b", UInt8)
<add>_register("s", Int16)
<add>_register("w", UInt16)
<add>_register("i", Int32)
<add>_register("N", Int64)
<add>_register("u", UInt32)
<add>_register("U", UInt64)
<add>
<add>if LP64:
<add> _register("l", Int64)
<add>else:
<add> _register("l", Int32)
<add>
<add>_register("d", Float64)
<add>_register("f", Float32)
<add>_register("D", Complex64)
<add>_register("F", Complex32)
<add>
<add># scipy.base forms
<add>
<add>def _scipy_alias(scipy_type, numarray_type):
<add> _register(scipy_type, eval(numarray_type))
<add> globals()[scipy_type] = globals()[numarray_type]
<add>
<add>_scipy_alias("bool_", "Bool")
<add>_scipy_alias("bool8", "Bool")
<add>_scipy_alias("int8", "Int8")
<add>_scipy_alias("uint8", "UInt8")
<add>_scipy_alias("int16", "Int16")
<add>_scipy_alias("uint16", "UInt16")
<add>_scipy_alias("int32", "Int32")
<add>_scipy_alias("uint32", "UInt32")
<add>_scipy_alias("int64", "Int64")
<add>_scipy_alias("uint64", "UInt64")
<add>
<add>_scipy_alias("float64", "Float64")
<add>_scipy_alias("float32", "Float32")
<add>_scipy_alias("complex128", "Complex64")
<add>_scipy_alias("complex64", "Complex32")
<add>
<add># The rest is used by numeric modules to determine conversions
<add>
<add># Ranking of types from lowest to highest (sorta)
<add>if not HasUInt64:
<add> genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',
<add> 'Int32', 'UInt32', 'Int64',
<add> 'Float32','Float64', 'Complex32', 'Complex64', 'Object']
<add>else:
<add> genericTypeRank = ['Bool','Int8','UInt8','Int16','UInt16',
<add> 'Int32', 'UInt32', 'Int64', 'UInt64',
<add> 'Float32','Float64', 'Complex32', 'Complex64', 'Object']
<add>
<add>if _sys.version_info >= (2,3,0):
<add> pythonTypeRank = [ bool, int, long, float, complex ]
<add>else:
<add> pythonTypeRank = [ int, long, float, complex ]
<add>
<add># The next line is not platform independent XXX Needs to be generalized
<add>if not LP64:
<add> pythonTypeMap = {
<add> int:("Int32","int"),
<add> long:("Int64","int"),
<add> float:("Float64","float"),
<add> complex:("Complex64","complex")}
<add>
<add> scalarTypeMap = {
<add> int:"Int32",
<add> long:"Int64",
<add> float:"Float64",
<add> complex:"Complex64"}
<add>else:
<add> pythonTypeMap = {
<add> int:("Int64","int"),
<add> long:("Int64","int"),
<add> float:("Float64","float"),
<add> complex:("Complex64","complex")}
<add>
<add> scalarTypeMap = {
<add> int:"Int64",
<add> long:"Int64",
<add> float:"Float64",
<add> complex:"Complex64"}
<add>
<add>if _sys.version_info >= (2,3,0):
<add> pythonTypeMap.update({bool:("Bool","bool") })
<add> scalarTypeMap.update({bool:"Bool"})
<add>
<add># Generate coercion matrix
<add>
<add>def _initGenericCoercions():
<add> global genericCoercions
<add> genericCoercions = {}
<add>
<add> # vector with ...
<add> for ntype1 in genericTypeRank:
<add> nt1 = typeDict[ntype1]
<add> rank1 = genericTypeRank.index(ntype1)
<add> ntypesize1, inttype1, signedtype1 = nt1.bytes, \
<add> isinstance(nt1, IntegralType), isinstance(nt1, SignedIntegralType)
<add> for ntype2 in genericTypeRank:
<add> # vector
<add> nt2 = typeDict[ntype2]
<add> ntypesize2, inttype2, signedtype2 = nt2.bytes, \
<add> isinstance(nt2, IntegralType), isinstance(nt2, SignedIntegralType)
<add> rank2 = genericTypeRank.index(ntype2)
<add> if (signedtype1 != signedtype2) and inttype1 and inttype2:
<add> # mixing of signed and unsigned ints is a special case
<add> # If unsigned same size or larger, final size needs to be bigger
<add> # if possible
<add> if signedtype1:
<add> if ntypesize2 >= ntypesize1:
<add> size = min(2*ntypesize2, MAX_INT_SIZE)
<add> else:
<add> size = ntypesize1
<add> else:
<add> if ntypesize1 >= ntypesize2:
<add> size = min(2*ntypesize1, MAX_INT_SIZE)
<add> else:
<add> size = ntypesize2
<add> outtype = "Int"+str(8*size)
<add> else:
<add> if rank1 >= rank2:
<add> outtype = ntype1
<add> else:
<add> outtype = ntype2
<add> genericCoercions[(ntype1, ntype2)] = outtype
<add>
<add> for ntype2 in pythonTypeRank:
<add> # scalar
<add> mapto, kind = pythonTypeMap[ntype2]
<add> if ((inttype1 and kind=="int") or (not inttype1 and kind=="float")):
<add> # both are of the same "kind" thus vector type dominates
<add> outtype = ntype1
<add> else:
<add> rank2 = genericTypeRank.index(mapto)
<add> if rank1 >= rank2:
<add> outtype = ntype1
<add> else:
<add> outtype = mapto
<add> genericCoercions[(ntype1, ntype2)] = outtype
<add> genericCoercions[(ntype2, ntype1)] = outtype
<add>
<add> # scalar-scalar
<add> for ntype1 in pythonTypeRank:
<add> maptype1 = scalarTypeMap[ntype1]
<add> genericCoercions[(ntype1,)] = maptype1
<add> for ntype2 in pythonTypeRank:
<add> maptype2 = scalarTypeMap[ntype2]
<add> genericCoercions[(ntype1, ntype2)] = genericCoercions[(maptype1, maptype2)]
<add>
<add> # Special cases more easily dealt with outside of the loop
<add> genericCoercions[("Complex32", "Float64")] = "Complex64"
<add> genericCoercions[("Float64", "Complex32")] = "Complex64"
<add> genericCoercions[("Complex32", "Int64")] = "Complex64"
<add> genericCoercions[("Int64", "Complex32")] = "Complex64"
<add> genericCoercions[("Complex32", "UInt64")] = "Complex64"
<add> genericCoercions[("UInt64", "Complex32")] = "Complex64"
<add>
<add> genericCoercions[("Int64","Float32")] = "Float64"
<add> genericCoercions[("Float32", "Int64")] = "Float64"
<add> genericCoercions[("UInt64","Float32")] = "Float64"
<add> genericCoercions[("Float32", "UInt64")] = "Float64"
<add>
<add> genericCoercions[(float, "Bool")] = "Float64"
<add> genericCoercions[("Bool", float)] = "Float64"
<add>
<add> genericCoercions[(float,float,float)] = "Float64" # for scipy.special
<add> genericCoercions[(int,int,float)] = "Float64" # for scipy.special
<add>
<add>_initGenericCoercions()
<add>
<add># If complex is subclassed, the following may not be necessary
<add>genericPromotionExclusions = {
<add> 'Bool': (),
<add> 'Int8': (),
<add> 'Int16': (),
<add> 'Int32': ('Float32','Complex32'),
<add> 'UInt8': (),
<add> 'UInt16': (),
<add> 'UInt32': ('Float32','Complex32'),
<add> 'Int64' : ('Float32','Complex32'),
<add> 'UInt64' : ('Float32','Complex32'),
<add> 'Float32': (),
<add> 'Float64': ('Complex32',),
<add> 'Complex32':(),
<add> 'Complex64':()
<add>} # e.g., don't allow promotion from Float64 to Complex32 or Int64 to Float32
<add>
<add># Numeric typecodes
<add>typecodes = {'Integer': '1silN',
<add> 'UnsignedInteger': 'bBwuU',
<add> 'Float': 'fd',
<add> 'Character': 'c',
<add> 'Complex': 'FD' }
<add>
<add>if HasUInt64:
<add> _MaximumType = {
<add> Bool : UInt64,
<add>
<add> Int8 : Int64,
<add> Int16 : Int64,
<add> Int32 : Int64,
<add> Int64 : Int64,
<add>
<add> UInt8 : UInt64,
<add> UInt16 : UInt64,
<add> UInt32 : UInt64,
<add> UInt8 : UInt64,
<add>
<add> Float32 : Float64,
<add> Float64 : Float64,
<add>
<add> Complex32 : Complex64,
<add> Complex64 : Complex64
<add> }
<add>else:
<add> _MaximumType = {
<add> Bool : Int64,
<add>
<add> Int8 : Int64,
<add> Int16 : Int64,
<add> Int32 : Int64,
<add> Int64 : Int64,
<add>
<add> UInt8 : Int64,
<add> UInt16 : Int64,
<add> UInt32 : Int64,
<add> UInt8 : Int64,
<add>
<add> Float32 : Float64,
<add> Float64 : Float64,
<add>
<add> Complex32 : Complex64,
<add> Complex64 : Complex64
<add> }
<add>
<add>def MaximumType(t):
<add> """returns the type of highest precision of the same general kind as 't'"""
<add> return _MaximumType[t]
<add>
<add>
<add>def getType(type):
<add> """Return the numeric type object for type
<add>
<add> type may be the name of a type object or the actual object
<add> """
<add> if isinstance(type, NumericType):
<add> return type
<add> try:
<add> return typeDict[type]
<add> except KeyError:
<add> raise TypeError("Not a numeric type")
<add>
<add>if _sys.version_info >= (2,3):
<add> scalarTypes = (bool,int,long,float,complex)
<add>else:
<add> scalarTypes = (int,long,float,complex)
<add>
<add>_scipy_dtypechar = {
<add> Int8 : 'b',
<add> UInt8 : 'B',
<add> Int16 : 'h',
<add> UInt16 : 'H',
<add> Int32 : 'i',
<add> UInt32 : 'I',
<add> Int64 : 'q',
<add> UInt64 : 'Q',
<add> Float32 : 'f',
<add> Float64 : 'd',
<add> Complex32 : 'F', # Note the switchup here:
<add> Complex64 : 'D' # numarray.Complex32 == scipy.complex64, etc.
<add> }
<add>
<add>_scipy_dtypechar_inverse = {}
<add>for key,value in _scipy_dtypechar.items():
<add> _scipy_dtypechar_inverse[value] = key
<add>
<add> | 5 |
Text | Text | add bmeck to collaborators | 636935acc72b9c3cded5b2b2d722023b711c05c1 | <ide><path>README.md
<ide> information about the governance of the Node.js project, see
<ide> * [AndreasMadsen](https://github.com/AndreasMadsen) - **Andreas Madsen** <amwebdk@gmail.com>
<ide> * [bengl](https://github.com/bengl) - **Bryan English** <bryan@bryanenglish.com>
<ide> * [benjamingr](https://github.com/benjamingr) - **Benjamin Gruenbaum** <benjamingr@gmail.com>
<add>* [bmeck](https://github.com/bmeck) - **Bradley Farias** <bradley.meck@gmail.com>
<ide> * [brendanashworth](https://github.com/brendanashworth) - **Brendan Ashworth** <brendan.ashworth@me.com>
<ide> * [calvinmetcalf](https://github.com/calvinmetcalf) - **Calvin Metcalf** <calvin.metcalf@gmail.com>
<ide> * [claudiorodriguez](https://github.com/claudiorodriguez) - **Claudio Rodriguez** <cjrodr@yahoo.com> | 1 |
Java | Java | remove trailing whitespace in spring-test | 7c84266259227467513aff7c6fe1ea84ba55a5d5 | <ide><path>spring-test/src/main/java/org/springframework/test/context/junit4/rules/SpringMethodRule.java
<ide> private Statement withProfileValueCheck(Statement next, Method testMethod, Objec
<ide> * that is annotated with {@code @ClassRule}.
<ide> */
<ide> private static SpringClassRule validateSpringClassRuleConfiguration(Class<?> testClass) {
<del> Field ruleField = findSpringClassRuleField(testClass).orElseThrow(() ->
<add> Field ruleField = findSpringClassRuleField(testClass).orElseThrow(() ->
<ide> new IllegalStateException(String.format(
<ide> "Failed to find 'public static final SpringClassRule' field in test class [%s]. " +
<ide> "Consult the javadoc for SpringClassRule for details.", testClass.getName())));
<ide><path>spring-test/src/test/java/org/springframework/test/context/junit/jupiter/nested/NestedTestsWithSpringAndJUnitJupiterTestCase.java
<ide> * Integration tests that verify support for {@code @Nested} test classes
<ide> * in conjunction with the {@link SpringExtension} in a JUnit 5 (Jupiter)
<ide> * environment.
<del> *
<add> *
<ide> * @author Sam Brannen
<ide> * @since 5.0
<ide> * @see org.springframework.test.context.junit4.nested.NestedTestsWithSpringRulesTests
<ide><path>spring-test/src/test/java/org/springframework/test/context/junit4/concurrency/SpringJUnit4ConcurrencyTests.java
<ide> * <p>The tests executed by this test class come from a hand-picked collection of test
<ide> * classes within the test suite that is intended to cover most categories of tests
<ide> * that are currently supported by the TestContext Framework on JUnit 4.
<del> *
<add> *
<ide> * <p>The chosen test classes intentionally do <em>not</em> include any classes that
<ide> * fall under the following categories.
<ide> * | 3 |
Javascript | Javascript | raise exception when the socket is closed | 5976d5879621730b28b611da82c7f12286fd5fba | <ide><path>lib/net.js
<ide> Socket.prototype.write = function(data, arg1, arg2) {
<ide> Socket.prototype._write = function(data, encoding, cb) {
<ide> timers.active(this);
<ide>
<add> if (!this._handle) throw new Error('This socket is closed.');
<add>
<ide> // `encoding` is unused right now, `data` is always a buffer.
<ide> var writeReq = this._handle.write(data);
<ide>
<ide><path>test/simple/test-net-write-after-close.js
<add>// Copyright Joyent, Inc. and other Node contributors.
<add>//
<add>// Permission is hereby granted, free of charge, to any person obtaining a
<add>// copy of this software and associated documentation files (the
<add>// "Software"), to deal in the Software without restriction, including
<add>// without limitation the rights to use, copy, modify, merge, publish,
<add>// distribute, sublicense, and/or sell copies of the Software, and to permit
<add>// persons to whom the Software is furnished to do so, subject to the
<add>// following conditions:
<add>//
<add>// The above copyright notice and this permission notice shall be included
<add>// in all copies or substantial portions of the Software.
<add>//
<add>// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
<add>// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
<add>// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
<add>// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
<add>// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
<add>// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
<add>// USE OR OTHER DEALINGS IN THE SOFTWARE.
<add>
<add>var common = require('../common');
<add>var assert = require('assert');
<add>var net = require('net');
<add>
<add>
<add>var server = net.createServer(function(socket) {
<add> setTimeout(function() {
<add> assert.throws(
<add> function() {
<add> socket.write('test');
<add> },
<add> /This socket is closed/
<add> );
<add> process.exit();
<add> }, 250);
<add>});
<add>
<add>server.listen(common.PORT, function() {
<add> var client = net.connect(common.PORT, function() {
<add> client.end();
<add> });
<add>}); | 2 |
PHP | PHP | add arrayaccess implementation | 11ec41d6bc1d7d5806a1e23c0bde3c82cd19b6f4 | <ide><path>lib/Cake/Network/Http/Response.php
<ide> *
<ide> * ### Check the status code
<ide> *
<del> *
<ide> */
<del>class Response {
<add>class Response implements \ArrayAccess {
<ide>
<ide> const STATUS_OK = 200;
<ide> const STATUS_CREATED = 201;
<ide> class Response {
<ide> */
<ide> protected $_body;
<ide>
<add>/**
<add> * Map of public => property names for ArrayAccess
<add> *
<add> * @var array
<add> */
<add> protected $_arrayProperties = [
<add> 'cookies' => '_cookies',
<add> 'headers' => '_headers',
<add> 'body' => '_body',
<add> 'code' => '_code'
<add> ];
<ide> /**
<ide> * Constructor
<ide> *
<ide> public function body($parser = null) {
<ide> return $this->_body;
<ide> }
<ide>
<add>/**
<add> * Read values with array syntax.
<add> *
<add> * @param string $name
<add> * @return mixed
<add> */
<add> public function offsetGet($name) {
<add> if (!isset($this->_arrayProperties[$name])) {
<add> return false;
<add> }
<add> $key = $this->_arrayProperties[$name];
<add> return $this->{$key};
<add> }
<add>
<add>/**
<add> * isset/empty test with array syntax.
<add> *
<add> * @param string $name
<add> * @return boolean
<add> */
<add> public function offsetExists($name) {
<add> if (!isset($this->_arrayProperties[$name])) {
<add> return false;
<add> }
<add> $key = $this->_arrayProperties[$name];
<add> return isset($this->$key);
<add> }
<add>
<add>/**
<add> * Do nothing ArrayAccess is readonly
<add> *
<add> * @param string $name
<add> * @param mixed $value
<add> * @return null
<add> */
<add> public function offsetSet($name, $value) {
<add>
<add> }
<add>
<add>/**
<add> * Do nothing ArrayAccess is readonly
<add> *
<add> * @param string $name
<add> * @return null
<add> */
<add> public function offsetUnset($name) {
<add>
<add> }
<add>
<ide> }
<ide><path>lib/Cake/Test/TestCase/Network/Http/ResponseTest.php
<ide> public function testHeaderParsing() {
<ide> 'Tue, 25 Dec 2012 04:43:47 GMT',
<ide> $response->header('Date')
<ide> );
<add>
<add> $this->assertEquals(
<add> 'text/html;charset="UTF-8"',
<add> $response['headers']['Content-Type']
<add> );
<add> $this->assertTrue(isset($response['headers']));
<ide> }
<ide>
<ide> /**
<ide> public function testBody() {
<ide> $result = $response->body('json_decode');
<ide> $this->assertEquals($data['property'], $result->property);
<ide> $this->assertEquals($encoded, $response->body());
<add>
<add> $this->assertEquals($encoded, $response['body']);
<add> $this->assertTrue(isset($response['body']));
<ide> }
<ide>
<ide> /**
<ide> public function testCookie() {
<ide>
<ide> $result = $response->header('set-cookie');
<ide> $this->assertCount(3, $result, 'Should be an array.');
<add>
<add> $this->assertTrue(isset($response['cookies']));
<add> $this->assertEquals(
<add> 'soon',
<add> $response['cookies']['expiring']['value']
<add> );
<ide> }
<ide>
<ide> /**
<ide> public function testStatusCode() {
<ide> ];
<ide> $response = new Response($headers, '');
<ide> $this->assertEquals(404, $response->statusCode());
<add>
<add> $this->assertEquals(404, $response['code']);
<add> $this->assertTrue(isset($response['code']));
<ide> }
<ide>
<ide> /** | 2 |
Go | Go | fix tcpechoserver.close() in unit test | 809207fc7432b59257d4026b98467ba7414014be | <ide><path>pkg/proxy/network_proxy_test.go
<ide> func (server *TCPEchoServer) Run() {
<ide> }
<ide>
<ide> func (server *TCPEchoServer) LocalAddr() net.Addr { return server.listener.Addr() }
<del>func (server *TCPEchoServer) Close() { server.listener.Addr() }
<add>func (server *TCPEchoServer) Close() { server.listener.Close() }
<ide>
<ide> func (server *UDPEchoServer) Run() {
<ide> go func() { | 1 |
Text | Text | fix typo on "occured" to "occurred" | bdd1d85813f6aef95dbe45de036f473029fa9ce0 | <ide><path>errors/template.md
<ide>
<ide> #### Why This Error Occurred
<ide>
<del><!-- Explain why the error occured. Ensure the description makes it clear why the warning/error exists -->
<add><!-- Explain why the error occurred. Ensure the description makes it clear why the warning/error exists -->
<ide>
<ide> #### Possible Ways to Fix It
<ide> | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.