code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package schema import ( "fmt" ) // MultiLevelFieldReader reads from other field readers, // merging their results along the way in a specific order. You can specify // "levels" and name them in order to read only an exact level or up to // a specific level. // // This is useful for saying things such as "read the field from the state // and config and merge them" or "read the latest value of the field". type MultiLevelFieldReader struct { Readers map[string]FieldReader Levels []string } func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) { return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1]) } func (r *MultiLevelFieldReader) ReadFieldExact( address []string, level string) (FieldReadResult, error) { reader, ok := r.Readers[level] if !ok { return FieldReadResult{}, fmt.Errorf( "Unknown reader level: %s", level) } result, err := reader.ReadField(address) if err != nil { return FieldReadResult{}, fmt.Errorf( "Error reading level %s: %s", level, err) } return result, nil } func (r *MultiLevelFieldReader) ReadFieldMerge( address []string, level string) (FieldReadResult, error) { var result FieldReadResult for _, l := range r.Levels { if r, ok := r.Readers[l]; ok { out, err := r.ReadField(address) if err != nil { return FieldReadResult{}, fmt.Errorf( "Error reading level %s: %s", l, err) } // TODO: computed if out.Exists { result = out } } if l == level { break } } return result, nil }
go
github
https://github.com/hashicorp/terraform
internal/legacy/helper/schema/field_reader_multi.go
{ "include": [ "**/*.ts", "**/*.tsx", "**/.server/**/*.ts", "**/.server/**/*.tsx", "**/.client/**/*.ts", "**/.client/**/*.tsx", "./.react-router/types/**/*" ], "compilerOptions": { "lib": ["DOM", "DOM.Iterable", "ES2022"], "types": ["@react-router/node", "vite/client"], "verbatimModuleSyntax": true, "esModuleInterop": true, "jsx": "react-jsx", "module": "ESNext", "moduleResolution": "Bundler", "resolveJsonModule": true, "target": "ES2022", "strict": true, "allowJs": true, "skipLibCheck": true, "baseUrl": ".", "paths": { "~/*": ["./app/*"] }, "noEmit": true, "rootDirs": [".", "./.react-router/types"] } }
json
github
https://github.com/remix-run/react-router
playground/framework-express/tsconfig.json
/* Minimal main program -- everything is loaded from the library. */ #include "Python.h" #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <stdlib.h> /* __argc, __wargv */ int WINAPI wWinMain( HINSTANCE hInstance, /* handle to current instance */ HINSTANCE hPrevInstance, /* handle to previous instance */ LPWSTR lpCmdLine, /* pointer to command line */ int nCmdShow /* show state of window */ ) { return Py_Main(__argc, __wargv); }
c
github
https://github.com/python/cpython
PC/WinMain.c
import csv import psycopg2 from datetime import datetime def main(): try: db2 = psycopg2.connect(database='firmware',user='firmadyne',password='firmadyne',host='127.0.0.1') cur = db2.cursor() cur.execute('SELECT id,filename FROM image WHERE id>814 ORDER BY id') rows = cur.fetchall() with open('dlink_ftp.dlink.eu_filelist.csv', 'r') as fin: cr = csv.reader(fin,dialect='excel') next(cr) for ftpurl,fsize,fdate,model_ls,sha1,md5 in cr: fname = ftpurl.split('/')[-1] iid = next((_[0] for _ in rows if _[1]==fname),None) if not iid: continue model_ls = eval(model_ls) fsize = int(fsize) fdate = datetime.strptime(fdate, '%Y-%m-%d %H:%M:%S') if not model_ls: model=None else: model=model_ls[0] print('%s %s %s %s'%(fname, iid, model, fdate)) cur.execute('UPDATE image SET file_url=%(ftpurl)s, model=%(model)s, rel_date=%(fdate)s, ' 'file_sha1=%(sha1)s WHERE id=%(iid)s', locals()) db2.commit() finally: db2.close() if __name__=='__main__': main()
unknown
codeparrot/codeparrot-clean
# Copyright: (c) 2018, Matt Davis <mdavis@ansible.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type from datetime import datetime from ansible.errors import AnsibleError from ansible.module_utils._text import to_native from ansible.plugins.action import ActionBase from ansible.plugins.action.reboot import ActionModule as RebootActionModule from ansible.utils.display import Display display = Display() class TimedOutException(Exception): pass class ActionModule(RebootActionModule, ActionBase): TRANSFERS_FILES = False _VALID_ARGS = frozenset(( 'connect_timeout', 'connect_timeout_sec', 'msg', 'post_reboot_delay', 'post_reboot_delay_sec', 'pre_reboot_delay', 'pre_reboot_delay_sec', 'reboot_timeout', 'reboot_timeout_sec', 'shutdown_timeout', 'shutdown_timeout_sec', 'test_command', )) DEFAULT_BOOT_TIME_COMMAND = "(Get-WmiObject -ClassName Win32_OperatingSystem).LastBootUpTime" DEFAULT_CONNECT_TIMEOUT = 5 DEFAULT_PRE_REBOOT_DELAY = 2 DEFAULT_SUDOABLE = False DEFAULT_SHUTDOWN_COMMAND_ARGS = '/r /t {delay_sec} /c "{message}"' DEPRECATED_ARGS = { 'shutdown_timeout': '2.5', 'shutdown_timeout_sec': '2.5', } def __init__(self, *args, **kwargs): super(ActionModule, self).__init__(*args, **kwargs) def get_distribution(self, task_vars): return {'name': 'windows', 'version': '', 'family': ''} def get_shutdown_command(self, task_vars, distribution): return self.DEFAULT_SHUTDOWN_COMMAND def run_test_command(self, distribution, **kwargs): # Need to wrap the test_command in our PowerShell encoded wrapper. This is done to align the command input to a # common shell and to allow the psrp connection plugin to report the correct exit code without manually setting # $LASTEXITCODE for just that plugin. 
test_command = self._task.args.get('test_command', self.DEFAULT_TEST_COMMAND) kwargs['test_command'] = self._connection._shell._encode_script(test_command) super(ActionModule, self).run_test_command(distribution, **kwargs) def perform_reboot(self, task_vars, distribution): shutdown_command = self.get_shutdown_command(task_vars, distribution) shutdown_command_args = self.get_shutdown_command_args(distribution) reboot_command = self._connection._shell._encode_script('{0} {1}'.format(shutdown_command, shutdown_command_args)) display.vvv("{action}: rebooting server...".format(action=self._task.action)) display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) display.debug("{action}: rebooting server with command '{command}'".format(action=self._task.action, command=reboot_command)) result = {} reboot_result = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE) result['start'] = datetime.utcnow() # Test for "A system shutdown has already been scheduled. 
(1190)" and handle it gracefully stdout = reboot_result['stdout'] stderr = reboot_result['stderr'] if reboot_result['rc'] == 1190 or (reboot_result['rc'] != 0 and "(1190)" in reboot_result['stderr']): display.warning('A scheduled reboot was pre-empted by Ansible.') # Try to abort (this may fail if it was already aborted) result1 = self._low_level_execute_command(self._connection._shell._encode_script('shutdown /a'), sudoable=self.DEFAULT_SUDOABLE) # Initiate reboot again result2 = self._low_level_execute_command(reboot_command, sudoable=self.DEFAULT_SUDOABLE) reboot_result['rc'] = result2['rc'] stdout += result1['stdout'] + result2['stdout'] stderr += result1['stderr'] + result2['stderr'] if reboot_result['rc'] != 0: result['failed'] = True result['rebooted'] = False result['msg'] = "Reboot command failed, error was: {stdout} {stderr}".format( stdout=to_native(stdout.strip()), stderr=to_native(stderr.strip())) return result result['failed'] = False return result
unknown
codeparrot/codeparrot-clean
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import annotations import os import typing as t from ansible.module_utils._internal import _no_six from ansible.module_utils.facts.collector import BaseFactCollector class EnvFactCollector(BaseFactCollector): name = 'env' _fact_ids = set() # type: t.Set[str] def collect(self, module=None, collected_facts=None): env_facts = {} env_facts['env'] = {} for k, v in os.environ.items(): env_facts['env'][k] = v return env_facts def __getattr__(importable_name): return _no_six.deprecate(importable_name, __name__, "iteritems")
python
github
https://github.com/ansible/ansible
lib/ansible/module_utils/facts/system/env.py
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Notification.url' db.add_column('panda_notification', 'url', self.gf('django.db.models.fields.URLField')(default=None, max_length=200, null=True), keep_default=False) def backwards(self, orm): # Deleting field 'Notification.url' db.delete_column('panda_notification', 'url') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], 
{'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'panda.activitylog': { 'Meta': {'unique_together': "(('user', 'when'),)", 'object_name': 'ActivityLog'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_logs'", 'to': "orm['auth.User']"}), 'when': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'panda.category': { 'Meta': {'object_name': 'Category'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}) }, 'panda.dataset': { 'Meta': {'ordering': "['-creation_date']", 'object_name': 'Dataset'}, 'categories': 
('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['panda.Category']"}), 'column_schema': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': "orm['auth.User']"}), 'current_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['panda.TaskStatus']", 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initial_upload': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initial_upload_for'", 'null': 'True', 'to': "orm['panda.DataUpload']"}), 'last_modification': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'locked_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'row_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'sample_data': ('panda.fields.JSONField', [], {'default': 'None', 'null': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}) }, 'panda.dataupload': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'DataUpload'}, 'columns': ('panda.fields.JSONField', [], {'null': 'True'}), 'creation_date': 
('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'data_type': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data_uploads'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'dialect': ('panda.fields.JSONField', [], {'null': 'True'}), 'encoding': ('django.db.models.fields.CharField', [], {'default': "'utf-8'", 'max_length': '32'}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'guessed_types': ('panda.fields.JSONField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'sample_data': ('panda.fields.JSONField', [], {'null': 'True'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'panda.export': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'Export'}, 'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exports'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'panda.notification': { 'Meta': {'ordering': "['-sent_at']", 'object_name': 'Notification'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'read_at': 
('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['auth.User']"}), 'related_dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.Dataset']", 'null': 'True'}), 'related_export': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.Export']", 'null': 'True'}), 'related_task': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['panda.TaskStatus']", 'null': 'True'}), 'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Info'", 'max_length': '16'}), 'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}) }, 'panda.relatedupload': { 'Meta': {'ordering': "['creation_date']", 'object_name': 'RelatedUpload'}, 'creation_date': ('django.db.models.fields.DateTimeField', [], {}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_uploads'", 'to': "orm['panda.Dataset']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'size': ('django.db.models.fields.IntegerField', [], {}) }, 'panda.searchlog': { 'Meta': {'object_name': 'SearchLog'}, 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'search_logs'", 'to': "orm['auth.User']"}), 'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'panda.searchsubscription': { 'Meta': {'object_name': 'SearchSubscription'}, 'dataset': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'subscribed_searches'", 'null': 'True', 'to': "orm['panda.Dataset']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_run': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'query': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'query_url': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribed_searches'", 'to': "orm['auth.User']"}) }, 'panda.taskstatus': { 'Meta': {'object_name': 'TaskStatus'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': "orm['auth.User']"}), 'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'PENDING'", 'max_length': '50'}), 'task_description': ('django.db.models.fields.TextField', [], {}), 'task_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'traceback': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}) }, 'panda.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}), 'activation_key_expiration': 
('django.db.models.fields.DateTimeField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) } } complete_apps = ['panda']
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ author: Ansible Core Team connection: psrp short_description: Run tasks over Microsoft PowerShell Remoting Protocol description: - Run commands or put/fetch on a target via PSRP (WinRM plugin) - This is similar to the I(winrm) connection plugin which uses the same underlying transport but instead runs in a PowerShell interpreter. version_added: "2.7" requirements: - pypsrp (Python library) options: # transport options remote_addr: description: - The hostname or IP address of the remote host. default: inventory_hostname type: str vars: - name: ansible_host - name: ansible_psrp_host remote_user: description: - The user to log in as. type: str vars: - name: ansible_user - name: ansible_psrp_user remote_password: description: Authentication password for the C(remote_user). Can be supplied as CLI option. type: str vars: - name: ansible_password - name: ansible_winrm_pass - name: ansible_winrm_password aliases: - password # Needed for --ask-pass to come through on delegation port: description: - The port for PSRP to connect on the remote target. - Default is C(5986) if I(protocol) is not defined or is C(https), otherwise the port is C(5985). type: int vars: - name: ansible_port - name: ansible_psrp_port protocol: description: - Set the protocol to use for the connection. - Default is C(https) if I(port) is not defined or I(port) is not C(5985). choices: - http - https type: str vars: - name: ansible_psrp_protocol path: description: - The URI path to connect to. type: str vars: - name: ansible_psrp_path default: 'wsman' auth: description: - The authentication protocol to use when authenticating the remote user. - The default, C(negotiate), will attempt to use C(Kerberos) if it is available and fall back to C(NTLM) if it isn't. 
type: str vars: - name: ansible_psrp_auth choices: - basic - certificate - negotiate - kerberos - ntlm - credssp default: negotiate cert_validation: description: - Whether to validate the remote server's certificate or not. - Set to C(ignore) to not validate any certificates. - I(ca_cert) can be set to the path of a PEM certificate chain to use in the validation. choices: - validate - ignore default: validate type: str vars: - name: ansible_psrp_cert_validation ca_cert: description: - The path to a PEM certificate chain to use when validating the server's certificate. - This value is ignored if I(cert_validation) is set to C(ignore). type: path vars: - name: ansible_psrp_cert_trust_path - name: ansible_psrp_ca_cert aliases: [ cert_trust_path ] connection_timeout: description: - The connection timeout for making the request to the remote host. - This is measured in seconds. type: int vars: - name: ansible_psrp_connection_timeout default: 30 read_timeout: description: - The read timeout for receiving data from the remote host. - This value must always be greater than I(operation_timeout). - This option requires pypsrp >= 0.3. - This is measured in seconds. type: int vars: - name: ansible_psrp_read_timeout default: 30 version_added: '2.8' reconnection_retries: description: - The number of retries on connection errors. type: int vars: - name: ansible_psrp_reconnection_retries default: 0 version_added: '2.8' reconnection_backoff: description: - The backoff time to use in between reconnection attempts. (First sleeps X, then sleeps 2*X, then sleeps 4*X, ...) - This is measured in seconds. - The C(ansible_psrp_reconnection_backoff) variable was added in Ansible 2.9. type: int vars: - name: ansible_psrp_connection_backoff - name: ansible_psrp_reconnection_backoff default: 2 version_added: '2.8' message_encryption: description: - Controls the message encryption settings, this is different from TLS encryption when I(ansible_psrp_protocol) is C(https). 
- Only the auth protocols C(negotiate), C(kerberos), C(ntlm), and C(credssp) can do message encryption. The other authentication protocols only support encryption when C(protocol) is set to C(https). - C(auto) means means message encryption is only used when not using TLS/HTTPS. - C(always) is the same as C(auto) but message encryption is always used even when running over TLS/HTTPS. - C(never) disables any encryption checks that are in place when running over HTTP and disables any authentication encryption processes. type: str vars: - name: ansible_psrp_message_encryption choices: - auto - always - never default: auto proxy: description: - Set the proxy URL to use when connecting to the remote host. vars: - name: ansible_psrp_proxy type: str ignore_proxy: description: - Will disable any environment proxy settings and connect directly to the remote host. - This option is ignored if C(proxy) is set. vars: - name: ansible_psrp_ignore_proxy type: bool default: 'no' # auth options certificate_key_pem: description: - The local path to an X509 certificate key to use with certificate auth. type: path vars: - name: ansible_psrp_certificate_key_pem certificate_pem: description: - The local path to an X509 certificate to use with certificate auth. type: path vars: - name: ansible_psrp_certificate_pem credssp_auth_mechanism: description: - The sub authentication mechanism to use with CredSSP auth. - When C(auto), both Kerberos and NTLM is attempted with kerberos being preferred. type: str choices: - auto - kerberos - ntlm default: auto vars: - name: ansible_psrp_credssp_auth_mechanism credssp_disable_tlsv1_2: description: - Disables the use of TLSv1.2 on the CredSSP authentication channel. - This should not be set to C(yes) unless dealing with a host that does not have TLSv1.2. default: no type: bool vars: - name: ansible_psrp_credssp_disable_tlsv1_2 credssp_minimum_version: description: - The minimum CredSSP server authentication version that will be accepted. 
- Set to C(5) to ensure the server has been patched and is not vulnerable to CVE 2018-0886. default: 2 type: int vars: - name: ansible_psrp_credssp_minimum_version negotiate_delegate: description: - Allow the remote user the ability to delegate it's credentials to another server, i.e. credential delegation. - Only valid when Kerberos was the negotiated auth or was explicitly set as the authentication. - Ignored when NTLM was the negotiated auth. type: bool vars: - name: ansible_psrp_negotiate_delegate negotiate_hostname_override: description: - Override the remote hostname when searching for the host in the Kerberos lookup. - This allows Ansible to connect over IP but authenticate with the remote server using it's DNS name. - Only valid when Kerberos was the negotiated auth or was explicitly set as the authentication. - Ignored when NTLM was the negotiated auth. type: str vars: - name: ansible_psrp_negotiate_hostname_override negotiate_send_cbt: description: - Send the Channel Binding Token (CBT) structure when authenticating. - CBT is used to provide extra protection against Man in the Middle C(MitM) attacks by binding the outer transport channel to the auth channel. - CBT is not used when using just C(HTTP), only C(HTTPS). default: yes type: bool vars: - name: ansible_psrp_negotiate_send_cbt negotiate_service: description: - Override the service part of the SPN used during Kerberos authentication. - Only valid when Kerberos was the negotiated auth or was explicitly set as the authentication. - Ignored when NTLM was the negotiated auth. default: WSMAN type: str vars: - name: ansible_psrp_negotiate_service # protocol options operation_timeout: description: - Sets the WSMan timeout for each operation. - This is measured in seconds. - This should not exceed the value for C(connection_timeout). type: int vars: - name: ansible_psrp_operation_timeout default: 20 max_envelope_size: description: - Sets the maximum size of each WSMan message sent to the remote host. 
- This is measured in bytes. - Defaults to C(150KiB) for compatibility with older hosts. type: int vars: - name: ansible_psrp_max_envelope_size default: 153600 configuration_name: description: - The name of the PowerShell configuration endpoint to connect to. type: str vars: - name: ansible_psrp_configuration_name default: Microsoft.PowerShell """ import base64 import json import logging import os from ansible import constants as C from ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.errors import AnsibleFileNotFound from ansible.module_utils.parsing.convert_bool import boolean from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.plugins.connection import ConnectionBase from ansible.plugins.shell.powershell import _common_args from ansible.utils.display import Display from ansible.utils.hashing import secure_hash HAS_PYPSRP = True PYPSRP_IMP_ERR = None try: import pypsrp from pypsrp.complex_objects import GenericComplexObject, PSInvocationState, RunspacePoolState from pypsrp.exceptions import AuthenticationError, WinRMError from pypsrp.host import PSHost, PSHostUserInterface from pypsrp.powershell import PowerShell, RunspacePool from pypsrp.shell import Process, SignalCode, WinRS from pypsrp.wsman import WSMan, AUTH_KWARGS from requests.exceptions import ConnectionError, ConnectTimeout except ImportError as err: HAS_PYPSRP = False PYPSRP_IMP_ERR = err display = Display() class Connection(ConnectionBase): transport = 'psrp' module_implementation_preferences = ('.ps1', '.exe', '') allow_executable = False has_pipelining = True allow_extras = True def __init__(self, *args, **kwargs): self.always_pipeline_modules = True self.has_native_async = True self.runspace = None self.host = None self._shell_type = 'powershell' super(Connection, self).__init__(*args, **kwargs) if not C.DEFAULT_DEBUG: logging.getLogger('pypsrp').setLevel(logging.WARNING) logging.getLogger('requests_credssp').setLevel(logging.INFO) 
logging.getLogger('urllib3').setLevel(logging.INFO) def _connect(self): if not HAS_PYPSRP: raise AnsibleError("pypsrp or dependencies are not installed: %s" % to_native(PYPSRP_IMP_ERR)) super(Connection, self)._connect() self._build_kwargs() display.vvv("ESTABLISH PSRP CONNECTION FOR USER: %s ON PORT %s TO %s" % (self._psrp_user, self._psrp_port, self._psrp_host), host=self._psrp_host) if not self.runspace: connection = WSMan(**self._psrp_conn_kwargs) # create our psuedo host to capture the exit code and host output host_ui = PSHostUserInterface() self.host = PSHost(None, None, False, "Ansible PSRP Host", None, host_ui, None) self.runspace = RunspacePool( connection, host=self.host, configuration_name=self._psrp_configuration_name ) display.vvvvv( "PSRP OPEN RUNSPACE: auth=%s configuration=%s endpoint=%s" % (self._psrp_auth, self._psrp_configuration_name, connection.transport.endpoint), host=self._psrp_host ) try: self.runspace.open() except AuthenticationError as e: raise AnsibleConnectionFailure("failed to authenticate with " "the server: %s" % to_native(e)) except WinRMError as e: raise AnsibleConnectionFailure( "psrp connection failure during runspace open: %s" % to_native(e) ) except (ConnectionError, ConnectTimeout) as e: raise AnsibleConnectionFailure( "Failed to connect to the host via PSRP: %s" % to_native(e) ) self._connected = True return self def reset(self): display.vvvvv("PSRP: Reset Connection", host=self._psrp_host) self.runspace = None self._connect() def exec_command(self, cmd, in_data=None, sudoable=True): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) if cmd.startswith(" ".join(_common_args) + " -EncodedCommand"): # This is a PowerShell script encoded by the shell plugin, we will # decode the script and execute it in the runspace instead of # starting a new interpreter to save on time b_command = base64.b64decode(cmd.split(" ")[-1]) script = to_text(b_command, 'utf-16-le') in_data = to_text(in_data, 
errors="surrogate_or_strict", nonstring="passthru") if in_data and in_data.startswith(u"#!"): # ANSIBALLZ wrapper, we need to get the interpreter and execute # that as the script - note this won't work as basic.py relies # on packages not available on Windows, once fixed we can enable # this path interpreter = to_native(in_data.splitlines()[0][2:]) # script = "$input | &'%s' -" % interpreter # in_data = to_text(in_data) raise AnsibleError("cannot run the interpreter '%s' on the psrp " "connection plugin" % interpreter) # call build_module_command to get the bootstrap wrapper text bootstrap_wrapper = self._shell.build_module_command('', '', '') if bootstrap_wrapper == cmd: # Do not display to the user each invocation of the bootstrap wrapper display.vvv("PSRP: EXEC (via pipeline wrapper)") else: display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host) else: # In other cases we want to execute the cmd as the script. We add on the 'exit $LASTEXITCODE' to ensure the # rc is propagated back to the connection plugin. 
script = to_text(u"%s\nexit $LASTEXITCODE" % cmd) display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host) rc, stdout, stderr = self._exec_psrp_script(script, in_data) return rc, stdout, stderr def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host) out_path = self._shell._unquote(out_path) script = u'''begin { $ErrorActionPreference = "Stop" $path = '%s' $fd = [System.IO.File]::Create($path) $algo = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create() $bytes = @() } process { $bytes = [System.Convert]::FromBase64String($input) $algo.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) > $null $fd.Write($bytes, 0, $bytes.Length) } end { $fd.Close() $algo.TransformFinalBlock($bytes, 0, 0) > $null $hash = [System.BitConverter]::ToString($algo.Hash) $hash = $hash.Replace("-", "").ToLowerInvariant() Write-Output -InputObject "{`"sha1`":`"$hash`"}" }''' % self._shell._escape(out_path) cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False) b_in_path = to_bytes(in_path, errors='surrogate_or_strict') if not os.path.exists(b_in_path): raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path)) in_size = os.path.getsize(b_in_path) buffer_size = int(self.runspace.connection.max_payload_size / 4 * 3) # copying files is faster when using the raw WinRM shell and not PSRP # we will create a WinRS shell just for this process # TODO: speed this up as there is overhead creating a shell for this with WinRS(self.runspace.connection, codepage=65001) as shell: process = Process(shell, cmd_parts[0], cmd_parts[1:]) process.begin_invoke() offset = 0 with open(b_in_path, 'rb') as src_file: for data in iter((lambda: src_file.read(buffer_size)), b""): offset += len(data) display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" % (in_path, out_path, offset, len(data)), host=self._psrp_host) b64_data = 
base64.b64encode(data) + b"\r\n" process.send(b64_data, end=(src_file.tell() == in_size)) # the file was empty, return empty buffer if offset == 0: process.send(b"", end=True) process.end_invoke() process.signal(SignalCode.CTRL_C) if process.rc != 0: raise AnsibleError(to_native(process.stderr)) put_output = json.loads(process.stdout) remote_sha1 = put_output.get("sha1") if not remote_sha1: raise AnsibleError("Remote sha1 was not returned, stdout: '%s', " "stderr: '%s'" % (to_native(process.stdout), to_native(process.stderr))) local_sha1 = secure_hash(in_path) if not remote_sha1 == local_sha1: raise AnsibleError("Remote sha1 hash %s does not match local hash " "%s" % (to_native(remote_sha1), to_native(local_sha1))) def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._psrp_host) in_path = self._shell._unquote(in_path) out_path = out_path.replace('\\', '/') # because we are dealing with base64 data we need to get the max size # of the bytes that the base64 size would equal max_b64_size = int(self.runspace.connection.max_payload_size - (self.runspace.connection.max_payload_size / 4 * 3)) buffer_size = max_b64_size - (max_b64_size % 1024) # setup the file stream with read only mode setup_script = '''$ErrorActionPreference = "Stop" $path = '%s' if (Test-Path -Path $path -PathType Leaf) { $fs = New-Object -TypeName System.IO.FileStream -ArgumentList @( $path, [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::Read ) $buffer_size = %d } elseif (Test-Path -Path $path -PathType Container) { Write-Output -InputObject "[DIR]" } else { Write-Error -Message "$path does not exist" $host.SetShouldExit(1) }''' % (self._shell._escape(in_path), buffer_size) # read the file stream at the offset and return the b64 string read_script = '''$ErrorActionPreference = "Stop" $fs.Seek(%d, [System.IO.SeekOrigin]::Begin) > $null $buffer = New-Object -TypeName 
byte[] -ArgumentList $buffer_size $bytes_read = $fs.Read($buffer, 0, $buffer_size) if ($bytes_read -gt 0) { $bytes = $buffer[0..($bytes_read - 1)] Write-Output -InputObject ([System.Convert]::ToBase64String($bytes)) }''' # need to run the setup script outside of the local scope so the # file stream stays active between fetch operations rc, stdout, stderr = self._exec_psrp_script(setup_script, use_local_scope=False, force_stop=True) if rc != 0: raise AnsibleError("failed to setup file stream for fetch '%s': %s" % (out_path, to_native(stderr))) elif stdout.strip() == '[DIR]': # to be consistent with other connection plugins, we assume the caller has created the target dir return b_out_path = to_bytes(out_path, errors='surrogate_or_strict') # to be consistent with other connection plugins, we assume the caller has created the target dir offset = 0 with open(b_out_path, 'wb') as out_file: while True: display.vvvvv("PSRP FETCH %s to %s (offset=%d" % (in_path, out_path, offset), host=self._psrp_host) rc, stdout, stderr = self._exec_psrp_script(read_script % offset, force_stop=True) if rc != 0: raise AnsibleError("failed to transfer file to '%s': %s" % (out_path, to_native(stderr))) data = base64.b64decode(stdout.strip()) out_file.write(data) if len(data) < buffer_size: break offset += len(data) rc, stdout, stderr = self._exec_psrp_script("$fs.Close()", force_stop=True) if rc != 0: display.warning("failed to close remote file stream of file " "'%s': %s" % (in_path, to_native(stderr))) def close(self): if self.runspace and self.runspace.state == RunspacePoolState.OPENED: display.vvvvv("PSRP CLOSE RUNSPACE: %s" % (self.runspace.id), host=self._psrp_host) self.runspace.close() self.runspace = None self._connected = False def _build_kwargs(self): self._psrp_host = self.get_option('remote_addr') self._psrp_user = self.get_option('remote_user') self._psrp_pass = self.get_option('remote_password') protocol = self.get_option('protocol') port = self.get_option('port') if protocol 
is None and port is None: protocol = 'https' port = 5986 elif protocol is None: protocol = 'https' if int(port) != 5985 else 'http' elif port is None: port = 5986 if protocol == 'https' else 5985 self._psrp_protocol = protocol self._psrp_port = int(port) self._psrp_path = self.get_option('path') self._psrp_auth = self.get_option('auth') # cert validation can either be a bool or a path to the cert cert_validation = self.get_option('cert_validation') cert_trust_path = self.get_option('ca_cert') if cert_validation == 'ignore': self._psrp_cert_validation = False elif cert_trust_path is not None: self._psrp_cert_validation = cert_trust_path else: self._psrp_cert_validation = True self._psrp_connection_timeout = self.get_option('connection_timeout') # Can be None self._psrp_read_timeout = self.get_option('read_timeout') # Can be None self._psrp_message_encryption = self.get_option('message_encryption') self._psrp_proxy = self.get_option('proxy') self._psrp_ignore_proxy = boolean(self.get_option('ignore_proxy')) self._psrp_operation_timeout = int(self.get_option('operation_timeout')) self._psrp_max_envelope_size = int(self.get_option('max_envelope_size')) self._psrp_configuration_name = self.get_option('configuration_name') self._psrp_reconnection_retries = int(self.get_option('reconnection_retries')) self._psrp_reconnection_backoff = float(self.get_option('reconnection_backoff')) self._psrp_certificate_key_pem = self.get_option('certificate_key_pem') self._psrp_certificate_pem = self.get_option('certificate_pem') self._psrp_credssp_auth_mechanism = self.get_option('credssp_auth_mechanism') self._psrp_credssp_disable_tlsv1_2 = self.get_option('credssp_disable_tlsv1_2') self._psrp_credssp_minimum_version = self.get_option('credssp_minimum_version') self._psrp_negotiate_send_cbt = self.get_option('negotiate_send_cbt') self._psrp_negotiate_delegate = self.get_option('negotiate_delegate') self._psrp_negotiate_hostname_override = self.get_option('negotiate_hostname_override') 
self._psrp_negotiate_service = self.get_option('negotiate_service') supported_args = [] for auth_kwarg in AUTH_KWARGS.values(): supported_args.extend(auth_kwarg) extra_args = set([v.replace('ansible_psrp_', '') for v in self.get_option('_extras')]) unsupported_args = extra_args.difference(supported_args) for arg in unsupported_args: display.warning("ansible_psrp_%s is unsupported by the current " "psrp version installed" % arg) self._psrp_conn_kwargs = dict( server=self._psrp_host, port=self._psrp_port, username=self._psrp_user, password=self._psrp_pass, ssl=self._psrp_protocol == 'https', path=self._psrp_path, auth=self._psrp_auth, cert_validation=self._psrp_cert_validation, connection_timeout=self._psrp_connection_timeout, encryption=self._psrp_message_encryption, proxy=self._psrp_proxy, no_proxy=self._psrp_ignore_proxy, max_envelope_size=self._psrp_max_envelope_size, operation_timeout=self._psrp_operation_timeout, certificate_key_pem=self._psrp_certificate_key_pem, certificate_pem=self._psrp_certificate_pem, credssp_auth_mechanism=self._psrp_credssp_auth_mechanism, credssp_disable_tlsv1_2=self._psrp_credssp_disable_tlsv1_2, credssp_minimum_version=self._psrp_credssp_minimum_version, negotiate_send_cbt=self._psrp_negotiate_send_cbt, negotiate_delegate=self._psrp_negotiate_delegate, negotiate_hostname_override=self._psrp_negotiate_hostname_override, negotiate_service=self._psrp_negotiate_service, ) # Check if PSRP version supports newer read_timeout argument (needs pypsrp 0.3.0+) if hasattr(pypsrp, 'FEATURES') and 'wsman_read_timeout' in pypsrp.FEATURES: self._psrp_conn_kwargs['read_timeout'] = self._psrp_read_timeout elif self._psrp_read_timeout is not None: display.warning("ansible_psrp_read_timeout is unsupported by the current psrp version installed, " "using ansible_psrp_connection_timeout value for read_timeout instead.") # Check if PSRP version supports newer reconnection_retries argument (needs pypsrp 0.3.0+) if hasattr(pypsrp, 'FEATURES') and 
'wsman_reconnections' in pypsrp.FEATURES: self._psrp_conn_kwargs['reconnection_retries'] = self._psrp_reconnection_retries self._psrp_conn_kwargs['reconnection_backoff'] = self._psrp_reconnection_backoff else: if self._psrp_reconnection_retries is not None: display.warning("ansible_psrp_reconnection_retries is unsupported by the current psrp version installed.") if self._psrp_reconnection_backoff is not None: display.warning("ansible_psrp_reconnection_backoff is unsupported by the current psrp version installed.") # add in the extra args that were set for arg in extra_args.intersection(supported_args): option = self.get_option('_extras')['ansible_psrp_%s' % arg] self._psrp_conn_kwargs[arg] = option def _exec_psrp_script(self, script, input_data=None, use_local_scope=True, force_stop=False): ps = PowerShell(self.runspace) ps.add_script(script, use_local_scope=use_local_scope) ps.invoke(input=input_data) rc, stdout, stderr = self._parse_pipeline_result(ps) if force_stop: # This is usually not needed because we close the Runspace after our exec and we skip the call to close the # pipeline manually to save on some time. Set to True when running multiple exec calls in the same runspace. # Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we # can call stop without any issues. ps.state = PSInvocationState.RUNNING ps.stop() return rc, stdout, stderr def _parse_pipeline_result(self, pipeline): """ PSRP doesn't have the same concept as other protocols with its output. We need some extra logic to convert the pipeline streams and host output into the format that Ansible understands. 
:param pipeline: The finished PowerShell pipeline that invoked our commands :return: rc, stdout, stderr based on the pipeline output """ # we try and get the rc from our host implementation, this is set if # exit or $host.SetShouldExit() is called in our pipeline, if not we # set to 0 if the pipeline had not errors and 1 if it did rc = self.host.rc or (1 if pipeline.had_errors else 0) # TODO: figure out a better way of merging this with the host output stdout_list = [] for output in pipeline.output: # Not all pipeline outputs are a string or contain a __str__ value, # we will create our own output based on the properties of the # complex object if that is the case. if isinstance(output, GenericComplexObject) and output.to_string is None: obj_lines = output.property_sets for key, value in output.adapted_properties.items(): obj_lines.append(u"%s: %s" % (key, value)) for key, value in output.extended_properties.items(): obj_lines.append(u"%s: %s" % (key, value)) output_msg = u"\n".join(obj_lines) else: output_msg = to_text(output, nonstring='simplerepr') stdout_list.append(output_msg) if len(self.host.ui.stdout) > 0: stdout_list += self.host.ui.stdout stdout = u"\r\n".join(stdout_list) stderr_list = [] for error in pipeline.streams.error: # the error record is not as fully fleshed out like we usually get # in PS, we will manually create it here command_name = "%s : " % error.command_name if error.command_name else '' position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else '' error_msg = "%s%s\r\n%s" \ " + CategoryInfo : %s\r\n" \ " + FullyQualifiedErrorId : %s" \ % (command_name, str(error), position, error.message, error.fq_error) stacktrace = error.script_stacktrace if self._play_context.verbosity >= 3 and stacktrace is not None: error_msg += "\r\nStackTrace:\r\n%s" % stacktrace stderr_list.append(error_msg) if len(self.host.ui.stderr) > 0: stderr_list += self.host.ui.stderr stderr = u"\r\n".join([to_text(o) for o in 
stderr_list]) display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host) display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host) display.vvvvv("PSRP STDERR: %s" % stderr, host=self._psrp_host) # reset the host back output back to defaults, needed if running # multiple pipelines on the same RunspacePool self.host.rc = 0 self.host.ui.stdout = [] self.host.ui.stderr = [] return rc, to_bytes(stdout, encoding='utf-8'), to_bytes(stderr, encoding='utf-8')
unknown
codeparrot/codeparrot-clean
name: Publish NuGet Packages on: release: types: [published] permissions: contents: read jobs: package-nuget: runs-on: windows-latest if: ${{ github.repository == 'twbs/bootstrap' && startsWith(github.event.release.tag_name, 'v') }} env: GITHUB_REF_NAME: ${{ github.ref_name }} steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: persist-credentials: false - name: Set up NuGet uses: nuget/setup-nuget@323ab0502cd38fdc493335025a96c8fdb0edc71f # v2.0.1 with: nuget-api-key: ${{ secrets.NuGetAPIKey }} nuget-version: '5.x' - name: Pack NuGet packages shell: pwsh run: | $bsversion = $env:GITHUB_REF_NAME.Substring(1) nuget pack "nuget\bootstrap.nuspec" -Verbosity detailed -NonInteractive -BasePath . -Version $bsversion nuget pack "nuget\bootstrap.sass.nuspec" -Verbosity detailed -NonInteractive -BasePath . -Version $bsversion nuget push "bootstrap.$bsversion.nupkg" -Verbosity detailed -NonInteractive -Source "https://api.nuget.org/v3/index.json" nuget push "bootstrap.sass.$bsversion.nupkg" -Verbosity detailed -NonInteractive -Source "https://api.nuget.org/v3/index.json"
unknown
github
https://github.com/twbs/bootstrap
.github/workflows/publish-nuget.yml
# Copyright (c) 2014 INFN - "Istituto Nazionale di Fisica Nucleare" - Italy # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.conf import settings from django.utils.translation import ugettext as _ from keystoneclient.exceptions import AuthorizationFailure from keystoneclient.v3.client import Client as BaseClient from keystoneclient.v3.projects import Project as ProjectRes from openstack_auth import backend as base_backend from openstack_auth.exceptions import KeystoneAuthException from openstack_auth.user import create_user_from_token from openstack_auth.user import Token LOG = logging.getLogger(__name__) class ExtClient(BaseClient): def __init__(self, **kwargs): if 'raw_token' in kwargs: self.raw_token = kwargs['raw_token'] del kwargs['raw_token'] else: self.raw_token = None super(ExtClient, self).__init__(**kwargs) def get_raw_token_from_identity_service(self, auth_url, user_id=None, username=None, user_domain_id=None, user_domain_name=None, password=None, domain_id=None, domain_name=None, project_id=None, project_name=None, project_domain_id=None, project_domain_name=None, token=None, trust_id=None, **kwargs): if self.raw_token == None: return super(ExtClient, self).get_raw_token_from_identity_service( auth_url, user_id, username, user_domain_id, user_domain_name, password, domain_id, domain_name, project_id, project_name, project_domain_id, project_domain_name, token, trust_id) try: main_tenant_id = None main_domain_id = None 
headers = {'Accept' : 'application/json', 'X-Auth-Token' : self.raw_token} url = auth_url + "/OS-FEDERATION/projects" resp, body = self.request(url, 'GET', headers=headers) prj_list = body.get('projects', None) if prj_list and project_id: for p_item in prj_list: if p_item['id'] == project_id: main_tenant_id = project_id main_domain_id = p_item['domain_id'] if main_tenant_id == None: raise AuthorizationFailure("Cannot find required project for user") elif prj_list: main_tenant_id = prj_list[0]['id'] main_domain_id = prj_list[0]['domain_id'] else: raise AuthorizationFailure("Cannot find any project for user") headers = {'Accept' : 'application/json'} url = auth_url + "/auth/tokens" body = {'auth': {'identity': {}, 'scope' : {}}} ident = body['auth']['identity'] ident['methods'] = ['saml2'] ident['saml2'] = {'id' : self.raw_token} body['auth']['scope']['project'] = { "id" : main_tenant_id, 'domain' : {'id' : main_domain_id} } # # TODO verify workaround in keystone/token/providers/common.py (504) # resp, body = self.request(url, 'POST', body=body, headers=headers) return resp, body except: LOG.error("Cannot get scoped token", exc_info=True) raise AuthorizationFailure("Cannot get scoped token") # # Register this backend in /usr/share/openstack-dashboard/openstack_dashboard/settings.py # AUTHENTICATION_BACKENDS = ('openstack_auth_shib.backend.ExtKeystoneBackend',) # class ExtKeystoneBackend(base_backend.KeystoneBackend): def _convert_tlist(self, tlist): result = list() for prj_dict in tlist: result.append(ProjectRes(None, prj_dict, True)) return result def get_user(self, user_id): user = super(ExtKeystoneBackend, self).get_user(user_id) if (user and hasattr(self, 'request') and 'fed_projects' in self.request.session): user.authorized_tenants = self._convert_tlist(self.request.session['fed_projects']) return user def authenticate(self, request=None, username=None, password=None, user_domain_name=None, auth_url=None, rawtoken=None): insecure = getattr(settings, 
'OPENSTACK_SSL_NO_VERIFY', False) cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None) ep_type = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL') secret_key = getattr(settings, 'SECRET_KEY', None) # # Authetication with username and password # if password: parentObj = super(ExtKeystoneBackend, self) return parentObj.authenticate(request, username, password, user_domain_name, auth_url) # # Authetication with os-federation token # if not rawtoken: raise KeystoneAuthException('Missing unscoped token') try: client = ExtClient(raw_token=rawtoken, auth_url=auth_url, insecure=insecure, cacert=cacert, debug=settings.DEBUG) auth_ref = client.auth_ref headers = {'Accept' : 'application/json', 'X-Auth-Token' : rawtoken} url = auth_url + "/OS-FEDERATION/projects" resp, body = client.request(url, 'GET', headers=headers) if 'projects' in body: request.session['fed_projects'] = body['projects'] project_token = Token(auth_ref) user = create_user_from_token(request, project_token, client.service_catalog.url_for(endpoint_type=ep_type)) user.authorized_tenants = self._convert_tlist(request.session['fed_projects']) if request is not None: request.session['unscoped_token'] = rawtoken request.user = user # Support client caching to save on auth calls. setattr(request, base_backend.KEYSTONE_CLIENT_ATTR, client) return user except: LOG.error("Failed to get scoped token", exc_info=True) raise KeystoneAuthException("Cannot authenticate user with token")
unknown
codeparrot/codeparrot-clean
""" FormWizard class -- implements a multi-page form, validating between each step and storing the form's state as HTML hidden fields so that no state is stored on the server side. """ import cPickle as pickle from django import forms from django.conf import settings from django.contrib.formtools.utils import security_hash, form_hmac from django.http import Http404 from django.shortcuts import render_to_response from django.template.context import RequestContext from django.utils.crypto import constant_time_compare from django.utils.hashcompat import md5_constructor from django.utils.translation import ugettext_lazy as _ from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_protect class FormWizard(object): # The HTML (and POST data) field name for the "step" variable. step_field_name="wizard_step" # METHODS SUBCLASSES SHOULDN'T OVERRIDE ################################### def __init__(self, form_list, initial=None): """ Start a new wizard with a list of forms. form_list should be a list of Form classes (not instances). """ self.form_list = form_list[:] self.initial = initial or {} # Dictionary of extra template context variables. self.extra_context = {} # A zero-based counter keeping track of which step we're in. self.step = 0 def __repr__(self): return "step: %d\nform_list: %s\ninitial_data: %s" % (self.step, self.form_list, self.initial) def get_form(self, step, data=None): "Helper method that returns the Form instance for the given step." return self.form_list[step](data, prefix=self.prefix_for_step(step), initial=self.initial.get(step, None)) def num_steps(self): "Helper method that returns the number of steps." # You might think we should just set "self.num_steps = len(form_list)" # in __init__(), but this calculation needs to be dynamic, because some # hook methods might alter self.form_list. 
return len(self.form_list) def _check_security_hash(self, token, request, form): expected = self.security_hash(request, form) if constant_time_compare(token, expected): return True else: # Fall back to Django 1.2 method, for compatibility with forms that # are in the middle of being used when the upgrade occurs. However, # we don't want to do this fallback if a subclass has provided their # own security_hash method - because they might have implemented a # more secure method, and this would punch a hole in that. # PendingDeprecationWarning <- left here to remind us that this # compatibility fallback should be removed in Django 1.5 FormWizard_expected = FormWizard.security_hash(self, request, form) if expected == FormWizard_expected: # They didn't override security_hash, do the fallback: old_expected = security_hash(request, form) return constant_time_compare(token, old_expected) else: return False @method_decorator(csrf_protect) def __call__(self, request, *args, **kwargs): """ Main method that does all the hard work, conforming to the Django view interface. """ if 'extra_context' in kwargs: self.extra_context.update(kwargs['extra_context']) current_step = self.determine_step(request, *args, **kwargs) self.parse_params(request, *args, **kwargs) # Sanity check. if current_step >= self.num_steps(): raise Http404('Step %s does not exist' % current_step) # Validate and process all the previous forms before instantiating the # current step's form in case self.process_step makes changes to # self.form_list. # If any of them fails validation, that must mean the validator relied # on some other input, such as an external Web site. 
# It is also possible that alidation might fail under certain attack # situations: an attacker might be able to bypass previous stages, and # generate correct security hashes for all the skipped stages by virtue # of: # 1) having filled out an identical form which doesn't have the # validation (and does something different at the end), # 2) or having filled out a previous version of the same form which # had some validation missing, # 3) or previously having filled out the form when they had more # privileges than they do now. # # Since the hashes only take into account values, and not other other # validation the form might do, we must re-do validation now for # security reasons. previous_form_list = [] for i in range(current_step): f = self.get_form(i, request.POST) if not self._check_security_hash(request.POST.get("hash_%d" % i, ''), request, f): return self.render_hash_failure(request, i) if not f.is_valid(): return self.render_revalidation_failure(request, i, f) else: self.process_step(request, f, i) previous_form_list.append(f) # Process the current step. If it's valid, go to the next step or call # done(), depending on whether any steps remain. if request.method == 'POST': form = self.get_form(current_step, request.POST) else: form = self.get_form(current_step) if form.is_valid(): self.process_step(request, form, current_step) next_step = current_step + 1 if next_step == self.num_steps(): return self.done(request, previous_form_list + [form]) else: form = self.get_form(next_step) self.step = current_step = next_step return self.render(form, request, current_step) def render(self, form, request, step, context=None): "Renders the given Form object, returning an HttpResponse." old_data = request.POST prev_fields = [] if old_data: hidden = forms.HiddenInput() # Collect all data from previous steps and render it as HTML hidden fields. 
for i in range(step): old_form = self.get_form(i, old_data) hash_name = 'hash_%s' % i prev_fields.extend([bf.as_hidden() for bf in old_form]) prev_fields.append(hidden.render(hash_name, old_data.get(hash_name, self.security_hash(request, old_form)))) return self.render_template(request, form, ''.join(prev_fields), step, context) # METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ######################## def prefix_for_step(self, step): "Given the step, returns a Form prefix to use." return str(step) def render_hash_failure(self, request, step): """ Hook for rendering a template if a hash check failed. step is the step that failed. Any previous step is guaranteed to be valid. This default implementation simply renders the form for the given step, but subclasses may want to display an error message, etc. """ return self.render(self.get_form(step), request, step, context={'wizard_error': _('We apologize, but your form has expired. Please continue filling out the form from this page.')}) def render_revalidation_failure(self, request, step, form): """ Hook for rendering a template if final revalidation failed. It is highly unlikely that this point would ever be reached, but See the comment in __call__() for an explanation. """ return self.render(form, request, step) def security_hash(self, request, form): """ Calculates the security hash for the given HttpRequest and Form instances. Subclasses may want to take into account request-specific information, such as the IP address. """ return form_hmac(form) def determine_step(self, request, *args, **kwargs): """ Given the request object and whatever *args and **kwargs were passed to __call__(), returns the current step (which is zero-based). Note that the result should not be trusted. It may even be a completely invalid number. It's not the job of this method to validate it. 
""" if not request.POST: return 0 try: step = int(request.POST.get(self.step_field_name, 0)) except ValueError: return 0 return step def parse_params(self, request, *args, **kwargs): """ Hook for setting some state, given the request object and whatever *args and **kwargs were passed to __call__(), sets some state. This is called at the beginning of __call__(). """ pass def get_template(self, step): """ Hook for specifying the name of the template to use for a given step. Note that this can return a tuple of template names if you'd like to use the template system's select_template() hook. """ return 'forms/wizard.html' def render_template(self, request, form, previous_fields, step, context=None): """ Renders the template for the given step, returning an HttpResponse object. Override this method if you want to add a custom context, return a different MIME type, etc. If you only need to override the template name, use get_template() instead. The template will be rendered with the following context: step_field -- The name of the hidden field containing the step. step0 -- The current step (zero-based). step -- The current step (one-based). step_count -- The total number of steps. form -- The Form instance for the current step (either empty or with errors). previous_fields -- A string representing every previous data field, plus hashes for completed forms, all in the form of hidden fields. Note that you'll need to run this through the "safe" template filter, to prevent auto-escaping, because it's raw HTML. """ context = context or {} context.update(self.extra_context) return render_to_response(self.get_template(step), dict(context, step_field=self.step_field_name, step0=step, step=step + 1, step_count=self.num_steps(), form=form, previous_fields=previous_fields ), context_instance=RequestContext(request)) def process_step(self, request, form, step): """ Hook for modifying the FormWizard's internal state, given a fully validated Form object. 
The Form is guaranteed to have clean, valid data. This method should *not* modify any of that data. Rather, it might want to set self.extra_context or dynamically alter self.form_list, based on previously submitted forms. Note that this method is called every time a page is rendered for *all* submitted steps. """ pass # METHODS SUBCLASSES MUST OVERRIDE ######################################## def done(self, request, form_list): """ Hook for doing something with the validated data. This is responsible for the final processing. form_list is a list of Form instances, each containing clean, valid data. """ raise NotImplementedError("Your %s class has not defined a done() method, which is required." % self.__class__.__name__)
unknown
codeparrot/codeparrot-clean
"Functions that help with dynamically creating decorators for views." try: from contextlib import ContextDecorator except ImportError: ContextDecorator = None from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps from django.utils import six class classonlymethod(classmethod): def __get__(self, instance, owner): if instance is not None: raise AttributeError("This method is available only on the class, not on instances.") return super(classonlymethod, self).__get__(instance, owner) def method_decorator(decorator): """ Converts a function decorator into a method decorator """ # 'func' is a function at the time it is passed to _dec, but will eventually # be a method of the class it is defined on. def _dec(func): def _wrapper(self, *args, **kwargs): @decorator def bound_func(*args2, **kwargs2): return func.__get__(self, type(self))(*args2, **kwargs2) # bound_func has the signature that 'decorator' expects i.e. no # 'self' argument, but it is a closure over self so it can call # 'func' correctly. return bound_func(*args, **kwargs) # In case 'decorator' adds attributes to the function it decorates, we # want to copy those. We don't have access to bound_func in this scope, # but we can cheat by using it on a dummy function. @decorator def dummy(*args, **kwargs): pass update_wrapper(_wrapper, dummy) # Need to preserve any existing attributes of 'func', including the name. update_wrapper(_wrapper, func) return _wrapper update_wrapper(_dec, decorator, assigned=available_attrs(decorator)) # Change the name to aid debugging. if hasattr(decorator, '__name__'): _dec.__name__ = 'method_decorator(%s)' % decorator.__name__ else: _dec.__name__ = 'method_decorator(%s)' % decorator.__class__.__name__ return _dec def decorator_from_middleware_with_args(middleware_class): """ Like decorator_from_middleware, but returns a function that accepts the arguments to be passed to the middleware_class. Use like:: cache_page = decorator_from_middleware_with_args(CacheMiddleware) # ... 
@cache_page(3600) def my_view(request): # ... """ return make_middleware_decorator(middleware_class) def decorator_from_middleware(middleware_class): """ Given a middleware class (not an instance), returns a view decorator. This lets you use middleware functionality on a per-view basis. The middleware is created with no params passed. """ return make_middleware_decorator(middleware_class)() def available_attrs(fn): """ Return the list of functools-wrappable attributes on a callable. This is required as a workaround for http://bugs.python.org/issue3445 under Python 2. """ if six.PY3: return WRAPPER_ASSIGNMENTS else: return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a)) def make_middleware_decorator(middleware_class): def _make_decorator(*m_args, **m_kwargs): middleware = middleware_class(*m_args, **m_kwargs) def _decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): if hasattr(middleware, 'process_request'): result = middleware.process_request(request) if result is not None: return result if hasattr(middleware, 'process_view'): result = middleware.process_view(request, view_func, args, kwargs) if result is not None: return result try: response = view_func(request, *args, **kwargs) except Exception as e: if hasattr(middleware, 'process_exception'): result = middleware.process_exception(request, e) if result is not None: return result raise if hasattr(response, 'render') and callable(response.render): if hasattr(middleware, 'process_template_response'): response = middleware.process_template_response(request, response) # Defer running of process_response until after the template # has been rendered: if hasattr(middleware, 'process_response'): callback = lambda response: middleware.process_response(request, response) response.add_post_render_callback(callback) else: if hasattr(middleware, 'process_response'): return middleware.process_response(request, response) return response return 
_wrapped_view return _decorator return _make_decorator if ContextDecorator is None: # ContextDecorator was introduced in Python 3.2 # See https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator class ContextDecorator(object): """ A base class that enables a context manager to also be used as a decorator. """ def __call__(self, func): @wraps(func, assigned=available_attrs(func)) def inner(*args, **kwargs): with self: return func(*args, **kwargs) return inner class classproperty(object): def __init__(self, method=None): self.fget = method def __get__(self, instance, owner): return self.fget(owner) def getter(self, method): self.fget = method return self
unknown
codeparrot/codeparrot-clean
# frozen_string_literal: true require_relative "vendored_tsort" module Bundler class SpecSet include Enumerable include TSort def initialize(specs) @specs = specs end def for(dependencies, platforms = [nil], legacy_platforms = [nil], skips: []) if [true, false].include?(platforms) Bundler::SharedHelpers.feature_removed! \ "SpecSet#for received a `check` parameter, but that's no longer used and deprecated. " \ "SpecSet#for always implicitly performs validation. Please remove this parameter" end materialize_dependencies(dependencies, platforms, skips: skips) @materializations.flat_map(&:specs).uniq end def normalize_platforms!(deps, platforms) remove_invalid_platforms!(deps, platforms) add_extra_platforms!(platforms) platforms.map! do |platform| next platform if platform == Gem::Platform::RUBY begin Integer(platform.version) rescue ArgumentError, TypeError next platform end less_specific_platform = Gem::Platform.new([platform.cpu, platform.os, nil]) next platform if incomplete_for_platform?(deps, less_specific_platform) less_specific_platform end.uniq! end def add_originally_invalid_platforms!(platforms, originally_invalid_platforms) originally_invalid_platforms.each do |originally_invalid_platform| platforms << originally_invalid_platform if complete_platform(originally_invalid_platform) end end def remove_invalid_platforms!(deps, platforms, skips: []) invalid_platforms = [] platforms.reject! do |platform| next false if skips.include?(platform) invalid = incomplete_for_platform?(deps, platform) invalid_platforms << platform if invalid invalid end invalid_platforms end def add_extra_platforms!(platforms) if @specs.empty? platforms.concat([Gem::Platform::RUBY]).uniq return end new_platforms = all_platforms.select do |platform| next if platforms.include?(platform) next unless Gem::Platform.generic(platform) == Gem::Platform::RUBY complete_platform(platform) end return if new_platforms.empty? 
platforms.concat(new_platforms) return if new_platforms.include?(Bundler.local_platform) less_specific_platform = new_platforms.find {|platform| platform != Gem::Platform::RUBY && Bundler.local_platform === platform && platform === Bundler.local_platform } platforms.delete(Bundler.local_platform) if less_specific_platform end def validate_deps(s) s.runtime_dependencies.each do |dep| next if dep.name == "bundler" return :missing unless names.include?(dep.name) return :invalid if none? {|spec| dep.matches_spec?(spec) } end :valid end def [](key) key = key.name if key.respond_to?(:name) lookup[key]&.reverse || [] end def []=(key, value) delete_by_name(key) add_spec(value) end def delete(specs) Array(specs).each {|spec| remove_spec(spec) } end def sort! self end def to_a sorted.dup end def to_hash lookup.dup end def materialize(deps) materialize_dependencies(deps) SpecSet.new(materialized_specs) end # Materialize for all the specs in the spec set, regardless of what platform they're for # @return [Array<Gem::Specification>] def materialized_for_all_platforms @specs.map do |s| next s unless s.is_a?(LazySpecification) spec = s.materialize_for_cache raise GemNotFound, "Could not find #{s.full_name} in any of the sources" unless spec spec end end def incomplete_for_platform?(deps, platform) incomplete_specs_for_platform(deps, platform).any? end def incomplete_specs_for_platform(deps, platform) return [] if @specs.empty? validation_set = self.class.new(@specs) validation_set.for(deps, [platform]) validation_set.incomplete_specs end def missing_specs_for(deps) materialize_dependencies(deps) missing_specs end def missing_specs @materializations.flat_map(&:completely_missing_specs) end def partially_missing_specs @materializations.flat_map(&:partially_missing_specs) end def incomplete_specs @materializations.flat_map(&:incomplete_specs) end def insecurely_materialized_specs materialized_specs.select(&:insecurely_materialized?) end def -(other) SharedHelpers.feature_removed! 
"SpecSet#- has been removed with no replacement" end def find_by_name_and_platform(name, platform) @specs.detect {|spec| spec.name == name && spec.installable_on_platform?(platform) } end def specs_with_additional_variants_from(other) sorted | additional_variants_from(other) end def delete_by_name(name) @specs.reject! {|spec| spec.name == name } @sorted&.reject! {|spec| spec.name == name } return if @lookup.nil? @lookup[name] = nil end def version_for(name) exemplary_spec(name)&.version end def what_required(spec) unless req = find {|s| s.runtime_dependencies.any? {|d| d.name == spec.name } } return [spec] end what_required(req) << spec end def <<(spec) SharedHelpers.feature_removed! "SpecSet#<< has been removed with no replacement" end def length @specs.length end def size @specs.size end def empty? @specs.empty? end def each(&b) sorted.each(&b) end def names lookup.keys end def valid?(s) s.matches_current_metadata? && valid_dependencies?(s) end def to_s map(&:full_name).to_s end private def materialize_dependencies(dependencies, platforms = [nil], skips: []) handled = ["bundler"].product(platforms).map {|k| [k, true] }.to_h deps = dependencies.product(platforms) @materializations = [] loop do break unless dep = deps.shift dependency = dep[0] platform = dep[1] name = dependency.name key = [name, platform] next if handled.key?(key) handled[key] = true materialization = Materialization.new(dependency, platform, candidates: lookup[name]) deps.concat(materialization.dependencies) if materialization.complete? @materializations << materialization unless skips.include?(name) end @materializations end def materialized_specs @materializations.filter_map(&:materialized_spec) end def complete_platform(platform) new_specs = [] valid_platform = lookup.all? 
do |_, specs| spec = specs.first matching_specs = spec.source.specs.search([spec.name, spec.version]) platform_spec = MatchPlatform.select_best_platform_match(matching_specs, platform).find do |s| valid?(s) end if platform_spec new_specs << LazySpecification.from_spec(platform_spec) unless specs.include?(platform_spec) true else false end end if valid_platform && new_specs.any? new_specs.each {|spec| add_spec(spec) } end valid_platform end def all_platforms @specs.flat_map {|spec| spec.source.specs.search([spec.name, spec.version]).map(&:platform) }.uniq end def additional_variants_from(other) other.select do |other_spec| spec = exemplary_spec(other_spec.name) next unless spec selected = spec.version == other_spec.version && valid_dependencies?(other_spec) other_spec.source = spec.source if selected selected end end def valid_dependencies?(s) validate_deps(s) == :valid end def sorted @sorted ||= ([@specs.find {|s| s.name == "rake" }] + tsort).compact.uniq rescue TSort::Cyclic => error cgems = extract_circular_gems(error) raise CyclicDependencyError, "Your bundle requires gems that depend" \ " on each other, creating an infinite loop. Please remove either" \ " gem '#{cgems[0]}' or gem '#{cgems[1]}' and try again." end def extract_circular_gems(error) error.message.scan(/@name="(.*?)"/).flatten end def lookup @lookup ||= begin lookup = {} @specs.each do |s| index_spec(lookup, s.name, s) end lookup end end def tsort_each_node # MUST sort by name for backwards compatibility @specs.sort_by(&:name).each {|s| yield s } end def tsort_each_child(s) s.dependencies.sort_by(&:name).each do |d| next if d.type == :development specs_for_name = lookup[d.name] next unless specs_for_name specs_for_name.each {|s2| yield s2 } end end def add_spec(spec) @specs << spec name = spec.name @sorted&.insert(@sorted.bsearch_index {|s| s.name >= name } || @sorted.size, spec) return if @lookup.nil? 
index_spec(@lookup, name, spec) end def remove_spec(spec) @specs.delete(spec) @sorted&.delete(spec) return if @lookup.nil? indexed_specs = @lookup[spec.name] return unless indexed_specs if indexed_specs.size > 1 @lookup[spec.name].delete(spec) else @lookup[spec.name] = nil end end def index_spec(hash, key, value) hash[key] ||= [] hash[key] << value end def exemplary_spec(name) self[name].first end end end
ruby
github
https://github.com/ruby/ruby
lib/bundler/spec_set.rb
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: iam short_description: Manage IAM users, groups, roles and keys description: - Allows for the management of IAM users, groups, roles and access keys. version_added: "2.0" options: iam_type: description: - Type of IAM resource required: true default: null choices: [ "user", "group", "role"] name: description: - Name of IAM resource to create or identify required: true new_name: description: - When state is update, will replace name with new_name on IAM resource required: false default: null new_path: description: - When state is update, will replace the path with new_path on the IAM resource required: false default: null state: description: - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. required: true default: null choices: [ "present", "absent", "update" ] path: description: - When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match. required: false default: "/" access_key_state: description: - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. 
required: false default: null choices: [ "create", "remove", "active", "inactive"] key_count: description: - When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1. required: false default: '1' access_key_ids: description: - A list of the keys that you want impacted by the access_key_state paramter. groups: description: - A list of groups the user should belong to. When update, will gracefully remove groups not listed. required: false default: null password: description: - When type is user and state is present, define the users login password. Also works with update. Note that always returns changed. required: false default: null update_password: required: false default: always choices: ['always', 'on_create'] description: - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] notes: - 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' author: - "Jonathan I. Davila (@defionscode)" - "Paul Seiffert (@seiffert)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Basic user creation example tasks: - name: Create two new IAM users with API keys iam: iam_type: user name: "{{ item }}" state: present password: "{{ temp_pass }}" access_key_state: create with_items: - jcleese - mpython # Advanced example, create two new groups and add the pre-existing user # jdavila to both groups. 
task: - name: Create Two Groups, Mario and Luigi iam: iam_type: group name: "{{ item }}" state: present with_items: - Mario - Luigi register: new_groups - name: iam: iam_type: user name: jdavila state: update groups: "{{ item.created_group.group_name }}" with_items: new_groups.results ''' import json import itertools import sys try: import boto import boto.iam import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False def boto_exception(err): '''generic error message handler''' if hasattr(err, 'error_message'): error = err.error_message elif hasattr(err, 'message'): error = err.message else: error = '%s: %s' % (Exception, err) return error def create_user(module, iam, name, pwd, path, key_state, key_count): key_qty = 0 keys = [] try: user_meta = iam.create_user( name, path).create_user_response.create_user_result.user changed = True if pwd is not None: pwd = iam.create_login_profile(name, pwd) if key_state in ['create']: if key_count: while key_count > key_qty: keys.append(iam.create_access_key( user_name=name).create_access_key_response.\ create_access_key_result.\ access_key) key_qty += 1 else: keys = None except boto.exception.BotoServerError, err: module.fail_json(changed=False, msg=str(err)) else: user_info = dict(created_user=user_meta, password=pwd, access_keys=keys) return (user_info, changed) def delete_user(module, iam, name): try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) del_meta = iam.delete_user(name).delete_user_response except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) try: del_meta = iam.delete_user(name) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('must detach all 
policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: module.fail_json(changed=changed, msg=str(err)) else: changed = True return del_meta, name, changed else: changed = True return del_meta, name, changed def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated): changed = False name_change = False if updated and new_name: name = new_name try: current_keys, status = \ [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\ [ck['status'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] key_qty = len(current_keys) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if 'cannot be found' in error_msg and updated: current_keys, status = \ [ck['access_key_id'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\ [ck['status'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] name = new_name else: module.fail_json(changed=False, msg=str(err)) updated_key_list = {} if new_name or new_path: c_path = iam.get_user(name).get_user_result.user['path'] if (name != new_name) or (c_path != new_path): changed = True try: if not updated: user = iam.update_user( name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata else: user = iam.update_user( name, new_path=new_path).update_user_response.response_metadata user['updates'] = dict( old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) module.fail_json(changed=False, msg=str(err)) else: if not updated: name_change = True if pwd: try: 
iam.update_login_profile(name, pwd) changed = True except boto.exception.BotoServerError: try: iam.create_login_profile(name, pwd) changed = True except boto.exception.BotoServerError, err: error_msg = boto_exception(str(err)) if 'Password does not conform to the account password policy' in error_msg: module.fail_json(changed=False, msg="Passsword doesn't conform to policy") else: module.fail_json(msg=error_msg) if key_state == 'create': try: while key_count > key_qty: new_key = iam.create_access_key( user_name=name).create_access_key_response.create_access_key_result.access_key key_qty += 1 changed = True except boto.exception.BotoServerError, err: module.fail_json(changed=False, msg=str(err)) if keys and key_state: for access_key in keys: if access_key in current_keys: for current_key, current_key_state in zip(current_keys, status): if key_state != current_key_state.lower(): try: iam.update_access_key( access_key, key_state.capitalize(), user_name=name) except boto.exception.BotoServerError, err: module.fail_json(changed=False, msg=str(err)) else: changed = True if key_state == 'remove': try: iam.delete_access_key(access_key, user_name=name) except boto.exception.BotoServerError, err: module.fail_json(changed=False, msg=str(err)) else: changed = True try: final_keys, final_key_status = \ [ck['access_key_id'] for ck in iam.get_all_access_keys(name). list_access_keys_result. access_key_metadata],\ [ck['status'] for ck in iam.get_all_access_keys(name). list_access_keys_result. access_key_metadata] except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err)) for fk, fks in zip(final_keys, final_key_status): updated_key_list.update({fk: fks}) return name_change, updated_key_list, changed def set_users_groups(module, iam, name, groups, updated=None, new_name=None): """ Sets groups for a user, will purge groups not explictly passed, while retaining pre-existing groups that also are in the new list. 
""" changed = False if updated: name = new_name try: orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user( name).list_groups_for_user_result.groups] remove_groups = [ rg for rg in frozenset(orig_users_groups).difference(groups)] new_groups = [ ng for ng in frozenset(groups).difference(orig_users_groups)] except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err)) else: if len(orig_users_groups) > 0: for new in new_groups: iam.add_user_to_group(new, name) for rm in remove_groups: iam.remove_user_from_group(rm, name) else: for group in groups: try: iam.add_user_to_group(group, name) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('The group with name %s cannot be found.' % group) in error_msg: module.fail_json(changed=False, msg="Group %s doesn't exist" % group) if len(remove_groups) > 0 or len(new_groups) > 0: changed = True return (groups, changed) def create_group(module=None, iam=None, name=None, path=None): changed = False try: iam.create_group( name, path).create_group_response.create_group_result.group except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err)) else: changed = True return name, changed def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names: iam.delete_group_policy(name, policy) try: iam.delete_group(name) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the polices " "through the console and try again." 
% name) else: module.fail_json(changed=changed, msg=str(err)) else: changed = True else: changed = True return changed, name def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): changed = False try: current_group_path = iam.get_group( name).get_group_response.get_group_result.group['path'] if new_path: if current_group_path != new_path: iam.update_group(name, new_path=new_path) changed = True if new_name: if name != new_name: iam.update_group(name, new_group_name=new_name, new_path=new_path) changed = True name = new_name except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err)) return changed, name, new_path, current_group_path def create_role(module, iam, name, path, role_list, prof_list): changed = False try: if name not in role_list: changed = True iam.create_role( name, path=path).create_role_response.create_role_result.role.role_name if name not in prof_list: iam.create_instance_profile(name, path=path) iam.add_role_to_instance_profile(name, name) except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err)) else: updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. list_roles_result.roles] return changed, updated_role_list def delete_role(module, iam, name, role_list, prof_list): changed = False try: if name in role_list: cur_ins_prof = [rp['instance_profile_name'] for rp in iam.list_instance_profiles_for_role(name). list_instance_profiles_for_role_result. 
instance_profiles] for profile in cur_ins_prof: iam.remove_role_from_instance_profile(profile, name) try: iam.delete_role(name) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: for policy in iam.list_role_policies(name).list_role_policies_result.policy_names: iam.delete_role_policy(name, policy) try: iam.delete_role(name) except boto.exception.BotoServerError, err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: module.fail_json(changed=changed, msg=str(err)) else: changed = True else: changed = True for prof in prof_list: if name == prof: iam.delete_instance_profile(name) except boto.exception.BotoServerError, err: module.fail_json(changed=changed, msg=str(err)) else: updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. 
list_roles_result.roles] return changed, updated_role_list def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( iam_type=dict( default=None, required=True, choices=['user', 'group', 'role']), groups=dict(type='list', default=None, required=False), state=dict( default=None, required=True, choices=['present', 'absent', 'update']), password=dict(default=None, required=False, no_log=True), update_password=dict(default='always', required=False, choices=['always', 'on_create']), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', 'Active', 'Inactive', 'Create', 'Remove']), access_key_ids=dict(type='list', default=None, required=False), key_count=dict(type='int', default=1, required=False), name=dict(default=None, required=False), new_name=dict(default=None, required=False), path=dict(default='/', required=False), new_path=dict(default=None, required=False) ) ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[], ) if not HAS_BOTO: module.fail_json(msg='This module requires boto, please install it') state = module.params.get('state').lower() iam_type = module.params.get('iam_type').lower() groups = module.params.get('groups') name = module.params.get('name') new_name = module.params.get('new_name') password = module.params.get('password') update_pw = module.params.get('update_password') path = module.params.get('path') new_path = module.params.get('new_path') key_count = module.params.get('key_count') key_state = module.params.get('access_key_state') if key_state: key_state = key_state.lower() if any([n in key_state for n in ['active', 'inactive']]) and not key_ids: module.fail_json(changed=False, msg="At least one access key has to be defined in order" " to use 'active' or 'inactive'") key_ids = module.params.get('access_key_ids') if iam_type == 'user' and module.params.get('password') is not None: pwd = module.params.get('password') elif iam_type != 'user' and 
module.params.get('password') is not None: module.fail_json(msg="a password is being specified when the iam_type " "is not user. Check parameters") else: pwd = None if iam_type != 'user' and (module.params.get('access_key_state') is not None or module.params.get('access_key_id') is not None): module.fail_json(msg="the IAM type must be user, when IAM access keys " "are being modified. Check parameters") if iam_type == 'role' and state == 'update': module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " "please specificy present or absent") region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) result = {} changed = False try: orig_group_list = [gl['group_name'] for gl in iam.get_all_groups(). list_groups_result. groups] orig_user_list = [ul['user_name'] for ul in iam.get_all_users(). list_users_result. users] orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. list_roles_result. roles] orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles(). list_instance_profiles_response. list_instance_profiles_result. 
instance_profiles] except boto.exception.BotoServerError, err: module.fail_json(msg=err.message) if iam_type == 'user': been_updated = False user_groups = None user_exists = any([n in [name, new_name] for n in orig_user_list]) if user_exists: current_path = iam.get_user(name).get_user_result.user['path'] if not new_path and current_path != path: new_path = path path = current_path if state == 'present' and not user_exists and not new_name: (meta, changed) = create_user( module, iam, name, password, path, key_state, key_count) keys = iam.get_all_access_keys(name).list_access_keys_result.\ access_key_metadata if groups: (user_groups, changed) = set_users_groups( module, iam, name, groups, been_updated, new_name) module.exit_json( user_meta=meta, groups=user_groups, keys=keys, changed=changed) elif state in ['present', 'update'] and user_exists: if update_pw == 'on_create': password = None if name not in orig_user_list and new_name in orig_user_list: been_updated = True name_change, key_list, user_changed = update_user( module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated) if name_change and new_name: orig_name = name name = new_name if groups: user_groups, groups_changed = set_users_groups( module, iam, name, groups, been_updated, new_name) if groups_changed == user_changed: changed = groups_changed else: changed = True else: changed = user_changed if new_name and new_path: module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list) elif new_name and not new_path and not been_updated: module.exit_json( changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list) elif new_name and not new_path and been_updated: module.exit_json( changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state) elif not new_name and new_path: module.exit_json( changed=changed, 
groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list) else: module.exit_json( changed=changed, groups=user_groups, user_name=name, keys=key_list) elif state == 'update' and not user_exists: module.fail_json( msg="The user %s does not exit. No update made." % name) elif state == 'absent': if name in orig_user_list: set_users_groups(module, iam, name, '') del_meta, name, changed = delete_user(module, iam, name) module.exit_json( deletion_meta=del_meta, deleted_user=name, changed=changed) else: module.exit_json( changed=False, msg="User %s is already absent from your AWS IAM users" % name) elif iam_type == 'group': group_exists = name in orig_group_list if state == 'present' and not group_exists: new_group, changed = create_group(iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( iam=iam, name=name, new_name=new_name, new_path=new_path) if new_path and new_name: module.exit_json(changed=changed, old_group_name=name, new_group_name=updated_name, old_path=cur_path, new_group_path=updated_path) if new_path and not new_name: module.exit_json(changed=changed, group_name=name, old_path=cur_path, new_group_path=updated_path) if not new_path and new_name: module.exit_json(changed=changed, old_group_name=name, new_group_name=updated_name, group_path=cur_path) if not new_path and not new_name: module.exit_json( changed=changed, group_name=name, group_path=cur_path) elif state == 'update' and not group_exists: module.fail_json( changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" 
% name) elif state == 'absent': if name in orig_group_list: removed_group, changed = delete_group(iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") elif iam_type == 'role': role_list = [] if state == 'present': changed, role_list = create_role( module, iam, name, path, orig_role_list, orig_prof_list) elif state == 'absent': changed, role_list = delete_role( module, iam, name, orig_role_list, orig_prof_list) elif state == 'update': module.fail_json( changed=False, msg='Role update not currently supported by boto.') module.exit_json(changed=changed, roles=role_list) from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main()
unknown
codeparrot/codeparrot-clean
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import re

from jinja2 import Environment
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.utils import concat as j2_concat
from jinja2.runtime import StrictUndefined

from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
from ansible.plugins import filter_loader, lookup_loader
from ansible.template.safe_eval import safe_eval
from ansible.template.template import AnsibleJ2Template
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.debug import debug

from numbers import Number

__all__ = ['Templar']

# A regex for checking to see if a variable we're trying to
# expand is just a single variable name.
SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")

# Primitive Types which we don't want Jinja to convert to strings.
NON_TEMPLATED_TYPES = ( bool, Number )

JINJA2_OVERRIDE = '#jinja2:'
JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']

# NOTE: this is Python 2 code ("except X, e", basestring, iteritems).

class Templar:
    '''
    The main class for templating, with the main entry-point of template().
    '''

    def __init__(self, loader, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR):
        self._loader = loader
        self._basedir = loader.get_basedir()
        self._filters = None
        self._available_variables = variables

        # flags to determine whether certain failures during templating
        # should result in fatal errors being raised
        self._fail_on_lookup_errors = True
        self._fail_on_filter_errors = True
        self._fail_on_undefined_errors = fail_on_undefined

    def _count_newlines_from_end(self, in_str):
        '''
        Counts the number of newlines at the end of a string. This is used during
        the jinja2 templating to ensure the count matches the input, since some
        newlines may be thrown away during the templating.
        '''
        i = len(in_str)
        while i > 0:
            if in_str[i-1] != '\n':
                break
            i -= 1
        return len(in_str) - i

    def _get_filters(self):
        '''
        Returns filter plugins, after loading and caching them if need be.
        A copy is returned so callers cannot mutate the cache.
        '''
        if self._filters is not None:
            return self._filters.copy()

        plugins = [x for x in filter_loader.all()]

        self._filters = dict()
        for fp in plugins:
            self._filters.update(fp.filters())

        return self._filters.copy()

    def _get_extensions(self):
        '''
        Return jinja2 extensions to load.

        If some extensions are set via jinja_extensions in ansible.cfg, we try
        to load them with the jinja environment.
        '''
        jinja_exts = []
        if C.DEFAULT_JINJA2_EXTENSIONS:
            # make sure the configuration directive doesn't contain spaces
            # and split extensions in an array
            jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
        return jinja_exts

    def set_available_variables(self, variables):
        '''
        Sets the list of template variables this Templar instance will use
        to template things, so we don't have to pass them around between
        internal methods.
        '''
        assert isinstance(variables, dict)
        self._available_variables = variables.copy()

    def template(self, variable, convert_bare=False, preserve_trailing_newlines=False):
        '''
        Templates (possibly recursively) any given data as input. If convert_bare
        is set to True, the given data will be wrapped as a jinja2 variable
        ('{{foo}}') before being sent through the template engine.

        Lists/tuples and dicts are templated element-by-element; non-string
        scalars are returned unchanged.
        '''
        try:
            if convert_bare:
                variable = self._convert_bare_variable(variable)
            if isinstance(variable, basestring):
                result = variable
                if self._contains_vars(variable):

                    # Check to see if the string we are trying to render is just
                    # referencing a single var. In this case we don't want to
                    # accidentally change the type of the variable to a string by
                    # using the jinja template renderer. We just want to pass it.
                    only_one = SINGLE_VAR.match(variable)
                    if only_one:
                        var_name = only_one.group(1)
                        if var_name in self._available_variables:
                            resolved_val = self._available_variables[var_name]
                            if isinstance(resolved_val, NON_TEMPLATED_TYPES):
                                return resolved_val

                    result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines)

                    # if this looks like a dictionary or list, convert it to
                    # such using the safe_eval method
                    if (result.startswith("{") and not result.startswith("{{")) or result.startswith("["):
                        eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
                        if eval_results[1] is None:
                            result = eval_results[0]
                        else:
                            # FIXME: if the safe_eval raised an error, should we do something with it?
                            pass

                return result

            elif isinstance(variable, (list, tuple)):
                return [self.template(v, convert_bare=convert_bare) for v in variable]
            elif isinstance(variable, dict):
                d = {}
                for (k, v) in variable.iteritems():
                    d[k] = self.template(v, convert_bare=convert_bare)
                return d
            else:
                return variable

        except AnsibleFilterError:
            if self._fail_on_filter_errors:
                raise
            else:
                return variable

    def _contains_vars(self, data):
        '''
        returns True if the data contains a variable pattern
        ($, "{{" or "{%")
        '''
        return "$" in data or "{{" in data or '{%' in data

    def _convert_bare_variable(self, variable):
        '''
        Wraps a bare string, which may have an attribute portion (ie. foo.bar)
        in jinja2 variable braces so that it is evaluated properly.

        Only wraps when the leading identifier is a known variable and the
        string is not already templated.
        '''
        if isinstance(variable, basestring):
            first_part = variable.split(".")[0].split("[")[0]
            if first_part in self._available_variables and '{{' not in variable and '$' not in variable:
                return "{{%s}}" % variable

        # the variable didn't meet the conditions to be converted,
        # so just return it as-is
        return variable

    def _finalize(self, thing):
        '''
        A custom finalize method for jinja2, which prevents None from being
        returned
        '''
        return thing if thing is not None else ''

    def _lookup(self, name, *args, **kwargs):
        # Resolve and run a lookup plugin; joins list results with ','.
        instance = lookup_loader.get(name.lower(), loader=self._loader)

        if instance is not None:
            # safely catch run failures per #5059
            try:
                ran = instance.run(*args, variables=self._available_variables, **kwargs)
            except AnsibleUndefinedVariable:
                raise
            except Exception, e:
                if self._fail_on_lookup_errors:
                    raise
                ran = None
            if ran:
                ran = ",".join(ran)
            return ran
        else:
            raise AnsibleError("lookup plugin (%s) not found" % name)

    def _do_template(self, data, preserve_trailing_newlines=False):
        # Render one string through a fresh jinja2 Environment wired with
        # Ansible's filters, lookup() global and finalize hook.
        try:
            environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize)
            environment.filters.update(self._get_filters())
            environment.template_class = AnsibleJ2Template

            # FIXME: may not be required anymore, as the basedir stuff will
            #        be handled by the loader?
            #if '_original_file' in vars:
            #    basedir = os.path.dirname(vars['_original_file'])
            #    filesdir = os.path.abspath(os.path.join(basedir, '..', 'files'))
            #    if os.path.exists(filesdir):
            #        basedir = filesdir

            try:
                t = environment.from_string(data)
            except TemplateSyntaxError, e:
                raise AnsibleError("template error while templating string: %s" % str(e))
            except Exception, e:
                # Non-syntax failures: flag recursion explicitly, otherwise
                # fall back to returning the input untouched.
                if 'recursion' in str(e):
                    raise AnsibleError("recursive loop detected in template string: %s" % data)
                else:
                    return data

            t.globals['lookup'] = self._lookup
            t.globals['finalize'] = self._finalize

            jvars = AnsibleJ2Vars(self, t.globals)

            new_context = t.new_context(jvars, shared=True)
            rf = t.root_render_func(new_context)

            try:
                res = j2_concat(rf)
            except TypeError, te:
                if 'StrictUndefined' in str(te):
                    raise AnsibleUndefinedVariable(
                        "Unable to look up a name or access an attribute in template string. " + \
                        "Make sure your variable name does not contain invalid characters like '-'."
                    )
                else:
                    debug("failing because of a type error, template data is: %s" % data)
                    raise AnsibleError("an unexpected type error occurred. Error was %s" % te)

            if preserve_trailing_newlines:
                # The low level calls above do not preserve the newline
                # characters at the end of the input data, so we calculate
                # the difference in newlines and append them to the
                # resulting output for parity
                res_newlines = self._count_newlines_from_end(res)
                data_newlines = self._count_newlines_from_end(data)
                if data_newlines > res_newlines:
                    res += '\n' * (data_newlines - res_newlines)

            return res
        except (UndefinedError, AnsibleUndefinedVariable), e:
            if self._fail_on_undefined_errors:
                raise
            else:
                return data
unknown
codeparrot/codeparrot-clean
/*
 * Compile-time CPU feature probe (named cpu_vsx4.c in the repo path).
 * It compiles only when the toolchain targets VSX and accepts the
 * vec_mod/vec_extractm built-ins; those are Power ISA 3.1 (POWER10-era)
 * intrinsics, presumably corresponding to the "VSX4" feature level.
 * NOTE(review): probe files like this are normally judged by whether
 * they compile, not by their return value -- confirm against the build
 * system that consumes this check.
 */
#ifndef __VSX__
#error "VSX is not supported"
#endif

#include <altivec.h>

/* Vector of four 32-bit unsigned lanes. */
typedef __vector unsigned int v_uint32x4;

int main(void)
{
    v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
    v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
    /* vec_mod: per-lane unsigned remainder (ISA 3.1). */
    v_uint32x4 v3 = vec_mod(v1, v2);
    /* vec_extractm: collect the MSB of each lane into a scalar mask. */
    return (int)vec_extractm(v3);
}
c
github
https://github.com/numpy/numpy
numpy/_core/src/_simd/checks/cpu_vsx4.c
# coding:utf-8 ''' Created on 2018/1/15. @author: chk01 ''' from shuwei_fengge.practice_two.load_data.utils import * from PIL import Image, ImageDraw import tensorflow as tf import matplotlib.pyplot as plt def get_face_box(points): X = points[:, 0] Y = points[:, 1] min_x = min(X) max_x = max(X) min_y = min(Y) max_y = max(Y) wid = max(max_y - min_y, max_x - min_x) wid = 1.8 * wid new_x = min_x - (wid - (max_x - min_x)) // 2 new_y = min_y - (wid - (max_y - min_y)) // 2 p = 0.2 region = [new_x, new_y - p * wid, new_x + wid, new_y + (1 - p) * wid] return region, wid def main(): img_path = '7.jpg' image = Image.open(img_path).convert("L") points = get_landmark72(img_path) region, width = get_face_box(points) new_x = region[0] new_y = region[1] res = np.array(image.crop(region).resize([64, 64])) tt = np.squeeze(predict(res)).reshape(-1, 2) * width / 64 + [new_x, new_y] plt.scatter(points[:, 0], -points[:, 1]) plt.scatter(tt[:, 0], -tt[:, 1]) plt.axis('equal') plt.show() drawSurface = ImageDraw.Draw(image) landmark72 = tuple(tuple(t) for t in tt) rr = tuple(tuple(t) for t in points) drawSurface.line(rr[:13], fill=255, width=5) # drawSurface.polygon([landmark72[2:5],landmark72[-3]], fill=255) drawSurface.line(landmark72, fill=255,width=5) image.save(img_path.replace('.jpg', 'res.png')) image.show() def predict(trX): # file = '../data/face_top_9.mat' # data = scio.loadmat(file) tf.reset_default_graph() # graph saver = tf.train.import_meta_graph("save/model-2000-2.ckpt.meta") # value # a = tf.train.NewCheckpointReader('save/model.ckpt.index') # saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, "save/model-2000-2.ckpt") graph = tf.get_default_graph() predict_op = graph.get_tensor_by_name("output/BiasAdd:0") X = graph.get_tensor_by_name("Placeholder:0") # dp = graph.get_tensor_by_name("Placeholder_2:0") resY = predict_op.eval({X: trX.reshape(1, -1) / 255.}) # resY=[[31,10]] print(resY) # resY = [[14.34780979, 32.37727928, 17.39715767, 22.06736565, 
23.70981216, # 17.21895123, 29.31753731, 16.67663288, 31.93413925, 14.36086273, # 48.92932129, 29.01085472, 45.96300888, 21.74747467, 42.84361649, # 17.86888313, 34.78334045, 14.6940918]] return resY if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division

import numpy as np
import datetime

from sunpy.time import parse_time, julian_day
from sunpy.wcs import convert_hcc_hg, convert_hg_hcc
from sunpy.sun import constants


def pb0r(date, stereo=None, soho=False, arcsec=False):
    # Ejemplo:
    # >> from cjd_pylib import pb0r
    # >> A = pb0r.pb0r('2014-06-17T09:35:00')
    # {'p': -8.8133708, 'b0': 1.2762924, 'l0': 0.0, 'sd': 15.732692}
    """To calculate the solar P, B0 angles and the semi-diameter.

    Parameters
    -----------
    date: a date/time object - the date/time specified in any CDS format

    stereo: { 'A' | 'B' | None }
        calculate the solar P, B0 angles and the semi-diameter from the
        point of view of either of the STEREO spacecraft.
        (Currently unsupported: raises ValueError when set.)

    soho: { False | True }
        calculate the solar P, B0 angles and the semi-diameter from the
        point of view of the SOHO spacecraft. SOHO sits at the Lagrange
        L1 point which is about 1% closer to the Sun than the Earth.
        Implementation of this seems to require the ability to read SOHO
        orbit files. (Currently unsupported: raises ValueError when set.)

    arcsec: { False | True }
        return the semi-diameter in arcseconds.

    Returns:
    -------
    A dictionary with the following keys with the following meanings:

    p  -  Solar P (position angle of pole) (degrees)
    b0 -  latitude of point at disk centre (degrees)
    sd -  semi-diameter of the solar disk in arcminutes
          (arcseconds when arcsec=True; the arcsec branch omits "l0")

    See Also
    --------
    IDL code equivalent:
        http://hesperia.gsfc.nasa.gov/ssw/gen/idl/solar/pb0r.pro
    """
    if (stereo is not None) and soho:
        raise ValueError("Cannot set STEREO and SOHO simultaneously")

    # place holder for STEREO calculation
    if stereo is not None:
        raise ValueError("STEREO solar P, B0 and semi-diameter calcution" + \
            " is not supported.")

    # number of Julian days since 2415020.0
    de = julian_day(date) - 2415020.0

    # get the longitude of the sun etc.
    sun_position = sun_pos(date)
    longmed = sun_position["longitude"]
    #ra = sun_position["ra"]
    #dec = sun_position["dec"]
    appl = sun_position["app_long"]
    oblt = sun_position["obliq"]

    # form the aberrated longitude
    Lambda = longmed - (20.50 / 3600.0)

    # form longitude of ascending node of sun's equator on ecliptic
    node = 73.6666660 + (50.250 / 3600.0) * ((de / 365.250) + 50.0)
    arg = Lambda - node

    # calculate P, the position angle of the pole
    p = np.rad2deg(\
        np.arctan(-np.tan(np.deg2rad(oblt)) * np.cos(np.deg2rad(appl))) + \
        np.arctan(-0.127220 * np.cos(np.deg2rad(arg))))

    # B0 the tilt of the axis...
    b = np.rad2deg(np.arcsin(0.12620 * np.sin(np.deg2rad(arg))))

    # ... and the semi-diameter
    # Form the mean anomalies of Venus(MV),Earth(ME),Mars(MM),Jupiter(MJ)
    # and the mean elongation of the Moon from the Sun(D).
    t = de / 36525.0
    mv = 212.60 + np.mod(58517.80 * t, 360.0)
    me = 358.4760 + np.mod(35999.04980 * t, 360.0)
    mm = 319.50 + np.mod(19139.860 * t, 360.0)
    mj = 225.30 + np.mod(3034.690 * t, 360.0)
    d = 350.70 + np.mod(445267.110 * t, 360.0)

    # Form the geocentric distance(r) and semi-diameter(sd)
    r = 1.0001410 - (0.0167480 - 0.00004180 * t) * np.cos(np.deg2rad(me)) \
        - 0.000140 * np.cos(np.deg2rad(2.0 * me)) \
        + 0.0000160 * np.cos(np.deg2rad(58.30 + 2.0 * mv - 2.0 * me)) \
        + 0.0000050 * np.cos(np.deg2rad(209.10 + mv - me)) \
        + 0.0000050 * np.cos(np.deg2rad(253.80 - 2.0 * mm + 2.0 * me)) \
        + 0.0000160 * np.cos(np.deg2rad(89.50 - mj + me)) \
        + 0.0000090 * np.cos(np.deg2rad(357.10 - 2.0 * mj + 2.0 * me)) \
        + 0.0000310 * np.cos(np.deg2rad(d))

    # Semi-diameter: angular radius of the Sun at distance r, in arcmin
    # (10800 arcmin per pi radians).
    sd_const = constants.radius / constants.au
    sd = np.arcsin(sd_const / r) * 10800.0 / np.pi

    # place holder for SOHO correction
    if soho:
        raise ValueError("SOHO correction (on the order of 1% " + \
            "since SOHO sets at L1) not yet supported.")

    if arcsec:
        return {"p": p, "b0": b, "sd": sd * 60.0}
    else:
        return {"p": p, "b0": b, "sd": sd, "l0": 0.0}


def sun_pos(date, is_julian=False, since_2415020=False):
    """ Calculate solar ephemeris parameters.

    Allows for planetary and lunar perturbations in the calculation of
    solar longitude at date and various other solar positional parameters.
    This routine is a truncated version of Newcomb's Sun and is designed
    to give apparent angular coordinates (T.E.D) to a precision of one
    second of time.

    Parameters
    -----------
    date: a date/time object or a fractional number of days since
          JD 2415020.0

    is_julian: { False | True }
        notify this routine that the variable "date" is a Julian date
        (a floating point number)

    since_2415020: { False | True }
        notify this routine that the variable "date" has been corrected
        for the required time offset

    Returns:
    -------
    A dictionary with the following keys with the following meanings:

    longitude  -  Longitude of sun for mean equinox of date (degs)
    ra         -  Apparent RA for true equinox of date (degs)
    dec        -  Apparent declination for true equinox of date (degs)
    app_long   -  Apparent longitude (degs)
    obliq      -  True obliquity (degs)

    See Also
    --------
    IDL code equivalent:
        http://hesperia.gsfc.nasa.gov/ssw/gen/idl/solar/sun_pos.pro

    Examples
    --------
    >>> sp = sun_pos('2013-03-27')
    """
    # check the time input
    if is_julian:
        # if a Julian date is being passed in
        if since_2415020:
            dd = date
        else:
            dd = date - 2415020.0
    else:
        # parse the input time as a julian day
        if since_2415020:
            dd = julian_day(date)
        else:
            dd = julian_day(date) - 2415020.0

    # form time in Julian centuries from 1900.0
    t = dd / 36525.0

    # form sun's mean longitude (in arcseconds)
    l = (279.6966780 + np.mod(36000.7689250 * t, 360.00)) * 3600.0

    # allow for ellipticity of the orbit (equation of centre) using the
    # Earth's mean anomaly ME
    me = 358.4758440 + np.mod(35999.049750 * t, 360.0)
    ellcor = (6910.10 - 17.20 * t) * np.sin(np.deg2rad(me)) + \
        72.30 * np.sin(np.deg2rad(2.0 * me))
    l = l + ellcor

    # allow for the Venus perturbations using the mean anomaly of Venus MV
    mv = 212.603219 + np.mod(58517.8038750 * t, 360.0)
    vencorr = 4.80 * np.cos(np.deg2rad(299.10170 + mv - me)) + \
        5.50 * np.cos(np.deg2rad(148.31330 + 2.0 * mv - 2.0 * me)) + \
        2.50 * np.cos(np.deg2rad(315.94330 + 2.0 * mv - 3.0 * me)) + \
        1.60 * np.cos(np.deg2rad(345.25330 + 3.0 * mv - 4.0 * me)) + \
        1.00 * np.cos(np.deg2rad(318.150 + 3.0 * mv - 5.0 * me))
    l = l + vencorr

    # Allow for the Mars perturbations using the mean anomaly of Mars MM
    mm = 319.5294250 + np.mod(19139.858500 * t, 360.0)
    marscorr = 2.0 * np.cos(np.deg2rad(343.88830 - 2.0 * mm + 2.0 * me)) + \
        1.80 * np.cos(np.deg2rad(200.40170 - 2.0 * mm + me))
    l = l + marscorr

    # Allow for the Jupiter perturbations using the mean anomaly of
    # Jupiter MJ
    mj = 225.3283280 + np.mod(3034.69202390 * t, 360.00)
    jupcorr = 7.20 * np.cos(np.deg2rad(179.53170 - mj + me)) + \
        2.60 * np.cos(np.deg2rad(263.21670 - mj)) + \
        2.70 * np.cos(np.deg2rad(87.14500 - 2.0 * mj + 2.0 * me)) + \
        1.60 * np.cos(np.deg2rad(109.49330 - 2.0 * mj + me))
    l = l + jupcorr

    # Allow for the Moons perturbations using the mean elongation of the
    # Moon from the Sun D
    d = 350.73768140 + np.mod(445267.114220 * t, 360.0)
    mooncorr = 6.50 * np.sin(np.deg2rad(d))
    l = l + mooncorr

    # Note the original code is
    # longterm = + 6.4d0 * sin(( 231.19d0 + 20.20d0 * t )*!dtor)
    longterm = 6.40 * np.sin(np.deg2rad(231.190 + 20.20 * t))
    l = l + longterm
    # Reduce into [0, 360) degrees (values are still in arcseconds).
    l = np.mod(l + 2592000.0, 1296000.0)
    longmed = l / 3600.0

    # Allow for Aberration
    l = l - 20.5

    # Allow for Nutation using the longitude of the Moons mean node OMEGA
    omega = 259.1832750 - np.mod(1934.1420080 * t, 360.0)
    l = l - 17.20 * np.sin(np.deg2rad(omega))

    # Form the True Obliquity
    oblt = 23.4522940 - 0.01301250 * t + \
        (9.20 * np.cos(np.deg2rad(omega))) / 3600.0

    # Form Right Ascension and Declination
    l = l / 3600.0
    ra = np.rad2deg(np.arctan2(np.sin(np.deg2rad(l)) * \
        np.cos(np.deg2rad(oblt)), np.cos(np.deg2rad(l))))

    # Normalize RA into [0, 360); handle both array and scalar inputs.
    if isinstance(ra, np.ndarray):
        ra[ra < 0.0] += 360.0
    elif ra < 0.0:
        ra = ra + 360.0

    dec = np.rad2deg(np.arcsin(np.sin(np.deg2rad(l)) * \
        np.sin(np.deg2rad(oblt))))

    # convert the internal variables to those listed in the top of the
    # comment section in this code and in the original IDL code.
    return {"longitude": longmed, "ra": ra, "dec": dec,
            "app_long": l, "obliq": oblt}
unknown
codeparrot/codeparrot-clean
import os, sys, array, json, math, cStringIO

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import subresource


class Image:
    """This class partially implements the interface of the PIL.Image.Image.

    One day in the future WPT might support the PIL module or another
    imaging library, so this hacky BMP implementation will no longer be
    required.

    Pixels are stored as a flat bytearray of 3 bytes (RGB) per pixel.
    NOTE: this is Python 2 code (cStringIO; bytearray built from
    one-character strings in save()).
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        # One RGB triple per pixel, zero-initialized (black).
        self.img = bytearray([0 for i in range(3 * width * height)])

    @staticmethod
    def new(mode, size, color=0):
        # Mimics PIL's Image.new(); mode and color are accepted but ignored.
        return Image(size[0], size[1])

    def _int_to_bytes(self, number):
        # Little-endian 4-byte encoding, as BMP headers require.
        packed_bytes = [0, 0, 0, 0]
        for i in range(4):
            packed_bytes[i] = number & 0xFF
            number >>= 8
        return packed_bytes

    def putdata(self, color_data):
        # color_data is a flat sequence of (R, G, B) tuples; any pixels
        # beyond len(color_data) keep their zero (black) initialization.
        for y in range(self.height):
            for x in range(self.width):
                i = x + y * self.width
                if i > len(color_data) - 1:
                    return
                # BMP stores channels as BGR, hence the reversal.
                self.img[i * 3: i * 3 + 3] = color_data[i][::-1]

    def save(self, f, type):
        assert type == "BMP"
        # 54 bytes of preamble + image color data.
        filesize = 54 + 3 * self.width * self.height;
        # 14 bytes of header: "BM" magic, file size, pixel-data offset 54.
        bmpfileheader = bytearray(['B', 'M'] + self._int_to_bytes(filesize) +
                                  [0, 0, 0, 0, 54, 0, 0, 0])
        # 40 bytes of info: BITMAPINFOHEADER, 1 plane, 24 bits per pixel.
        bmpinfoheader = bytearray([40, 0, 0, 0] +
                                  self._int_to_bytes(self.width) +
                                  self._int_to_bytes(self.height) +
                                  [1, 0, 24] + (25 * [0]))

        # Each row is padded to a multiple of 4 bytes.
        padlength = (4 - (self.width * 3) % 4) % 4
        bmppad = bytearray([0, 0, 0]);
        padding = bmppad[0 : padlength]
        f.write(bmpfileheader)
        f.write(bmpinfoheader)
        # BMP rows are written bottom-up.
        for i in range(self.height):
            offset = self.width * (self.height - i - 1) * 3
            f.write(self.img[offset : offset + 3 * self.width])
            f.write(padding)


def encode_string_as_bmp_image(string_data):
    """Pack an arbitrary byte string into the pixels of a square BMP.

    Each group of 3 consecutive bytes becomes one RGB pixel; the final
    pixel is zero-padded if the length is not a multiple of 3.
    """
    data_bytes = array.array("B", string_data)
    num_bytes = len(data_bytes)

    # Convert data bytes to color data (RGB).
    color_data = []
    num_components = 3
    rgb = [0] * num_components
    i = 0
    for byte in data_bytes:
        component_index = i % num_components
        rgb[component_index] = byte
        if component_index == (num_components - 1) or i == (num_bytes - 1):
            color_data.append(tuple(rgb))
            rgb = [0] * num_components
        i += 1

    # Render image: smallest square that holds all the pixels.
    num_pixels = len(color_data)
    sqrt = int(math.ceil(math.sqrt(num_pixels)))
    img = Image.new("RGB", (sqrt, sqrt), "black")
    img.putdata(color_data)

    # Flush image to string.
    f = cStringIO.StringIO()
    img.save(f, "BMP")
    f.seek(0)
    return f.read()


def generate_payload(server_data):
    # Encode the request headers (as JSON) into a BMP payload.
    data = ('{"headers": %(headers)s}') % server_data
    return encode_string_as_bmp_image(data)


def main(request, response):
    # WPT entry point: serve the generated BMP with permissive CORS.
    subresource.respond(request,
                        response,
                        payload_generator = generate_payload,
                        content_type = "image/bmp",
                        access_control_allow_origin = "*")
unknown
codeparrot/codeparrot-clean
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package etcdutl contains the main entry point for the etcdutl.
// NOTE(review): the comment says "Package etcdutl" but the declared
// package is main -- presumably intentional for a command entry point;
// confirm upstream convention.
package main

import (
	"time"

	"github.com/spf13/cobra"

	"go.etcd.io/etcd/etcdutl/v3/etcdutl"
)

const (
	cliName        = "etcdutl"
	cliDescription = "An administrative command line tool for etcd3."
)

// rootCmd is the top-level cobra command all etcdutl subcommands hang off.
var rootCmd = &cobra.Command{
	Use:        cliName,
	Short:      cliDescription,
	SuggestFor: []string{"etcdutl"},
}

// init wires up the persistent flags shared by every subcommand and
// registers the subcommands themselves.
func init() {
	rootCmd.PersistentFlags().DurationVar(&etcdutl.FlockTimeout, "timeout", 10*time.Second, "time to wait to obtain a file lock on db file, 0 to block indefinitely")
	rootCmd.PersistentFlags().StringVarP(&etcdutl.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
	// NOTE(review): the error returned by RegisterFlagCompletionFunc is
	// silently discarded; it only fails for duplicate/unknown flag names,
	// but consider checking it.
	rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
		return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault
	})

	rootCmd.AddCommand(
		etcdutl.NewDefragCommand(),
		etcdutl.NewSnapshotCommand(),
		etcdutl.NewHashKVCommand(),
		etcdutl.NewVersionCommand(),
		etcdutl.NewCompletionCommand(),
		etcdutl.NewMigrateCommand(),
		etcdutl.NewListBucketCommand(),
		etcdutl.NewIterateBucketCommand(),
		etcdutl.NewHashCommand(),
	)
}

// Start executes the root command and returns its error, after trimming
// the help output down to just the usage string.
func Start() error {
	// Make help just show the usage
	rootCmd.SetHelpTemplate(`{{.UsageString}}`)
	return rootCmd.Execute()
}

// init enables cobra's prefix matching so abbreviated subcommand names
// resolve (e.g. "defrag" can be shortened).
func init() {
	cobra.EnablePrefixMatching = true
}
go
github
https://github.com/etcd-io/etcd
etcdutl/ctl.go
{% extends "admin/base_site.html" %}
{% load i18n %}
{# "Password reset done" page: shown after the reset form is submitted. #}
{# The copy is deliberately phrased ("if an account exists...") so the  #}
{# page never confirms whether the entered email matches an account.    #}

{% block breadcrumbs %}
<ol class="breadcrumbs">
<li><a href="{% url 'admin:index' %}">{% translate 'Home' %}</a></li>
<li aria-current="page">{% translate 'Password reset' %}</li>
</ol>
{% endblock %}

{% block content %}

<p>{% translate 'We’ve emailed you instructions for setting your password, if an account exists with the email you entered. You should receive them shortly.' %}</p>

<p>{% translate 'If you don’t receive an email, please make sure you’ve entered the address you registered with, and check your spam folder.' %}</p>

{% endblock %}
html
github
https://github.com/django/django
django/contrib/admin/templates/registration/password_reset_done.html
from ngw.core.models import FIELD_EMAIL


def normalize_name(name):
    '''
    Looks for upper case words at the front of the name and moves them
    to the end, e.g. "DOE John" -> "John DOE".  Scanning stops at the
    first word that is not entirely upper case.
    '''
    words = name.split(' ')
    lastname = []
    # Peel all-caps words (assumed to be the last name) off the front.
    while True:
        if len(words) == 0:
            break
        word = words[0]
        if word == word.upper():
            lastname.append(word)
            del words[0]
        else:
            break
    return ' '.join(words + lastname)


def parse_who_result(mailcontent):
    '''
    Parse the output of mailman's subscriber listing.

    Returns a list of (name, email) tuples; name is '' when the line
    carries only an address.  Lines without an '@' are skipped.
    '''
    mailman_members = []
    for line in mailcontent.split('\n'):
        if '@' not in line:
            continue
        line = line.strip()
        if ' ' in line:
            # Expected form: "user@example.org (Full Name)"
            email, name = line.split(' ', 1)
            assert name[0] == '(' and name[-1] == ')', \
                'Invalid name, () not found on line '+line
            name = name[1:-1]
        else:
            email = line
            name = ''
        mailman_members.append((name, email))
    return mailman_members


def format_mailadd(name, email):
    # Render "Name <email>", or just "<email>" when no name is known.
    if name:
        result = name + ' '
    else:
        result = ''
    result += '<{}>'.format(email)
    return result


def synchronise_group(cg, mailcontent):
    '''
    takes a contact group
    returns a list of tupples: ('msg', unsubscribe_addr, subscribe_addr)

    Compares the group's database members against the mailman listing:
    - members missing from mailman yield a subscribe entry;
    - members whose mailman display name differs yield an update
      (unsubscribe old / subscribe new) entry;
    - mailman entries with no matching database member yield an
      unsubscribe entry.
    '''
    result = []
    mailman_members = parse_who_result(mailcontent)
    for c in cg.get_all_members():
        email_base = c.get_fieldvalue_by_id(FIELD_EMAIL)  # FIXME
        name_base = c.name
        # A name equal to the address carries no information; treat as empty.
        if name_base == email_base:
            name_base = ''
        mailman_names = [name for name, email in mailman_members
                         if email == email_base]
        if not mailman_names:
            # In the database but not on the list: needs subscribing.
            result.append((
                (format_mailadd(c.name, email_base)
                 + ' from database is not registered in mailman!'),
                None,
                format_mailadd(c.name, email_base)))
        else:
            mailman_name = mailman_names[0]
            if mailman_name != name_base:
                # Same address, different display name: resubscribe.
                result.append((
                    ('<{}> is called "{}" in mailman but it should be "{}"'
                     .format(email_base, mailman_name, name_base)),
                    format_mailadd(mailman_name, email_base),
                    format_mailadd(name_base, email_base)))
            # Matched entries are removed; leftovers are strays (below).
            mailman_members.remove((mailman_name, email_base))
    for name, email in mailman_members:
        # On the list but not in the database: needs unsubscribing.
        result.append((
            (format_mailadd(name, email)
             + ' should not be registered in mailman.'),
            format_mailadd(name, email),
            None))
    return result
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # the lib use python logging can get it if the following is set in your # Ansible config. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_log_syslogd_setting short_description: Global settings for remote syslog server in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to set and modify log_syslogd feature and setting category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. 
A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true log_syslogd_setting: description: - Global settings for remote syslog server. default: null suboptions: certificate: description: - Certificate used to communicate with Syslog server. Source certificate.local.name. custom-field-name: description: - Custom field name for CEF format logging. suboptions: custom: description: - Field custom name. id: description: - Entry ID. required: true name: description: - Field name. enc-algorithm: description: - Enable/disable reliable syslogging with TLS encryption. choices: - high-medium - high - low - disable facility: description: - Remote syslog facility. choices: - kernel - user - mail - daemon - auth - syslog - lpr - news - uucp - cron - authpriv - ftp - ntp - audit - alert - clock - local0 - local1 - local2 - local3 - local4 - local5 - local6 - local7 format: description: - Log format. choices: - default - csv - cef mode: description: - Remote syslog logging over UDP/Reliable TCP. choices: - udp - legacy-reliable - reliable port: description: - Server listen port. server: description: - Address of remote syslog server. source-ip: description: - Source IP address of syslog. status: description: - Enable/disable remote syslog logging. choices: - enable - disable ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Global settings for remote syslog server. 
fortios_log_syslogd_setting: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" log_syslogd_setting: certificate: "<your_own_value> (source certificate.local.name)" custom-field-name: - custom: "<your_own_value>" id: "6" name: "default_name_7" enc-algorithm: "high-medium" facility: "kernel" format: "default" mode: "udp" port: "12" server: "192.168.100.40" source-ip: "84.230.14.43" status: "enable" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_log_syslogd_setting_data(json): option_list = ['certificate', 
'custom-field-name', 'enc-algorithm', 'facility', 'format', 'mode', 'port', 'server', 'source-ip', 'status'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def flatten_multilists_attributes(data): multilist_attrs = [] for attr in multilist_attrs: try: path = "data['" + "']['".join(elem for elem in attr) + "']" current_val = eval(path) flattened_val = ' '.join(elem for elem in current_val) exec(path + '= flattened_val') except BaseException: pass return data def log_syslogd_setting(data, fos): vdom = data['vdom'] log_syslogd_setting_data = data['log_syslogd_setting'] flattened_data = flatten_multilists_attributes(log_syslogd_setting_data) filtered_data = filter_log_syslogd_setting_data(flattened_data) return fos.set('log.syslogd', 'setting', data=filtered_data, vdom=vdom) def fortios_log_syslogd(data, fos): login(data) if data['log_syslogd_setting']: resp = log_syslogd_setting(data, fos) fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "log_syslogd_setting": { "required": False, "type": "dict", "options": { "certificate": {"required": False, "type": "str"}, "custom-field-name": {"required": False, "type": "list", "options": { "custom": {"required": False, "type": "str"}, "id": {"required": True, "type": "int"}, "name": {"required": False, "type": "str"} }}, "enc-algorithm": {"required": False, "type": "str", "choices": ["high-medium", "high", "low", "disable"]}, "facility": {"required": False, "type": "str", "choices": ["kernel", "user", "mail", "daemon", "auth", "syslog", "lpr", "news", "uucp", "cron", "authpriv", "ftp", 
"ntp", "audit", "alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]}, "format": {"required": False, "type": "str", "choices": ["default", "csv", "cef"]}, "mode": {"required": False, "type": "str", "choices": ["udp", "legacy-reliable", "reliable"]}, "port": {"required": False, "type": "int"}, "server": {"required": False, "type": "str"}, "source-ip": {"required": False, "type": "str"}, "status": {"required": False, "type": "str", "choices": ["enable", "disable"]} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") global fos fos = FortiOSAPI() is_error, has_changed, result = fortios_log_syslogd(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.beans.factory.groovy; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import groovy.lang.Binding; import groovy.lang.Closure; import groovy.lang.GString; import groovy.lang.GroovyObject; import groovy.lang.GroovyObjectSupport; import groovy.lang.GroovyShell; import groovy.lang.GroovySystem; import groovy.lang.MetaClass; import org.codehaus.groovy.runtime.DefaultGroovyMethods; import org.codehaus.groovy.runtime.InvokerHelper; import org.jspecify.annotations.Nullable; import org.springframework.beans.MutablePropertyValues; import org.springframework.beans.factory.BeanDefinitionStoreException; import org.springframework.beans.factory.config.RuntimeBeanReference; import org.springframework.beans.factory.parsing.BeanDefinitionParsingException; import org.springframework.beans.factory.parsing.Location; import org.springframework.beans.factory.parsing.Problem; import org.springframework.beans.factory.support.AbstractBeanDefinition; import org.springframework.beans.factory.support.AbstractBeanDefinitionReader; import org.springframework.beans.factory.support.BeanDefinitionRegistry; import org.springframework.beans.factory.support.GenericBeanDefinition; import org.springframework.beans.factory.support.ManagedList; import 
org.springframework.beans.factory.support.ManagedMap; import org.springframework.beans.factory.xml.BeanDefinitionParserDelegate; import org.springframework.beans.factory.xml.NamespaceHandler; import org.springframework.beans.factory.xml.XmlBeanDefinitionReader; import org.springframework.beans.factory.xml.XmlReaderContext; import org.springframework.core.io.DescriptiveResource; import org.springframework.core.io.Resource; import org.springframework.core.io.support.EncodedResource; import org.springframework.util.Assert; import org.springframework.util.ObjectUtils; import org.springframework.util.StringUtils; /** * A Groovy-based reader for Spring bean definitions: like a Groovy builder, * but more of a DSL for Spring configuration. * * <p>This bean definition reader also understands XML bean definition files, * allowing for seamless mixing and matching with Groovy bean definition files. * * <p>Typically applied to a * {@link org.springframework.beans.factory.support.DefaultListableBeanFactory} * or a {@link org.springframework.context.support.GenericApplicationContext}, * but can be used against any {@link BeanDefinitionRegistry} implementation. 
* * <h3>Example Syntax</h3> * <pre class="code"> * import org.hibernate.SessionFactory * import org.apache.commons.dbcp.BasicDataSource * * def reader = new GroovyBeanDefinitionReader(myApplicationContext) * reader.beans { * dataSource(BasicDataSource) { // &lt;--- invokeMethod * driverClassName = "org.hsqldb.jdbcDriver" * url = "jdbc:hsqldb:mem:grailsDB" * username = "sa" // &lt;-- setProperty * password = "" * settings = [mynew:"setting"] * } * sessionFactory(SessionFactory) { * dataSource = dataSource // &lt;-- getProperty for retrieving references * } * myService(MyService) { * nestedBean = { AnotherBean bean -&gt; // &lt;-- setProperty with closure for nested bean * dataSource = dataSource * } * } * }</pre> * * <p>You can also load resources containing beans defined in a Groovy script using * either the {@link #loadBeanDefinitions(Resource...)} or * {@link #loadBeanDefinitions(String...)} method, with a script looking similar to * the following. * * <pre class="code"> * import org.hibernate.SessionFactory * import org.apache.commons.dbcp.BasicDataSource * * beans { * dataSource(BasicDataSource) { * driverClassName = "org.hsqldb.jdbcDriver" * url = "jdbc:hsqldb:mem:grailsDB" * username = "sa" * password = "" * settings = [mynew:"setting"] * } * sessionFactory(SessionFactory) { * dataSource = dataSource * } * myService(MyService) { * nestedBean = { AnotherBean bean -&gt; * dataSource = dataSource * } * } * }</pre> * * @author Jeff Brown * @author Graeme Rocher * @author Juergen Hoeller * @author Sam Brannen * @since 4.0 * @see BeanDefinitionRegistry * @see org.springframework.beans.factory.support.DefaultListableBeanFactory * @see org.springframework.context.support.GenericApplicationContext * @see org.springframework.context.support.GenericGroovyApplicationContext */ public class GroovyBeanDefinitionReader extends AbstractBeanDefinitionReader implements GroovyObject { /** * Standard {@code XmlBeanDefinitionReader} created with default * settings for loading 
bean definitions from XML files. */ private final XmlBeanDefinitionReader standardXmlBeanDefinitionReader; /** * Groovy DSL {@code XmlBeanDefinitionReader} for loading bean definitions * via the Groovy DSL, typically configured with XML validation disabled. */ private final XmlBeanDefinitionReader groovyDslXmlBeanDefinitionReader; private final Map<String, String> namespaces = new HashMap<>(); private final Map<String, DeferredProperty> deferredProperties = new HashMap<>(); private MetaClass metaClass = GroovySystem.getMetaClassRegistry().getMetaClass(getClass()); private @Nullable Binding binding; private @Nullable GroovyBeanDefinitionWrapper currentBeanDefinition; /** * Create a new {@code GroovyBeanDefinitionReader} for the given * {@link BeanDefinitionRegistry}. * @param registry the {@code BeanDefinitionRegistry} to load bean definitions into */ public GroovyBeanDefinitionReader(BeanDefinitionRegistry registry) { super(registry); this.standardXmlBeanDefinitionReader = new XmlBeanDefinitionReader(registry); this.groovyDslXmlBeanDefinitionReader = new XmlBeanDefinitionReader(registry); this.groovyDslXmlBeanDefinitionReader.setValidating(false); } /** * Create a new {@code GroovyBeanDefinitionReader} based on the given * {@link XmlBeanDefinitionReader}, loading bean definitions into its * {@code BeanDefinitionRegistry} and delegating Groovy DSL loading to it. * <p>The supplied {@code XmlBeanDefinitionReader} should typically * be pre-configured with XML validation disabled. 
* @param xmlBeanDefinitionReader the {@code XmlBeanDefinitionReader} to * derive the registry from and to delegate Groovy DSL loading to */ public GroovyBeanDefinitionReader(XmlBeanDefinitionReader xmlBeanDefinitionReader) { super(xmlBeanDefinitionReader.getRegistry()); this.standardXmlBeanDefinitionReader = new XmlBeanDefinitionReader(xmlBeanDefinitionReader.getRegistry()); this.groovyDslXmlBeanDefinitionReader = xmlBeanDefinitionReader; } @Override public void setMetaClass(MetaClass metaClass) { this.metaClass = metaClass; } @Override public MetaClass getMetaClass() { return this.metaClass; } /** * Set the binding, i.e. the Groovy variables available in the scope * of a {@code GroovyBeanDefinitionReader} closure. */ public void setBinding(Binding binding) { this.binding = binding; } /** * Return a specified binding for Groovy variables, if any. */ public @Nullable Binding getBinding() { return this.binding; } // TRADITIONAL BEAN DEFINITION READER METHODS /** * Load bean definitions from the specified Groovy script or XML file. * <p>Note that {@code ".xml"} files will be parsed as XML content; all other kinds * of resources will be parsed as Groovy scripts. * @param resource the resource descriptor for the Groovy script or XML file * @return the number of bean definitions found * @throws BeanDefinitionStoreException in case of loading or parsing errors */ @Override public int loadBeanDefinitions(Resource resource) throws BeanDefinitionStoreException { return loadBeanDefinitions(new EncodedResource(resource)); } /** * Load bean definitions from the specified Groovy script or XML file. * <p>Note that {@code ".xml"} files will be parsed as XML content; all other kinds * of resources will be parsed as Groovy scripts. 
* @param encodedResource the resource descriptor for the Groovy script or XML file, * allowing specification of an encoding to use for parsing the file * @return the number of bean definitions found * @throws BeanDefinitionStoreException in case of loading or parsing errors */ public int loadBeanDefinitions(EncodedResource encodedResource) throws BeanDefinitionStoreException { // Check for XML files and redirect them to the "standard" XmlBeanDefinitionReader String filename = encodedResource.getResource().getFilename(); if (StringUtils.endsWithIgnoreCase(filename, ".xml")) { return this.standardXmlBeanDefinitionReader.loadBeanDefinitions(encodedResource); } if (logger.isTraceEnabled()) { logger.trace("Loading Groovy bean definitions from " + encodedResource); } @SuppressWarnings("serial") Closure<Object> beans = new Closure<>(this) { @Override public @Nullable Object call(Object... args) { invokeBeanDefiningClosure((Closure<?>) args[0]); return null; } }; Binding binding = new Binding() { @Override public void setVariable(String name, Object value) { if (currentBeanDefinition != null) { applyPropertyToBeanDefinition(name, value); } else { super.setVariable(name, value); } } }; binding.setVariable("beans", beans); int countBefore = getRegistry().getBeanDefinitionCount(); try { GroovyShell shell = new GroovyShell(getBeanClassLoader(), binding); shell.evaluate(encodedResource.getReader(), "beans"); } catch (Throwable ex) { throw new BeanDefinitionParsingException(new Problem("Error evaluating Groovy script: " + ex.getMessage(), new Location(encodedResource.getResource()), null, ex)); } int count = getRegistry().getBeanDefinitionCount() - countBefore; if (logger.isDebugEnabled()) { logger.debug("Loaded " + count + " bean definitions from " + encodedResource); } return count; } // METHODS FOR CONSUMPTION IN A GROOVY CLOSURE /** * Defines a set of beans for the given block or closure. 
* @param closure the block or closure * @return this {@code GroovyBeanDefinitionReader} instance */ public GroovyBeanDefinitionReader beans(Closure<?> closure) { return invokeBeanDefiningClosure(closure); } /** * Define an inner bean definition. * @param type the bean type * @return the bean definition */ public GenericBeanDefinition bean(Class<?> type) { GenericBeanDefinition beanDefinition = new GenericBeanDefinition(); beanDefinition.setBeanClass(type); return beanDefinition; } /** * Define an inner bean definition. * @param type the bean type * @param args the constructors arguments and closure configurer * @return the bean definition */ public AbstractBeanDefinition bean(Class<?> type, Object...args) { GroovyBeanDefinitionWrapper current = this.currentBeanDefinition; try { Closure<?> callable = null; Collection<Object> constructorArgs = null; if (!ObjectUtils.isEmpty(args)) { int index = args.length; Object lastArg = args[index - 1]; if (lastArg instanceof Closure<?> closure) { callable = closure; index--; } constructorArgs = resolveConstructorArguments(args, 0, index); } this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(null, type, constructorArgs); if (callable != null) { callable.call(this.currentBeanDefinition); } return this.currentBeanDefinition.getBeanDefinition(); } finally { this.currentBeanDefinition = current; } } /** * Define a Spring XML namespace definition to use. 
* @param definition the namespace definition */ public void xmlns(Map<String, String> definition) { if (!definition.isEmpty()) { for (Map.Entry<String,String> entry : definition.entrySet()) { String namespace = entry.getKey(); String uri = entry.getValue(); if (uri == null) { throw new IllegalArgumentException("Namespace definition must supply a non-null URI"); } NamespaceHandler namespaceHandler = this.groovyDslXmlBeanDefinitionReader.getNamespaceHandlerResolver().resolve(uri); if (namespaceHandler == null) { throw new BeanDefinitionParsingException(new Problem("No namespace handler found for URI: " + uri, new Location(new DescriptiveResource(("Groovy"))))); } this.namespaces.put(namespace, uri); } } } /** * Import Spring bean definitions from either XML or Groovy sources into the * current bean builder instance. * @param resourcePattern the resource pattern */ public void importBeans(String resourcePattern) throws IOException { loadBeanDefinitions(resourcePattern); } // INTERNAL HANDLING OF GROOVY CLOSURES AND PROPERTIES /** * This method overrides method invocation to create beans for each method name that * takes a class argument. 
*/ @Override public Object invokeMethod(String name, Object arg) { Object[] args = (Object[])arg; if ("beans".equals(name) && args.length == 1 && args[0] instanceof Closure<?> closure) { return beans(closure); } else if ("ref".equals(name)) { String refName; if (args[0] == null) { throw new IllegalArgumentException("Argument to ref() is not a valid bean or was not found"); } if (args[0] instanceof RuntimeBeanReference runtimeBeanReference) { refName = runtimeBeanReference.getBeanName(); } else { refName = args[0].toString(); } boolean parentRef = false; if (args.length > 1 && args[1] instanceof Boolean bool) { parentRef = bool; } return new RuntimeBeanReference(refName, parentRef); } else if (this.namespaces.containsKey(name) && args.length > 0 && args[0] instanceof Closure) { GroovyDynamicElementReader reader = createDynamicElementReader(name); reader.invokeMethod("doCall", args); } else if (args.length > 0 && args[0] instanceof Closure) { // abstract bean definition return invokeBeanDefiningMethod(name, args); } else if (args.length > 0 && (args[0] instanceof Class || args[0] instanceof RuntimeBeanReference || args[0] instanceof Map)) { return invokeBeanDefiningMethod(name, args); } else if (args.length > 1 && args[args.length -1] instanceof Closure) { return invokeBeanDefiningMethod(name, args); } MetaClass mc = DefaultGroovyMethods.getMetaClass(getRegistry()); if (!mc.respondsTo(getRegistry(), name, args).isEmpty()){ return mc.invokeMethod(getRegistry(), name, args); } return this; } private boolean addDeferredProperty(String property, Object newValue) { if (newValue instanceof List || newValue instanceof Map) { Assert.state(this.currentBeanDefinition != null, "No current bean definition set"); this.deferredProperties.put(this.currentBeanDefinition.getBeanName() + '.' 
+ property, new DeferredProperty(this.currentBeanDefinition, property, newValue)); return true; } return false; } private void finalizeDeferredProperties() { for (DeferredProperty dp : this.deferredProperties.values()) { if (dp.value instanceof List<?> list) { dp.value = manageListIfNecessary(list); } else if (dp.value instanceof Map<?, ?> map) { dp.value = manageMapIfNecessary(map); } dp.apply(); } this.deferredProperties.clear(); } /** * When a method argument is only a closure it is a set of bean definitions. * @param callable the closure argument * @return this {@code GroovyBeanDefinitionReader} instance */ protected GroovyBeanDefinitionReader invokeBeanDefiningClosure(Closure<?> callable) { callable.setDelegate(this); callable.call(); finalizeDeferredProperties(); return this; } /** * This method is called when a bean definition node is called. * @param beanName the name of the bean to define * @param args the arguments to the bean. The first argument is the class name, the last * argument is sometimes a closure. All the arguments in between are constructor arguments. 
* @return the bean definition wrapper */ private GroovyBeanDefinitionWrapper invokeBeanDefiningMethod(String beanName, Object[] args) { boolean hasClosureArgument = (args[args.length - 1] instanceof Closure); if (args[0] instanceof Class<?> beanClass) { if (hasClosureArgument) { if (args.length - 1 != 1) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper( beanName, beanClass, resolveConstructorArguments(args, 1, args.length - 1)); } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, beanClass); } } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper( beanName, beanClass, resolveConstructorArguments(args, 1, args.length)); } } else if (args[0] instanceof RuntimeBeanReference runtimeBeanReference) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); this.currentBeanDefinition.getBeanDefinition().setFactoryBeanName(runtimeBeanReference.getBeanName()); } else if (args[0] instanceof Map<?, ?> namedArgs) { // named constructor arguments if (args.length > 1 && args[1] instanceof Class<?> clazz) { List<Object> constructorArgs = resolveConstructorArguments(args, 2, (hasClosureArgument ? args.length - 1 : args.length)); this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, clazz, constructorArgs); for (Map.Entry<?, ?> entity : namedArgs.entrySet()) { String propName = (String) entity.getKey(); setProperty(propName, entity.getValue()); } } // factory method syntax else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); // First arg is the map containing factoryBean : factoryMethod Map.Entry<?, ?> factoryBeanEntry = namedArgs.entrySet().iterator().next(); // If we have a closure body, that will be the last argument. // In between are the constructor args int constructorArgsTest = (hasClosureArgument ? 
2 : 1); // If we have more than this number of args, we have constructor args if (args.length > constructorArgsTest){ // factory-method requires args int endOfConstructArgs = (hasClosureArgument ? args.length - 1 : args.length); this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, null, resolveConstructorArguments(args, 1, endOfConstructArgs)); } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); } this.currentBeanDefinition.getBeanDefinition().setFactoryBeanName(factoryBeanEntry.getKey().toString()); this.currentBeanDefinition.getBeanDefinition().setFactoryMethodName(factoryBeanEntry.getValue().toString()); } } else if (args[0] instanceof Closure) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName); this.currentBeanDefinition.getBeanDefinition().setAbstract(true); } else { List<Object> constructorArgs = resolveConstructorArguments(args, 0, (hasClosureArgument ? args.length - 1 : args.length)); this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(beanName, null, constructorArgs); } if (hasClosureArgument) { Closure<?> callable = (Closure<?>) args[args.length - 1]; callable.setDelegate(this); callable.setResolveStrategy(Closure.DELEGATE_FIRST); callable.call(this.currentBeanDefinition); } GroovyBeanDefinitionWrapper beanDefinition = this.currentBeanDefinition; this.currentBeanDefinition = null; beanDefinition.getBeanDefinition().setAttribute(GroovyBeanDefinitionWrapper.class.getName(), beanDefinition); getRegistry().registerBeanDefinition(beanName, beanDefinition.getBeanDefinition()); return beanDefinition; } protected List<Object> resolveConstructorArguments(Object[] args, int start, int end) { Object[] constructorArgs = Arrays.copyOfRange(args, start, end); for (int i = 0; i < constructorArgs.length; i++) { if (constructorArgs[i] instanceof GString) { constructorArgs[i] = constructorArgs[i].toString(); } else if (constructorArgs[i] instanceof List<?> list) { constructorArgs[i] = 
manageListIfNecessary(list); } else if (constructorArgs[i] instanceof Map<?, ?> map){ constructorArgs[i] = manageMapIfNecessary(map); } } return List.of(constructorArgs); } /** * Checks whether there are any {@link RuntimeBeanReference RuntimeBeanReferences} * inside the {@link Map} and converts it to a {@link ManagedMap} if necessary. * @param map the original Map * @return either the original map or a managed copy of it */ private Object manageMapIfNecessary(Map<?, ?> map) { boolean containsRuntimeRefs = false; for (Object element : map.values()) { if (element instanceof RuntimeBeanReference) { containsRuntimeRefs = true; break; } } if (containsRuntimeRefs) { Map<Object, Object> managedMap = new ManagedMap<>(); managedMap.putAll(map); return managedMap; } return map; } /** * Checks whether there are any {@link RuntimeBeanReference RuntimeBeanReferences} * inside the {@link List} and converts it to a {@link ManagedList} if necessary. * @param list the original List * @return either the original list or a managed copy of it */ private Object manageListIfNecessary(List<?> list) { boolean containsRuntimeRefs = false; for (Object element : list) { if (element instanceof RuntimeBeanReference) { containsRuntimeRefs = true; break; } } if (containsRuntimeRefs) { List<Object> managedList = new ManagedList<>(); managedList.addAll(list); return managedList; } return list; } /** * This method overrides property setting in the scope of the {@code GroovyBeanDefinitionReader} * to set properties on the current bean definition. 
*/ @Override public void setProperty(String name, Object value) { if (this.currentBeanDefinition != null) { applyPropertyToBeanDefinition(name, value); } } protected void applyPropertyToBeanDefinition(String name, Object value) { if (value instanceof GString) { value = value.toString(); } if (addDeferredProperty(name, value)) { return; } else if (value instanceof Closure<?> callable) { GroovyBeanDefinitionWrapper current = this.currentBeanDefinition; try { Class<?> parameterType = callable.getParameterTypes()[0]; if (Object.class == parameterType) { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(""); callable.call(this.currentBeanDefinition); } else { this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(null, parameterType); callable.call((Object) null); } value = this.currentBeanDefinition.getBeanDefinition(); } finally { this.currentBeanDefinition = current; } } Assert.state(this.currentBeanDefinition != null, "No current bean definition set"); this.currentBeanDefinition.addProperty(name, value); } /** * This method overrides property retrieval in the scope of the * {@code GroovyBeanDefinitionReader}. 
A property retrieval will either: * <ul> * <li>Retrieve a variable from the bean builder's binding if it exists * <li>Retrieve a RuntimeBeanReference for a specific bean if it exists * <li>Otherwise just delegate to MetaClass.getProperty which will resolve * properties from the {@code GroovyBeanDefinitionReader} itself * </ul> */ @Override public @Nullable Object getProperty(String name) { Binding binding = getBinding(); if (binding != null && binding.hasVariable(name)) { return binding.getVariable(name); } else { if (this.namespaces.containsKey(name)) { return createDynamicElementReader(name); } if (getRegistry().containsBeanDefinition(name)) { GroovyBeanDefinitionWrapper beanDefinition = (GroovyBeanDefinitionWrapper) getRegistry().getBeanDefinition(name).getAttribute(GroovyBeanDefinitionWrapper.class.getName()); if (beanDefinition != null) { return new GroovyRuntimeBeanReference(name, beanDefinition, false); } else { return new RuntimeBeanReference(name, false); } } // This is to deal with the case where the property setter is the last // statement in a closure (hence the return value) else if (this.currentBeanDefinition != null) { MutablePropertyValues pvs = this.currentBeanDefinition.getBeanDefinition().getPropertyValues(); if (pvs.contains(name)) { return pvs.get(name); } else { DeferredProperty dp = this.deferredProperties.get(this.currentBeanDefinition.getBeanName() + name); if (dp != null) { return dp.value; } else { return getMetaClass().getProperty(this, name); } } } else { return getMetaClass().getProperty(this, name); } } } @SuppressWarnings("NullAway") // Dataflow analysis limitation private GroovyDynamicElementReader createDynamicElementReader(String namespace) { XmlReaderContext readerContext = this.groovyDslXmlBeanDefinitionReader.createReaderContext( new DescriptiveResource("Groovy")); BeanDefinitionParserDelegate delegate = new BeanDefinitionParserDelegate(readerContext); boolean decorating = (this.currentBeanDefinition != null); if (!decorating) 
{ this.currentBeanDefinition = new GroovyBeanDefinitionWrapper(namespace); } return new GroovyDynamicElementReader(namespace, this.namespaces, delegate, this.currentBeanDefinition, decorating) { @Override protected void afterInvocation() { if (!this.decorating) { currentBeanDefinition = null; } } }; } /** * This class is used to defer the adding of a property to a bean definition * until later. This is for a case where you assign a property to a list that * may not contain bean references at that point of assignment, but may later; * hence, it would need to be managed. */ private static class DeferredProperty { private final GroovyBeanDefinitionWrapper beanDefinition; private final String name; public @Nullable Object value; public DeferredProperty(GroovyBeanDefinitionWrapper beanDefinition, String name, @Nullable Object value) { this.beanDefinition = beanDefinition; this.name = name; this.value = value; } public void apply() { this.beanDefinition.addProperty(this.name, this.value); } } /** * A RuntimeBeanReference that takes care of adding new properties to runtime references. 
*/ private class GroovyRuntimeBeanReference extends RuntimeBeanReference implements GroovyObject { private final GroovyBeanDefinitionWrapper beanDefinition; private MetaClass metaClass; public GroovyRuntimeBeanReference(String beanName, GroovyBeanDefinitionWrapper beanDefinition, boolean toParent) { super(beanName, toParent); this.beanDefinition = beanDefinition; this.metaClass = InvokerHelper.getMetaClass(this); } @Override public MetaClass getMetaClass() { return this.metaClass; } @Override public @Nullable Object getProperty(String property) { if (property.equals("beanName")) { return getBeanName(); } else if (property.equals("source")) { return getSource(); } else { return new GroovyPropertyValue( property, this.beanDefinition.getBeanDefinition().getPropertyValues().get(property)); } } @Override public Object invokeMethod(String name, Object args) { return this.metaClass.invokeMethod(this, name, args); } @Override public void setMetaClass(MetaClass metaClass) { this.metaClass = metaClass; } @Override public void setProperty(String property, Object newValue) { if (!addDeferredProperty(property, newValue)) { this.beanDefinition.getBeanDefinition().getPropertyValues().add(property, newValue); } } /** * Wraps a bean definition property and ensures that any RuntimeBeanReference * additions to it are deferred for resolution later. 
*/ private class GroovyPropertyValue extends GroovyObjectSupport { private final String propertyName; private final @Nullable Object propertyValue; public GroovyPropertyValue(String propertyName, @Nullable Object propertyValue) { this.propertyName = propertyName; this.propertyValue = propertyValue; } @SuppressWarnings("unused") public void leftShift(Object value) { InvokerHelper.invokeMethod(this.propertyValue, "leftShift", value); updateDeferredProperties(value); } @SuppressWarnings("unused") public boolean add(Object value) { boolean retVal = (Boolean) InvokerHelper.invokeMethod(this.propertyValue, "add", value); updateDeferredProperties(value); return retVal; } @SuppressWarnings("unused") public boolean addAll(Collection<?> values) { boolean retVal = (Boolean) InvokerHelper.invokeMethod(this.propertyValue, "addAll", values); for (Object value : values) { updateDeferredProperties(value); } return retVal; } @Override public Object invokeMethod(String name, Object args) { return InvokerHelper.invokeMethod(this.propertyValue, name, args); } @Override public Object getProperty(String name) { return InvokerHelper.getProperty(this.propertyValue, name); } @Override public void setProperty(String name, Object value) { InvokerHelper.setProperty(this.propertyValue, name, value); } private void updateDeferredProperties(Object value) { if (value instanceof RuntimeBeanReference) { deferredProperties.put(beanDefinition.getBeanName(), new DeferredProperty(beanDefinition, this.propertyName, this.propertyValue)); } } } } }
java
github
https://github.com/spring-projects/spring-framework
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
#ifndef BOOST_ARCHIVE_XML_WIARCHIVE_HPP #define BOOST_ARCHIVE_XML_WIARCHIVE_HPP // MS compatible compilers support #pragma once #if defined(_MSC_VER) # pragma once #endif /////////1/////////2/////////3/////////4/////////5/////////6/////////7/////////8 // xml_wiarchive.hpp // (C) Copyright 2002 Robert Ramey - http://www.rrsd.com . // Use, modification and distribution is subject to the Boost Software // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org for updates, documentation, and revision history. #include <boost/config.hpp> #ifdef BOOST_NO_STD_WSTREAMBUF #error "wide char i/o not supported on this platform" #else #include <istream> #include <boost/smart_ptr/scoped_ptr.hpp> #include <boost/archive/detail/auto_link_warchive.hpp> #include <boost/archive/basic_text_iprimitive.hpp> #include <boost/archive/basic_xml_iarchive.hpp> #include <boost/archive/detail/register_archive.hpp> #include <boost/serialization/item_version_type.hpp> #include <boost/archive/detail/abi_prefix.hpp> // must be the last header #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4511 4512) #endif namespace boost { namespace archive { namespace detail { template<class Archive> class interface_iarchive; } // namespace detail template<class CharType> class basic_xml_grammar; typedef basic_xml_grammar<wchar_t> xml_wgrammar; template<class Archive> class BOOST_SYMBOL_VISIBLE xml_wiarchive_impl : public basic_text_iprimitive<std::wistream>, public basic_xml_iarchive<Archive> { #ifdef BOOST_NO_MEMBER_TEMPLATE_FRIENDS public: #else protected: friend class detail::interface_iarchive<Archive>; friend class basic_xml_iarchive<Archive>; friend class load_access; #endif std::locale archive_locale; boost::scoped_ptr<xml_wgrammar> gimpl; std::wistream & get_is(){ return is; } template<class T> void load(T & t){ basic_text_iprimitive<std::wistream>::load(t); } void load(version_type & t){ unsigned 
int v; load(v); t = version_type(v); } void load(boost::serialization::item_version_type & t){ unsigned int v; load(v); t = boost::serialization::item_version_type(v); } BOOST_WARCHIVE_DECL void load(char * t); #ifndef BOOST_NO_INTRINSIC_WCHAR_T BOOST_WARCHIVE_DECL void load(wchar_t * t); #endif BOOST_WARCHIVE_DECL void load(std::string &s); #ifndef BOOST_NO_STD_WSTRING BOOST_WARCHIVE_DECL void load(std::wstring &ws); #endif template<class T> void load_override(T & t){ basic_xml_iarchive<Archive>::load_override(t); } BOOST_WARCHIVE_DECL void load_override(class_name_type & t); BOOST_WARCHIVE_DECL void init(); BOOST_WARCHIVE_DECL xml_wiarchive_impl(std::wistream & is, unsigned int flags); BOOST_WARCHIVE_DECL ~xml_wiarchive_impl() BOOST_OVERRIDE; }; } // namespace archive } // namespace boost #ifdef BOOST_MSVC # pragma warning(pop) #endif #include <boost/archive/detail/abi_suffix.hpp> // pops abi_suffix.hpp pragmas #ifdef BOOST_MSVC # pragma warning(push) # pragma warning(disable : 4511 4512) #endif namespace boost { namespace archive { class BOOST_SYMBOL_VISIBLE xml_wiarchive : public xml_wiarchive_impl<xml_wiarchive>{ public: xml_wiarchive(std::wistream & is, unsigned int flags = 0) : xml_wiarchive_impl<xml_wiarchive>(is, flags) { if(0 == (flags & no_header)) init(); } ~xml_wiarchive() BOOST_OVERRIDE {} }; } // namespace archive } // namespace boost // required by export BOOST_SERIALIZATION_REGISTER_ARCHIVE(boost::archive::xml_wiarchive) #ifdef BOOST_MSVC #pragma warning(pop) #endif #endif // BOOST_NO_STD_WSTREAMBUF #endif // BOOST_ARCHIVE_XML_WIARCHIVE_HPP
unknown
github
https://github.com/mysql/mysql-server
extra/boost/boost_1_87_0/boost/archive/xml_wiarchive.hpp
""" TinyURL is a URL shortening service where you enter a URL such as https://leetcode.com/problems/design-tinyurl and it returns a short URL such as http://tinyurl.com/4e9iAk. Design the encode and decode methods for the TinyURL service. There is no restriction on how your encode/decode algorithm should work. You just need to ensure that a URL can be encoded to a tiny URL and the tiny URL can be decoded to the original URL. """ class Codec: def __init__(self): self.chartable = {i : str(i) for i in range(10)} for i in range(10, 36): self.chartable[i] = chr(55 + i) for i in range(36, 62): self.chartable[i] = chr(61 + i) self.urltable= {} def encode(self, longUrl): """Encodes a URL to a shortened URL. :type longUrl: str :rtype: str """ #shadec = int(hashlib.sha256(longUrl.encode()).hexdigest(), 16) shadec = hash(longUrl) idx = [] while len(idx) < 6: shadec, digit = divmod(shadec, 62) idx.append(self.chartable[digit]) urlkey = "".join(idx[::-1]) self.urltable[urlkey] = longUrl return "http://tinyurl.com/" + urlkey def decode(self, shortUrl): """Decodes a shortened URL to its original URL. :type shortUrl: str :rtype: str """ urlkey = shortUrl.split('/')[-1] return self.urltable[urlkey] # Your Codec object will be instantiated and called as such: # codec = Codec() # codec.decode(codec.encode(url)) import hashlib a = Codec() print(a.decode(a.encode("www.google.com"))) print(a.decode(a.encode("www.facebook.com"))) print(a.decode(a.encode("www.amazon.com")))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Python 2/3 compatibility from __future__ import print_function import numpy as np from numpy import pi, sin, cos import cv2 defaultSize = 512 class TestSceneRender(): def __init__(self, bgImg = None, fgImg = None, deformation = False, noise = 0.0, speed = 0.25, **params): self.time = 0.0 self.timeStep = 1.0 / 30.0 self.foreground = fgImg self.deformation = deformation self.noise = noise self.speed = speed if bgImg is not None: self.sceneBg = bgImg.copy() else: self.sceneBg = np.zeros(defaultSize, defaultSize, np.uint8) self.w = self.sceneBg.shape[0] self.h = self.sceneBg.shape[1] if fgImg is not None: self.foreground = fgImg.copy() self.center = self.currentCenter = (int(self.w/2 - fgImg.shape[0]/2), int(self.h/2 - fgImg.shape[1]/2)) self.xAmpl = self.sceneBg.shape[0] - (self.center[0] + fgImg.shape[0]) self.yAmpl = self.sceneBg.shape[1] - (self.center[1] + fgImg.shape[1]) self.initialRect = np.array([ (self.h/2, self.w/2), (self.h/2, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2 + self.w/10), (self.h/2 + self.h/10, self.w/2)]).astype(int) self.currentRect = self.initialRect np.random.seed(10) def getXOffset(self, time): return int(self.xAmpl*cos(time*self.speed)) def getYOffset(self, time): return int(self.yAmpl*sin(time*self.speed)) def setInitialRect(self, rect): self.initialRect = rect def getRectInTime(self, time): if self.foreground is not None: tmp = np.array(self.center) + np.array((self.getXOffset(time), self.getYOffset(time))) x0, y0 = tmp x1, y1 = tmp + self.foreground.shape[0:2] return np.array([y0, x0, y1, x1]) else: x0, y0 = self.initialRect[0] + np.array((self.getXOffset(time), self.getYOffset(time))) x1, y1 = self.initialRect[2] + np.array((self.getXOffset(time), self.getYOffset(time))) return np.array([y0, x0, y1, x1]) def getCurrentRect(self): if self.foreground is not None: x0 = self.currentCenter[0] y0 = self.currentCenter[1] x1 = self.currentCenter[0] + self.foreground.shape[0] y1 = self.currentCenter[1] + 
self.foreground.shape[1] return np.array([y0, x0, y1, x1]) else: x0, y0 = self.currentRect[0] x1, y1 = self.currentRect[2] return np.array([x0, y0, x1, y1]) def getNextFrame(self): img = self.sceneBg.copy() if self.foreground is not None: self.currentCenter = (self.center[0] + self.getXOffset(self.time), self.center[1] + self.getYOffset(self.time)) img[self.currentCenter[0]:self.currentCenter[0]+self.foreground.shape[0], self.currentCenter[1]:self.currentCenter[1]+self.foreground.shape[1]] = self.foreground else: self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3)) if self.deformation: self.currentRect[1:3] += int(self.h/20*cos(self.time)) cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) self.time += self.timeStep if self.noise: noise = np.zeros(self.sceneBg.shape, np.int8) cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) img = cv2.add(img, noise, dtype=cv2.CV_8UC3) return img def resetTime(self): self.time = 0.0 if __name__ == '__main__': backGr = cv2.imread('../../../samples/data/lena.jpg') render = TestSceneRender(backGr, noise = 0.5) while True: img = render.getNextFrame() cv2.imshow('img', img) ch = cv2.waitKey(3) if ch == 27: break cv2.destroyAllWindows()
unknown
codeparrot/codeparrot-clean
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) /* * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, * whether or not the build options for those features are specified. * Therefore, we must test #definitions of CPU features when option native/host * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise * the test will be broken and leads to enable all possible features. */ #if !defined(__SSE__) || !defined(__SSE2__) || !defined(__SSE3__) || \ !defined(__SSSE3__) || !defined(__SSE4_1__) || !defined(__SSE4_2__) || !defined(__POPCNT__) #error HOST/ARCH does not support x86_v2 #endif #endif #include <xmmintrin.h> // SSE #include <emmintrin.h> // SSE2 #include <pmmintrin.h> // SSE3 #include <tmmintrin.h> // SSSE3 #include <smmintrin.h> // SSE4.1 #ifdef _MSC_VER #include <nmmintrin.h> // SSE4.2 and POPCNT for MSVC #else #include <nmmintrin.h> // SSE4.2 #include <popcntintrin.h> // POPCNT #endif int main(int argc, char **argv) { // to prevent optimization int seed = (int)argv[argc-1][0]; volatile int result = 0; // SSE test __m128 a = _mm_set1_ps((float)seed); __m128 b = _mm_set1_ps(2.0f); __m128 c = _mm_add_ps(a, b); result += (int)_mm_cvtss_f32(c); // SSE2 test __m128i ai = _mm_set1_epi32(seed); __m128i bi = _mm_set1_epi32(2); __m128i ci = _mm_add_epi32(ai, bi); result += _mm_cvtsi128_si32(ci); // SSE3 test __m128 d = _mm_movehdup_ps(a); result += (int)_mm_cvtss_f32(d); // SSSE3 test __m128i di = _mm_abs_epi16(_mm_set1_epi16((short)seed)); result += _mm_cvtsi128_si32(di); // SSE4.1 test __m128i ei = _mm_max_epi32(ai, bi); result += _mm_cvtsi128_si32(ei); // SSE4.2 test __m128i str1 = _mm_set1_epi8((char)seed); __m128i str2 = _mm_set1_epi8((char)(seed + 1)); int res4_2 = _mm_cmpestra(str1, 4, str2, 4, 0); result += res4_2; // POPCNT test unsigned int test_val = (unsigned int)seed | 0x01234567; int pcnt = _mm_popcnt_u32(test_val); result += pcnt; return result; }
c
github
https://github.com/numpy/numpy
meson_cpu/x86/test_x86_v2.c
# frozen_string_literal: true module Arel # :nodoc: all class SelectManager < Arel::TreeManager include Arel::Crud STRING_OR_SYMBOL_CLASS = [Symbol, String] def initialize(table = nil) super @ast = Nodes::SelectStatement.new(table) @ctx = @ast.cores.last end def initialize_copy(other) super @ctx = @ast.cores.last end def limit @ast.limit && @ast.limit.expr end alias :taken :limit def constraints @ctx.wheres end def offset @ast.offset && @ast.offset.expr end def skip(amount) if amount @ast.offset = Nodes::Offset.new(amount) else @ast.offset = nil end self end alias :offset= :skip ### # Produces an Arel::Nodes::Exists node def exists Arel::Nodes::Exists.new @ast end def as(other) create_table_alias grouping(@ast), Nodes::SqlLiteral.new(other, retryable: true) end def lock(locking = Arel.sql("FOR UPDATE")) case locking when true locking = Arel.sql("FOR UPDATE") when Arel::Nodes::SqlLiteral when String locking = Arel.sql locking end @ast.lock = Nodes::Lock.new(locking) self end def locked @ast.lock end def on(*exprs) @ctx.source.right.last.right = Nodes::On.new(collapse(exprs)) self end def group(*columns) columns.each do |column| # FIXME: backwards compat case column when Nodes::SqlLiteral when String column = Nodes::SqlLiteral.new(column) when Symbol column = Nodes::SqlLiteral.new(column.name) end @ctx.groups.push Nodes::Group.new column end self end def from(table) table = Nodes::SqlLiteral.new(table) if String === table case table when Nodes::Join @ctx.source.right << table else @ctx.source.left = table end self end def froms @ast.cores.filter_map { |x| x.from } end def join(relation, klass = Nodes::InnerJoin) return self unless relation case relation when String, Nodes::SqlLiteral raise EmptyJoinError if relation.empty? 
klass = Nodes::StringJoin end @ctx.source.right << create_join(relation, nil, klass) self end def outer_join(relation) join(relation, Nodes::OuterJoin) end def having(expr) @ctx.havings << expr self end def window(name) window = Nodes::NamedWindow.new(name) @ctx.windows.push window window end def project(*projections) # FIXME: converting these to SQLLiterals is probably not good, but # rails tests require it. @ctx.projections.concat projections.map { |x| STRING_OR_SYMBOL_CLASS.include?(x.class) ? Nodes::SqlLiteral.new(x.to_s) : x } self end def projections @ctx.projections end def projections=(projections) @ctx.projections = projections end def optimizer_hints(*hints) unless hints.empty? @ctx.optimizer_hints = Arel::Nodes::OptimizerHints.new(hints) end self end def distinct(value = true) if value @ctx.set_quantifier = Arel::Nodes::Distinct.new else @ctx.set_quantifier = nil end self end def distinct_on(value) if value @ctx.set_quantifier = Arel::Nodes::DistinctOn.new(value) else @ctx.set_quantifier = nil end self end def order(*expr) # FIXME: We SHOULD NOT be converting these to SqlLiteral automatically @ast.orders.concat expr.map { |x| STRING_OR_SYMBOL_CLASS.include?(x.class) ? Nodes::SqlLiteral.new(x.to_s) : x } self end def orders @ast.orders end def where(expr) if Arel::TreeManager === expr expr = expr.ast end @ctx.wheres << expr self end def where_sql(engine = Table.engine) return if @ctx.wheres.empty? Nodes::SqlLiteral.new("WHERE #{Nodes::And.new(@ctx.wheres).to_sql(engine)}") end def union(operation, other = nil) if other node_class = Nodes.const_get("Union#{operation.to_s.capitalize}") else other = operation node_class = Nodes::Union end node_class.new self.ast, other.ast end def intersect(other) Nodes::Intersect.new ast, other.ast end def except(other) Nodes::Except.new ast, other.ast end alias :minus :except def lateral(table_name = nil) base = table_name.nil? ? 
ast : as(table_name) Nodes::Lateral.new(base) end def with(*subqueries) if subqueries.first.is_a? Symbol node_class = Nodes.const_get("With#{subqueries.shift.to_s.capitalize}") else node_class = Nodes::With end @ast.with = node_class.new(subqueries.flatten) self end def take(limit) if limit @ast.limit = Nodes::Limit.new(limit) else @ast.limit = nil end self end alias limit= take def join_sources @ctx.source.right end def source @ctx.source end def comment(*values) if values.any? @ctx.comment = Nodes::Comment.new(values) self else @ctx.comment end end private def collapse(exprs) exprs = exprs.compact exprs.map! { |expr| if String === expr # FIXME: Don't do this automatically Arel.sql(expr) else expr end } if exprs.length == 1 exprs.first else create_and exprs end end end end
ruby
github
https://github.com/rails/rails
activerecord/lib/arel/select_manager.rb
// Copyright 2024 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package storage import ( "context" "fmt" "testing" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/cloud" _ "github.com/cockroachdb/cockroach/pkg/cloud/amazon" _ "github.com/cockroachdb/cockroach/pkg/cloud/gcp" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/testutils/skip" "github.com/cockroachdb/cockroach/pkg/util/envutil" "github.com/cockroachdb/cockroach/pkg/util/humanizeutil" "github.com/cockroachdb/pebble/objstorage" "github.com/cockroachdb/pebble/objstorage/objstorageprovider" "github.com/stretchr/testify/require" ) var ( gcsPrefix = `gs://cockroach-fixtures-us-east1/benchdata/copyssts/?AUTH=implicit` s3Prefix = `s3://cockroach-fixtures-us-east-2/benchdata/copyssts/?AUTH=implicit` // files are ssts selected -- at random -- from a backup of tpcc 150k backed // up by 24.1. While any one file would have been enough to write a benchmark, // an unlucky random choice could have a region unusually compression-friendly // or unfriendly, thus we have a sampling of files and for higher b.N counts // we can cycle them. All selected files are >100mib, to ensure a 64mb read up // to 32mb offset can be satisfied. 
files = []string{ `969308161621262343.sst`, // 135.73 MiB `969324898841395202.sst`, // 144.84 MiB `969328101645746185.sst`, // 109.09 MiB `969307702044262410.sst`, // 135.56 MiB `969327387185250307.sst`, // 109.42 MiB `969317317823365130.sst`, // 145.84 MiB `969330433561821186.sst`, // 108.79 MiB `969312935730315269.sst`, // 136.98 MiB `969316100247453706.sst`, // 112.23 MiB } ) func BenchmarkObjStorageCopyGCS(b *testing.B) { benchObjstorageCopy(b, gcsPrefix, files) } func BenchmarkObjStorageCopyS3(b *testing.B) { benchObjstorageCopy(b, s3Prefix, files) } var runCloudBenches = envutil.EnvOrDefaultBool("COCKROACH_BENCHMARK_REMOTE_SSTS", false) func benchObjstorageCopy(b *testing.B, prefix string, suffixes []string) { if !runCloudBenches { skip.IgnoreLint(b, "only run manually with BENCHMARK_REMOTE_SSTS to bench cloud io") } ctx := context.Background() b.StopTimer() st := cluster.MakeTestingClusterSettings() cfg := base.ExternalIODirConfig{} d, err := cloud.ExternalStorageConfFromURI(prefix, username.SQLUsername{}) require.NoError(b, err) es, err := cloud.MakeEarlyBootExternalStorage(ctx, d, cfg, st, nil, cloud.NilMetrics) require.NoError(b, err) defer es.Close() s := MakeExternalStorageWrapper(ctx, es) b.ResetTimer() b.StartTimer() for _, size := range []int64{4 << 10, 64 << 10, 1 << 20, 8 << 20, 32 << 20, 64 << 20} { b.Run(fmt.Sprintf("size=%s", humanizeutil.IBytes(size)), func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { suffix := suffixes[i%len(suffixes)] r, fileSize, err := s.ReadObject(ctx, suffix) if err != nil { b.Fatal(err) } readable := objstorageprovider.NewRemoteReadable(r, fileSize, s.IsNotExistError) rh := readable.NewReadHandle(0 /* readBeforeSize */) if err := objstorage.Copy(ctx, rh, discard{}, 4<<20, uint64(size)); err != nil { b.Fatal(err) } if err := rh.Close(); err != nil { b.Fatal(err) } if err := readable.Close(); err != nil { b.Fatal(err) } } b.SetBytes(size) }) } } type discard struct{} var _ objstorage.Writable = discard{} 
func (discard) StartMetadataPortion() error { return nil } func (discard) Write(p []byte) error { return nil } func (discard) Finish() error { return nil } func (discard) Abort() {}
go
github
https://github.com/cockroachdb/cockroach
pkg/storage/bench_cloud_io_test.go
# Copyright 2020 The UniqueRandomizer Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Supports sampling unique sequences of discrete random choices.""" import abc import math import typing from typing import Callable, List, Optional, Tuple, Union import numpy as np import scipy.special from unique_randomizer import stochastic_beam_search as sbs def log_subtract(x: float, y: float) -> float: """Returns log(exp(x) - exp(y)), or negative infinity if x <= y.""" # Inspired by https://stackoverflow.com/questions/778047. return x + np.log1p(-np.exp(np.minimum(y - x, 0))) def sample_log_distribution(log_distribution: np.ndarray) -> np.int64: """Samples from an unnormalized probability distribution in log space. Args: log_distribution: A 1-D numpy array of unnormalized log probabilities. Returns: An int in the range [0, len(log_distribution)), sampled according to the given distribution. """ # A slower but more numerically stable solution is discussed at # https://stats.stackexchange.com/questions/64081. However, we expect that # as the randomizer runs, the probability distribution at each node should # not be skewed significantly more than the initial provided distribution, # since we will sample more frequently from high-probability choices until # the probabilities "even out". 
unnormalized = np.exp(log_distribution - np.max(log_distribution)) distribution = unnormalized / np.sum(unnormalized) return np.random.choice(np.arange(len(distribution)), p=distribution) class _TrieNode(object): """A trie node for UniqueRandomizer. Attributes: parent: The _TrieNode parent of this node, or None if this node is the root. index_in_parent: The index of this node in the parent, or None if this node is the root. children: A list of _TrieNode children. A child may be None if it is not expanded yet. The entire list will be None if this node has never sampled a child yet. The list will be empty if this node is a leaf in the trie. unsampled_log_masses: A numpy array containing the current (unsampled) log probability mass of each child, or None if this node has never sampled a child yet. data: A dict capable of storing arbitrary user-provided data for the node. sbs_child_state_cache: Used for caching children's states when sampling batches. """ def __init__(self, parent: Optional['_TrieNode'], index_in_parent: Optional[int]) -> None: """Initializes a _TrieNode. Args: parent: The parent of this node, or None if this node is the root. index_in_parent: This node's index in the parent node, or None if this node is the root. """ self.parent = parent self.index_in_parent = index_in_parent self.children = None self.unsampled_log_masses = None self.data = {} self.sbs_child_state_cache = None def initial_log_mass_if_not_sampled(self) -> float: """Returns this node's initial log probability mass. This assumes that no samples have been drawn from this node yet. """ # If no samples have been drawn yet, the unsampled log mass equals the # desired initial log mass. return (self.parent.unsampled_log_masses[self.index_in_parent] # If the node is the root, the initial log mass is 0.0. 
if self.parent else 0.0) def sample_child( self, initial_distribution: Union[np.ndarray, List[float], None] ) -> Tuple['_TrieNode', int]: """Returns a child _TrieNode according to the given initial distribution. This will create the child _TrieNode if it does not already exist. Args: initial_distribution: A 1-D numpy array containing the initial probability distribution that this node should use. Returns: A tuple of the child _TrieNode and the child's index. """ if not self.children: # This is the first sample. Set up children. self.children = [None] * len(initial_distribution) self.unsampled_log_masses = (np.log(initial_distribution) + self.initial_log_mass_if_not_sampled()) # Faster to choose from initial_distribution when it's still accurate # (i.e., on the first sample). child_index = np.random.choice(np.arange(len(initial_distribution)), p=initial_distribution) else: child_index = sample_log_distribution(self.unsampled_log_masses) child = self.children[child_index] if not child: child = self.children[child_index] = _TrieNode( parent=self, index_in_parent=child_index) return child, int(child_index) def mark_leaf(self) -> None: """Marks this node as a leaf.""" self.children = [] def exhausted(self) -> bool: """Returns whether all of the mass at this node has been sampled.""" # Distinguish [] and None. if self.children == []: # pylint: disable=g-explicit-bool-comparison return True if self.unsampled_log_masses is None: return False # This node is not a leaf but has never been sampled from. 
return all(np.isneginf(log_mass) for log_mass in self.unsampled_log_masses) def mark_mass_sampled(self, log_mass: float) -> None: """Recursively subtracts log_mass from this node and its ancestors.""" if not self.parent: return if self.exhausted(): new_log_mass = np.NINF else: new_log_mass = log_subtract( self.parent.unsampled_log_masses[self.index_in_parent], log_mass) self.parent.unsampled_log_masses[self.index_in_parent] = new_log_mass self.parent.mark_mass_sampled(log_mass) def needs_probabilities(self) -> bool: """Returns whether this node needs probabilities.""" return self.children is None class AllSequencesSampledError(Exception): """Raised when all possible sequences have already been sampled.""" class Randomizer(object, metaclass=abc.ABCMeta): """Samples sequences of discrete random choices. The `sample_*` methods all return an int in the range [0, num_choices). """ def __init__(self) -> None: """Initializes this Randomizer object.""" self._num_sequences_sampled = 0 self._exhausted = False @abc.abstractmethod def sample_distribution( self, probability_distribution: Union[np.ndarray, List[float], None]) -> int: """Samples from a given probability distribution (as a list of floats).""" def sample_boolean(self, probability_1: float = 0.5) -> int: """Samples from a Bernoulli distribution with a given probability of 1.""" return self.sample_distribution([1 - probability_1, probability_1]) def sample_uniform(self, num_choices: int) -> int: """Samples from a uniform distribution over a given number of choices.""" return self.sample_distribution(np.ones(num_choices) / num_choices) @abc.abstractmethod def mark_sequence_complete(self) -> float: """Used to mark a complete sequence of choices. Returns: The log probability of the finished sequence, with respect to the initial (given) probability distribution. 
""" def num_sequences_sampled(self) -> int: """Returns the number of complete sequences of choices sampled so far.""" return self._num_sequences_sampled def exhausted(self) -> bool: """Returns whether all possible sequences of choices have been sampled.""" return self._exhausted @abc.abstractmethod def fraction_sampled(self) -> float: """Returns the total probability mass that has been sampled.""" @abc.abstractmethod def needs_probabilities(self) -> bool: """Returns whether the current node requires probabilities. In UniqueRandomizer, a _TrieNode will need probabilities if it has never sampled a child before. Then, it will no longer need probabilities to sample a child, since it stores its own updated probabilities. This can enable the client to avoid unnecessarily recomputing probabilities. """ class UniqueRandomizer(Randomizer): """Samples unique sequences of discrete random choices. When using a UniqueRandomizer object to provide randomness, the client algorithm must be deterministic and behave identically when given a constant sequence of choices. When a sequence of choices is complete, the client algorithm must call `mark_sequence_complete()`. This will update the internal data so that the next sampled choices form a new sequence, which is guaranteed to be different from previous complete sequences. Choices returned by a UniqueRandomizer object respect the initial probability distributions provided by the client algorithm, conditioned on the constraint that a complete sequence of choices cannot be sampled more than once. The `sample_*` methods all return an int in the range [0, num_choices). All of these methods raise AllSequencesSampledError if all possible sequences of choices have already been sampled. Attributes: current_node: The current node in the trie. 
""" def __init__(self) -> None: """Initializes a UniqueRandomizer object.""" super(UniqueRandomizer, self).__init__() self._root_node = _TrieNode(None, None) self.current_node = self._root_node def sample_distribution( self, probability_distribution: Union[np.ndarray, List[float], None]) -> int: """Samples from a given probability distribution (as a list of floats).""" if self._exhausted: raise AllSequencesSampledError('All sequences of choices have been ' 'sampled already.') self.current_node, choice_index = self.current_node.sample_child( probability_distribution) return choice_index def mark_sequence_complete(self) -> float: """Used to mark a complete sequence of choices. Returns: The log probability of the finished sequence, with respect to the initial (given) probability distribution. """ self._num_sequences_sampled += 1 self.current_node.mark_leaf() log_sampled_mass = self.current_node.initial_log_mass_if_not_sampled() self.current_node.mark_mass_sampled(log_sampled_mass) self.current_node = self._root_node self._exhausted = self._root_node.exhausted() return float(log_sampled_mass) def fraction_sampled(self) -> float: """Returns the total probability mass that has been sampled.""" if self._exhausted: return 1.0 if not self._root_node.children: # The root node has never sampled a child before. return 0.0 return float(1.0 - np.exp(scipy.special.logsumexp( self._root_node.unsampled_log_masses))) def needs_probabilities(self) -> bool: """Returns whether the current node requires probabilities.""" return self.current_node.needs_probabilities() def sample_batch( self, child_log_probability_fn: Callable[[List[sbs.State]], List[np.ndarray]], child_state_fn: Callable[[List[Tuple[sbs.State, int]]], List[Tuple[Union[sbs.State, sbs.Output], bool]]], root_state: sbs.State, k: int) -> List[sbs.BeamNode]: """Samples a batch of outputs using Stochastic Beam Search. Nodes in the beam include "states" which can be anything but must contain enough information to: 1. 
Define a consistent ordering of all children of the node. 2. Enumerate the probabilities of all children. 3. Produce the state of the child with a given index. Args: child_log_probability_fn: A function that takes a list of states and returns the log probabilities of the child states of each input state. child_state_fn: A function that takes a list of (state, i) pairs and maps each to a (ith_child, is_leaf) pair. If ith_child is a leaf state, is_leaf should be True, and ith_child will potentially be an actual sampled item that should be returned by stochastic_beam_search (it may have a different form than other non-leaf states). root_state: The state of the root node. k: The desired number of samples. Returns: A list of up to k BeamNode objects, corresponding to the sampled leaves. """ # A state here contains a _TrieNode and the client's state. def wrapper_child_log_probability_fn( randomizer_states: List[Tuple[_TrieNode, sbs.State]] ) -> List[np.ndarray]: """Computes child probabilities while updating the trie.""" results = [None] * len(randomizer_states) unexpanded_client_states = [] unexpanded_indices = [] for i, (node, client_state) in enumerate(randomizer_states): if node.unsampled_log_masses is None: # We have never computed this node's child probabilities before. unexpanded_client_states.append(client_state) unexpanded_indices.append(i) else: # This node already has unsampled_log_masses set. We just need to # normalize them. log_unnormalized = node.unsampled_log_masses unnormalized = np.exp(log_unnormalized - np.max(log_unnormalized)) results[i] = np.log(unnormalized / np.sum(unnormalized)) # Use client's child_log_probability_fn to get probabilities for # unexpanded states. 
if unexpanded_client_states: client_fn_results = child_log_probability_fn(unexpanded_client_states) for i, log_probs in zip(unexpanded_indices, client_fn_results): results[i] = log_probs node = randomizer_states[i][0] node.unsampled_log_masses = (log_probs + node.initial_log_mass_if_not_sampled()) return typing.cast(List[np.ndarray], results) def wrapper_child_state_fn( randomizer_state_index_pairs: List[Tuple[Tuple[_TrieNode, sbs.State], int]] ) -> List[Tuple[Union[sbs.State, sbs.Output], bool]]: """Computes child states while updating the trie.""" results = [None] * len(randomizer_state_index_pairs) unexpanded_client_state_index_pairs = [] unexpanded_indices = [] for i, ((node, client_state), child_index) in enumerate( randomizer_state_index_pairs): # Initialize children structures if needed. if node.children is None: num_children = len(typing.cast(np.ndarray, node.unsampled_log_masses)) node.children = [None] * num_children node.sbs_child_state_cache = [None] * num_children if node.children[child_index] is None: # This child has not been created before. unexpanded_client_state_index_pairs.append( (client_state, child_index)) unexpanded_indices.append(i) else: # The child has been created before. child_client_state, child_is_leaf = ( node.sbs_child_state_cache[child_index]) results[i] = ((node.children[child_index], child_client_state), child_is_leaf) # Use client's child_log_probability_fn to get child client states. 
if unexpanded_client_state_index_pairs: client_fn_results = child_state_fn(unexpanded_client_state_index_pairs) for i, (child_client_state, child_is_leaf) in zip(unexpanded_indices, client_fn_results): (node, _), child_index = randomizer_state_index_pairs[i] child_node = _TrieNode(parent=node, index_in_parent=child_index) node.children[child_index] = child_node node.sbs_child_state_cache[child_index] = (child_client_state, child_is_leaf) results[i] = ((child_node, child_client_state), child_is_leaf) return typing.cast(List[Tuple[Union[sbs.State, sbs.Output], bool]], results) randomizer_beam_nodes = sbs.stochastic_beam_search( child_log_probability_fn=wrapper_child_log_probability_fn, child_state_fn=wrapper_child_state_fn, root_state=(self._root_node, root_state), k=k) # Update probabilities and remove _TrieNode parts of the states. client_beam_nodes = [] for beam_node in randomizer_beam_nodes: leaf_node, client_state = beam_node.output log_sampled_mass = leaf_node.initial_log_mass_if_not_sampled() leaf_node.mark_mass_sampled(log_sampled_mass) client_beam_nodes.append(beam_node._replace(output=client_state)) self._exhausted = self._root_node.exhausted() return client_beam_nodes class NormalRandomizer(Randomizer): """A randomizer where all sequences of choices are independent. As opposed to a UniqueRandomizer, a NormalRandomizer can return duplicate sequences of choices. This does not keep track of the fraction of the search space that was sampled. Thus, fraction_sampled() returns a sentinel value of -1.0. 
""" def __init__(self) -> None: """Initializes a NormalRandomizer object.""" super(NormalRandomizer, self).__init__() self._log_probability_sum = 0.0 def sample_distribution( self, probability_distribution: Union[np.ndarray, List[float], None]) -> int: """Samples from a given probability distribution.""" index = int(np.random.choice(np.arange(len(probability_distribution)), p=probability_distribution)) self._log_probability_sum += math.log(probability_distribution[index]) return index def mark_sequence_complete(self) -> float: """Used to mark a complete sequence of choices. Returns: The log probability of the finished sequence, with respect to the initial (given) probability distribution. """ result = self._log_probability_sum self._log_probability_sum = 0.0 return result def fraction_sampled(self) -> float: """Returns a sentinel value of -1.0. See class docstring.""" return -1.0 def needs_probabilities(self) -> bool: """Returns whether the current node requires probabilities (always True).""" return True
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.telemetry.internals; import org.apache.kafka.common.MetricName; /** * {@code MetricNamingStrategy} provides a strategy pattern-based means of converting from an * implementation-specific metric name (e.g. Kafka {@link MetricName}) representing a * particular metric name and associated tags into a canonical {@link MetricKey} * (name and tags) representation. * * <p> * * Each strategy may define its own conventions for how the resulting metric should be named, * including things such conforming name and tags to use specific casing and separators for * different parts of the metric name. * * <p> * * In general, a {@code MetricNamingStrategy} implementation is closely managed by another entity, * referred to as the "telemetry reporter", as that reporter handles the conversion between different * representations of metric names and keys. * * <p> * * This class is primarily used by the telemetry reporter, {@link MetricsCollector}, and * {@link MetricsEmitter} layers. */ public interface MetricNamingStrategy<T> { /** * Converts the given metric name into a {@link MetricKey} representation. 
* * @param metricName Implementation-specific metric * @return {@link MetricKey} */ MetricKey metricKey(T metricName); /** * Creates a derived {@link MetricKey} from an existing {@link MetricKey}. * * <p> * * Some metrics may include multiple components derived from the same underlying source * of data (e.g. a Meter that exposes multiple rates and a counter) in which case it may * be desirable to create a new metric key derived from the primary one, with a different * name for each component of the metric. * * <p> * * Some metrics may be derived from others by the collector itself. For example, a delta * metric might be created from a cumulative counter. * * <p> * * This method exists so each strategy can define its own convention for how to name * derived metrics keys. * * <p> * * The derived key should have the same tags as the input key, and its name new name * will typically be composed of the input key name and the component name. * * @param key Input {@link MetricKey} used to construct the derived key * @param derivedComponent Name to use for the derived component of the input metric * @return Derived {@link MetricKey} with a new metric name composed of the input key * name and the additional name */ MetricKey derivedMetricKey(MetricKey key, String derivedComponent); }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/telemetry/internals/MetricNamingStrategy.java
# -*- coding: utf-8 -*- # This coding header is significant for tests, as the debug view is parsing # files to search for such a header to decode the source file content from __future__ import unicode_literals import importlib import inspect import os import re import sys import tempfile from unittest import skipIf from django.core import mail from django.core.files.uploadedfile import SimpleUploadedFile from django.core.urlresolvers import reverse from django.db import DatabaseError, connection from django.template import TemplateDoesNotExist from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.utils import LoggingCaptureMixin from django.utils import six from django.utils.encoding import force_bytes, force_text from django.utils.functional import SimpleLazyObject from django.views.debug import ( CallableSettingWrapper, ExceptionReporter, technical_500_response, ) from .. import BrokenException, except_args from ..views import ( custom_exception_reporter_filter_view, multivalue_dict_key_error, non_sensitive_view, paranoid_view, sensitive_args_function_caller, sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view, ) if six.PY3: from .py3_test_debug import Py3ExceptionReporterTests # NOQA class CallableSettingWrapperTests(SimpleTestCase): """ Unittests for CallableSettingWrapper """ def test_repr(self): class WrappedCallable(object): def __repr__(self): return "repr from the wrapped callable" def __call__(self): pass actual = repr(CallableSettingWrapper(WrappedCallable())) self.assertEqual(actual, "repr from the wrapped callable") @override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls") class DebugViewTests(LoggingCaptureMixin, SimpleTestCase): def test_files(self): response = self.client.get('/raises/') self.assertEqual(response.status_code, 500) data = { 'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'), } response = self.client.post('/raises/', data) self.assertContains(response, 
'file_data.txt', status_code=500) self.assertNotContains(response, 'haha', status_code=500) def test_400(self): # Ensure that when DEBUG=True, technical_500_template() is called. response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) # Ensure no 403.html template exists to test the default case. @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', }]) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) # Set up a test 403.html template. @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'OPTIONS': { 'loaders': [ ('django.template.loaders.locmem.Loader', { '403.html': 'This is a test template for a 403 error ({{ exception }}).', }), ], }, }]) def test_403_template(self): response = self.client.get('/raises403/') self.assertContains(response, 'test template', status_code=403) self.assertContains(response, '(Insufficient Permissions).', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) def test_raised_404(self): response = self.client.get('/views/raises404/') self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404) def test_404_not_in_urls(self): response = self.client.get('/not-in-urls') self.assertNotContains(response, "Raised by:", status_code=404) self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404) def test_technical_404(self): response = self.client.get('/views/technical404/') self.assertContains(response, "Raised by:", status_code=404) self.assertContains(response, "view_tests.views.technical404", status_code=404) def test_classbased_technical_404(self): response = self.client.get('/views/classbased404/') self.assertContains(response, "Raised by:", status_code=404) self.assertContains(response, 
"view_tests.views.Http404View", status_code=404) def test_view_exceptions(self): for n in range(len(except_args)): self.assertRaises(BrokenException, self.client.get, reverse('view_exception', args=(n,))) def test_non_l10ned_numeric_ids(self): """ Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized. """ with self.settings(DEBUG=True, USE_L10N=True): response = self.client.get('/raises500/') # We look for a HTML fragment of the form # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"' self.assertContains(response, '<div class="context" id="', status_code=500) match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content) self.assertIsNotNone(match) id_repr = match.group('id') self.assertFalse(re.search(b'[^c0-9]', id_repr), "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr) def test_template_exceptions(self): for n in range(len(except_args)): try: self.client.get(reverse('template_exception', args=(n,))) except Exception: raising_loc = inspect.trace()[-1][-2][0].strip() self.assertNotEqual(raising_loc.find('raise BrokenException'), -1, "Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" % raising_loc) def test_template_loader_postmortem(self): """Tests for not existing file""" template_name = "notfound.html" with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile: tempdir = os.path.dirname(tmpfile.name) template_path = os.path.join(tempdir, template_name) with override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [tempdir], }]): response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name})) self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2) def test_no_template_source_loaders(self): """ Make sure if you don't specify a template, the debug view doesn't blow up. 
""" self.assertRaises(TemplateDoesNotExist, self.client.get, '/render_no_template/') @override_settings(ROOT_URLCONF='view_tests.default_urls') def test_default_urlconf_template(self): """ Make sure that the default urlconf template is shown shown instead of the technical 404 page, if the user has not altered their url conf yet. """ response = self.client.get('/') self.assertContains( response, "<h2>Congratulations on your first Django-powered page.</h2>" ) @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls') def test_regression_21530(self): """ Regression test for bug #21530. If the admin app include is replaced with exactly one url pattern, then the technical 404 template should be displayed. The bug here was that an AttributeError caused a 500 response. """ response = self.client.get('/') self.assertContains( response, "Page not found <span>(404)</span>", status_code=404 ) class DebugViewQueriesAllowedTests(SimpleTestCase): # May need a query to initialize MySQL connection allow_database_queries = True def test_handle_db_exception(self): """ Ensure the debug view works when a database exception is raised by performing an invalid query and passing the exception to the debug view. """ with connection.cursor() as cursor: try: cursor.execute('INVALID SQL') except DatabaseError: exc_info = sys.exc_info() rf = RequestFactory() response = technical_500_response(rf.get('/'), *exc_info) self.assertContains(response, 'OperationalError at /', status_code=500) @override_settings( DEBUG=True, ROOT_URLCONF="view_tests.urls", # No template directories are configured, so no templates will be found. TEMPLATES=[{ 'BACKEND': 'django.template.backends.dummy.TemplateStrings', }], ) class NonDjangoTemplatesDebugViewTests(SimpleTestCase): def test_400(self): # Ensure that when DEBUG=True, technical_500_template() is called. 
response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) def test_template_not_found_error(self): # Raises a TemplateDoesNotExist exception and shows the debug view. url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"}) response = self.client.get(url) self.assertContains(response, '<div class="context" id="', status_code=500) class ExceptionReporterTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>ValueError</h1>', html) self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) 
self.assertNotIn('<th>Request URL:</th>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_eol_support(self): """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers""" LINES = list('print %d' % i for i in range(1, 6)) reporter = ExceptionReporter(None, None, None, None) for newline in ['\n', '\r\n', '\r']: fd, filename = tempfile.mkstemp(text=False) os.write(fd, force_bytes(newline.join(LINES) + newline)) os.close(fd) try: self.assertEqual( reporter._get_lines_from_file(filename, 3, 2), (1, LINES[1:3], LINES[3], LINES[4:]) ) finally: os.unlink(filename) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertIn('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertIn('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) 
self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertIn('<h1>Report</h1>', html) self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_non_utf8_values_handling(self): "Non-UTF-8 exceptions/values should not make the output generation choke." try: class NonUtf8Output(Exception): def __repr__(self): return b'EXC\xe9EXC' somevar = b'VAL\xe9VAL' # NOQA raise NonUtf8Output() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('VAL\\xe9VAL', html) self.assertIn('EXC\\xe9EXC', html) def test_unprintable_values_handling(self): "Unprintable values should not make the output generation choke." try: class OomOutput(object): def __repr__(self): raise MemoryError('OOM') oomvalue = OomOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<td class="code"><pre>Error in formatting', html) def test_too_large_values_handling(self): "Large values should not create a large HTML." 
large = 256 * 1024 repr_of_str_adds = len(repr('')) try: class LargeOutput(object): def __repr__(self): return repr('A' * large) largevalue = LargeOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb self.assertIn('&lt;trimmed %d bytes string&gt;' % (large + repr_of_str_adds,), html) @skipIf(six.PY2, 'Bug manifests on PY3 only') def test_unfrozen_importlib(self): """ importlib is not a frozen app, but its loader thinks it's frozen which results in an ImportError on Python 3. Refs #21443. """ try: request = self.rf.get('/test_view/') importlib.import_module('abc.def.invalid.name') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h1>ImportError at /test_view/</h1>', html) def test_ignore_traceback_evaluation_exceptions(self): """ Don't trip over exceptions generated by crafted objects when evaluating them while cleansing (#24455). 
""" class BrokenEvaluation(Exception): pass def broken_setup(): raise BrokenEvaluation request = self.rf.get('/test_view/') broken_lazy = SimpleLazyObject(broken_setup) try: bool(broken_lazy) except BrokenEvaluation: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) try: html = reporter.get_traceback_html() except BrokenEvaluation: self.fail("Broken evaluation in traceback is not caught.") self.assertIn( "BrokenEvaluation", html, "Evaluation exception reason not mentioned in traceback" ) class PlainTextReportTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError at /test_view/', text) self.assertIn("Can't find my keys", text) self.assertIn('Request Method:', text) self.assertIn('Request URL:', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback:', text) self.assertIn('Request information:', text) self.assertNotIn('Request data not supplied', text) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError', text) self.assertIn("Can't find my keys", text) self.assertNotIn('Request Method:', text) self.assertNotIn('Request URL:', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback:', text) self.assertIn('Request data not supplied', text) def test_no_exception(self): "An exception report can be generated for just a 
request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) reporter.get_traceback_text() def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) reporter.get_traceback_text() def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) reporter.get_traceback_text() class ExceptionReportTestMixin(object): # Mixin used in the ExceptionReporterFilterTests and # AjaxResponseExceptionReporterFilter tests below breakfast_data = {'sausage-key': 'sausage-value', 'baked-beans-key': 'baked-beans-value', 'hash-brown-key': 'hash-brown-value', 'bacon-key': 'bacon-value'} def verify_unsafe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that potentially sensitive info are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # All variables are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertContains(response, k, status_code=500) self.assertContains(response, v, status_code=500) def verify_safe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that certain sensitive info are not displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Non-sensitive variable's name and value are shown. 
self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) # Sensitive variable's name is shown but not its value. self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # Non-sensitive POST parameters' values are shown. self.assertContains(response, 'baked-beans-value', status_code=500) self.assertContains(response, 'hash-brown-value', status_code=500) # Sensitive POST parameters' values are not shown. self.assertNotContains(response, 'sausage-value', status_code=500) self.assertNotContains(response, 'bacon-value', status_code=500) def verify_paranoid_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that no variables or POST parameters are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Show variable names but not their values. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertNotContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # No POST parameters' values are shown. self.assertNotContains(response, v, status_code=500) def verify_unsafe_email(self, view, check_for_POST_params=True): """ Asserts that potentially sensitive info are displayed in the email report. 
""" with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body_plain = force_text(email.body) self.assertNotIn('cooked_eggs', body_plain) self.assertNotIn('scrambled', body_plain) self.assertNotIn('sauce', body_plain) self.assertNotIn('worcestershire', body_plain) # Frames vars are shown in html email reports. body_html = force_text(email.alternatives[0][0]) self.assertIn('cooked_eggs', body_html) self.assertIn('scrambled', body_html) self.assertIn('sauce', body_html) self.assertIn('worcestershire', body_html) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertIn(k, body_plain) self.assertIn(v, body_plain) self.assertIn(k, body_html) self.assertIn(v, body_html) def verify_safe_email(self, view, check_for_POST_params=True): """ Asserts that certain sensitive info are not displayed in the email report. """ with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body_plain = force_text(email.body) self.assertNotIn('cooked_eggs', body_plain) self.assertNotIn('scrambled', body_plain) self.assertNotIn('sauce', body_plain) self.assertNotIn('worcestershire', body_plain) # Frames vars are shown in html email reports. body_html = force_text(email.alternatives[0][0]) self.assertIn('cooked_eggs', body_html) self.assertIn('scrambled', body_html) self.assertIn('sauce', body_html) self.assertNotIn('worcestershire', body_html) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. 
self.assertIn(k, body_plain) # Non-sensitive POST parameters' values are shown. self.assertIn('baked-beans-value', body_plain) self.assertIn('hash-brown-value', body_plain) self.assertIn('baked-beans-value', body_html) self.assertIn('hash-brown-value', body_html) # Sensitive POST parameters' values are not shown. self.assertNotIn('sausage-value', body_plain) self.assertNotIn('bacon-value', body_plain) self.assertNotIn('sausage-value', body_html) self.assertNotIn('bacon-value', body_html) def verify_paranoid_email(self, view): """ Asserts that no variables or POST parameters are displayed in the email report. """ with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body = force_text(email.body) self.assertNotIn('cooked_eggs', body) self.assertNotIn('scrambled', body) self.assertNotIn('sauce', body) self.assertNotIn('worcestershire', body) for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertIn(k, body) # No POST parameters' values are shown. self.assertNotIn(v, body) @override_settings(ROOT_URLCONF='view_tests.urls') class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Ensure that sensitive information can be filtered out of error reports. Refs #14614. """ rf = RequestFactory() def test_non_sensitive_request(self): """ Ensure that everything (request info and frame variables) can bee seen in the default error reports for non-sensitive requests. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) def test_sensitive_request(self): """ Ensure that sensitive POST parameters and frame variables cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view) self.verify_unsafe_email(sensitive_view) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view) self.verify_safe_email(sensitive_view) def test_paranoid_request(self): """ Ensure that no POST parameters and frame variables can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view) self.verify_unsafe_email(paranoid_view) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view) self.verify_paranoid_email(paranoid_view) def test_multivalue_dict_key_error(self): """ #21098 -- Ensure that sensitive POST parameters cannot be seen in the error reports for if request.POST['nonexistent_key'] throws an error. """ with self.settings(DEBUG=True): self.verify_unsafe_response(multivalue_dict_key_error) self.verify_unsafe_email(multivalue_dict_key_error) with self.settings(DEBUG=False): self.verify_safe_response(multivalue_dict_key_error) self.verify_safe_email(multivalue_dict_key_error) def test_custom_exception_reporter_filter(self): """ Ensure that it's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) def test_sensitive_method(self): """ Ensure that the sensitive_variables decorator works with object methods. Refs #18379. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False) self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_method_view, check_for_POST_params=False) self.verify_safe_email(sensitive_method_view, check_for_POST_params=False) def test_sensitive_function_arguments(self): """ Ensure that sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as arguments to the decorated function. Refs #19453. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_args_function_caller) self.verify_unsafe_email(sensitive_args_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False) def test_sensitive_function_keyword_arguments(self): """ Ensure that sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as keyword arguments to the decorated function. Refs #19453. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_kwargs_function_caller) self.verify_unsafe_email(sensitive_kwargs_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False) def test_callable_settings(self): """ Callable settings should not be evaluated in the debug page (#21345). """ def callable_setting(): return "This should not be displayed" with self.settings(DEBUG=True, FOOBAR=callable_setting): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_callable_settings_forbidding_to_set_attributes(self): """ Callable settings which forbid to set attributes should not break the debug page (#23070). """ class CallableSettingWithSlots(object): __slots__ = [] def __call__(self): return "This should not be displayed" with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_dict_setting_with_non_str_key(self): """ A dict setting containing a non-string key should not break the debug page (#12744). """ with self.settings(DEBUG=True, FOOBAR={42: None}): response = self.client.get('/raises500/') self.assertContains(response, 'FOOBAR', status_code=500) def test_sensitive_settings(self): """ The debug page should not show some sensitive settings (password, secret key, ...). 
""" sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: with self.settings(DEBUG=True, **{setting: "should not be displayed"}): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) def test_settings_with_sensitive_keys(self): """ The debug page should filter out some sensitive information found in dict settings. """ sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: FOOBAR = { setting: "should not be displayed", 'recursive': {setting: "should not be displayed"}, } with self.settings(DEBUG=True, FOOBAR=FOOBAR): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Ensure that sensitive information can be filtered out of error reports. Here we specifically test the plain text 500 debug-only error page served when it has been detected the request was sent by JS code. We don't check for (non)existence of frames vars in the traceback information section of the response content because we don't include them in these error pages. Refs #14614. """ rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest') def test_non_sensitive_request(self): """ Ensure that request info can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) def test_sensitive_request(self): """ Ensure that sensitive POST parameters cannot be seen in the default error reports for sensitive requests. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view, check_for_vars=False) def test_paranoid_request(self): """ Ensure that no POST parameters can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view, check_for_vars=False) def test_custom_exception_reporter_filter(self): """ Ensure that it's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
unknown
codeparrot/codeparrot-clean
# protoc plugin to map from FileDescriptorProtos to a tools.type_whisperer.Types # proto. This is the type information for a single .proto, consumed by # typedb_gen.py. from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor from tools.type_whisperer.types_pb2 import Types from udpa.annotations import migrate_pb2 from udpa.annotations import status_pb2 class TypeWhispererVisitor(visitor.Visitor): """Visitor to compute type information from a FileDescriptor proto. See visitor.Visitor for visitor method docs comments. """ def __init__(self): super(TypeWhispererVisitor, self).__init__() self._types = Types() def VisitService(self, service_proto, type_context): pass def VisitEnum(self, enum_proto, type_context): type_desc = self._types.types[type_context.name] type_desc.next_version_upgrade = any(v.options.deprecated for v in enum_proto.value) type_desc.deprecated_type = type_context.deprecated def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): type_desc = self._types.types[type_context.name] type_desc.map_entry = msg_proto.options.map_entry type_desc.deprecated_type = type_context.deprecated type_deps = set([]) for f in msg_proto.field: if f.type_name.startswith('.'): type_deps.add(f.type_name[1:]) if f.options.deprecated: type_desc.next_version_upgrade = True type_desc.type_dependencies.extend(type_deps) def VisitFile(self, file_proto, type_context, services, msgs, enums): next_version_package = '' if file_proto.options.HasExtension(migrate_pb2.file_migrate): next_version_package = file_proto.options.Extensions[migrate_pb2.file_migrate].move_to_package for t in self._types.types.values(): t.qualified_package = file_proto.package t.proto_path = file_proto.name t.active = file_proto.options.Extensions[ status_pb2.file_status].package_version_status == status_pb2.ACTIVE if next_version_package: t.next_version_package = next_version_package t.next_version_upgrade = True # Return in text proto format. 
This makes things easier to debug, these # don't need to be compact as they are only interim build artifacts. return str(self._types) def Main(): plugin.Plugin([ plugin.DirectOutputDescriptor('.types.pb_text', TypeWhispererVisitor), ]) if __name__ == '__main__': Main()
unknown
codeparrot/codeparrot-clean
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the "Elastic License * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side * Public License v 1"; you may not use this file except in compliance with, at * your election, the "Elastic License 2.0", the "GNU Affero General Public * License v3.0 only", or the "Server Side Public License, v 1". */ package org.elasticsearch.benchmark.index.codec.tsdb.internal; import java.util.Arrays; import java.util.Random; public class ConstantIntegerSupplier extends AbstractLongArraySupplier { private final Random random; public ConstantIntegerSupplier(int seed, int bitsPerValue, int size) { super(bitsPerValue, size); this.random = new Random(seed); } @Override public long[] get() { long[] data = new long[size]; long value = bitsPerValue == 64 ? random.nextLong() : random.nextLong(1L << bitsPerValue); Arrays.fill(data, value); return data; } }
java
github
https://github.com/elastic/elasticsearch
benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/ConstantIntegerSupplier.java
#!/usr/bin/python # Copyright (C) 2016 Bitergia # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Authors: # Daniel Izquierdo Cortazar <dizquierdo@bitergia.com> # import pandas import scipy import datetime class Format(object): """ Library that allows to format dataframes to be later enriched This class is the first step in the enrichment process of data. Although this can be used alone for other purposes, its main goal consists of providing well formated [missing fields, string dates, removal of not needed fields] for the following steps of the enrichment process. This data format and cleaning process is done due to inconsistencies and missing fields that may appear when reading information. 
""" def fill_missing_fields(self, data, columns): """ This method fills with 0's missing fields :param data: original Pandas dataframe :param columns: list of columns to be filled in the DataFrame :type data: pandas.DataFrame :type columns: list of strings :returns: Pandas dataframe with missing fields filled with 0's :rtype: pandas.DataFrame """ for column in columns: if column not in data.columns: data[column] = scipy.zeros(len(data)) return data def update_field_names(self, data, matching): """ This method updates the names of the fields according to matching :param data: original Pandas dataframe :param matching: dictionary of matchings between old and new values :type data: pandas.DataFrame :type matching: dictionary :returns: Pandas dataframe with updated names :rtype: pandas.DataFrame """ for key in matching.keys(): if key in data.columns: data.rename(columns={key:matching[key]}) return data def format_dates(self, data, columns): """ This method translates columns values into datetime objects :param data: original Pandas dataframe :param columns: list of columns to cast the date to a datetime object :type data: pandas.DataFrame :type columns: list of strings :returns: Pandas dataframe with updated 'columns' with datetime objects :rtype: pandas.DataFrame """ for column in columns: if column in data.columns: data[column] = pandas.to_datetime(data[column]) return data def remove_columns(self, data, columns): """ This method removes columns in data :param data: original Pandas dataframe :param columns: list of columns to remove :type data: pandas.DataFrame :type columns: list of strings :returns: Pandas dataframe with removed columns :rtype: pandas.DataFrame """ for column in columns: if column in data.columns: data = data.drop(column, axis=1) return data
unknown
codeparrot/codeparrot-clean
import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import AxesGrid def get_demo_image(): import numpy as np from matplotlib.cbook import get_sample_data f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False) z = np.load(f) # z is a numpy array of 15x15 return z, (-3,4,-4,3) def demo_bottom_cbar(fig): """ A grid of 2x2 images with a colorbar for each column. """ grid = AxesGrid(fig, 121, # similar to subplot(132) nrows_ncols = (2, 2), axes_pad = 0.10, share_all=True, label_mode = "1", cbar_location = "bottom", cbar_mode="edge", cbar_pad = 0.25, cbar_size = "15%", direction="column" ) Z, extent = get_demo_image() cmaps = [plt.get_cmap("autumn"), plt.get_cmap("summer")] for i in range(4): im = grid[i].imshow(Z, extent=extent, interpolation="nearest", cmap=cmaps[i//2]) if i % 2: cbar = grid.cbar_axes[i//2].colorbar(im) for cax in grid.cbar_axes: cax.toggle_label(True) cax.axis[cax.orientation].set_label("Bar") # This affects all axes as share_all = True. grid.axes_llc.set_xticks([-2, 0, 2]) grid.axes_llc.set_yticks([-2, 0, 2]) def demo_right_cbar(fig): """ A grid of 2x2 images. Each row has its own colorbar. """ grid = AxesGrid(F, 122, # similar to subplot(122) nrows_ncols = (2, 2), axes_pad = 0.10, label_mode = "1", share_all = True, cbar_location="right", cbar_mode="edge", cbar_size="7%", cbar_pad="2%", ) Z, extent = get_demo_image() cmaps = [plt.get_cmap("spring"), plt.get_cmap("winter")] for i in range(4): im = grid[i].imshow(Z, extent=extent, interpolation="nearest", cmap=cmaps[i//2]) if i % 2: grid.cbar_axes[i//2].colorbar(im) for cax in grid.cbar_axes: cax.toggle_label(True) cax.axis[cax.orientation].set_label('Foo') # This affects all axes because we set share_all = True. grid.axes_llc.set_xticks([-2, 0, 2]) grid.axes_llc.set_yticks([-2, 0, 2]) if 1: F = plt.figure(1, (5.5, 2.5)) F.subplots_adjust(left=0.05, right=0.93) demo_bottom_cbar(F) demo_right_cbar(F) plt.draw() plt.show()
unknown
codeparrot/codeparrot-clean
#ifndef NPY_SIMD #error "Not a standalone header" #endif #ifndef _NPY_SIMD_AVX512_ARITHMETIC_H #define _NPY_SIMD_AVX512_ARITHMETIC_H #include "../avx2/utils.h" #include "../sse/utils.h" /*************************** * Addition ***************************/ // non-saturated #ifdef NPY_HAVE_AVX512BW #define npyv_add_u8 _mm512_add_epi8 #define npyv_add_u16 _mm512_add_epi16 #else NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_add_u8, _mm256_add_epi8) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_add_u16, _mm256_add_epi16) #endif #define npyv_add_s8 npyv_add_u8 #define npyv_add_s16 npyv_add_u16 #define npyv_add_u32 _mm512_add_epi32 #define npyv_add_s32 _mm512_add_epi32 #define npyv_add_u64 _mm512_add_epi64 #define npyv_add_s64 _mm512_add_epi64 #define npyv_add_f32 _mm512_add_ps #define npyv_add_f64 _mm512_add_pd // saturated #ifdef NPY_HAVE_AVX512BW #define npyv_adds_u8 _mm512_adds_epu8 #define npyv_adds_s8 _mm512_adds_epi8 #define npyv_adds_u16 _mm512_adds_epu16 #define npyv_adds_s16 _mm512_adds_epi16 #else NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_adds_u8, _mm256_adds_epu8) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_adds_s8, _mm256_adds_epi8) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_adds_u16, _mm256_adds_epu16) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_adds_s16, _mm256_adds_epi16) #endif // TODO: rest, after implement Packs intrins /*************************** * Subtraction ***************************/ // non-saturated #ifdef NPY_HAVE_AVX512BW #define npyv_sub_u8 _mm512_sub_epi8 #define npyv_sub_u16 _mm512_sub_epi16 #else NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_sub_u8, _mm256_sub_epi8) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_sub_u16, _mm256_sub_epi16) #endif #define npyv_sub_s8 npyv_sub_u8 #define npyv_sub_s16 npyv_sub_u16 #define npyv_sub_u32 _mm512_sub_epi32 #define npyv_sub_s32 _mm512_sub_epi32 #define npyv_sub_u64 _mm512_sub_epi64 #define npyv_sub_s64 _mm512_sub_epi64 #define npyv_sub_f32 _mm512_sub_ps #define npyv_sub_f64 _mm512_sub_pd // saturated #ifdef NPY_HAVE_AVX512BW #define npyv_subs_u8 
_mm512_subs_epu8 #define npyv_subs_s8 _mm512_subs_epi8 #define npyv_subs_u16 _mm512_subs_epu16 #define npyv_subs_s16 _mm512_subs_epi16 #else NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_subs_u8, _mm256_subs_epu8) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_subs_s8, _mm256_subs_epi8) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_subs_u16, _mm256_subs_epu16) NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_subs_s16, _mm256_subs_epi16) #endif // TODO: rest, after implement Packs intrins /*************************** * Multiplication ***************************/ // non-saturated #ifdef NPY_HAVE_AVX512BW NPY_FINLINE __m512i npyv_mul_u8(__m512i a, __m512i b) { __m512i even = _mm512_mullo_epi16(a, b); __m512i odd = _mm512_mullo_epi16(_mm512_srai_epi16(a, 8), _mm512_srai_epi16(b, 8)); odd = _mm512_slli_epi16(odd, 8); return _mm512_mask_blend_epi8(0xAAAAAAAAAAAAAAAA, even, odd); } #else NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_mul_u8, npyv256_mul_u8) #endif #ifdef NPY_HAVE_AVX512BW #define npyv_mul_u16 _mm512_mullo_epi16 #else NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_mul_u16, _mm256_mullo_epi16) #endif #define npyv_mul_s8 npyv_mul_u8 #define npyv_mul_s16 npyv_mul_u16 #define npyv_mul_u32 _mm512_mullo_epi32 #define npyv_mul_s32 _mm512_mullo_epi32 #define npyv_mul_f32 _mm512_mul_ps #define npyv_mul_f64 _mm512_mul_pd // saturated // TODO: after implement Packs intrins /*************************** * Integer Division ***************************/ // See simd/intdiv.h for more clarification // divide each unsigned 8-bit element by divisor NPY_FINLINE npyv_u8 npyv_divc_u8(npyv_u8 a, const npyv_u8x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); #ifdef NPY_HAVE_AVX512BW const __m512i bmask = _mm512_set1_epi32(0x00FF00FF); const __m512i shf1b = _mm512_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); const __m512i shf2b = _mm512_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); // high part of unsigned multiplication __m512i mulhi_even = 
_mm512_mullo_epi16(_mm512_and_si512(a, bmask), divisor.val[0]); mulhi_even = _mm512_srli_epi16(mulhi_even, 8); __m512i mulhi_odd = _mm512_mullo_epi16(_mm512_srli_epi16(a, 8), divisor.val[0]); __m512i mulhi = _mm512_mask_mov_epi8(mulhi_even, 0xAAAAAAAAAAAAAAAA, mulhi_odd); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m512i q = _mm512_sub_epi8(a, mulhi); q = _mm512_and_si512(_mm512_srl_epi16(q, shf1), shf1b); q = _mm512_add_epi8(mulhi, q); q = _mm512_and_si512(_mm512_srl_epi16(q, shf2), shf2b); return q; #else const __m256i bmask = _mm256_set1_epi32(0x00FF00FF); const __m256i shf1b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf1)); const __m256i shf2b = _mm256_set1_epi8(0xFFU >> _mm_cvtsi128_si32(shf2)); const __m512i shf2bw= npyv512_combine_si256(shf2b, shf2b); const __m256i mulc = npyv512_lower_si256(divisor.val[0]); //// lower 256-bit __m256i lo_a = npyv512_lower_si256(a); // high part of unsigned multiplication __m256i mulhi_even = _mm256_mullo_epi16(_mm256_and_si256(lo_a, bmask), mulc); mulhi_even = _mm256_srli_epi16(mulhi_even, 8); __m256i mulhi_odd = _mm256_mullo_epi16(_mm256_srli_epi16(lo_a, 8), mulc); __m256i mulhi = _mm256_blendv_epi8(mulhi_odd, mulhi_even, bmask); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m256i lo_q = _mm256_sub_epi8(lo_a, mulhi); lo_q = _mm256_and_si256(_mm256_srl_epi16(lo_q, shf1), shf1b); lo_q = _mm256_add_epi8(mulhi, lo_q); lo_q = _mm256_srl_epi16(lo_q, shf2); // no sign extend //// higher 256-bit __m256i hi_a = npyv512_higher_si256(a); // high part of unsigned multiplication mulhi_even = _mm256_mullo_epi16(_mm256_and_si256(hi_a, bmask), mulc); mulhi_even = _mm256_srli_epi16(mulhi_even, 8); mulhi_odd = _mm256_mullo_epi16(_mm256_srli_epi16(hi_a, 8), mulc); mulhi = _mm256_blendv_epi8(mulhi_odd, mulhi_even, bmask); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m256i hi_q = _mm256_sub_epi8(hi_a, mulhi); hi_q = _mm256_and_si256(_mm256_srl_epi16(hi_q, shf1), shf1b); hi_q = _mm256_add_epi8(mulhi, hi_q); hi_q = 
_mm256_srl_epi16(hi_q, shf2); // no sign extend return _mm512_and_si512(npyv512_combine_si256(lo_q, hi_q), shf2bw); // extend sign #endif } // divide each signed 8-bit element by divisor (round towards zero) NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor); NPY_FINLINE npyv_s8 npyv_divc_s8(npyv_s8 a, const npyv_s8x3 divisor) { __m512i divc_even = npyv_divc_s16(npyv_shri_s16(npyv_shli_s16(a, 8), 8), divisor); __m512i divc_odd = npyv_divc_s16(npyv_shri_s16(a, 8), divisor); divc_odd = npyv_shli_s16(divc_odd, 8); #ifdef NPY_HAVE_AVX512BW return _mm512_mask_mov_epi8(divc_even, 0xAAAAAAAAAAAAAAAA, divc_odd); #else const __m512i bmask = _mm512_set1_epi32(0x00FF00FF); return npyv_select_u8(bmask, divc_even, divc_odd); #endif } // divide each unsigned 16-bit element by divisor NPY_FINLINE npyv_u16 npyv_divc_u16(npyv_u16 a, const npyv_u16x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 #define NPYV__DIVC_U16(RLEN, A, MULC, R) \ mulhi = _mm##RLEN##_mulhi_epu16(A, MULC); \ R = _mm##RLEN##_sub_epi16(A, mulhi); \ R = _mm##RLEN##_srl_epi16(R, shf1); \ R = _mm##RLEN##_add_epi16(mulhi, R); \ R = _mm##RLEN##_srl_epi16(R, shf2); #ifdef NPY_HAVE_AVX512BW __m512i mulhi, q; NPYV__DIVC_U16(512, a, divisor.val[0], q) return q; #else const __m256i m = npyv512_lower_si256(divisor.val[0]); __m256i lo_a = npyv512_lower_si256(a); __m256i hi_a = npyv512_higher_si256(a); __m256i mulhi, lo_q, hi_q; NPYV__DIVC_U16(256, lo_a, m, lo_q) NPYV__DIVC_U16(256, hi_a, m, hi_q) return npyv512_combine_si256(lo_q, hi_q); #endif #undef NPYV__DIVC_U16 } // divide each signed 16-bit element by divisor (round towards zero) NPY_FINLINE npyv_s16 npyv_divc_s16(npyv_s16 a, const npyv_s16x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); // q = ((a + mulhi) >> sh1) - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign #define 
NPYV__DIVC_S16(RLEN, A, MULC, DSIGN, R) \ mulhi = _mm##RLEN##_mulhi_epi16(A, MULC); \ R = _mm##RLEN##_sra_epi16(_mm##RLEN##_add_epi16(A, mulhi), shf1); \ R = _mm##RLEN##_sub_epi16(R, _mm##RLEN##_srai_epi16(A, 15)); \ R = _mm##RLEN##_sub_epi16(_mm##RLEN##_xor_si##RLEN(R, DSIGN), DSIGN); #ifdef NPY_HAVE_AVX512BW __m512i mulhi, q; NPYV__DIVC_S16(512, a, divisor.val[0], divisor.val[2], q) return q; #else const __m256i m = npyv512_lower_si256(divisor.val[0]); const __m256i dsign = npyv512_lower_si256(divisor.val[2]); __m256i lo_a = npyv512_lower_si256(a); __m256i hi_a = npyv512_higher_si256(a); __m256i mulhi, lo_q, hi_q; NPYV__DIVC_S16(256, lo_a, m, dsign, lo_q) NPYV__DIVC_S16(256, hi_a, m, dsign, hi_q) return npyv512_combine_si256(lo_q, hi_q); #endif #undef NPYV__DIVC_S16 } // divide each unsigned 32-bit element by divisor NPY_FINLINE npyv_u32 npyv_divc_u32(npyv_u32 a, const npyv_u32x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); // high part of unsigned multiplication __m512i mulhi_even = _mm512_srli_epi64(_mm512_mul_epu32(a, divisor.val[0]), 32); __m512i mulhi_odd = _mm512_mul_epu32(_mm512_srli_epi64(a, 32), divisor.val[0]); __m512i mulhi = _mm512_mask_mov_epi32(mulhi_even, 0xAAAA, mulhi_odd); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m512i q = _mm512_sub_epi32(a, mulhi); q = _mm512_srl_epi32(q, shf1); q = _mm512_add_epi32(mulhi, q); q = _mm512_srl_epi32(q, shf2); return q; } // divide each signed 32-bit element by divisor (round towards zero) NPY_FINLINE npyv_s32 npyv_divc_s32(npyv_s32 a, const npyv_s32x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); // high part of signed multiplication __m512i mulhi_even = _mm512_srli_epi64(_mm512_mul_epi32(a, divisor.val[0]), 32); __m512i mulhi_odd = _mm512_mul_epi32(_mm512_srli_epi64(a, 32), divisor.val[0]); __m512i mulhi = _mm512_mask_mov_epi32(mulhi_even, 0xAAAA, mulhi_odd); // q = ((a + mulhi) >> sh1) 
- XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign __m512i q = _mm512_sra_epi32(_mm512_add_epi32(a, mulhi), shf1); q = _mm512_sub_epi32(q, _mm512_srai_epi32(a, 31)); q = _mm512_sub_epi32(_mm512_xor_si512(q, divisor.val[2]), divisor.val[2]); return q; } // returns the high 64 bits of unsigned 64-bit multiplication // xref https://stackoverflow.com/a/28827013 NPY_FINLINE npyv_u64 npyv__mullhi_u64(npyv_u64 a, npyv_u64 b) { __m512i lomask = npyv_setall_s64(0xffffffff); __m512i a_hi = _mm512_srli_epi64(a, 32); // a0l, a0h, a1l, a1h __m512i b_hi = _mm512_srli_epi64(b, 32); // b0l, b0h, b1l, b1h // compute partial products __m512i w0 = _mm512_mul_epu32(a, b); // a0l*b0l, a1l*b1l __m512i w1 = _mm512_mul_epu32(a, b_hi); // a0l*b0h, a1l*b1h __m512i w2 = _mm512_mul_epu32(a_hi, b); // a0h*b0l, a1h*b0l __m512i w3 = _mm512_mul_epu32(a_hi, b_hi); // a0h*b0h, a1h*b1h // sum partial products __m512i w0h = _mm512_srli_epi64(w0, 32); __m512i s1 = _mm512_add_epi64(w1, w0h); __m512i s1l = _mm512_and_si512(s1, lomask); __m512i s1h = _mm512_srli_epi64(s1, 32); __m512i s2 = _mm512_add_epi64(w2, s1l); __m512i s2h = _mm512_srli_epi64(s2, 32); __m512i hi = _mm512_add_epi64(w3, s1h); hi = _mm512_add_epi64(hi, s2h); return hi; } // divide each unsigned 64-bit element by a divisor NPY_FINLINE npyv_u64 npyv_divc_u64(npyv_u64 a, const npyv_u64x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); const __m128i shf2 = _mm512_castsi512_si128(divisor.val[2]); // high part of unsigned multiplication __m512i mulhi = npyv__mullhi_u64(a, divisor.val[0]); // floor(a/d) = (mulhi + ((a-mulhi) >> sh1)) >> sh2 __m512i q = _mm512_sub_epi64(a, mulhi); q = _mm512_srl_epi64(q, shf1); q = _mm512_add_epi64(mulhi, q); q = _mm512_srl_epi64(q, shf2); return q; } // divide each unsigned 64-bit element by a divisor (round towards zero) NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor) { const __m128i shf1 = _mm512_castsi512_si128(divisor.val[1]); // high part of unsigned 
multiplication __m512i mulhi = npyv__mullhi_u64(a, divisor.val[0]); // convert unsigned to signed high multiplication // mulhi - ((a < 0) ? m : 0) - ((m < 0) ? a : 0); __m512i asign = _mm512_srai_epi64(a, 63); __m512i msign = _mm512_srai_epi64(divisor.val[0], 63); __m512i m_asign = _mm512_and_si512(divisor.val[0], asign); __m512i a_msign = _mm512_and_si512(a, msign); mulhi = _mm512_sub_epi64(mulhi, m_asign); mulhi = _mm512_sub_epi64(mulhi, a_msign); // q = ((a + mulhi) >> sh1) - XSIGN(a) // trunc(a/d) = (q ^ dsign) - dsign __m512i q = _mm512_sra_epi64(_mm512_add_epi64(a, mulhi), shf1); q = _mm512_sub_epi64(q, asign); q = _mm512_sub_epi64(_mm512_xor_si512(q, divisor.val[2]), divisor.val[2]); return q; } /*************************** * Division ***************************/ // TODO: emulate integer division #define npyv_div_f32 _mm512_div_ps #define npyv_div_f64 _mm512_div_pd /*************************** * FUSED ***************************/ // multiply and add, a*b + c #define npyv_muladd_f32 _mm512_fmadd_ps #define npyv_muladd_f64 _mm512_fmadd_pd // multiply and subtract, a*b - c #define npyv_mulsub_f32 _mm512_fmsub_ps #define npyv_mulsub_f64 _mm512_fmsub_pd // negate multiply and add, -(a*b) + c #define npyv_nmuladd_f32 _mm512_fnmadd_ps #define npyv_nmuladd_f64 _mm512_fnmadd_pd // negate multiply and subtract, -(a*b) - c #define npyv_nmulsub_f32 _mm512_fnmsub_ps #define npyv_nmulsub_f64 _mm512_fnmsub_pd // multiply, add for odd elements and subtract even elements. // (a * b) -+ c #define npyv_muladdsub_f32 _mm512_fmaddsub_ps #define npyv_muladdsub_f64 _mm512_fmaddsub_pd /*************************** * Summation: Calculates the sum of all vector elements. 
* there are three ways to implement reduce sum for AVX512: * 1- split(256) /add /split(128) /add /hadd /hadd /extract * 2- shuff(cross) /add /shuff(cross) /add /shuff /add /shuff /add /extract * 3- _mm512_reduce_add_ps/pd * The first one is been widely used by many projects * * the second one is used by Intel Compiler, maybe because the * latency of hadd increased by (2-3) starting from Skylake-X which makes two * extra shuffles(non-cross) cheaper. check https://godbolt.org/z/s3G9Er for more info. * * The third one is almost the same as the second one but only works for * intel compiler/GCC 7.1/Clang 4, we still need to support older GCC. ***************************/ // reduce sum across vector #ifdef NPY_HAVE_AVX512F_REDUCE #define npyv_sum_u32 _mm512_reduce_add_epi32 #define npyv_sum_u64 _mm512_reduce_add_epi64 #define npyv_sum_f32 _mm512_reduce_add_ps #define npyv_sum_f64 _mm512_reduce_add_pd #else NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a) { __m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a)); __m128i quarter = _mm_add_epi32(_mm256_castsi256_si128(half), _mm256_extracti128_si256(half, 1)); quarter = _mm_hadd_epi32(quarter, quarter); return _mm_cvtsi128_si32(_mm_hadd_epi32(quarter, quarter)); } NPY_FINLINE npy_uint64 npyv_sum_u64(npyv_u64 a) { __m256i four = _mm256_add_epi64(npyv512_lower_si256(a), npyv512_higher_si256(a)); __m256i two = _mm256_add_epi64(four, _mm256_shuffle_epi32(four, _MM_SHUFFLE(1, 0, 3, 2))); __m128i one = _mm_add_epi64(_mm256_castsi256_si128(two), _mm256_extracti128_si256(two, 1)); return (npy_uint64)npyv128_cvtsi128_si64(one); } NPY_FINLINE float npyv_sum_f32(npyv_f32 a) { __m512 h64 = _mm512_shuffle_f32x4(a, a, _MM_SHUFFLE(3, 2, 3, 2)); __m512 sum32 = _mm512_add_ps(a, h64); __m512 h32 = _mm512_shuffle_f32x4(sum32, sum32, _MM_SHUFFLE(1, 0, 3, 2)); __m512 sum16 = _mm512_add_ps(sum32, h32); __m512 h16 = _mm512_permute_ps(sum16, _MM_SHUFFLE(1, 0, 3, 2)); __m512 sum8 = _mm512_add_ps(sum16, h16); __m512 h4 = 
_mm512_permute_ps(sum8, _MM_SHUFFLE(2, 3, 0, 1)); __m512 sum4 = _mm512_add_ps(sum8, h4); return _mm_cvtss_f32(_mm512_castps512_ps128(sum4)); } NPY_FINLINE double npyv_sum_f64(npyv_f64 a) { __m512d h64 = _mm512_shuffle_f64x2(a, a, _MM_SHUFFLE(3, 2, 3, 2)); __m512d sum32 = _mm512_add_pd(a, h64); __m512d h32 = _mm512_permutex_pd(sum32, _MM_SHUFFLE(1, 0, 3, 2)); __m512d sum16 = _mm512_add_pd(sum32, h32); __m512d h16 = _mm512_permute_pd(sum16, _MM_SHUFFLE(2, 3, 0, 1)); __m512d sum8 = _mm512_add_pd(sum16, h16); return _mm_cvtsd_f64(_mm512_castpd512_pd128(sum8)); } #endif // expand the source vector and performs sum reduce NPY_FINLINE npy_uint16 npyv_sumup_u8(npyv_u8 a) { #ifdef NPY_HAVE_AVX512BW __m512i eight = _mm512_sad_epu8(a, _mm512_setzero_si512()); __m256i four = _mm256_add_epi16(npyv512_lower_si256(eight), npyv512_higher_si256(eight)); #else __m256i lo_four = _mm256_sad_epu8(npyv512_lower_si256(a), _mm256_setzero_si256()); __m256i hi_four = _mm256_sad_epu8(npyv512_higher_si256(a), _mm256_setzero_si256()); __m256i four = _mm256_add_epi16(lo_four, hi_four); #endif __m128i two = _mm_add_epi16(_mm256_castsi256_si128(four), _mm256_extracti128_si256(four, 1)); __m128i one = _mm_add_epi16(two, _mm_unpackhi_epi64(two, two)); return (npy_uint16)_mm_cvtsi128_si32(one); } NPY_FINLINE npy_uint32 npyv_sumup_u16(npyv_u16 a) { const npyv_u16 even_mask = _mm512_set1_epi32(0x0000FFFF); __m512i even = _mm512_and_si512(a, even_mask); __m512i odd = _mm512_srli_epi32(a, 16); __m512i ff = _mm512_add_epi32(even, odd); return npyv_sum_u32(ff); } #endif // _NPY_SIMD_AVX512_ARITHMETIC_H
c
github
https://github.com/numpy/numpy
numpy/_core/src/common/simd/avx512/arithmetic.h
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( parse_iso8601, str_to_int, ) class CrackedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?cracked\.com/video_(?P<id>\d+)_[\da-z-]+\.html' _TESTS = [{ 'url': 'http://www.cracked.com/video_19070_if-animal-actors-got-e21-true-hollywood-stories.html', 'md5': '89b90b9824e3806ca95072c4d78f13f7', 'info_dict': { 'id': '19070', 'ext': 'mp4', 'title': 'If Animal Actors Got E! True Hollywood Stories', 'timestamp': 1404954000, 'upload_date': '20140710', } }, { # youtube embed 'url': 'http://www.cracked.com/video_19006_4-plot-holes-you-didnt-notice-in-your-favorite-movies.html', 'md5': 'ccd52866b50bde63a6ef3b35016ba8c7', 'info_dict': { 'id': 'EjI00A3rZD0', 'ext': 'mp4', 'title': "4 Plot Holes You Didn't Notice in Your Favorite Movies - The Spit Take", 'description': 'md5:c603708c718b796fe6079e2b3351ffc7', 'upload_date': '20140725', 'uploader_id': 'Cracked', 'uploader': 'Cracked', } }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) youtube_url = self._search_regex( r'<iframe[^>]+src="((?:https?:)?//www\.youtube\.com/embed/[^"]+)"', webpage, 'youtube url', default=None) if youtube_url: return self.url_result(youtube_url, 'Youtube') video_url = self._html_search_regex( [r'var\s+CK_vidSrc\s*=\s*"([^"]+)"', r'<video\s+src="([^"]+)"'], webpage, 'video URL') title = self._search_regex( [r'property="?og:title"?\s+content="([^"]+)"', r'class="?title"?>([^<]+)'], webpage, 'title') description = self._search_regex( r'name="?(?:og:)?description"?\s+content="([^"]+)"', webpage, 'description', default=None) timestamp = self._html_search_regex( r'"date"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False) if timestamp: timestamp = parse_iso8601(timestamp[:-6]) view_count = str_to_int(self._html_search_regex( r'<span\s+class="?views"? 
id="?viewCounts"?>([\d,\.]+) Views</span>', webpage, 'view count', fatal=False)) comment_count = str_to_int(self._html_search_regex( r'<span\s+id="?commentCounts"?>([\d,\.]+)</span>', webpage, 'comment count', fatal=False)) m = re.search(r'_(?P<width>\d+)X(?P<height>\d+)\.mp4$', video_url) if m: width = int(m.group('width')) height = int(m.group('height')) else: width = height = None return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'timestamp': timestamp, 'view_count': view_count, 'comment_count': comment_count, 'height': height, 'width': width, }
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from psycopg2 import OperationalError from openerp import SUPERUSER_ID from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.tools.translate import _ import openerp PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')] class procurement_group(osv.osv): ''' The procurement group class is used to group products together when computing procurements. (tasks, physical products, ...) The goal is that when you have one sale order of several products and the products are pulled from the same or several location(s), to keep having the moves grouped into pickings that represent the sale order. Used in: sales order (to group delivery order lines like the so), pull/push rules (to pack like the delivery order), on orderpoints (e.g. for wave picking all the similar products together). Grouping is made only if the source and the destination is the same. 
Suppose you have 4 lines on a picking from Output where 2 lines will need to come from Input (crossdock) and 2 lines coming from Stock -> Output As the four procurement orders will have the same group ids from the SO, the move from input will have a stock.picking with 2 grouped lines and the move from stock will have 2 grouped lines also. The name is usually the name of the original document (sale order) or a sequence computed if created manually. ''' _name = 'procurement.group' _description = 'Procurement Requisition' _order = "id desc" _columns = { 'name': fields.char('Reference', required=True), 'move_type': fields.selection([ ('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True), 'procurement_ids': fields.one2many('procurement.order', 'group_id', 'Procurements'), } _defaults = { 'name': lambda self, cr, uid, c: self.pool.get('ir.sequence').get(cr, uid, 'procurement.group') or '', 'move_type': lambda self, cr, uid, c: 'direct' } class procurement_rule(osv.osv): ''' A rule describe what a procurement should do; produce, buy, move, ... 
''' _name = 'procurement.rule' _description = "Procurement Rule" _order = "name" def _get_action(self, cr, uid, context=None): return [] _columns = { 'name': fields.char('Name', required=True, help="This field will fill the packing origin and the name of its moves"), 'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."), 'group_propagation_option': fields.selection([('none', 'Leave Empty'), ('propagate', 'Propagate'), ('fixed', 'Fixed')], string="Propagation of Procurement Group"), 'group_id': fields.many2one('procurement.group', 'Fixed Procurement Group'), 'action': fields.selection(selection=lambda s, cr, uid, context=None: s._get_action(cr, uid, context=context), string='Action', required=True), 'sequence': fields.integer('Sequence'), 'company_id': fields.many2one('res.company', 'Company'), } _defaults = { 'group_propagation_option': 'propagate', 'sequence': 20, 'active': True, } class procurement_order(osv.osv): """ Procurement Orders """ _name = "procurement.order" _description = "Procurement" _order = 'priority desc, date_planned, id asc' _inherit = ['mail.thread'] _log_create = False _columns = { 'name': fields.text('Description', required=True), 'origin': fields.char('Source Document', help="Reference of the document that created this Procurement.\n" "This is automatically completed by Odoo."), 'company_id': fields.many2one('res.company', 'Company', required=True), # These two fields are used for shceduling 'priority': fields.selection(PROCUREMENT_PRIORITIES, 'Priority', required=True, select=True, track_visibility='onchange'), 'date_planned': fields.datetime('Scheduled Date', required=True, select=True, track_visibility='onchange'), 'group_id': fields.many2one('procurement.group', 'Procurement Group'), 'rule_id': fields.many2one('procurement.rule', 'Rule', track_visibility='onchange', help="Chosen rule for the procurement resolution. 
Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior."), 'product_id': fields.many2one('product.product', 'Product', required=True, states={'confirmed': [('readonly', False)]}, readonly=True), 'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'confirmed': [('readonly', False)]}, readonly=True), 'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'confirmed': [('readonly', False)]}, readonly=True), 'product_uos_qty': fields.float('UoS Quantity', states={'confirmed': [('readonly', False)]}, readonly=True), 'product_uos': fields.many2one('product.uom', 'Product UoS', states={'confirmed': [('readonly', False)]}, readonly=True), 'state': fields.selection([ ('cancel', 'Cancelled'), ('confirmed', 'Confirmed'), ('exception', 'Exception'), ('running', 'Running'), ('done', 'Done') ], 'Status', required=True, track_visibility='onchange', copy=False), } _defaults = { 'state': 'confirmed', 'priority': '1', 'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c) } def unlink(self, cr, uid, ids, context=None): procurements = self.read(cr, uid, ids, ['state'], context=context) unlink_ids = [] for s in procurements: if s['state'] == 'cancel': unlink_ids.append(s['id']) else: raise osv.except_osv(_('Invalid Action!'), _('Cannot delete Procurement Order(s) which are in %s state.') % s['state']) return osv.osv.unlink(self, cr, uid, unlink_ids, context=context) def do_view_procurements(self, cr, uid, ids, context=None): ''' This function returns an action that display existing procurement orders of same procurement group of given ids. 
''' act_obj = self.pool.get('ir.actions.act_window') action_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'procurement.do_view_procurements', raise_if_not_found=True) result = act_obj.read(cr, uid, [action_id], context=context)[0] group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id]) result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]" return result def onchange_product_id(self, cr, uid, ids, product_id, context=None): """ Finds UoM and UoS of changed product. @param product_id: Changed id of product. @return: Dictionary of values. """ if product_id: w = self.pool.get('product.product').browse(cr, uid, product_id, context=context) v = { 'product_uom': w.uom_id.id, 'product_uos': w.uos_id and w.uos_id.id or w.uom_id.id } return {'value': v} return {} def get_cancel_ids(self, cr, uid, ids, context=None): return [proc.id for proc in self.browse(cr, uid, ids, context=context) if proc.state != 'done'] def cancel(self, cr, uid, ids, context=None): #cancel only the procurements that aren't done already to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context) if to_cancel_ids: return self.write(cr, uid, to_cancel_ids, {'state': 'cancel'}, context=context) def reset_to_confirmed(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {'state': 'confirmed'}, context=context) def run(self, cr, uid, ids, autocommit=False, context=None): for procurement_id in ids: #we intentionnaly do the browse under the for loop to avoid caching all ids which would be resource greedy #and useless as we'll make a refresh later that will invalidate all the cache (and thus the next iteration #will fetch all the ids again) procurement = self.browse(cr, uid, procurement_id, context=context) if procurement.state not in ("running", "done"): try: if self._assign(cr, uid, procurement, context=context): res = self._run(cr, uid, procurement, context=context or {}) if res: 
self.write(cr, uid, [procurement.id], {'state': 'running'}, context=context) else: self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context) else: self.message_post(cr, uid, [procurement.id], body=_('No rule matching this procurement'), context=context) self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context) if autocommit: cr.commit() except OperationalError: if autocommit: cr.rollback() continue else: raise return True def check(self, cr, uid, ids, autocommit=False, context=None): done_ids = [] for procurement in self.browse(cr, uid, ids, context=context): try: result = self._check(cr, uid, procurement, context=context) if result: done_ids.append(procurement.id) if autocommit: cr.commit() except OperationalError: if autocommit: cr.rollback() continue else: raise if done_ids: self.write(cr, uid, done_ids, {'state': 'done'}, context=context) return done_ids # # Method to overwrite in different procurement modules # def _find_suitable_rule(self, cr, uid, procurement, context=None): '''This method returns a procurement.rule that depicts what to do with the given procurement in order to complete its needs. It returns False if no suiting rule is found. :param procurement: browse record :rtype: int or False ''' return False def _assign(self, cr, uid, procurement, context=None): '''This method check what to do with the given procurement in order to complete its needs. It returns False if no solution is found, otherwise it stores the matching rule (if any) and returns True. 
:param procurement: browse record :rtype: boolean ''' #if the procurement already has a rule assigned, we keep it (it has a higher priority as it may have been chosen manually) if procurement.rule_id: return True elif procurement.product_id.type != 'service': rule_id = self._find_suitable_rule(cr, uid, procurement, context=context) if rule_id: self.write(cr, uid, [procurement.id], {'rule_id': rule_id}, context=context) return True return False def _run(self, cr, uid, procurement, context=None): '''This method implements the resolution of the given procurement :param procurement: browse record :returns: True if the resolution of the procurement was a success, False otherwise to set it in exception ''' return True def _check(self, cr, uid, procurement, context=None): '''Returns True if the given procurement is fulfilled, False otherwise :param procurement: browse record :rtype: boolean ''' return False # # Scheduler # def run_scheduler(self, cr, uid, use_new_cursor=False, company_id = False, context=None): ''' Call the scheduler to check the procurement order. This is intented to be done for all existing companies at the same time, so we're running all the methods as SUPERUSER to avoid intercompany and access rights issues. @param self: The object pointer @param cr: The current row, from the database cursor, @param uid: The current user ID for security checks @param ids: List of selected IDs @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement. This is appropriate for batch jobs only. 
@param context: A standard dictionary for contextual values @return: Dictionary of values ''' if context is None: context = {} try: if use_new_cursor: cr = openerp.registry(cr.dbname).cursor() # Run confirmed procurements dom = [('state', '=', 'confirmed')] if company_id: dom += [('company_id', '=', company_id)] prev_ids = [] while True: ids = self.search(cr, SUPERUSER_ID, dom, context=context) if not ids or prev_ids == ids: break else: prev_ids = ids self.run(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context) if use_new_cursor: cr.commit() # Check if running procurements are done offset = 0 dom = [('state', '=', 'running')] if company_id: dom += [('company_id', '=', company_id)] prev_ids = [] while True: ids = self.search(cr, SUPERUSER_ID, dom, offset=offset, context=context) if not ids or prev_ids == ids: break else: prev_ids = ids self.check(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context) if use_new_cursor: cr.commit() finally: if use_new_cursor: try: cr.close() except Exception: pass return {} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urllib from tempest.lib.common import rest_client class RolesClient(rest_client.RestClient): api_version = "v2.0" def create_role(self, **kwargs): """Create a role. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/identity/v2-ext/index.html#create-a-role """ post_body = json.dumps({'role': kwargs}) resp, body = self.post('OS-KSADM/roles', post_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def show_role(self, role_id_or_name): """Get a role by its id or name. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/identity/v2-ext/index.html#show-a-role OR https://developer.openstack.org/api-ref/identity/v2-ext/index.html#show-role-information-by-name """ resp, body = self.get('OS-KSADM/roles/%s' % role_id_or_name) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_roles(self, **params): """Returns roles. 
For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/identity/v2-ext/index.html#list-all-roles """ url = 'OS-KSADM/roles' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def delete_role(self, role_id): """Delete a role. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/identity/v2-ext/index.html#delete-a-role """ resp, body = self.delete('OS-KSADM/roles/%s' % role_id) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body) def create_user_role_on_project(self, tenant_id, user_id, role_id): """Add roles to a user on a tenant. For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/identity/v2-ext/index.html#grant-roles-to-user-on-tenant """ resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' % (tenant_id, user_id, role_id), "") self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def list_user_roles_on_project(self, tenant_id, user_id, **params): """Returns a list of roles assigned to a user for a tenant.""" # TODO(gmann): Need to write API-ref link, Bug# 1592711 url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id) if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body) def delete_role_from_user_on_project(self, tenant_id, user_id, role_id): """Removes a role assignment for a user on a tenant. 
For a full list of available parameters, please refer to the official API reference: https://developer.openstack.org/api-ref/identity/v2-ext/index.html#revoke-role-from-user-on-tenant """ resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' % (tenant_id, user_id, role_id)) self.expected_success(204, resp.status) return rest_client.ResponseBody(resp, body)
unknown
codeparrot/codeparrot-clean
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os import fcntl import errno import time import socket import llnl.util.tty as tty __all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction', 'LockError', 'LockTimeoutError', 'LockPermissionError', 'LockROFileError', 'CantCreateLockError'] # Default timeout in seconds, after which locks will raise exceptions. _default_timeout = 60 # Sleep time per iteration in spin loop (in seconds) _sleep_time = 1e-5 class Lock(object): """This is an implementation of a filesystem lock using Python's lockf. In Python, ``lockf`` actually calls ``fcntl``, so this should work with any filesystem implementation that supports locking through the fcntl calls. 
This includes distributed filesystems like Lustre (when flock is enabled) and recent NFS versions. """ def __init__(self, path, start=0, length=0, debug=False): """Construct a new lock on the file at ``path``. By default, the lock applies to the whole file. Optionally, caller can specify a byte range beginning ``start`` bytes from the start of the file and extending ``length`` bytes from there. This exposes a subset of fcntl locking functionality. It does not currently expose the ``whence`` parameter -- ``whence`` is always ``os.SEEK_SET`` and ``start`` is always evaluated from the beginning of the file. """ self.path = path self._file = None self._reads = 0 self._writes = 0 # byte range parameters self._start = start self._length = length # enable debug mode self.debug = debug # PID and host of lock holder (only used in debug mode) self.pid = self.old_pid = None self.host = self.old_host = None def _lock(self, op, timeout=_default_timeout): """This takes a lock using POSIX locks (``fcntl.lockf``). The lock is implemented as a spin lock using a nonblocking call to ``lockf()``. On acquiring an exclusive lock, the lock writes this process's pid and host to the lock file, in case the holding process needs to be killed later. If the lock times out, it raises a ``LockError``. """ assert op in (fcntl.LOCK_SH, fcntl.LOCK_EX) start_time = time.time() while (time.time() - start_time) < timeout: # Create file and parent directories if they don't exist. 
if self._file is None: parent = self._ensure_parent_directory() # Open writable files as 'r+' so we can upgrade to write later os_mode, fd_mode = (os.O_RDWR | os.O_CREAT), 'r+' if os.path.exists(self.path): if not os.access(self.path, os.W_OK): if op == fcntl.LOCK_SH: # can still lock read-only files if we open 'r' os_mode, fd_mode = os.O_RDONLY, 'r' else: raise LockROFileError(self.path) elif not os.access(parent, os.W_OK): raise CantCreateLockError(self.path) fd = os.open(self.path, os_mode) self._file = os.fdopen(fd, fd_mode) elif op == fcntl.LOCK_EX and self._file.mode == 'r': # Attempt to upgrade to write lock w/a read-only file. # If the file were writable, we'd have opened it 'r+' raise LockROFileError(self.path) try: # Try to get the lock (will raise if not available.) fcntl.lockf(self._file, op | fcntl.LOCK_NB, self._length, self._start, os.SEEK_SET) # help for debugging distributed locking if self.debug: # All locks read the owner PID and host self._read_debug_data() # Exclusive locks write their PID/host if op == fcntl.LOCK_EX: self._write_debug_data() return except IOError as e: if e.errno in (errno.EAGAIN, errno.EACCES): # EAGAIN and EACCES == locked by another process pass else: raise time.sleep(_sleep_time) raise LockTimeoutError("Timed out waiting for lock.") def _ensure_parent_directory(self): parent = os.path.dirname(self.path) # relative paths to lockfiles in the current directory have no parent if not parent: return '.' try: os.makedirs(parent) except OSError as e: # makedirs can fail when diretory already exists. 
if not (e.errno == errno.EEXIST and os.path.isdir(parent) or e.errno == errno.EISDIR): raise return parent def _read_debug_data(self): """Read PID and host data out of the file if it is there.""" self.old_pid = self.pid self.old_host = self.host line = self._file.read() if line: pid, host = line.strip().split(',') _, _, self.pid = pid.rpartition('=') _, _, self.host = host.rpartition('=') self.pid = int(self.pid) def _write_debug_data(self): """Write PID and host data to the file, recording old values.""" self.old_pid = self.pid self.old_host = self.host self.pid = os.getpid() self.host = socket.getfqdn() # write pid, host to disk to sync over FS self._file.seek(0) self._file.write("pid=%s,host=%s" % (self.pid, self.host)) self._file.truncate() self._file.flush() os.fsync(self._file.fileno()) def _unlock(self): """Releases a lock using POSIX locks (``fcntl.lockf``) Releases the lock regardless of mode. Note that read locks may be masquerading as write locks, but this removes either. """ fcntl.lockf(self._file, fcntl.LOCK_UN, self._length, self._start, os.SEEK_SET) self._file.close() self._file = None def acquire_read(self, timeout=_default_timeout): """Acquires a recursive, shared lock for reading. Read and write locks can be acquired and released in arbitrary order, but the POSIX lock is held until all local read and write locks are released. Returns True if it is the first acquire and actually acquires the POSIX lock, False if it is a nested transaction. """ if self._reads == 0 and self._writes == 0: self._debug( 'READ LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]' .format(self)) self._lock(fcntl.LOCK_SH, timeout=timeout) # can raise LockError. self._debug( 'READ LOCK: {0.path}[{0._start}:{0._length}] [Acquired]' .format(self)) self._reads += 1 return True else: self._reads += 1 return False def acquire_write(self, timeout=_default_timeout): """Acquires a recursive, exclusive lock for writing. 
Read and write locks can be acquired and released in arbitrary order, but the POSIX lock is held until all local read and write locks are released. Returns True if it is the first acquire and actually acquires the POSIX lock, False if it is a nested transaction. """ if self._writes == 0: self._debug( 'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Acquiring]' .format(self)) self._lock(fcntl.LOCK_EX, timeout=timeout) # can raise LockError. self._debug( 'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Acquired]' .format(self)) self._writes += 1 return True else: self._writes += 1 return False def release_read(self): """Releases a read lock. Returns True if the last recursive lock was released, False if there are still outstanding locks. Does limited correctness checking: if a read lock is released when none are held, this will raise an assertion error. """ assert self._reads > 0 if self._reads == 1 and self._writes == 0: self._debug( 'READ LOCK: {0.path}[{0._start}:{0._length}] [Released]' .format(self)) self._unlock() # can raise LockError. self._reads -= 1 return True else: self._reads -= 1 return False def release_write(self): """Releases a write lock. Returns True if the last recursive lock was released, False if there are still outstanding locks. Does limited correctness checking: if a read lock is released when none are held, this will raise an assertion error. """ assert self._writes > 0 if self._writes == 1 and self._reads == 0: self._debug( 'WRITE LOCK: {0.path}[{0._start}:{0._length}] [Released]' .format(self)) self._unlock() # can raise LockError. self._writes -= 1 return True else: self._writes -= 1 return False def _debug(self, *args): tty.debug(*args) class LockTransaction(object): """Simple nested transaction context manager that uses a file lock. This class can trigger actions when the lock is acquired for the first time and released for the last. 
If the ``acquire_fn`` returns a value, it is used as the return value for ``__enter__``, allowing it to be passed as the ``as`` argument of a ``with`` statement. If ``acquire_fn`` returns a context manager, *its* ``__enter__`` function will be called in ``__enter__`` after ``acquire_fn``, and its ``__exit__`` funciton will be called before ``release_fn`` in ``__exit__``, allowing you to nest a context manager to be used along with the lock. Timeout for lock is customizable. """ def __init__(self, lock, acquire_fn=None, release_fn=None, timeout=_default_timeout): self._lock = lock self._timeout = timeout self._acquire_fn = acquire_fn self._release_fn = release_fn self._as = None def __enter__(self): if self._enter() and self._acquire_fn: self._as = self._acquire_fn() if hasattr(self._as, '__enter__'): return self._as.__enter__() else: return self._as def __exit__(self, type, value, traceback): suppress = False if self._exit(): if self._as and hasattr(self._as, '__exit__'): if self._as.__exit__(type, value, traceback): suppress = True if self._release_fn: if self._release_fn(type, value, traceback): suppress = True return suppress class ReadTransaction(LockTransaction): """LockTransaction context manager that does a read and releases it.""" def _enter(self): return self._lock.acquire_read(self._timeout) def _exit(self): return self._lock.release_read() class WriteTransaction(LockTransaction): """LockTransaction context manager that does a write and releases it.""" def _enter(self): return self._lock.acquire_write(self._timeout) def _exit(self): return self._lock.release_write() class LockError(Exception): """Raised for any errors related to locks.""" class LockTimeoutError(LockError): """Raised when an attempt to acquire a lock times out.""" class LockPermissionError(LockError): """Raised when there are permission issues with a lock.""" class LockROFileError(LockPermissionError): """Tried to take an exclusive lock on a read-only file.""" def __init__(self, path): msg 
= "Can't take write lock on read-only file: %s" % path super(LockROFileError, self).__init__(msg) class CantCreateLockError(LockPermissionError): """Attempt to create a lock in an unwritable location.""" def __init__(self, path): msg = "cannot create lock '%s': " % path msg += "file does not exist and location is not writable" super(LockError, self).__init__(msg)
unknown
codeparrot/codeparrot-clean
### Run tests Contributing includes testing your changes. If you change the Moby code, you may need to add a new test or modify an existing test. Your contribution could even be adding tests to Moby. For this reason, you need to know a little about Moby's test infrastructure. This section describes tests you can run in the `dry-run-test` branch of your Docker fork. If you have followed along in this guide, you already have this branch. If you don't have this branch, you can create it or simply use another branch. ## Understand how to test Moby Moby tests use the Go language's test framework. In this framework, files whose names end in `_test.go` contain test code; you'll find test files like this throughout the Moby repo. Use these files for inspiration when writing your own tests. For information on Go's test framework, see <a href="http://golang.org/pkg/testing/" target="_blank">Go's testing package documentation</a> and the <a href="http://golang.org/cmd/go/#hdr-Test_packages" target="_blank">go test help</a>. You are responsible for _unit testing_ your contribution when you add new or change existing Moby code. A unit test is a piece of code that invokes a single, small piece of code (_unit of work_) to verify the unit works as expected. Depending on your contribution, you may need to add _integration tests_. These are tests that combine two or more work units into one component. These work units each have unit tests and then, together, integration tests that test the interface between the components. The `integration` and `integration-cli` directories in the Docker repository contain integration test code. Note that `integration-cli` tests are now deprecated in the Moby project, and new tests cannot be added to this suite - add `integration` tests instead using the API client. Testing is its own specialty. If you aren't familiar with testing techniques, there is a lot of information available to you on the Web. 
For now, you should understand that, the Docker maintainers may ask you to write a new test or change an existing one. ## Run tests on your local host Before submitting a pull request with a code change, you should run the entire Moby Engine test suite. The `Makefile` contains a target for the entire test suite, named `test`. Also, it contains several targets for testing: | Target | What this target does | | ---------------------- | ---------------------------------------------- | | `test` | Run the unit, integration, and docker-py tests | | `test-unit` | Run just the unit tests | | `test-integration` | Run the integration tests | | `test-docker-py` | Run the tests for the Docker API client | Running the entire test suite on your current repository can take over half an hour. To run the test suite, do the following: 1. Open a terminal on your local host. 2. Change to the root of your Docker repository. ```bash $ cd moby-fork ``` 3. Make sure you are in your development branch. ```bash $ git checkout dry-run-test ``` 4. Run the `make test` command. ```bash $ make test ``` This command does several things, it creates a container temporarily for testing. Inside that container, the `make`: * creates a new binary * cross-compiles all the binaries for the various operating systems * runs all the tests in the system It can take approximately one hour to run all the tests. The time depends on your host performance. The default timeout is 60 minutes, which is defined in `hack/make.sh` (`${TIMEOUT:=60m}`). You can modify the timeout value on the basis of your host performance. When they complete successfully, you see the output concludes with something like this: ```none Ran 68 tests in 79.135s ``` ## Run targets inside a development container If you are working inside a development container, you use the `hack/test/unit` script to run unit-tests, and `hack/make.sh` script to run integration and other tests. 
The `hack/make.sh` script doesn't have a single target that runs all the tests. Instead, you provide a single command line with multiple targets that does the same thing. Try this now. 1. Open a terminal and change to the `moby-fork` root. 2. Start a Moby development image. If you are following along with this guide, you should have a `docker-dev:latest` image. ```bash $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker docker-dev:latest /bin/bash ``` 3. Run the unit tests using the `hack/test/unit` script. ```bash # hack/test/unit ``` 4. Run the tests using the `hack/make.sh` script. ```bash # hack/make.sh dynbinary binary test-integration test-docker-py ``` The tests run just as they did within your local host. Of course, you can also run a subset of these targets too. For example, to run just the integration tests: ```bash # hack/make.sh dynbinary binary test-integration ``` Most test targets require that you build these precursor targets first: `dynbinary binary` ## Run unit tests We use golang standard [testing](https://golang.org/pkg/testing/) package or [gocheck](https://labix.org/gocheck) for our unit tests. You can use the `TESTDIRS` environment variable to run unit tests for a single package. ```bash $ TESTDIRS='github.com/docker/docker/opts' make test-unit ``` You can also use the `TESTFLAGS` environment variable to run a single test. The flag's value is passed as arguments to the `go test` command. For example, from your local host you can run the `TestValidateIPAddress` test with this command: ```bash $ TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit ``` On unit tests, it's better to use `TESTFLAGS` in combination with `TESTDIRS` to make it quicker to run a specific test. ```bash $ TESTDIRS='github.com/docker/docker/opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit ``` ## Run integration tests We use [gocheck](https://labix.org/gocheck) for our integration-cli tests. 
You can use the `TESTFLAGS` environment variable to run a single test. The flag's value is passed as arguments to the `go test` command. For example, from your local host you can run the `TestDockerCLIBuildSuite` test with this command: ```bash $ TESTFLAGS='-test.run TestDockerCLIBuildSuite' make test-integration ``` To run the same test inside your Docker development container, you do this: ```bash # TESTFLAGS='-test.run TestDockerCLIBuildSuite' hack/make.sh binary test-integration ``` ## Test the Windows binary against a Linux daemon This explains how to test the Windows binary on a Windows machine set up as a development environment. The tests will be run against a daemon running on a remote Linux machine. You'll use **Git Bash** that came with the Git for Windows installation. **Git Bash**, just as it sounds, allows you to run a Bash terminal on Windows. 1. If you don't have one open already, start a Git Bash terminal. ![Git Bash](images/git_bash.png) 2. Change to the `moby` source directory. ```bash $ cd /c/gopath/src/github.com/docker/docker ``` 3. Set `DOCKER_REMOTE_DAEMON` as follows: ```bash $ export DOCKER_REMOTE_DAEMON=1 ``` 4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your Linux machine's actual IP address. For example: ```bash $ export DOCKER_TEST_HOST=tcp://213.124.23.200:2376 ``` 5. Make the binary and run the tests: ```bash $ hack/make.sh binary test-integration ``` Some tests are skipped on Windows for various reasons. You can see which tests were skipped by re-running the make and passing in the `TESTFLAGS='-test.v'` value. For example ```bash $ TESTFLAGS='-test.v' hack/make.sh binary test-integration ``` Should you wish to run a single test such as one with the name 'TestExample', you can pass in `TESTFLAGS='-test.run /TestExample'`. For example ```bash $ TESTFLAGS='-test.run /TestExample' hack/make.sh binary test-integration ``` You can now choose to make changes to the Moby source or the tests. 
If you make any changes, just run these commands again. ## [Public CI infrastructure](ci.docker.com/public) The current infrastructure is maintained here: [Moby ci job](https://ci.docker.com/public/job/moby). The Jenkins infrastructure for the Moby project is maintained and managed by Docker Inc. All contributions against the Jenkinsfile are appreciated and welcomed! However we might not be able to fully provide the infrastructure to test against various architectures in our CI pipelines. All jobs can be triggered and re-ran by the Moby maintainers ## Where to go next Congratulations, you have successfully completed the basics you need to understand the Moby test framework. In the next section you'll [learn how to debug Docker daemon, running inside the development container](debug.md).
unknown
github
https://github.com/moby/moby
docs/contributing/test.md
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package sql_test import ( gosql "database/sql" "testing" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/errors" ) func TestCommentOnTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) runCommentOnTests(t, func(db *gosql.DB) { if _, err := db.Exec(` CREATE DATABASE d; SET DATABASE = d; CREATE TABLE t (i INT ) WITH (schema_locked=false); `); err != nil { t.Fatal(err) } testCases := []struct { exec string query string expect gosql.NullString }{ { `COMMENT ON TABLE t IS 'foo'`, `SELECT obj_description('t'::regclass, 'pg_class')`, gosql.NullString{String: `foo`, Valid: true}, }, { `TRUNCATE t`, `SELECT obj_description('t'::regclass, 'pg_class')`, gosql.NullString{String: `foo`, Valid: true}, }, { `COMMENT ON TABLE t IS NULL`, `SELECT obj_description('t'::regclass, 'pg_class')`, gosql.NullString{Valid: false}, }, } for _, tc := range testCases { if _, err := db.Exec(tc.exec); err != nil { t.Fatal(err) } row := db.QueryRow(tc.query) var comment gosql.NullString if err := row.Scan(&comment); err != nil { t.Fatal(err) } if tc.expect != comment { t.Fatalf("expected comment %v, got %v", tc.expect, comment) } } }) } func TestCommentOnTableWhenDrop(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) runCommentOnTests(t, func(db *gosql.DB) { if _, err := db.Exec(` CREATE DATABASE d; SET DATABASE = d; CREATE TABLE t (i INT ); `); err != nil { t.Fatal(err) } if _, err := db.Exec(`COMMENT ON TABLE t IS 'foo'`); err != nil { t.Fatal(err) } if _, err := db.Exec(`DROP TABLE t`); err != nil { t.Fatal(err) } row := db.QueryRow(`SELECT comment FROM system.comments LIMIT 1`) var comment string err := row.Scan(&comment) if !errors.Is(err, gosql.ErrNoRows) { if err != nil { t.Fatal(err) } t.Fatal("comment remaining in 
system.comments despite drop") } }) }
go
github
https://github.com/cockroachdb/cockroach
pkg/sql/comment_on_table_test.go
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Author: Gillett Hernandez # @Date: 2016-03-21 22:33:46 # @Last Modified by: Gillett Hernandez # @Last Modified time: 2017-12-11 20:09:28 from euler_funcs import ContinuedFraction, solve_pell, analyze_triplets, sconvergents, lag, is_square from math import sqrt, gcd, log from decimal import * from itertools import count getcontext().prec = 28 class Pair: def __init__(self, x, y): self.x = x self.y = y def __add__(self, other): if isinstance(other, (list, tuple)): if len(other) != 2: raise TypeError("unsupported operand length for +") return Pair(self.x + other[0], self.y + other[1]) return Pair(self.x+other.x, self.y+other.y) def __radd__(self, other): return self + other def __sub__(self, other): if isinstance(other, (list, tuple)): if len(other) != 2: raise TypeError("unsupported operand length for -") return Pair(self.x - other[0], self.y - other[1]) return Pair(self.x-other.x, self.y-other.y) def __rsub__(self, other): if isinstance(other, (list, tuple)): if len(other) != 2: raise TypeError("unsupported operand length for -") return Pair(other[0] - self.x, other[1] - self.y) elif isinstance(other, Pair): return Pair(other.x-self.x, other.y-self.y) else: raise TypeError("oops") def __mul__(self, other): if isinstance(other, Pair): raise TypeError("unsupported operand type(s) for *: 'Pair' and 'Pair'") else: return Pair(other*self.x, other*self.y) def __rmul__(self, other): if isinstance(other, Pair): raise TypeError("unsupported operand type(s) for *: 'Pair' and 'Pair'") else: return Pair(other*self.x, other*self.y) def __eq__(self, other): if isinstance(other, (tuple,list)): return self.x == other[0] and self.y == other[1] and len(other) == 2 elif isinstance(other, Pair): return self.x == other.x and self.y == other.y else: raise NotImplementedError(f"No known conversion of {type(other)} to Pair") def __getitem__(self, key): return (self.x, self.y)[key] def __repr__(self): return f"Pair({self.x}, {self.y})" def 
compute_pi(): """Compute Pi to the current precision. >>> print(pi()) 3.141592653589793238462643383 """ getcontext().prec += 2 # extra digits for intermediate steps three = Decimal(3) # substitute "three=3.0" for regular floats lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24 while s != lasts: lasts = s n, na = n+na, na+8 d, da = d+da, da+32 t = (t * n) / d s += t getcontext().prec -= 2 return +s # unary plus applies the new precision global PI PI = compute_pi() def error(d): def ed(a,b): try: return (d*b**2-a**2)/(b*(Decimal(d).sqrt())+a) except: print(a,b,d) raise return ed modified_error = lambda d: lambda a,b: (d*b**2-a**2)/((b*(Decimal(d).sqrt())+a)) def approximate_unit(d): cd = ContinuedFraction.from_(d) ed = modified_error(d) max_convergence = 0 unit = 0 last_error = -1 for c1, c2 in lag((conv.numerator, conv.denominator) for conv in cd.cng(100)): e1 = ed(*c1) e2 = ed(*c2) if abs(last_error - e1/e2) > max_convergence: max_convergence = abs(last_error - e1/e2) unit = abs(e1/e2) last_error = e1/e2 return unit def reconstruct(l, muls): nl = [(pair[0]*m, pair[1]*m) for pair,m in zip(l, muls)] s = [0, 0] for e in nl: s[0] += e[0] s[1] += e[1] return s def overlimit(pair, limit=10**13): return abs(pair[0]) > limit or abs(pair[1]) > limit def BQA(d, target, limit=10**13, debug=False): cd = ContinuedFraction.from_(d) ed = error(d) if debug: print(ed) ud = approximate_unit(d) if debug: print(f"unit for {d} = {ud}") sqrtd = Decimal(d).sqrt() # print(cd.convergent_list(40)) # target = target % 1 l = [] for conv in cd.cng(): a,b = conv.numerator, conv.denominator if a > 2*limit or b > 2*limit: break l.append((a,b)) for i,(a,b) in enumerate(l): if debug: print("\n") e = ed(a,b) na, nb = 2*b-a, b if debug: print(f"{i}, {log(b,10)}, {b}*√{d} mod 1 = {e}, {a},{b}") # find start start = l[0][0] offset = start*sqrtd offset = int(offset - offset%1 - 3) answer = Pair(offset, start) e = ed(offset, start) if debug: print("starting answer =",offset-3, start, "e =",e) 
print(start*sqrtd) s = 1 if debug: print("\n") orig_len = len(l) def getclose(P, l, target=target, mlimit=(1+int(ud))): # if debug: print("\n") if len(l) == 0: if debug: print(f"{' '*(orig_len-len(l))}P = {P}") return [P] # get first of Ls currently passed through first = l[0] second = l[1] closest = float("inf") if len(l) == 2: closest = float("inf") if debug: print(f"{' '*(orig_len-len(l))}start of function, P = {P}, l[0] = {l[0]}") for mul1 in range(-mlimit, 1+mlimit): nP1 = P + mul1*Pair(*first) if nP1 == [0, 0]: continue e1 = ed(*nP1) sdiff1 = abs(e1 - target) if debug: print(f"{' '*(orig_len-len(l))}{mul1:3}, {e1}, {sdiff1}") for mul2 in range(-mlimit, 1+mlimit): nP2 = nP1 + mul2*Pair(*second) if nP2 == [0, 0]: continue e2 = ed(*nP2) sdiff2 = abs(e2 - target) if overlimit(nP2, limit): continue if abs(sdiff2) < closest: if debug: print(f"{' '*(orig_len-len(l))}getting closer, muls={[mul1,mul2]}, L[:2] = {first, second}, nPs={[nP1,nP2]}") closest = sdiff2 closest_nP = nP1 closest_nP2 = nP2 closest_muls = [mul1,mul2] if closest == float("inf"): print("waatttt error!!!!") return [] if debug: print(f"{' '*(orig_len-len(l))}{P} + {closest_mul} * {first} = {closest_nP}") return closest_muls + [closest_nP2] else: for mul1 in range(-mlimit, 1+mlimit): nP1 = P + mul1*Pair(*first) if nP1 == [0, 0]: continue e1 = ed(*nP1) sdiff1 = abs(e1 - target) if debug: print(f"{' '*(orig_len-len(l))}{mul1:3}, {e1}, {sdiff1}") if overlimit(nP1, limit): continue if abs(sdiff1) < closest: if debug: print(f"{' '*(orig_len-len(l))}getting closer, muls={mul1}, L[0] = {first}, nPs={nP1}") closest = sdiff1 closest_nP = nP1 closest_mul = mul1 return [closest_mul] + getclose(closest_nP, l[1:], target, mlimit) rl = getclose(answer, l, target) reconstructed = reconstruct(l, rl[:-1]) r = answer + reconstructed if debug: print(f"r = {r} and tail of rl = {rl[-1]}") print(r, rl) print("error of r:", ed(*r)) return (r, answer, l, rl) def test(): print(BQA(2,PI,10)) print(BQA(5,PI,100)) 
print(BQA(7,PI,10**6)) print(BQA(2,PI,10**13)) (a, b), aoffset, l, rl = BQA(21, PI, 10**6, debug=True) true_target = Pair(393304, 85826) print(a,b, true_target) def main(): s = Decimal(0) for d in range(1,100): if is_square(d): continue (a, b), aoffset, l, rl = BQA(d, PI, 10**13) ed = error(d) r = [a,b] pr = [abs(e) for e in r] print(f"adding {max(pr)}", d, r, ed(*r), ed(*r)-PI) if any(e>10**13 for e in pr): r = BQA(d, PI, 10**13, debug=True) break s += pr[0] print(s) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
// Licensed to the Apache Software Foundation (ASF) under one or more // contributor license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright ownership. // The ASF licenses this file to You under the Apache License, Version 2.0 // (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. { "type": "data", "name": "EndTxnMarker", "validVersions": "0", "flexibleVersions": "none", "fields": [ { "name": "CoordinatorEpoch", "type": "int32", "versions": "0+", "about": "The coordinator epoch when appending the record" } ] }
json
github
https://github.com/apache/kafka
clients/src/main/resources/common/message/EndTxnMarker.json
#!/usr/bin/env python from ciscoconfparse import CiscoConfParse import re from pprint import pprint import markdown def read_in_file(filename): return CiscoConfParse(filename) def generate_pointee_markdown(parsed_config, pointee_objects): pointee_types = ['access-list', 'ip access-list', 'interface', 'route-map', 'class-map', 'policy-map'] for pointee_type in pointee_types: for pointee in pointee_objects: for line in parsed_config.find_objects(r'.*'): if re.search(r'^' + pointee_type, line.text): # markdown line to bold, hyperlink the name new_line = line.text new_line = re.sub(r'.*', '[' + pointee[1] + '] ' + '**' + new_line + '**', new_line) new_line = re.sub(' ' + pointee[1] + r'( |$)', '*' + pointee[1] + '*' , new_line) print new_line def find_pointees(parsed_config): # Find all pointee lines and returns list lists. # The nested list object is a ciscoconfparse line object. pointee_types = ['access-list', 'ip access-list', 'interface', 'route-map', 'class-map', 'policy-map'] object_list = [] for pointee_type in pointee_types: re_pattern = r'^' + pointee_type conf_lines = (parsed_config.find_objects(re_pattern)) for line in conf_lines: object_list.append(find_pointee_names(line, pointee_type)) return object_list def find_pointee_names(line, pointee_type): # Extracts object name from config line text, # and returns tuple of (line object, object name) if pointee_type == 'access-list': try: pointee_name = re.search(r'^access-list (\S+) .+$', line.text).group(1) except: pointee_name = "FAIL" return (line, pointee_name) elif pointee_type == 'ip access-list': try: pointee_name = re.search(r'^ip access-list .+ (\S+)$', line.text).group(1) except: pointee_name = "FAIL" return (line, pointee_name) elif pointee_type == 'interface': try: pointee_name = re.search(r'^interface (\S+)$', line.text).group(1) except: pointee_name = "FAIL" return (line, pointee_name) elif pointee_type == 'route-map': try: pointee_name = re.search(r'^route-map (\S+).*$', line.text).group(1) except: 
pointee_name = "FAIL" return (line, pointee_name) elif pointee_type == 'class-map': try: pointee_name = re.search(r'^class-map.+ (\S+)$', line.text).group(1) except: pointee_name = "FAIL" return (line, pointee_name) elif pointee_type == 'policy-map': try: pointee_name = re.search(r'^policy-map (.+ )?(\S+)$', line.text).group(2) except: pointee_name = "FAIL" return (line, pointee_name) else: return (line, "END_FAIL") def find_pointers(parsed_config): # Find all pointers based on pointer types # The nested list object is a ciscoconfparse line object. pointer_types = ['access-class', 'access-group', 'policy-map', 'match access-group'] object_list = [] for pointer_type in pointer_types: re_pattern = r'^' + pointer_type object_list.extend(parsed_config.find_objects(re_pattern)) return object_list def find_references(pointer_objects, pointee_objects): # Finds each pointer that points to a pointee object and returns a list # of tuples that contains # (pointer ciscoconfparse line obj, pointee ciscoconfparse line obj, # pointee name list). pointer_to_pointee_data = [] for pointer in pointer_objects: pointee_name_list = [] for pointee in pointee_objects: if re.search(r' ' + pointee[1] + r'( |$)', pointer.text): pointee_name_list.append(pointee[1]) pointee_name_list = list(set(pointee_name_list)) pointer_to_pointee_data.append((pointer, pointee_name_list)) del pointee_name_list #pprint(pointer_to_pointee_data) return pointer_to_pointee_data def main(): filename = 'startup-config.txt' parsed_config = read_in_file(filename) pointee_objects = find_pointees(parsed_config) #pointer_objects = find_pointers(parsed_config) generate_pointee_markdown(parsed_config, pointee_objects) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv from openerp.tools.translate import _ class pos_sales_user_today_current_user(osv.osv_memory): _name = 'pos.sales.user.today.current_user' _description = 'Sales Details' _columns = { } def print_report(self, cr, uid, ids, context=None): """ To get the date and print the report @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param context: A standard dictionary @return : retrun report """ if context is None: context = {} datas = {'ids': context.get('active_ids', [])} res = self.read(cr, uid, ids, [], context=context) res = res and res[0] or {} datas['form'] = res return { 'type': 'ir.actions.report.xml', 'report_name': 'pos.sales.user.today.current.user', 'datas': datas, } pos_sales_user_today_current_user() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file /// Declaration of the FormatStringConverter class which is used to convert /// printf format strings to C++ std::formatter format strings. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_FORMATSTRINGCONVERTER_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_FORMATSTRINGCONVERTER_H #include "clang/AST/ASTContext.h" #include "clang/AST/FormatString.h" #include "clang/ASTMatchers/ASTMatchers.h" #include <string> namespace clang::tidy::utils { /// Convert a printf-style format string to a std::formatter-style one, and /// prepare any casts that are required to wrap the arguments to retain printf /// compatibility. This class is expecting to work on the already-cooked format /// string (i.e. all the escapes have been converted) so we have to convert them /// back. This means that we might not convert them back using the same form. 
class FormatStringConverter : public clang::analyze_format_string::FormatStringHandler { public: using ConversionSpecifier = clang::analyze_format_string::ConversionSpecifier; using PrintfSpecifier = analyze_printf::PrintfSpecifier; struct Configuration { bool StrictMode = false; bool AllowTrailingNewlineRemoval = false; }; FormatStringConverter(ASTContext *Context, const CallExpr *Call, unsigned FormatArgOffset, Configuration Config, const LangOptions &LO, SourceManager &SM, Preprocessor &PP); bool canApply() const { return ConversionNotPossibleReason.empty(); } const std::string &conversionNotPossibleReason() const { return ConversionNotPossibleReason; } void applyFixes(DiagnosticBuilder &Diag, SourceManager &SM); bool usePrintNewlineFunction() const { return UsePrintNewlineFunction; } private: ASTContext *Context; const Configuration Config; const bool CastMismatchedIntegerTypes; const Expr *const *Args; const unsigned NumArgs; unsigned ArgsOffset; const LangOptions &LangOpts; std::string ConversionNotPossibleReason; bool FormatStringNeededRewriting = false; bool UsePrintNewlineFunction = false; size_t PrintfFormatStringPos = 0U; StringRef PrintfFormatString; /// Lazily-created c_str() call matcher std::optional<clang::ast_matchers::StatementMatcher> StringCStrCallExprMatcher; const StringLiteral *FormatExpr; std::string StandardFormatString; /// Casts to be used to wrap arguments to retain printf compatibility. struct ArgumentFix { unsigned ArgIndex; std::string Fix; // We currently need this for emplace_back. Roll on C++20. explicit ArgumentFix(unsigned ArgIndex, std::string Fix) : ArgIndex(ArgIndex), Fix(std::move(Fix)) {} }; std::vector<ArgumentFix> ArgFixes; std::vector<clang::ast_matchers::BoundNodes> ArgCStrRemovals; // Argument rotations to cope with the fact that std::print puts the value to // be formatted first and the width and precision afterwards whereas printf // puts the width and preicision first. 
std::vector<std::tuple<unsigned, unsigned>> ArgRotates; void emitAlignment(const PrintfSpecifier &FS, std::string &FormatSpec); void emitSign(const PrintfSpecifier &FS, std::string &FormatSpec); void emitAlternativeForm(const PrintfSpecifier &FS, std::string &FormatSpec); void emitFieldWidth(const PrintfSpecifier &FS, std::string &FormatSpec); void emitPrecision(const PrintfSpecifier &FS, std::string &FormatSpec); void emitStringArgument(unsigned ArgIndex, const Expr *Arg); bool emitIntegerArgument(ConversionSpecifier::Kind ArgKind, const Expr *Arg, unsigned ArgIndex, std::string &FormatSpec); bool emitType(const PrintfSpecifier &FS, const Expr *Arg, std::string &FormatSpec); bool convertArgument(const PrintfSpecifier &FS, const Expr *Arg, std::string &StandardFormatString); void maybeRotateArguments(const PrintfSpecifier &FS); bool HandlePrintfSpecifier(const PrintfSpecifier &FS, const char *StartSpecifier, unsigned SpecifierLen, const TargetInfo &Target) override; void appendFormatText(StringRef Text); void finalizeFormatText(); static std::optional<StringRef> formatStringContainsUnreplaceableMacro(const CallExpr *CallExpr, const StringLiteral *FormatExpr, SourceManager &SM, Preprocessor &PP); bool conversionNotPossible(std::string Reason) { ConversionNotPossibleReason = std::move(Reason); return false; } }; } // namespace clang::tidy::utils #endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_UTILS_FORMATSTRINGCONVERTER_H
c
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/utils/FormatStringConverter.h
import numpy as np from matplotlib import pyplot as plt from scipy import stats from sklearn.tree import DecisionTreeClassifier def plot_surface(model, X, y): n_classes = 3 plot_colors = "ryb" cmap = plt.cm.RdYlBu plot_step = 0.02 plot_step_coarser = 0.5 x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) if isinstance(model, DecisionTreeClassifier): Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap) else: estimator_alpha = 1.0 / len(model.estimators_) for tree in model.estimators_: Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser), np.arange(y_min, y_max, plot_step_coarser)) Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape) cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none") for i, c in zip(range(n_classes), plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=cmap) plt.show() def plot_outlier_detector(clf, X, ground_truth): n_outliers = (ground_truth == 0).sum() outliers_fraction = 1. 
* n_outliers / len(ground_truth) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 500), np.linspace(y_min, y_max, 500)) y_pred = clf.decision_function(X).ravel() threshold = stats.scoreatpercentile(y_pred, 100 * outliers_fraction) y_pred = y_pred > threshold Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7), cmap=plt.cm.Blues_r) a = plt.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red') plt.contourf(xx, yy, Z, levels=[threshold, Z.max()], colors='orange') b = plt.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white') c = plt.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black') plt.legend( [a.collections[0], b, c], ['Learned decision function', 'True inliers', 'True outliers']) plt.show()
unknown
codeparrot/codeparrot-clean
import type * as ts from "./_namespaces/ts.js"; import type { ApplicableRefactorInfo, CompilerOptionsValue, CompletionsTriggerCharacter, EndOfLineState, FileExtensionInfo, HighlightSpanKind, InlayHintKind, InteractiveRefactorArguments, OutputFile, RefactorActionInfo, RefactorTriggerReason, RenameInfoFailure, RenameLocation, ScriptElementKind, ScriptKind, SignatureHelpCharacterTypedReason, SignatureHelpInvokedReason, SignatureHelpParameter, SignatureHelpRetriggerCharacter, SignatureHelpRetriggeredReason, SignatureHelpTriggerCharacter, SignatureHelpTriggerReason, SymbolDisplayPart, TextChange, TextInsertion, TodoComment, TodoCommentDescriptor, TypeAcquisition, UserPreferences, } from "./_namespaces/ts.js"; import { ClassificationType, CompletionTriggerKind, OrganizeImportsMode, SemicolonPreference, } from "./_namespaces/ts.js"; // These types/enums used to be defined in duplicate here and exported. They are re-exported to avoid breaking changes. export { ApplicableRefactorInfo, ClassificationType, CompletionsTriggerCharacter, CompletionTriggerKind, InlayHintKind, OrganizeImportsMode, RefactorActionInfo, RefactorTriggerReason, RenameInfoFailure, SemicolonPreference, SignatureHelpCharacterTypedReason, SignatureHelpInvokedReason, SignatureHelpParameter, SignatureHelpRetriggerCharacter, SignatureHelpRetriggeredReason, SignatureHelpTriggerCharacter, SignatureHelpTriggerReason, SymbolDisplayPart, UserPreferences }; type ChangeStringIndexSignature<T, NewStringIndexSignatureType> = { [K in keyof T]: string extends K ? NewStringIndexSignatureType : T[K]; }; type ChangePropertyTypes<T, Substitutions extends { [K in keyof T]?: any; }> = { [K in keyof T]: K extends keyof Substitutions ? 
Substitutions[K] : T[K]; }; // Declaration module describing the TypeScript Server protocol export const enum CommandTypes { JsxClosingTag = "jsxClosingTag", LinkedEditingRange = "linkedEditingRange", Brace = "brace", /** @internal */ BraceFull = "brace-full", BraceCompletion = "braceCompletion", GetSpanOfEnclosingComment = "getSpanOfEnclosingComment", Change = "change", Close = "close", /** @deprecated Prefer CompletionInfo -- see comment on CompletionsResponse */ Completions = "completions", CompletionInfo = "completionInfo", /** @internal */ CompletionsFull = "completions-full", CompletionDetails = "completionEntryDetails", /** @internal */ CompletionDetailsFull = "completionEntryDetails-full", CompileOnSaveAffectedFileList = "compileOnSaveAffectedFileList", CompileOnSaveEmitFile = "compileOnSaveEmitFile", Configure = "configure", Definition = "definition", /** @internal */ DefinitionFull = "definition-full", DefinitionAndBoundSpan = "definitionAndBoundSpan", /** @internal */ DefinitionAndBoundSpanFull = "definitionAndBoundSpan-full", Implementation = "implementation", /** @internal */ ImplementationFull = "implementation-full", /** @internal */ EmitOutput = "emit-output", Exit = "exit", FileReferences = "fileReferences", /** @internal */ FileReferencesFull = "fileReferences-full", Format = "format", Formatonkey = "formatonkey", /** @internal */ FormatFull = "format-full", /** @internal */ FormatonkeyFull = "formatonkey-full", /** @internal */ FormatRangeFull = "formatRange-full", Geterr = "geterr", GeterrForProject = "geterrForProject", SemanticDiagnosticsSync = "semanticDiagnosticsSync", SyntacticDiagnosticsSync = "syntacticDiagnosticsSync", SuggestionDiagnosticsSync = "suggestionDiagnosticsSync", NavBar = "navbar", /** @internal */ NavBarFull = "navbar-full", Navto = "navto", /** @internal */ NavtoFull = "navto-full", NavTree = "navtree", NavTreeFull = "navtree-full", DocumentHighlights = "documentHighlights", /** @internal */ DocumentHighlightsFull = 
"documentHighlights-full", Open = "open", Quickinfo = "quickinfo", /** @internal */ QuickinfoFull = "quickinfo-full", References = "references", /** @internal */ ReferencesFull = "references-full", Reload = "reload", Rename = "rename", /** @internal */ RenameInfoFull = "rename-full", /** @internal */ RenameLocationsFull = "renameLocations-full", Saveto = "saveto", SignatureHelp = "signatureHelp", /** @internal */ SignatureHelpFull = "signatureHelp-full", FindSourceDefinition = "findSourceDefinition", Status = "status", TypeDefinition = "typeDefinition", ProjectInfo = "projectInfo", ReloadProjects = "reloadProjects", Unknown = "unknown", OpenExternalProject = "openExternalProject", OpenExternalProjects = "openExternalProjects", CloseExternalProject = "closeExternalProject", /** @internal */ SynchronizeProjectList = "synchronizeProjectList", /** @internal */ ApplyChangedToOpenFiles = "applyChangedToOpenFiles", UpdateOpen = "updateOpen", /** @internal */ EncodedSyntacticClassificationsFull = "encodedSyntacticClassifications-full", /** @internal */ EncodedSemanticClassificationsFull = "encodedSemanticClassifications-full", /** @internal */ Cleanup = "cleanup", GetOutliningSpans = "getOutliningSpans", /** @internal */ GetOutliningSpansFull = "outliningSpans", // Full command name is different for backward compatibility purposes TodoComments = "todoComments", Indentation = "indentation", DocCommentTemplate = "docCommentTemplate", /** @internal */ CompilerOptionsDiagnosticsFull = "compilerOptionsDiagnostics-full", /** @internal */ NameOrDottedNameSpan = "nameOrDottedNameSpan", /** @internal */ BreakpointStatement = "breakpointStatement", CompilerOptionsForInferredProjects = "compilerOptionsForInferredProjects", GetCodeFixes = "getCodeFixes", /** @internal */ GetCodeFixesFull = "getCodeFixes-full", GetCombinedCodeFix = "getCombinedCodeFix", /** @internal */ GetCombinedCodeFixFull = "getCombinedCodeFix-full", ApplyCodeActionCommand = "applyCodeActionCommand", 
GetSupportedCodeFixes = "getSupportedCodeFixes", GetApplicableRefactors = "getApplicableRefactors", GetEditsForRefactor = "getEditsForRefactor", GetMoveToRefactoringFileSuggestions = "getMoveToRefactoringFileSuggestions", PreparePasteEdits = "preparePasteEdits", GetPasteEdits = "getPasteEdits", /** @internal */ GetEditsForRefactorFull = "getEditsForRefactor-full", OrganizeImports = "organizeImports", /** @internal */ OrganizeImportsFull = "organizeImports-full", GetEditsForFileRename = "getEditsForFileRename", /** @internal */ GetEditsForFileRenameFull = "getEditsForFileRename-full", ConfigurePlugin = "configurePlugin", SelectionRange = "selectionRange", /** @internal */ SelectionRangeFull = "selectionRange-full", ToggleLineComment = "toggleLineComment", /** @internal */ ToggleLineCommentFull = "toggleLineComment-full", ToggleMultilineComment = "toggleMultilineComment", /** @internal */ ToggleMultilineCommentFull = "toggleMultilineComment-full", CommentSelection = "commentSelection", /** @internal */ CommentSelectionFull = "commentSelection-full", UncommentSelection = "uncommentSelection", /** @internal */ UncommentSelectionFull = "uncommentSelection-full", PrepareCallHierarchy = "prepareCallHierarchy", ProvideCallHierarchyIncomingCalls = "provideCallHierarchyIncomingCalls", ProvideCallHierarchyOutgoingCalls = "provideCallHierarchyOutgoingCalls", ProvideInlayHints = "provideInlayHints", WatchChange = "watchChange", MapCode = "mapCode", /** @internal */ CopilotRelated = "copilotRelated", } /** * A TypeScript Server message */ export interface Message { /** * Sequence number of the message */ seq: number; /** * One of "request", "response", or "event" */ type: "request" | "response" | "event"; } /** * Client-initiated request message */ export interface Request extends Message { type: "request"; /** * The command to execute */ command: string; /** * Object containing arguments for the command */ arguments?: any; } /** * Request to reload the project structure for all 
the opened files */ export interface ReloadProjectsRequest extends Request { command: CommandTypes.ReloadProjects; } /** * Server-initiated event message */ export interface Event extends Message { type: "event"; /** * Name of event */ event: string; /** * Event-specific information */ body?: any; } /** * Response by server to client request message. */ export interface Response extends Message { type: "response"; /** * Sequence number of the request message. */ request_seq: number; /** * Outcome of the request. */ success: boolean; /** * The command requested. */ command: string; /** * If success === false, this should always be provided. * Otherwise, may (or may not) contain a success message. */ message?: string; /** * Contains message body if success === true. */ body?: any; /** * Contains extra information that plugin can include to be passed on */ metadata?: unknown; /** * Exposes information about the performance of this request-response pair. */ performanceData?: PerformanceData; } export interface PerformanceData { /** * Time spent updating the program graph, in milliseconds. */ updateGraphDurationMs?: number; /** * The time spent creating or updating the auto-import program, in milliseconds. */ createAutoImportProviderProgramDurationMs?: number; /** * The time spent computing diagnostics, in milliseconds. */ diagnosticsDuration?: FileDiagnosticPerformanceData[]; } /** * Time spent computing each kind of diagnostics, in milliseconds. */ export type DiagnosticPerformanceData = { [Kind in DiagnosticEventKind]?: number; }; export interface FileDiagnosticPerformanceData extends DiagnosticPerformanceData { /** * The file for which the performance data is reported. */ file: string; } /** * Arguments for FileRequest messages. */ export interface FileRequestArgs { /** * The file for the request (absolute pathname required). 
*/ file: string; /* * Optional name of project that contains file */ projectFileName?: string; } export interface StatusRequest extends Request { command: CommandTypes.Status; } export interface StatusResponseBody { /** * The TypeScript version (`ts.version`). */ version: string; } /** * Response to StatusRequest */ export interface StatusResponse extends Response { body: StatusResponseBody; } /** * Requests a JS Doc comment template for a given position */ export interface DocCommentTemplateRequest extends FileLocationRequest { command: CommandTypes.DocCommentTemplate; } /** * Response to DocCommentTemplateRequest */ export interface DocCommandTemplateResponse extends Response { body?: TextInsertion; } /** * A request to get TODO comments from the file */ export interface TodoCommentRequest extends FileRequest { command: CommandTypes.TodoComments; arguments: TodoCommentRequestArgs; } /** * Arguments for TodoCommentRequest request. */ export interface TodoCommentRequestArgs extends FileRequestArgs { /** * Array of target TodoCommentDescriptors that describes TODO comments to be found */ descriptors: TodoCommentDescriptor[]; } /** * Response for TodoCommentRequest request. */ export interface TodoCommentsResponse extends Response { body?: TodoComment[]; } /** * A request to determine if the caret is inside a comment. */ export interface SpanOfEnclosingCommentRequest extends FileLocationRequest { command: CommandTypes.GetSpanOfEnclosingComment; arguments: SpanOfEnclosingCommentRequestArgs; } export interface SpanOfEnclosingCommentRequestArgs extends FileLocationRequestArgs { /** * Requires that the enclosing span be a multi-line comment, or else the request returns undefined. */ onlyMultiLine: boolean; } /** * Request to obtain outlining spans in file. 
*/ export interface OutliningSpansRequest extends FileRequest { command: CommandTypes.GetOutliningSpans; } export type OutliningSpan = ChangePropertyTypes<ts.OutliningSpan, { textSpan: TextSpan; hintSpan: TextSpan; }>; /** * Response to OutliningSpansRequest request. */ export interface OutliningSpansResponse extends Response { body?: OutliningSpan[]; } /** * Request to obtain outlining spans in file. * * @internal */ export interface OutliningSpansRequestFull extends FileRequest { command: CommandTypes.GetOutliningSpansFull; } /** * Response to OutliningSpansRequest request. * * @internal @knipignore */ export interface OutliningSpansResponseFull extends Response { body?: ts.OutliningSpan[]; } /** * A request to get indentation for a location in file */ export interface IndentationRequest extends FileLocationRequest { command: CommandTypes.Indentation; arguments: IndentationRequestArgs; } /** * Response for IndentationRequest request. */ export interface IndentationResponse extends Response { body?: IndentationResult; } /** * Indentation result representing where indentation should be placed */ export interface IndentationResult { /** * The base position in the document that the indent should be relative to */ position: number; /** * The number of columns the indent should be at relative to the position's column. */ indentation: number; } /** * Arguments for IndentationRequest request. */ export interface IndentationRequestArgs extends FileLocationRequestArgs { /** * An optional set of settings to be used when computing indentation. * If argument is omitted - then it will use settings for file that were previously set via 'configure' request or global settings. */ options?: EditorSettings; } /** * Arguments for ProjectInfoRequest request. 
*/ export interface ProjectInfoRequestArgs extends FileRequestArgs { /** * Indicate if the file name list of the project is needed */ needFileNameList: boolean; /** * if true returns details about default configured project calculation */ needDefaultConfiguredProjectInfo?: boolean; } /** * A request to get the project information of the current file. */ export interface ProjectInfoRequest extends Request { command: CommandTypes.ProjectInfo; arguments: ProjectInfoRequestArgs; } /** * A request to retrieve compiler options diagnostics for a project */ export interface CompilerOptionsDiagnosticsRequest extends Request { arguments: CompilerOptionsDiagnosticsRequestArgs; } /** * Arguments for CompilerOptionsDiagnosticsRequest request. */ export interface CompilerOptionsDiagnosticsRequestArgs { /** * Name of the project to retrieve compiler options diagnostics. */ projectFileName: string; } /** * Details about the default project for the file if tsconfig file is found */ export interface DefaultConfiguredProjectInfo { /** List of config files looked and did not match because file was not part of root file names */ notMatchedByConfig?: readonly string[]; /** List of projects which were loaded but file was not part of the project or is file from referenced project */ notInProject?: readonly string[]; /** Configured project used as default */ defaultProject?: string; } /** * Response message body for "projectInfo" request */ export interface ProjectInfo { /** * For configured project, this is the normalized path of the 'tsconfig.json' file * For inferred project, this is undefined */ configFileName: string; /** * The list of normalized file name in the project, including 'lib.d.ts' */ fileNames?: string[]; /** * Indicates if the project has a active language service instance */ languageServiceDisabled?: boolean; /** * Information about default project */ configuredProjectInfo?: DefaultConfiguredProjectInfo; } /** * Represents diagnostic info that includes location of 
diagnostic in two forms * - start position and length of the error span * - startLocation and endLocation - a pair of Location objects that store start/end line and offset of the error span. */ export interface DiagnosticWithLinePosition { message: string; start: number; length: number; startLocation: Location; endLocation: Location; category: string; code: number; /** May store more in future. For now, this will simply be `true` to indicate when a diagnostic is an unused-identifier diagnostic. */ reportsUnnecessary?: {}; reportsDeprecated?: {}; relatedInformation?: DiagnosticRelatedInformation[]; } /** * Response message for "projectInfo" request */ export interface ProjectInfoResponse extends Response { body?: ProjectInfo; } /** * Request whose sole parameter is a file name. */ export interface FileRequest extends Request { arguments: FileRequestArgs; } /** * Instances of this interface specify a location in a source file: * (file, line, character offset), where line and character offset are 1-based. */ export interface FileLocationRequestArgs extends FileRequestArgs { /** * The line number for the request (1-based). */ line: number; /** * The character offset (on the line) for the request (1-based). */ offset: number; /** * Position (can be specified instead of line/offset pair) * * @internal */ position?: number; } export type FileLocationOrRangeRequestArgs = FileLocationRequestArgs | FileRangeRequestArgs; /** * Request refactorings at a given position or selection area. */ export interface GetApplicableRefactorsRequest extends Request { command: CommandTypes.GetApplicableRefactors; arguments: GetApplicableRefactorsRequestArgs; } export type GetApplicableRefactorsRequestArgs = FileLocationOrRangeRequestArgs & { triggerReason?: RefactorTriggerReason; kind?: string; /** * Include refactor actions that require additional arguments to be passed when * calling 'GetEditsForRefactor'. 
When true, clients should inspect the * `isInteractive` property of each returned `RefactorActionInfo` * and ensure they are able to collect the appropriate arguments for any * interactive refactor before offering it. */ includeInteractiveActions?: boolean; }; /** * Response is a list of available refactorings. * Each refactoring exposes one or more "Actions"; a user selects one action to invoke a refactoring */ export interface GetApplicableRefactorsResponse extends Response { body?: ApplicableRefactorInfo[]; } /** * Request refactorings at a given position or selection area to move to an existing file. */ export interface GetMoveToRefactoringFileSuggestionsRequest extends Request { command: CommandTypes.GetMoveToRefactoringFileSuggestions; arguments: GetMoveToRefactoringFileSuggestionsRequestArgs; } export type GetMoveToRefactoringFileSuggestionsRequestArgs = FileLocationOrRangeRequestArgs & { kind?: string; }; /** * Response is a list of available files. * Each refactoring exposes one or more "Actions"; a user selects one action to invoke a refactoring */ export interface GetMoveToRefactoringFileSuggestions extends Response { body: { newFileName: string; files: string[]; }; } /** * Request to check if `pasteEdits` should be provided for a given location post copying text from that location. */ export interface PreparePasteEditsRequest extends FileRequest { command: CommandTypes.PreparePasteEdits; arguments: PreparePasteEditsRequestArgs; } export interface PreparePasteEditsRequestArgs extends FileRequestArgs { copiedTextSpan: TextSpan[]; } export interface PreparePasteEditsResponse extends Response { body: boolean; } /** * Request refactorings at a given position post pasting text from some other location. */ export interface GetPasteEditsRequest extends Request { command: CommandTypes.GetPasteEdits; arguments: GetPasteEditsRequestArgs; } export interface GetPasteEditsRequestArgs extends FileRequestArgs { /** The text that gets pasted in a file. 
*/ pastedText: string[]; /** Locations of where the `pastedText` gets added in a file. If the length of the `pastedText` and `pastedLocations` are not the same, * then the `pastedText` is combined into one and added at all the `pastedLocations`. */ pasteLocations: TextSpan[]; /** The source location of each `pastedText`. If present, the length of `spans` must be equal to the length of `pastedText`. */ copiedFrom?: { file: string; spans: TextSpan[]; }; } export interface GetPasteEditsResponse extends Response { body: PasteEditsAction; } export interface PasteEditsAction { edits: FileCodeEdits[]; fixId?: {}; } export interface GetEditsForRefactorRequest extends Request { command: CommandTypes.GetEditsForRefactor; arguments: GetEditsForRefactorRequestArgs; } /** * Request the edits that a particular refactoring action produces. * Callers must specify the name of the refactor and the name of the action. */ export type GetEditsForRefactorRequestArgs = FileLocationOrRangeRequestArgs & { /* The 'name' property from the refactoring that offered this action */ refactor: string; /* The 'name' property from the refactoring action */ action: string; /* Arguments for interactive action */ interactiveRefactorArguments?: InteractiveRefactorArguments; }; export interface GetEditsForRefactorResponse extends Response { body?: RefactorEditInfo; } export interface RefactorEditInfo { edits: FileCodeEdits[]; /** * An optional location where the editor should start a rename operation once * the refactoring edits have been applied */ renameLocation?: Location; renameFilename?: string; notApplicableReason?: string; } /** * Organize imports by: * 1) Removing unused imports * 2) Coalescing imports from the same module * 3) Sorting imports */ export interface OrganizeImportsRequest extends Request { command: CommandTypes.OrganizeImports; arguments: OrganizeImportsRequestArgs; } export type OrganizeImportsScope = GetCombinedCodeFixScope; export interface OrganizeImportsRequestArgs { scope: 
OrganizeImportsScope; /** @deprecated Use `mode` instead */ skipDestructiveCodeActions?: boolean; mode?: OrganizeImportsMode; } export interface OrganizeImportsResponse extends Response { body: readonly FileCodeEdits[]; } export interface GetEditsForFileRenameRequest extends Request { command: CommandTypes.GetEditsForFileRename; arguments: GetEditsForFileRenameRequestArgs; } /** Note: Paths may also be directories. */ export interface GetEditsForFileRenameRequestArgs { readonly oldFilePath: string; readonly newFilePath: string; } export interface GetEditsForFileRenameResponse extends Response { body: readonly FileCodeEdits[]; } /** * Request for the available codefixes at a specific position. */ export interface CodeFixRequest extends Request { command: CommandTypes.GetCodeFixes; arguments: CodeFixRequestArgs; } export interface GetCombinedCodeFixRequest extends Request { command: CommandTypes.GetCombinedCodeFix; arguments: GetCombinedCodeFixRequestArgs; } export interface GetCombinedCodeFixResponse extends Response { body: CombinedCodeActions; } export interface ApplyCodeActionCommandRequest extends Request { command: CommandTypes.ApplyCodeActionCommand; arguments: ApplyCodeActionCommandRequestArgs; } // All we need is the `success` and `message` fields of Response. export interface ApplyCodeActionCommandResponse extends Response {} export interface FileRangeRequestArgs extends FileRequestArgs, FileRange { /** * Position (can be specified instead of line/offset pair) * * @internal */ startPosition?: number; /** * Position (can be specified instead of line/offset pair) * * @internal */ endPosition?: number; } /** * Instances of this interface specify errorcodes on a specific location in a sourcefile. */ export interface CodeFixRequestArgs extends FileRangeRequestArgs { /** * Errorcodes we want to get the fixes for. 
*/ errorCodes: readonly number[]; } export interface GetCombinedCodeFixRequestArgs { scope: GetCombinedCodeFixScope; fixId: {}; } export interface GetCombinedCodeFixScope { type: "file"; args: FileRequestArgs; } export interface ApplyCodeActionCommandRequestArgs { /** May also be an array of commands. */ command: {}; } /** * Response for GetCodeFixes request. */ export interface GetCodeFixesResponse extends Response { body?: CodeAction[]; } /** * A request whose arguments specify a file location (file, line, col). */ export interface FileLocationRequest extends FileRequest { arguments: FileLocationRequestArgs; } /** * A request to get codes of supported code fixes. */ export interface GetSupportedCodeFixesRequest extends Request { command: CommandTypes.GetSupportedCodeFixes; arguments?: Partial<FileRequestArgs>; } /** * A response for GetSupportedCodeFixesRequest request. */ export interface GetSupportedCodeFixesResponse extends Response { /** * List of error codes supported by the server. */ body?: string[]; } /** * A request to get encoded Syntactic classifications for a span in the file * * @internal */ export interface EncodedSyntacticClassificationsRequest extends FileRequest { arguments: EncodedSyntacticClassificationsRequestArgs; } /** * Arguments for EncodedSyntacticClassificationsRequest request. * * @internal */ export interface EncodedSyntacticClassificationsRequestArgs extends FileRequestArgs { /** * Start position of the span. */ start: number; /** * Length of the span. */ length: number; } /** * A request to get encoded semantic classifications for a span in the file */ export interface EncodedSemanticClassificationsRequest extends FileRequest { arguments: EncodedSemanticClassificationsRequestArgs; } /** * Arguments for EncodedSemanticClassificationsRequest request. */ export interface EncodedSemanticClassificationsRequestArgs extends FileRequestArgs { /** * Start position of the span. */ start: number; /** * Length of the span. 
*/ length: number; /** * Optional parameter for the semantic highlighting response, if absent it * defaults to "original". */ format?: "original" | "2020"; } /** The response for a EncodedSemanticClassificationsRequest */ export interface EncodedSemanticClassificationsResponse extends Response { body?: EncodedSemanticClassificationsResponseBody; } /** * Implementation response message. Gives series of text spans depending on the format ar. */ export interface EncodedSemanticClassificationsResponseBody { endOfLineState: EndOfLineState; spans: number[]; } /** * Arguments in document highlight request; include: filesToSearch, file, * line, offset. */ export interface DocumentHighlightsRequestArgs extends FileLocationRequestArgs { /** * List of files to search for document highlights. */ filesToSearch: string[]; } /** * Go to definition request; value of command field is * "definition". Return response giving the file locations that * define the symbol found in file at location line, col. */ export interface DefinitionRequest extends FileLocationRequest { command: CommandTypes.Definition; } export interface DefinitionAndBoundSpanRequest extends FileLocationRequest { readonly command: CommandTypes.DefinitionAndBoundSpan; } export interface FindSourceDefinitionRequest extends FileLocationRequest { readonly command: CommandTypes.FindSourceDefinition; } export interface DefinitionAndBoundSpanResponse extends Response { readonly body: DefinitionInfoAndBoundSpan; } /** @internal */ export interface EmitOutputRequest extends FileRequest { command: CommandTypes.EmitOutput; arguments: EmitOutputRequestArgs; } /** @internal */ export interface EmitOutputRequestArgs extends FileRequestArgs { includeLinePosition?: boolean; /** if true - return response as object with emitSkipped and diagnostics */ richResponse?: boolean; } /** @internal */ export interface EmitOutputResponse extends Response { readonly body: EmitOutput | ts.EmitOutput; } /** @internal */ export interface 
EmitOutput { outputFiles: OutputFile[]; emitSkipped: boolean; diagnostics: Diagnostic[] | DiagnosticWithLinePosition[]; } /** * Go to type request; value of command field is * "typeDefinition". Return response giving the file locations that * define the type for the symbol found in file at location line, col. */ export interface TypeDefinitionRequest extends FileLocationRequest { command: CommandTypes.TypeDefinition; } /** * Go to implementation request; value of command field is * "implementation". Return response giving the file locations that * implement the symbol found in file at location line, col. */ export interface ImplementationRequest extends FileLocationRequest { command: CommandTypes.Implementation; } /** * Location in source code expressed as (one-based) line and (one-based) column offset. */ export interface Location { line: number; offset: number; } /** * Object found in response messages defining a span of text in source code. */ export interface TextSpan { /** * First character of the definition. */ start: Location; /** * One character past last character of the definition. */ end: Location; } /** * Object found in response messages defining a span of text in a specific source file. */ export interface FileSpan extends TextSpan { /** * File containing text span. */ file: string; } export interface JSDocTagInfo { /** Name of the JSDoc tag */ name: string; /** * Comment text after the JSDoc tag -- the text after the tag name until the next tag or end of comment * Display parts when UserPreferences.displayPartsForJSDoc is true, flattened to string otherwise. */ text?: string | SymbolDisplayPart[]; } export interface TextSpanWithContext extends TextSpan { contextStart?: Location; contextEnd?: Location; } export interface FileSpanWithContext extends FileSpan, TextSpanWithContext { } export interface DefinitionInfo extends FileSpanWithContext { /** * When true, the file may or may not exist. 
*/ unverified?: boolean; } export interface DefinitionInfoAndBoundSpan { definitions: readonly DefinitionInfo[]; textSpan: TextSpan; } /** * Definition response message. Gives text range for definition. */ export interface DefinitionResponse extends Response { body?: DefinitionInfo[]; } export interface DefinitionInfoAndBoundSpanResponse extends Response { body?: DefinitionInfoAndBoundSpan; } /** @deprecated Use `DefinitionInfoAndBoundSpanResponse` instead. */ export type DefinitionInfoAndBoundSpanReponse = DefinitionInfoAndBoundSpanResponse; /** * Definition response message. Gives text range for definition. */ export interface TypeDefinitionResponse extends Response { body?: FileSpanWithContext[]; } /** * Implementation response message. Gives text range for implementations. */ export interface ImplementationResponse extends Response { body?: FileSpanWithContext[]; } /** * Request to get brace completion for a location in the file. */ export interface BraceCompletionRequest extends FileLocationRequest { command: CommandTypes.BraceCompletion; arguments: BraceCompletionRequestArgs; } /** * Argument for BraceCompletionRequest request. 
*/ export interface BraceCompletionRequestArgs extends FileLocationRequestArgs { /** * Kind of opening brace */ openingBrace: string; } export interface JsxClosingTagRequest extends FileLocationRequest { readonly command: CommandTypes.JsxClosingTag; readonly arguments: JsxClosingTagRequestArgs; } export interface JsxClosingTagRequestArgs extends FileLocationRequestArgs {} export interface JsxClosingTagResponse extends Response { readonly body: TextInsertion; } export interface LinkedEditingRangeRequest extends FileLocationRequest { readonly command: CommandTypes.LinkedEditingRange; } export interface LinkedEditingRangesBody { ranges: TextSpan[]; wordPattern?: string; } export interface LinkedEditingRangeResponse extends Response { readonly body: LinkedEditingRangesBody; } /** * Get document highlights request; value of command field is * "documentHighlights". Return response giving spans that are relevant * in the file at a given line and column. */ export interface DocumentHighlightsRequest extends FileLocationRequest { command: CommandTypes.DocumentHighlights; arguments: DocumentHighlightsRequestArgs; } /** * Span augmented with extra information that denotes the kind of the highlighting to be used for span. */ export interface HighlightSpan extends TextSpanWithContext { kind: HighlightSpanKind; } /** * Represents a set of highligh spans for a give name */ export interface DocumentHighlightsItem { /** * File containing highlight spans. */ file: string; /** * Spans to highlight in file. */ highlightSpans: HighlightSpan[]; } /** * Response for a DocumentHighlightsRequest request. */ export interface DocumentHighlightsResponse extends Response { body?: DocumentHighlightsItem[]; } /** * Find references request; value of command field is * "references". Return response giving the file locations that * reference the symbol found in file at location line, col. 
*/ export interface ReferencesRequest extends FileLocationRequest { command: CommandTypes.References; } export interface ReferencesResponseItem extends FileSpanWithContext { /** * Text of line containing the reference. Including this * with the response avoids latency of editor loading files * to show text of reference line (the server already has loaded the referencing files). * * If {@link UserPreferences.disableLineTextInReferences} is enabled, the property won't be filled */ lineText?: string; /** * True if reference is a write location, false otherwise. */ isWriteAccess: boolean; /** * Present only if the search was triggered from a declaration. * True indicates that the references refers to the same symbol * (i.e. has the same meaning) as the declaration that began the * search. */ isDefinition?: boolean; } /** * The body of a "references" response message. */ export interface ReferencesResponseBody { /** * The file locations referencing the symbol. */ refs: readonly ReferencesResponseItem[]; /** * The name of the symbol. */ symbolName: string; /** * The start character offset of the symbol (on the line provided by the references request). */ symbolStartOffset: number; /** * The full display name of the symbol. */ symbolDisplayString: string; } /** * Response to "references" request. */ export interface ReferencesResponse extends Response { body?: ReferencesResponseBody; } export interface FileReferencesRequest extends FileRequest { command: CommandTypes.FileReferences; } export interface FileReferencesResponseBody { /** * The file locations referencing the symbol. */ refs: readonly ReferencesResponseItem[]; /** * The name of the symbol. */ symbolName: string; } export interface FileReferencesResponse extends Response { body?: FileReferencesResponseBody; } /** * Argument for RenameRequest request. */ export interface RenameRequestArgs extends FileLocationRequestArgs { /** * Should text at specified location be found/changed in comments? 
*/ findInComments?: boolean; /** * Should text at specified location be found/changed in strings? */ findInStrings?: boolean; } /** * Rename request; value of command field is "rename". Return * response giving the file locations that reference the symbol * found in file at location line, col. Also return full display * name of the symbol so that client can print it unambiguously. */ export interface RenameRequest extends FileLocationRequest { command: CommandTypes.Rename; arguments: RenameRequestArgs; } /** @internal */ export interface RenameFullRequest extends FileLocationRequest { readonly command: CommandTypes.RenameLocationsFull; readonly arguments: RenameRequestArgs; } /** @internal @knipignore */ export interface RenameFullResponse extends Response { readonly body: readonly RenameLocation[]; } /** * Information about the item to be renamed. */ export type RenameInfo = RenameInfoSuccess | RenameInfoFailure; export type RenameInfoSuccess = ChangePropertyTypes<ts.RenameInfoSuccess, { triggerSpan: TextSpan; }>; /** * A group of text spans, all in 'file'. */ export interface SpanGroup { /** The file to which the spans apply */ file: string; /** The text spans in this group */ locs: RenameTextSpan[]; } export interface RenameTextSpan extends TextSpanWithContext { readonly prefixText?: string; readonly suffixText?: string; } export interface RenameResponseBody { /** * Information about the item to be renamed. */ info: RenameInfo; /** * An array of span groups (one per file) that refer to the item to be renamed. */ locs: readonly SpanGroup[]; } /** * Rename response message. */ export interface RenameResponse extends Response { body?: RenameResponseBody; } /** * Represents a file in external project. * External project is project whose set of files, compilation options and open\close state * is maintained by the client (i.e. if all this data come from .csproj file in Visual Studio). 
* External project will exist even if all files in it are closed and should be closed explicitly. * If external project includes one or more tsconfig.json/jsconfig.json files then tsserver will * create configured project for every config file but will maintain a link that these projects were created * as a result of opening external project so they should be removed once external project is closed. */ export interface ExternalFile { /** * Name of file */ fileName: string; /** * Script kind of the file */ scriptKind?: ScriptKindName | ScriptKind; /** * Whether file has mixed content (i.e. .cshtml file that combines html markup with C#/JavaScript) */ hasMixedContent?: boolean; /** * Content of the file */ content?: string; } /** * Represents an external project */ export interface ExternalProject { /** * Project name */ projectFileName: string; /** * List of root files in project */ rootFiles: ExternalFile[]; /** * Compiler options for the project */ options: ExternalProjectCompilerOptions; /** * Explicitly specified type acquisition for the project */ typeAcquisition?: TypeAcquisition; } export interface CompileOnSaveMixin { /** * If compile on save is enabled for the project */ compileOnSave?: boolean; } /** * For external projects, some of the project settings are sent together with * compiler settings. */ export type ExternalProjectCompilerOptions = CompilerOptions & CompileOnSaveMixin & WatchOptions; /** * Contains information about current project version * * @internal */ export interface ProjectVersionInfo { /** * Project name */ projectName: string; /** * true if project is inferred or false if project is external or configured */ isInferred: boolean; /** * Project version */ version: number; /** * Current set of compiler options for project */ options: ts.CompilerOptions; /** * true if project language service is disabled */ languageServiceDisabled: boolean; /** * Filename of the last file analyzed before disabling the language service. 
undefined, if the language service is enabled. */ lastFileExceededProgramSize?: string; } export interface FileWithProjectReferenceRedirectInfo { /** * Name of file */ fileName: string; /** * True if the file is primarily included in a referenced project */ isSourceOfProjectReferenceRedirect: boolean; } /** * Represents a set of changes that happen in project */ export interface ProjectChanges { /** * List of added files */ added: string[] | FileWithProjectReferenceRedirectInfo[]; /** * List of removed files */ removed: string[] | FileWithProjectReferenceRedirectInfo[]; /** * List of updated files */ updated: string[] | FileWithProjectReferenceRedirectInfo[]; /** * List of files that have had their project reference redirect status updated * Only provided when the synchronizeProjectList request has includeProjectReferenceRedirectInfo set to true */ updatedRedirects?: FileWithProjectReferenceRedirectInfo[]; } /** * Describes set of files in the project. * info might be omitted in case of inferred projects * if files is set - then this is the entire set of files in the project * if changes is set - then this is the set of changes that should be applied to existing project * otherwise - assume that nothing is changed * * @internal */ export interface ProjectFiles { /** * Information about project version */ info?: ProjectVersionInfo; /** * List of files in project (might be omitted if current state of project can be computed using only information from 'changes') * This property will have type FileWithProjectReferenceRedirectInfo[] if includeProjectReferenceRedirectInfo is set to true in * the corresponding SynchronizeProjectList request; otherwise, it will have type string[]. */ files?: string[] | FileWithProjectReferenceRedirectInfo[]; /** * Set of changes in project (omitted if the entire set of files in project should be replaced) */ changes?: ProjectChanges; } /** * Combines project information with project level errors. 
* * @internal */ export interface ProjectFilesWithDiagnostics extends ProjectFiles { /** * List of errors in project */ projectErrors: DiagnosticWithLinePosition[]; } /** * Represents set of changes in open file * * @internal */ export interface ChangedOpenFile { /** * Name of file */ fileName: string; /** * List of changes that should be applied to known open file */ changes: TextChange[]; } /** * Information found in a configure request. */ export interface ConfigureRequestArguments { /** * Information about the host, for example 'Emacs 24.4' or * 'Sublime Text version 3075' */ hostInfo?: string; /** * If present, tab settings apply only to this file. */ file?: string; /** * The format options to use during formatting and other code editing features. */ formatOptions?: FormatCodeSettings; preferences?: UserPreferences; /** * The host's additional supported .js file extensions */ extraFileExtensions?: FileExtensionInfo[]; watchOptions?: WatchOptions; } export const enum WatchFileKind { FixedPollingInterval = "FixedPollingInterval", PriorityPollingInterval = "PriorityPollingInterval", DynamicPriorityPolling = "DynamicPriorityPolling", FixedChunkSizePolling = "FixedChunkSizePolling", UseFsEvents = "UseFsEvents", UseFsEventsOnParentDirectory = "UseFsEventsOnParentDirectory", } export const enum WatchDirectoryKind { UseFsEvents = "UseFsEvents", FixedPollingInterval = "FixedPollingInterval", DynamicPriorityPolling = "DynamicPriorityPolling", FixedChunkSizePolling = "FixedChunkSizePolling", } export const enum PollingWatchKind { FixedInterval = "FixedInterval", PriorityInterval = "PriorityInterval", DynamicPriority = "DynamicPriority", FixedChunkSize = "FixedChunkSize", } export interface WatchOptions { watchFile?: WatchFileKind | ts.WatchFileKind; watchDirectory?: WatchDirectoryKind | ts.WatchDirectoryKind; fallbackPolling?: PollingWatchKind | ts.PollingWatchKind; synchronousWatchDirectory?: boolean; excludeDirectories?: string[]; excludeFiles?: string[]; [option: 
string]: CompilerOptionsValue | undefined; } /** * Configure request; value of command field is "configure". Specifies * host information, such as host type, tab size, and indent size. */ export interface ConfigureRequest extends Request { command: CommandTypes.Configure; arguments: ConfigureRequestArguments; } /** * Response to "configure" request. This is just an acknowledgement, so * no body field is required. */ export interface ConfigureResponse extends Response { } export interface ConfigurePluginRequestArguments { pluginName: string; configuration: any; } export interface ConfigurePluginRequest extends Request { command: CommandTypes.ConfigurePlugin; arguments: ConfigurePluginRequestArguments; } export interface ConfigurePluginResponse extends Response { } export interface SelectionRangeRequest extends FileRequest { command: CommandTypes.SelectionRange; arguments: SelectionRangeRequestArgs; } export interface SelectionRangeRequestArgs extends FileRequestArgs { locations: Location[]; } export interface SelectionRangeResponse extends Response { body?: SelectionRange[]; } export interface SelectionRange { textSpan: TextSpan; parent?: SelectionRange; } export interface ToggleLineCommentRequest extends FileRequest { command: CommandTypes.ToggleLineComment; arguments: FileRangeRequestArgs; } export interface ToggleMultilineCommentRequest extends FileRequest { command: CommandTypes.ToggleMultilineComment; arguments: FileRangeRequestArgs; } export interface CommentSelectionRequest extends FileRequest { command: CommandTypes.CommentSelection; arguments: FileRangeRequestArgs; } export interface UncommentSelectionRequest extends FileRequest { command: CommandTypes.UncommentSelection; arguments: FileRangeRequestArgs; } /** * Information found in an "open" request. */ export interface OpenRequestArgs extends FileRequestArgs { /** * Used when a version of the file content is known to be more up to date than the one on disk. 
* Then the known content will be used upon opening instead of the disk copy */ fileContent?: string; /** * Used to specify the script kind of the file explicitly. It could be one of the following: * "TS", "JS", "TSX", "JSX" */ scriptKindName?: ScriptKindName; /** * Used to limit the searching for project config file. If given the searching will stop at this * root path; otherwise it will go all the way up to the dist root path. */ projectRootPath?: string; } export type ScriptKindName = "TS" | "JS" | "TSX" | "JSX"; /** * Open request; value of command field is "open". Notify the * server that the client has file open. The server will not * monitor the filesystem for changes in this file and will assume * that the client is updating the server (using the change and/or * reload messages) when the file changes. Server does not currently * send a response to an open request. */ export interface OpenRequest extends Request { command: CommandTypes.Open; arguments: OpenRequestArgs; } /** * Request to open or update external project */ export interface OpenExternalProjectRequest extends Request { command: CommandTypes.OpenExternalProject; arguments: OpenExternalProjectArgs; } /** * Arguments to OpenExternalProjectRequest request */ export type OpenExternalProjectArgs = ExternalProject; /** * Request to open multiple external projects */ export interface OpenExternalProjectsRequest extends Request { command: CommandTypes.OpenExternalProjects; arguments: OpenExternalProjectsArgs; } /** * Arguments to OpenExternalProjectsRequest */ export interface OpenExternalProjectsArgs { /** * List of external projects to open or update */ projects: ExternalProject[]; } /** * Response to OpenExternalProjectRequest request. This is just an acknowledgement, so * no body field is required. */ export interface OpenExternalProjectResponse extends Response { } /** * Response to OpenExternalProjectsRequest request. This is just an acknowledgement, so * no body field is required. 
*/ export interface OpenExternalProjectsResponse extends Response { } /** * Request to close external project. */ export interface CloseExternalProjectRequest extends Request { command: CommandTypes.CloseExternalProject; arguments: CloseExternalProjectRequestArgs; } /** * Arguments to CloseExternalProjectRequest request */ export interface CloseExternalProjectRequestArgs { /** * Name of the project to close */ projectFileName: string; } /** * Response to CloseExternalProjectRequest request. This is just an acknowledgement, so * no body field is required. */ export interface CloseExternalProjectResponse extends Response { } /** * Request to check if given list of projects is up-to-date and synchronize them if necessary * * @internal */ export interface SynchronizeProjectListRequest extends Request { arguments: SynchronizeProjectListRequestArgs; } /** * Arguments to SynchronizeProjectListRequest * * @internal */ export interface SynchronizeProjectListRequestArgs { /** * List of last known projects */ knownProjects: ProjectVersionInfo[]; /** * If true, response specifies whether or not each file in each project * is a source from a project reference redirect */ includeProjectReferenceRedirectInfo?: boolean; } /** * Request to synchronize list of open files with the client * * @internal */ export interface ApplyChangedToOpenFilesRequest extends Request { arguments: ApplyChangedToOpenFilesRequestArgs; } /** * Arguments to ApplyChangedToOpenFilesRequest * * @internal */ export interface ApplyChangedToOpenFilesRequestArgs { /** * List of newly open files */ openFiles?: ExternalFile[]; /** * List of open files that were changed */ changedFiles?: ChangedOpenFile[]; /** * List of files that were closed */ closedFiles?: string[]; } /** * Request to synchronize list of open files with the client */ export interface UpdateOpenRequest extends Request { command: CommandTypes.UpdateOpen; arguments: UpdateOpenRequestArgs; } /** * Arguments to UpdateOpenRequest */ export 
interface UpdateOpenRequestArgs { /** * List of newly open files */ openFiles?: OpenRequestArgs[]; /** * List of open files that were changed */ changedFiles?: FileCodeEdits[]; /** * List of files that were closed */ closedFiles?: string[]; } /** * External projects have a typeAcquisition option so they need to be added separately to compiler options for inferred projects. */ export type InferredProjectCompilerOptions = ExternalProjectCompilerOptions & TypeAcquisition; /** * Request to set compiler options for inferred projects. * External projects are opened / closed explicitly. * Configured projects are opened when user opens loose file that has 'tsconfig.json' or 'jsconfig.json' anywhere in one of containing folders. * This configuration file will be used to obtain a list of files and configuration settings for the project. * Inferred projects are created when user opens a loose file that is not the part of external project * or configured project and will contain only open file and transitive closure of referenced files if 'useOneInferredProject' is false, * or all open loose files and its transitive closure of referenced files if 'useOneInferredProject' is true. */ export interface SetCompilerOptionsForInferredProjectsRequest extends Request { command: CommandTypes.CompilerOptionsForInferredProjects; arguments: SetCompilerOptionsForInferredProjectsArgs; } /** * Argument for SetCompilerOptionsForInferredProjectsRequest request. */ export interface SetCompilerOptionsForInferredProjectsArgs { /** * Compiler options to be used with inferred projects. */ options: InferredProjectCompilerOptions; /** * Specifies the project root path used to scope compiler options. * It is an error to provide this property if the server has not been started with * `useInferredProjectPerProjectRoot` enabled. */ projectRootPath?: string; } /** * Response to SetCompilerOptionsForInferredProjectsResponse request. This is just an acknowledgement, so * no body field is required. 
*/ export interface SetCompilerOptionsForInferredProjectsResponse extends Response { } /** * Exit request; value of command field is "exit". Ask the server process * to exit. */ export interface ExitRequest extends Request { command: CommandTypes.Exit; } /** * Close request; value of command field is "close". Notify the * server that the client has closed a previously open file. If * file is still referenced by open files, the server will resume * monitoring the filesystem for changes to file. Server does not * currently send a response to a close request. */ export interface CloseRequest extends FileRequest { command: CommandTypes.Close; } export interface WatchChangeRequest extends Request { command: CommandTypes.WatchChange; arguments: WatchChangeRequestArgs | readonly WatchChangeRequestArgs[]; } export interface WatchChangeRequestArgs { id: number; created?: string[]; deleted?: string[]; updated?: string[]; } /** * Request to obtain the list of files that should be regenerated if target file is recompiled. * NOTE: this is a query-only operation and does not generate any output on disk. */ export interface CompileOnSaveAffectedFileListRequest extends FileRequest { command: CommandTypes.CompileOnSaveAffectedFileList; } /** * Contains a list of files that should be regenerated in a project */ export interface CompileOnSaveAffectedFileListSingleProject { /** * Project name */ projectFileName: string; /** * List of file names that should be recompiled */ fileNames: string[]; /** * true if project uses outFile or out compiler option */ projectUsesOutFile: boolean; } /** * Response for CompileOnSaveAffectedFileListRequest request; */ export interface CompileOnSaveAffectedFileListResponse extends Response { body: CompileOnSaveAffectedFileListSingleProject[]; } /** * Request to recompile the file. All generated outputs (.js, .d.ts or .js.map files) are written on disk. 
*/ export interface CompileOnSaveEmitFileRequest extends FileRequest { command: CommandTypes.CompileOnSaveEmitFile; arguments: CompileOnSaveEmitFileRequestArgs; } /** * Arguments for CompileOnSaveEmitFileRequest */ export interface CompileOnSaveEmitFileRequestArgs extends FileRequestArgs { /** * if true - then file should be recompiled even if it does not have any changes. */ forced?: boolean; includeLinePosition?: boolean; /** if true - return response as object with emitSkipped and diagnostics */ richResponse?: boolean; } export interface CompileOnSaveEmitFileResponse extends Response { body: boolean | EmitResult; } export interface EmitResult { emitSkipped: boolean; diagnostics: Diagnostic[] | DiagnosticWithLinePosition[]; } /** * Quickinfo request; value of command field is * "quickinfo". Return response giving a quick type and * documentation string for the symbol found in file at location * line, col. */ export interface QuickInfoRequest extends FileLocationRequest { command: CommandTypes.Quickinfo; arguments: FileLocationRequestArgs; } export interface QuickInfoRequestArgs extends FileLocationRequestArgs { /** * This controls how many levels of definitions will be expanded in the quick info response. * The default value is 0. */ verbosityLevel?: number; } /** * Body of QuickInfoResponse. */ export interface QuickInfoResponseBody { /** * The symbol's kind (such as 'className' or 'parameterName' or plain 'text'). */ kind: ScriptElementKind; /** * Optional modifiers for the kind (such as 'public'). */ kindModifiers: string; /** * Starting file location of symbol. */ start: Location; /** * One past last character of symbol. */ end: Location; /** * Type and kind of symbol. */ displayString: string; /** * Documentation associated with symbol. * Display parts when UserPreferences.displayPartsForJSDoc is true, flattened to string otherwise. */ documentation: string | SymbolDisplayPart[]; /** * JSDoc tags associated with symbol. 
*/ tags: JSDocTagInfo[]; /** * Whether the verbosity level can be increased for this quick info response. */ canIncreaseVerbosityLevel?: boolean; } /** * Quickinfo response message. */ export interface QuickInfoResponse extends Response { body?: QuickInfoResponseBody; } /** * Arguments for format messages. */ export interface FormatRequestArgs extends FileLocationRequestArgs { /** * Last line of range for which to format text in file. */ endLine: number; /** * Character offset on last line of range for which to format text in file. */ endOffset: number; /** * End position of the range for which to format text in file. * * @internal */ endPosition?: number; /** * Format options to be used. */ options?: FormatCodeSettings; } /** * Format request; value of command field is "format". Return * response giving zero or more edit instructions. The edit * instructions will be sorted in file order. Applying the edit * instructions in reverse to file will result in correctly * reformatted text. */ export interface FormatRequest extends FileLocationRequest { command: CommandTypes.Format; arguments: FormatRequestArgs; } /** * Object found in response messages defining an editing * instruction for a span of text in source code. The effect of * this instruction is to replace the text starting at start and * ending one character before end with newText. For an insertion, * the text span is empty. For a deletion, newText is empty. */ export interface CodeEdit { /** * First character of the text span to edit. */ start: Location; /** * One character past last character of the text span to edit. */ end: Location; /** * Replace the span defined above with this string (may be * the empty string). 
*/ newText: string; } export interface FileCodeEdits { fileName: string; textChanges: CodeEdit[]; } export interface CodeFixResponse extends Response { /** The code actions that are available */ body?: CodeFixAction[]; } export interface CodeAction { /** Description of the code action to display in the UI of the editor */ description: string; /** Text changes to apply to each file as part of the code action */ changes: FileCodeEdits[]; /** A command is an opaque object that should be passed to `ApplyCodeActionCommandRequestArgs` without modification. */ commands?: {}[]; } export interface CombinedCodeActions { changes: readonly FileCodeEdits[]; commands?: readonly {}[]; } export interface CodeFixAction extends CodeAction { /** Short name to identify the fix, for use by telemetry. */ fixName: string; /** * If present, one may call 'getCombinedCodeFix' with this fixId. * This may be omitted to indicate that the code fix can't be applied in a group. */ fixId?: {}; /** Should be present if and only if 'fixId' is. */ fixAllDescription?: string; } /** * Format and format on key response message. */ export interface FormatResponse extends Response { body?: CodeEdit[]; } /** * Arguments for format on key messages. */ export interface FormatOnKeyRequestArgs extends FileLocationRequestArgs { /** * Key pressed (';', '\n', or '}'). */ key: string; options?: FormatCodeSettings; } /** * Format on key request; value of command field is * "formatonkey". Given file location and key typed (as string), * return response giving zero or more edit instructions. The * edit instructions will be sorted in file order. Applying the * edit instructions in reverse to file will result in correctly * reformatted text. */ export interface FormatOnKeyRequest extends FileLocationRequest { command: CommandTypes.Formatonkey; arguments: FormatOnKeyRequestArgs; } /** * Arguments for completions messages. 
*/ export interface CompletionsRequestArgs extends FileLocationRequestArgs { /** * Optional prefix to apply to possible completions. */ prefix?: string; /** * Character that was responsible for triggering completion. * Should be `undefined` if a user manually requested completion. */ triggerCharacter?: CompletionsTriggerCharacter; triggerKind?: CompletionTriggerKind; /** * @deprecated Use UserPreferences.includeCompletionsForModuleExports */ includeExternalModuleExports?: boolean; /** * @deprecated Use UserPreferences.includeCompletionsWithInsertText */ includeInsertTextCompletions?: boolean; } /** * Completions request; value of command field is "completions". * Given a file location (file, line, col) and a prefix (which may * be the empty string), return the possible completions that * begin with prefix. */ export interface CompletionsRequest extends FileLocationRequest { command: CommandTypes.Completions | CommandTypes.CompletionInfo; arguments: CompletionsRequestArgs; } /** * Arguments for completion details request. */ export interface CompletionDetailsRequestArgs extends FileLocationRequestArgs { /** * Names of one or more entries for which to obtain details. */ entryNames: (string | CompletionEntryIdentifier)[]; } export interface CompletionEntryIdentifier { name: string; source?: string; data?: unknown; } /** * Completion entry details request; value of command field is * "completionEntryDetails". Given a file location (file, line, * col) and an array of completion entry names return more * detailed information for each completion entry. */ export interface CompletionDetailsRequest extends FileLocationRequest { command: CommandTypes.CompletionDetails; arguments: CompletionDetailsRequestArgs; } /** A part of a symbol description that links from a jsdoc @link tag to a declaration */ export interface JSDocLinkDisplayPart extends SymbolDisplayPart { /** The location of the declaration that the @link tag links to. 
*/ target: FileSpan; } export type CompletionEntry = ChangePropertyTypes<Omit<ts.CompletionEntry, "symbol">, { replacementSpan: TextSpan; data: unknown; }>; /** * Additional completion entry details, available on demand */ export type CompletionEntryDetails = ChangePropertyTypes<ts.CompletionEntryDetails, { tags: JSDocTagInfo[]; codeActions: CodeAction[]; }>; /** @deprecated Prefer CompletionInfoResponse, which supports several top-level fields in addition to the array of entries. */ export interface CompletionsResponse extends Response { body?: CompletionEntry[]; } export interface CompletionInfoResponse extends Response { body?: CompletionInfo; } export type CompletionInfo = ChangePropertyTypes<ts.CompletionInfo, { entries: readonly CompletionEntry[]; optionalReplacementSpan: TextSpan; }>; export interface CompletionDetailsResponse extends Response { body?: CompletionEntryDetails[]; } /** * Represents a single signature to show in signature help. */ export type SignatureHelpItem = ChangePropertyTypes<ts.SignatureHelpItem, { tags: JSDocTagInfo[]; }>; /** * Signature help items found in the response of a signature help request. */ export interface SignatureHelpItems { /** * The signature help items. */ items: SignatureHelpItem[]; /** * The span for which signature help should appear on a signature */ applicableSpan: TextSpan; /** * The item selected in the set of available help items. */ selectedItemIndex: number; /** * The argument selected in the set of parameters. */ argumentIndex: number; /** * The argument count */ argumentCount: number; } /** * Arguments of a signature help request. */ export interface SignatureHelpRequestArgs extends FileLocationRequestArgs { /** * Reason why signature help was invoked. * See each individual possible */ triggerReason?: SignatureHelpTriggerReason; } /** * Signature help request; value of command field is "signatureHelp". * Given a file location (file, line, col), return the signature * help. 
*/ export interface SignatureHelpRequest extends FileLocationRequest { command: CommandTypes.SignatureHelp; arguments: SignatureHelpRequestArgs; } /** * Response object for a SignatureHelpRequest. */ export interface SignatureHelpResponse extends Response { body?: SignatureHelpItems; } export interface InlayHintsRequestArgs extends FileRequestArgs { /** * Start position of the span. */ start: number; /** * Length of the span. */ length: number; } export interface InlayHintsRequest extends Request { command: CommandTypes.ProvideInlayHints; arguments: InlayHintsRequestArgs; } export type InlayHintItem = ChangePropertyTypes<ts.InlayHint, { position: Location; displayParts: InlayHintItemDisplayPart[]; }>; export interface InlayHintItemDisplayPart { text: string; span?: FileSpan; } export interface InlayHintsResponse extends Response { body?: InlayHintItem[]; } export interface MapCodeRequestArgs extends FileRequestArgs { /** * The files and changes to try and apply/map. */ mapping: MapCodeRequestDocumentMapping; } export interface MapCodeRequestDocumentMapping { /** * The specific code to map/insert/replace in the file. */ contents: string[]; /** * Areas of "focus" to inform the code mapper with. For example, cursor * location, current selection, viewport, etc. Nested arrays denote * priority: toplevel arrays are more important than inner arrays, and * inner array priorities are based on items within that array. Items * earlier in the arrays have higher priority. */ focusLocations?: TextSpan[][]; } export interface MapCodeRequest extends FileRequest { command: CommandTypes.MapCode; arguments: MapCodeRequestArgs; } export interface MapCodeResponse extends Response { body: readonly FileCodeEdits[]; } /** * Synchronous request for semantic diagnostics of one file. 
*/ export interface SemanticDiagnosticsSyncRequest extends FileRequest { command: CommandTypes.SemanticDiagnosticsSync; arguments: SemanticDiagnosticsSyncRequestArgs; } export interface SemanticDiagnosticsSyncRequestArgs extends FileRequestArgs { includeLinePosition?: boolean; } /** * Response object for synchronous semantic diagnostics request. */ export interface SemanticDiagnosticsSyncResponse extends Response { body?: Diagnostic[] | DiagnosticWithLinePosition[]; } export interface SuggestionDiagnosticsSyncRequest extends FileRequest { command: CommandTypes.SuggestionDiagnosticsSync; arguments: SuggestionDiagnosticsSyncRequestArgs; } export type SuggestionDiagnosticsSyncRequestArgs = SemanticDiagnosticsSyncRequestArgs; export type SuggestionDiagnosticsSyncResponse = SemanticDiagnosticsSyncResponse; /** * Synchronous request for syntactic diagnostics of one file. */ export interface SyntacticDiagnosticsSyncRequest extends FileRequest { command: CommandTypes.SyntacticDiagnosticsSync; arguments: SyntacticDiagnosticsSyncRequestArgs; } export interface SyntacticDiagnosticsSyncRequestArgs extends FileRequestArgs { includeLinePosition?: boolean; } /** * Response object for synchronous syntactic diagnostics request. */ export interface SyntacticDiagnosticsSyncResponse extends Response { body?: Diagnostic[] | DiagnosticWithLinePosition[]; } /** * Arguments for GeterrForProject request. */ export interface GeterrForProjectRequestArgs { /** * the file requesting project error list */ file: string; /** * Delay in milliseconds to wait before starting to compute * errors for the files in the file list */ delay: number; } /** * GeterrForProjectRequest request; value of command field is * "geterrForProject". It works similarly with 'Geterr', only * it requests errors for every file in this project. */ export interface GeterrForProjectRequest extends Request { command: CommandTypes.GeterrForProject; arguments: GeterrForProjectRequestArgs; } /** * Arguments for geterr messages. 
*/ export interface GeterrRequestArgs { /** * List of file names for which to compute compiler errors. * The files will be checked in list order. */ files: (string | FileRangesRequestArgs)[]; /** * Delay in milliseconds to wait before starting to compute * errors for the files in the file list */ delay: number; } /** * Geterr request; value of command field is "geterr". Wait for * delay milliseconds and then, if during the wait no change or * reload messages have arrived for the first file in the files * list, get the syntactic errors for the file, field requests, * and then get the semantic errors for the file. Repeat with a * smaller delay for each subsequent file on the files list. Best * practice for an editor is to send a file list containing each * file that is currently visible, in most-recently-used order. */ export interface GeterrRequest extends Request { command: CommandTypes.Geterr; arguments: GeterrRequestArgs; } export interface FileRange { /** * The line number for the request (1-based). */ startLine: number; /** * The character offset (on the line) for the request (1-based). */ startOffset: number; /** * The line number for the request (1-based). */ endLine: number; /** * The character offset (on the line) for the request (1-based). */ endOffset: number; } export interface FileRangesRequestArgs extends Pick<FileRequestArgs, "file"> { ranges: FileRange[]; } export type RequestCompletedEventName = "requestCompleted"; /** * Event that is sent when server have finished processing request with specified id. */ export interface RequestCompletedEvent extends Event { event: RequestCompletedEventName; body: RequestCompletedEventBody; } export interface RequestCompletedEventBody { request_seq: number; performanceData?: PerformanceData; } /** * Item of diagnostic information found in a DiagnosticEvent message. */ export interface Diagnostic { /** * Starting file location at which text applies. 
*/ start: Location; /** * The last file location at which the text applies. */ end: Location; /** * Text of diagnostic message. */ text: string; /** * The category of the diagnostic message, e.g. "error", "warning", or "suggestion". */ category: string; reportsUnnecessary?: {}; reportsDeprecated?: {}; /** * Any related spans the diagnostic may have, such as other locations relevant to an error, such as declarartion sites */ relatedInformation?: DiagnosticRelatedInformation[]; /** * The error code of the diagnostic message. */ code?: number; /** * The name of the plugin reporting the message. */ source?: string; } export interface DiagnosticWithFileName extends Diagnostic { /** * Name of the file the diagnostic is in */ fileName: string; } /** * Represents additional spans returned with a diagnostic which are relevant to it */ export interface DiagnosticRelatedInformation { /** * The category of the related information message, e.g. "error", "warning", or "suggestion". */ category: string; /** * The code used ot identify the related information */ code: number; /** * Text of related or additional information. */ message: string; /** * Associated location */ span?: FileSpan; } export interface DiagnosticEventBody { /** * The file for which diagnostic information is reported. */ file: string; /** * An array of diagnostic information items. */ diagnostics: Diagnostic[]; /** * Spans where the region diagnostic was requested, if this is a region semantic diagnostic event. */ spans?: TextSpan[]; } export type DiagnosticEventKind = "semanticDiag" | "syntaxDiag" | "suggestionDiag" | "regionSemanticDiag"; /** * Event message for DiagnosticEventKind event types. * These events provide syntactic and semantic errors for a file. 
*/ export interface DiagnosticEvent extends Event { body?: DiagnosticEventBody; event: DiagnosticEventKind; } export interface ConfigFileDiagnosticEventBody { /** * The file which trigged the searching and error-checking of the config file */ triggerFile: string; /** * The name of the found config file. */ configFile: string; /** * An arry of diagnostic information items for the found config file. */ diagnostics: DiagnosticWithFileName[]; } /** * Event message for "configFileDiag" event type. * This event provides errors for a found config file. */ export interface ConfigFileDiagnosticEvent extends Event { body?: ConfigFileDiagnosticEventBody; event: "configFileDiag"; } export type ProjectLanguageServiceStateEventName = "projectLanguageServiceState"; export interface ProjectLanguageServiceStateEvent extends Event { event: ProjectLanguageServiceStateEventName; body?: ProjectLanguageServiceStateEventBody; } export interface ProjectLanguageServiceStateEventBody { /** * Project name that has changes in the state of language service. * For configured projects this will be the config file path. * For external projects this will be the name of the projects specified when project was open. * For inferred projects this event is not raised. */ projectName: string; /** * True if language service state switched from disabled to enabled * and false otherwise. 
*/ languageServiceEnabled: boolean; } export type ProjectsUpdatedInBackgroundEventName = "projectsUpdatedInBackground"; export interface ProjectsUpdatedInBackgroundEvent extends Event { event: ProjectsUpdatedInBackgroundEventName; body: ProjectsUpdatedInBackgroundEventBody; } export interface ProjectsUpdatedInBackgroundEventBody { /** * Current set of open files */ openFiles: string[]; } export type ProjectLoadingStartEventName = "projectLoadingStart"; export interface ProjectLoadingStartEvent extends Event { event: ProjectLoadingStartEventName; body: ProjectLoadingStartEventBody; } export interface ProjectLoadingStartEventBody { /** name of the project */ projectName: string; /** reason for loading */ reason: string; } export type ProjectLoadingFinishEventName = "projectLoadingFinish"; export interface ProjectLoadingFinishEvent extends Event { event: ProjectLoadingFinishEventName; body: ProjectLoadingFinishEventBody; } export interface ProjectLoadingFinishEventBody { /** name of the project */ projectName: string; } export type SurveyReadyEventName = "surveyReady"; export interface SurveyReadyEvent extends Event { event: SurveyReadyEventName; body: SurveyReadyEventBody; } export interface SurveyReadyEventBody { /** Name of the survey. 
This is an internal machine- and programmer-friendly name */ surveyId: string; } export type LargeFileReferencedEventName = "largeFileReferenced"; export interface LargeFileReferencedEvent extends Event { event: LargeFileReferencedEventName; body: LargeFileReferencedEventBody; } export interface LargeFileReferencedEventBody { /** * name of the large file being loaded */ file: string; /** * size of the file */ fileSize: number; /** * max file size allowed on the server */ maxFileSize: number; } export type CreateFileWatcherEventName = "createFileWatcher"; export interface CreateFileWatcherEvent extends Event { readonly event: CreateFileWatcherEventName; readonly body: CreateFileWatcherEventBody; } export interface CreateFileWatcherEventBody { readonly id: number; readonly path: string; } export type CreateDirectoryWatcherEventName = "createDirectoryWatcher"; export interface CreateDirectoryWatcherEvent extends Event { readonly event: CreateDirectoryWatcherEventName; readonly body: CreateDirectoryWatcherEventBody; } export interface CreateDirectoryWatcherEventBody { readonly id: number; readonly path: string; readonly recursive: boolean; readonly ignoreUpdate?: boolean; } export type CloseFileWatcherEventName = "closeFileWatcher"; export interface CloseFileWatcherEvent extends Event { readonly event: CloseFileWatcherEventName; readonly body: CloseFileWatcherEventBody; } export interface CloseFileWatcherEventBody { readonly id: number; } /** @internal @knipignore */ export type AnyEvent = | RequestCompletedEvent | DiagnosticEvent | ConfigFileDiagnosticEvent | ProjectLanguageServiceStateEvent | TelemetryEvent | ProjectsUpdatedInBackgroundEvent | ProjectLoadingStartEvent | ProjectLoadingFinishEvent | SurveyReadyEvent | LargeFileReferencedEvent | CreateFileWatcherEvent | CreateDirectoryWatcherEvent | CloseFileWatcherEvent; /** * Arguments for reload request. 
*/ export interface ReloadRequestArgs extends FileRequestArgs { /** * Name of temporary file from which to reload file * contents. May be same as file. */ tmpfile: string; } /** * Reload request message; value of command field is "reload". * Reload contents of file with name given by the 'file' argument * from temporary file with name given by the 'tmpfile' argument. * The two names can be identical. */ export interface ReloadRequest extends FileRequest { command: CommandTypes.Reload; arguments: ReloadRequestArgs; } /** * Response to "reload" request. This is just an acknowledgement, so * no body field is required. */ export interface ReloadResponse extends Response { } /** * Arguments for saveto request. */ export interface SavetoRequestArgs extends FileRequestArgs { /** * Name of temporary file into which to save server's view of * file contents. */ tmpfile: string; } /** * Saveto request message; value of command field is "saveto". * For debugging purposes, save to a temporaryfile (named by * argument 'tmpfile') the contents of file named by argument * 'file'. The server does not currently send a response to a * "saveto" request. */ export interface SavetoRequest extends FileRequest { command: CommandTypes.Saveto; arguments: SavetoRequestArgs; } /** * Arguments for navto request message. */ export interface NavtoRequestArgs { /** * Search term to navigate to from current location; term can * be '.*' or an identifier prefix. */ searchValue: string; /** * Optional limit on the number of items to return. */ maxResultCount?: number; /** * The file for the request (absolute pathname required). */ file?: string; /** * Optional flag to indicate we want results for just the current file * or the entire project. */ currentFileOnly?: boolean; projectFileName?: string; } /** * Navto request message; value of command field is "navto". * Return list of objects giving file locations and symbols that * match the search term given in argument 'searchTerm'. 
The * context for the search is given by the named file. */ export interface NavtoRequest extends Request { command: CommandTypes.Navto; arguments: NavtoRequestArgs; } /** * An item found in a navto response. */ export interface NavtoItem extends FileSpan { /** * The symbol's name. */ name: string; /** * The symbol's kind (such as 'className' or 'parameterName'). */ kind: ScriptElementKind; /** * exact, substring, or prefix. */ matchKind: string; /** * If this was a case sensitive or insensitive match. */ isCaseSensitive: boolean; /** * Optional modifiers for the kind (such as 'public'). */ kindModifiers?: string; /** * Name of symbol's container symbol (if any); for example, * the class name if symbol is a class member. */ containerName?: string; /** * Kind of symbol's container symbol (if any). */ containerKind?: ScriptElementKind; } /** * Navto response message. Body is an array of navto items. Each * item gives a symbol that matched the search term. */ export interface NavtoResponse extends Response { body?: NavtoItem[]; } /** * Arguments for change request message. */ export interface ChangeRequestArgs extends FormatRequestArgs { /** * Optional string to insert at location (file, line, offset). */ insertString?: string; } /** * Change request message; value of command field is "change". * Update the server's view of the file named by argument 'file'. * Server does not currently send a response to a change request. */ export interface ChangeRequest extends FileLocationRequest { command: CommandTypes.Change; arguments: ChangeRequestArgs; } /** * Response to "brace" request. */ export interface BraceResponse extends Response { body?: TextSpan[]; } /** * Brace matching request; value of command field is "brace". * Return response giving the file locations of matching braces * found in file at location line, offset. 
*/ export interface BraceRequest extends FileLocationRequest { command: CommandTypes.Brace; } /** * NavBar items request; value of command field is "navbar". * Return response giving the list of navigation bar entries * extracted from the requested file. */ export interface NavBarRequest extends FileRequest { command: CommandTypes.NavBar; } /** * NavTree request; value of command field is "navtree". * Return response giving the navigation tree of the requested file. */ export interface NavTreeRequest extends FileRequest { command: CommandTypes.NavTree; } export interface NavigationBarItem { /** * The item's display text. */ text: string; /** * The symbol's kind (such as 'className' or 'parameterName'). */ kind: ScriptElementKind; /** * Optional modifiers for the kind (such as 'public'). */ kindModifiers?: string; /** * The definition locations of the item. */ spans: TextSpan[]; /** * Optional children. */ childItems?: NavigationBarItem[]; /** * Number of levels deep this item should appear. 
*/ indent: number; } /** protocol.NavigationTree is identical to ts.NavigationTree, except using protocol.TextSpan instead of ts.TextSpan */ export interface NavigationTree { text: string; kind: ScriptElementKind; kindModifiers: string; spans: TextSpan[]; nameSpan: TextSpan | undefined; childItems?: NavigationTree[]; } export type TelemetryEventName = "telemetry"; export interface TelemetryEvent extends Event { event: TelemetryEventName; body: TelemetryEventBody; } export interface TelemetryEventBody { telemetryEventName: string; payload: any; } export type TypesInstallerInitializationFailedEventName = "typesInstallerInitializationFailed"; export interface TypesInstallerInitializationFailedEvent extends Event { event: TypesInstallerInitializationFailedEventName; body: TypesInstallerInitializationFailedEventBody; } export interface TypesInstallerInitializationFailedEventBody { message: string; } export type TypingsInstalledTelemetryEventName = "typingsInstalled"; export interface TypingsInstalledTelemetryEventBody extends TelemetryEventBody { telemetryEventName: TypingsInstalledTelemetryEventName; payload: TypingsInstalledTelemetryEventPayload; } // A __GDPR__FRAGMENT__ has no meaning until it is ${include}d by a __GDPR__ comment, at which point // the included properties are effectively inlined into the __GDPR__ declaration. In this case, for // example, any __GDPR__ comment including the TypeScriptCommonProperties will be updated with an // additional version property with the classification below. Obviously, the purpose of such a construct // is to reduce duplication and keep multiple use sites consistent (e.g. by making sure that all reflect // any newly added TypeScriptCommonProperties). Unfortunately, the system has limits - in particular, // these reusable __GDPR__FRAGMENT__s are not accessible across repo boundaries. Therefore, even though // the code for adding the common properties (i.e. 
version), along with the corresponding __GDPR__FRAGMENT__, // lives in the VS Code repo (see https://github.com/microsoft/vscode/blob/main/extensions/typescript-language-features/src/utils/telemetry.ts) // we have to duplicate it here. It would be nice to keep them in sync, but the only likely failure mode // is adding a property to the VS Code repro but not here and the only consequence would be having that // property suppressed on the events (i.e. __GDPT__ comments) in this repo that reference the out-of-date // local __GDPR__FRAGMENT__. /* __GDPR__FRAGMENT__ "TypeScriptCommonProperties" : { "version" : { "classification": "SystemMetaData", "purpose": "FeatureInsight" } } */ /* __GDPR__ "typingsinstalled" : { "${include}": ["${TypeScriptCommonProperties}"], "installedPackages": { "classification": "PublicNonPersonalData", "purpose": "FeatureInsight" }, "installSuccess": { "classification": "SystemMetaData", "purpose": "FeatureInsight" }, "typingsInstallerVersion": { "classification": "SystemMetaData", "purpose": "FeatureInsight" } } */ export interface TypingsInstalledTelemetryEventPayload { /** * Comma separated list of installed typing packages */ installedPackages: string; /** * true if install request succeeded, otherwise - false */ installSuccess: boolean; /** * version of typings installer */ typingsInstallerVersion: string; } export type BeginInstallTypesEventName = "beginInstallTypes"; export type EndInstallTypesEventName = "endInstallTypes"; export interface BeginInstallTypesEvent extends Event { event: BeginInstallTypesEventName; body: BeginInstallTypesEventBody; } export interface EndInstallTypesEvent extends Event { event: EndInstallTypesEventName; body: EndInstallTypesEventBody; } export interface InstallTypesEventBody { /** * correlation id to match begin and end events */ eventId: number; /** * list of packages to install */ packages: readonly string[]; } export interface BeginInstallTypesEventBody extends InstallTypesEventBody { } export 
interface EndInstallTypesEventBody extends InstallTypesEventBody { /** * true if installation succeeded, otherwise false */ success: boolean; } export interface NavBarResponse extends Response { body?: NavigationBarItem[]; } export interface NavTreeResponse extends Response { body?: NavigationTree; } export type CallHierarchyItem = ChangePropertyTypes<ts.CallHierarchyItem, { span: TextSpan; selectionSpan: TextSpan; }>; export interface CallHierarchyIncomingCall { from: CallHierarchyItem; fromSpans: TextSpan[]; } export interface CallHierarchyOutgoingCall { to: CallHierarchyItem; fromSpans: TextSpan[]; } export interface PrepareCallHierarchyRequest extends FileLocationRequest { command: CommandTypes.PrepareCallHierarchy; } export interface PrepareCallHierarchyResponse extends Response { readonly body: CallHierarchyItem | CallHierarchyItem[]; } export interface ProvideCallHierarchyIncomingCallsRequest extends FileLocationRequest { command: CommandTypes.ProvideCallHierarchyIncomingCalls; } export interface ProvideCallHierarchyIncomingCallsResponse extends Response { readonly body: CallHierarchyIncomingCall[]; } export interface ProvideCallHierarchyOutgoingCallsRequest extends FileLocationRequest { command: CommandTypes.ProvideCallHierarchyOutgoingCalls; } export interface ProvideCallHierarchyOutgoingCallsResponse extends Response { readonly body: CallHierarchyOutgoingCall[]; } export const enum IndentStyle { None = "None", Block = "Block", Smart = "Smart", } export type EditorSettings = ChangePropertyTypes<ts.EditorSettings, { indentStyle: IndentStyle | ts.IndentStyle; }>; export type FormatCodeSettings = ChangePropertyTypes<ts.FormatCodeSettings, { indentStyle: IndentStyle | ts.IndentStyle; }>; export type CompilerOptions = ChangePropertyTypes<ChangeStringIndexSignature<ts.CompilerOptions, CompilerOptionsValue>, { jsx: JsxEmit | ts.JsxEmit; module: ModuleKind | ts.ModuleKind; moduleResolution: ModuleResolutionKind | ts.ModuleResolutionKind; newLine: NewLineKind | 
ts.NewLineKind; target: ScriptTarget | ts.ScriptTarget; }>; export const enum JsxEmit { None = "none", Preserve = "preserve", ReactNative = "react-native", React = "react", ReactJSX = "react-jsx", ReactJSXDev = "react-jsxdev", } export const enum ModuleKind { None = "none", CommonJS = "commonjs", AMD = "amd", UMD = "umd", System = "system", ES6 = "es6", ES2015 = "es2015", ES2020 = "es2020", ES2022 = "es2022", ESNext = "esnext", Node16 = "node16", Node18 = "node18", Node20 = "node20", NodeNext = "nodenext", Preserve = "preserve", } export const enum ModuleResolutionKind { Classic = "classic", /** @deprecated Renamed to `Node10` */ Node = "node", /** @deprecated Renamed to `Node10` */ NodeJs = "node", /** @deprecated */ Node10 = "node10", Node16 = "node16", NodeNext = "nodenext", Bundler = "bundler", } export const enum NewLineKind { Crlf = "Crlf", Lf = "Lf", } // NOTE: We must reevaluate the target for upcoming features when each successive TC39 edition is ratified in // June of each year. This includes changes to `LanguageFeatureMinimumTarget`, `ScriptTarget`, // `ScriptTargetFeatures`, `CommandLineOptionOfCustomType`, transformers/esnext.ts, compiler/commandLineParser.ts, // compiler/utilitiesPublic.ts, and the contents of each lib/esnext.*.d.ts file. 
export const enum ScriptTarget { /** @deprecated */ ES3 = "es3", /** @deprecated */ ES5 = "es5", ES6 = "es6", ES2015 = "es2015", ES2016 = "es2016", ES2017 = "es2017", ES2018 = "es2018", ES2019 = "es2019", ES2020 = "es2020", ES2021 = "es2021", ES2022 = "es2022", ES2023 = "es2023", ES2024 = "es2024", ES2025 = "es2025", ESNext = "esnext", JSON = "json", Latest = ESNext, LatestStandard = ES2025, } { type AssertKeysComplete<Source extends { [K in keyof Target]: any; }, Target> = Source; // eslint-disable-next-line @typescript-eslint/no-unused-vars type CopiedTypesComplete = [ AssertKeysComplete<typeof ModuleResolutionKind, typeof ts.ModuleResolutionKind>, AssertKeysComplete<typeof ModuleKind, typeof ts.ModuleKind>, AssertKeysComplete<typeof ScriptTarget, typeof ts.ScriptTarget>, AssertKeysComplete<typeof JsxEmit, typeof ts.JsxEmit>, AssertKeysComplete<typeof IndentStyle, typeof ts.IndentStyle>, ]; }
typescript
github
https://github.com/microsoft/TypeScript
src/server/protocol.ts
<!doctype html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>React Router - Lazy Loading Example</title> </head> <body> <div id="root"></div> <script type="module" src="/src/main.tsx"></script> </body> </html>
html
github
https://github.com/remix-run/react-router
examples/lazy-loading/index.html
# Owner(s): ["module: cuda"] import unittest import torch import torch.testing._internal.common_utils as common from torch.testing._internal.common_cuda import ( TEST_CUDA, TEST_MULTIGPU, TEST_NUMBA_CUDA, ) from torch.testing._internal.common_utils import TEST_NUMPY if TEST_NUMPY: import numpy if TEST_NUMBA_CUDA: import numba.cuda class TestNumbaIntegration(common.TestCase): @unittest.skipIf(not TEST_NUMPY, "No numpy") @unittest.skipIf(not TEST_CUDA, "No cuda") def test_cuda_array_interface(self): """torch.Tensor exposes __cuda_array_interface__ for cuda tensors. An object t is considered a cuda-tensor if: hasattr(t, '__cuda_array_interface__') A cuda-tensor provides a tensor description dict: shape: (integer, ...) Tensor shape. strides: (integer, ...) Tensor strides, in bytes. typestr: (str) A numpy-style typestr. data: (int, boolean) A (data_ptr, read-only) tuple. version: (int) Version 0 See: https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html """ types = [ torch.DoubleTensor, torch.FloatTensor, torch.HalfTensor, torch.LongTensor, torch.IntTensor, torch.ShortTensor, torch.CharTensor, torch.ByteTensor, ] dtypes = [ numpy.float64, numpy.float32, numpy.float16, numpy.int64, numpy.int32, numpy.int16, numpy.int8, numpy.uint8, ] for tp, npt in zip(types, dtypes): # CPU tensors do not implement the interface. 
cput = tp(10) self.assertFalse(hasattr(cput, "__cuda_array_interface__")) self.assertRaises(AttributeError, lambda: cput.__cuda_array_interface__) # Sparse CPU/CUDA tensors do not implement the interface if tp != torch.HalfTensor: indices_t = torch.empty(1, cput.size(0), dtype=torch.long).clamp_(min=0) sparse_t = torch.sparse_coo_tensor(indices_t, cput) self.assertFalse(hasattr(sparse_t, "__cuda_array_interface__")) self.assertRaises( AttributeError, lambda: sparse_t.__cuda_array_interface__ ) sparse_cuda_t = torch.sparse_coo_tensor(indices_t, cput).cuda() self.assertFalse(hasattr(sparse_cuda_t, "__cuda_array_interface__")) self.assertRaises( AttributeError, lambda: sparse_cuda_t.__cuda_array_interface__ ) # CUDA tensors have the attribute and v2 interface cudat = tp(10).cuda() self.assertTrue(hasattr(cudat, "__cuda_array_interface__")) ar_dict = cudat.__cuda_array_interface__ self.assertEqual( set(ar_dict.keys()), {"shape", "strides", "typestr", "data", "version"} ) self.assertEqual(ar_dict["shape"], (10,)) self.assertIs(ar_dict["strides"], None) # typestr from numpy, cuda-native little-endian self.assertEqual(ar_dict["typestr"], numpy.dtype(npt).newbyteorder("<").str) self.assertEqual(ar_dict["data"], (cudat.data_ptr(), False)) self.assertEqual(ar_dict["version"], 2) @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") def test_array_adaptor(self): """Torch __cuda_array_adaptor__ exposes tensor data to numba.cuda.""" torch_dtypes = [ torch.complex64, torch.complex128, torch.float16, torch.float32, torch.float64, torch.uint8, torch.int8, torch.uint16, torch.int16, torch.uint32, torch.int32, torch.uint64, torch.int64, torch.bool, ] for dt in torch_dtypes: # CPU tensors of all types do not register as cuda arrays, # attempts to convert raise a type error. 
cput = torch.arange(10).to(dt) npt = cput.numpy() self.assertTrue(not numba.cuda.is_cuda_array(cput)) with self.assertRaises(TypeError): numba.cuda.as_cuda_array(cput) # Any cuda tensor is a cuda array. cudat = cput.to(device="cuda") self.assertTrue(numba.cuda.is_cuda_array(cudat)) numba_view = numba.cuda.as_cuda_array(cudat) self.assertIsInstance(numba_view, numba.cuda.devicearray.DeviceNDArray) # The reported type of the cuda array matches the numpy type of the cpu tensor. self.assertEqual(numba_view.dtype, npt.dtype) self.assertEqual(numba_view.strides, npt.strides) self.assertEqual(numba_view.shape, cudat.shape) # Pass back to cuda from host for all equality checks below, needed for # float16 comparisons, which aren't supported cpu-side. # The data is identical in the view. self.assertEqual(cudat, torch.tensor(numba_view.copy_to_host()).to("cuda")) # Writes to the torch.Tensor are reflected in the numba array. cudat[:5] = 11 self.assertEqual(cudat, torch.tensor(numba_view.copy_to_host()).to("cuda")) # Strided tensors are supported. strided_cudat = cudat[::2] strided_npt = cput[::2].numpy() strided_numba_view = numba.cuda.as_cuda_array(strided_cudat) self.assertEqual(strided_numba_view.dtype, strided_npt.dtype) self.assertEqual(strided_numba_view.strides, strided_npt.strides) self.assertEqual(strided_numba_view.shape, strided_cudat.shape) # As of numba 0.40.0 support for strided views is ...limited... # Cannot verify correctness of strided view operations. @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") def test_conversion_errors(self): """Numba properly detects array interface for tensor.Tensor variants.""" # CPU tensors are not cuda arrays. cput = torch.arange(100) self.assertFalse(numba.cuda.is_cuda_array(cput)) with self.assertRaises(TypeError): numba.cuda.as_cuda_array(cput) # Sparse tensors are not cuda arrays, regardless of device. 
sparset = torch.sparse_coo_tensor(cput[None, :], cput) self.assertFalse(numba.cuda.is_cuda_array(sparset)) with self.assertRaises(TypeError): numba.cuda.as_cuda_array(sparset) sparset.cuda() self.assertFalse(numba.cuda.is_cuda_array(sparset)) with self.assertRaises(TypeError): numba.cuda.as_cuda_array(sparset) # Device-status overrides gradient status. # CPU+gradient isn't a cuda array. cpu_gradt = torch.zeros(100).requires_grad_(True) self.assertFalse(numba.cuda.is_cuda_array(cpu_gradt)) with self.assertRaises(TypeError): numba.cuda.as_cuda_array(cpu_gradt) # CUDA+gradient raises a RuntimeError on check or conversion. # # Use of hasattr for interface detection causes interface change in # python2; it swallows all exceptions not just AttributeError. cuda_gradt = torch.zeros(100).requires_grad_(True).cuda() # conversion raises RuntimeError with self.assertRaises(RuntimeError): numba.cuda.is_cuda_array(cuda_gradt) with self.assertRaises(RuntimeError): numba.cuda.as_cuda_array(cuda_gradt) @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") @unittest.skipIf(not TEST_MULTIGPU, "No multigpu") def test_active_device(self): """'as_cuda_array' tensor device must match active numba context.""" # Both torch/numba default to device 0 and can interop freely cudat = torch.arange(10, device="cuda") self.assertEqual(cudat.device.index, 0) self.assertIsInstance( numba.cuda.as_cuda_array(cudat), numba.cuda.devicearray.DeviceNDArray ) # Tensors on non-default device raise api error if converted cudat = torch.arange(10, device=torch.device("cuda", 1)) with self.assertRaises(numba.cuda.driver.CudaAPIError): numba.cuda.as_cuda_array(cudat) # but can be converted when switching to the device's context with numba.cuda.devices.gpus[cudat.device.index]: self.assertIsInstance( numba.cuda.as_cuda_array(cudat), numba.cuda.devicearray.DeviceNDArray ) @unittest.skip( "Test is temporary disabled, see https://github.com/pytorch/pytorch/issues/54418" ) 
@unittest.skipIf(not TEST_NUMPY, "No numpy") @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") def test_from_cuda_array_interface(self): """torch.as_tensor() and torch.tensor() supports the __cuda_array_interface__ protocol. If an object exposes the __cuda_array_interface__, .as_tensor() and .tensor() will use the exposed device memory. See: https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html """ dtypes = [ numpy.complex64, numpy.complex128, numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.int16, numpy.int8, numpy.uint8, ] for dtype in dtypes: numpy_arys = [ numpy.ones((), dtype=dtype), numpy.arange(6).reshape(2, 3).astype(dtype), numpy.arange(6) .reshape(2, 3) .astype(dtype)[1:], # View offset should be ignored numpy.arange(6) .reshape(2, 3) .astype(dtype)[:, None], # change the strides but still contiguous ] # Zero-copy when using `torch.as_tensor()` for numpy_ary in numpy_arys: numba_ary = numba.cuda.to_device(numpy_ary) torch_ary = torch.as_tensor(numba_ary, device="cuda") self.assertEqual( numba_ary.__cuda_array_interface__, torch_ary.__cuda_array_interface__, ) self.assertEqual( torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype) ) # Check that `torch_ary` and `numba_ary` points to the same device memory torch_ary += 42 self.assertEqual( torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype) ) # Implicit-copy because `torch_ary` is a CPU array for numpy_ary in numpy_arys: numba_ary = numba.cuda.to_device(numpy_ary) torch_ary = torch.as_tensor(numba_ary, device="cpu") self.assertEqual( torch_ary.data.numpy(), numpy.asarray(numba_ary, dtype=dtype) ) # Check that `torch_ary` and `numba_ary` points to different memory torch_ary += 42 self.assertEqual( torch_ary.data.numpy(), numpy.asarray(numba_ary, dtype=dtype) + 42 ) # Explicit-copy when using `torch.tensor()` for numpy_ary in numpy_arys: numba_ary = numba.cuda.to_device(numpy_ary) torch_ary = 
torch.tensor(numba_ary, device="cuda") self.assertEqual( torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype) ) # Check that `torch_ary` and `numba_ary` points to different memory torch_ary += 42 self.assertEqual( torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary, dtype=dtype) + 42, ) @unittest.skipIf(not TEST_NUMPY, "No numpy") @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") def test_from_cuda_array_interface_inferred_strides(self): """torch.as_tensor(numba_ary) should have correct inferred (contiguous) strides""" # This could, in theory, be combined with test_from_cuda_array_interface but that test # is overly strict: it checks that the exported protocols are exactly the same, which # cannot handle differing exported protocol versions. dtypes = [ numpy.float64, numpy.float32, numpy.int64, numpy.int32, numpy.int16, numpy.int8, numpy.uint8, ] for dtype in dtypes: numpy_ary = numpy.arange(6).reshape(2, 3).astype(dtype) numba_ary = numba.cuda.to_device(numpy_ary) self.assertTrue(numba_ary.is_c_contiguous()) torch_ary = torch.as_tensor(numba_ary, device="cuda") self.assertTrue(torch_ary.is_contiguous()) @unittest.skip( "Test is temporary disabled, see https://github.com/pytorch/pytorch/issues/54418" ) @unittest.skipIf(not TEST_NUMPY, "No numpy") @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") def test_from_cuda_array_interface_lifetime(self): """torch.as_tensor(obj) tensor grabs a reference to obj so that the lifetime of obj exceeds the tensor""" numba_ary = numba.cuda.to_device(numpy.arange(6)) torch_ary = torch.as_tensor(numba_ary, device="cuda") self.assertEqual( torch_ary.__cuda_array_interface__, numba_ary.__cuda_array_interface__ ) # No copy del numba_ary self.assertEqual( torch_ary.cpu().data.numpy(), numpy.arange(6) ) # `torch_ary` is still alive @unittest.skip( "Test is temporary disabled, see https://github.com/pytorch/pytorch/issues/54418" 
) @unittest.skipIf(not TEST_NUMPY, "No numpy") @unittest.skipIf(not TEST_CUDA, "No cuda") @unittest.skipIf(not TEST_NUMBA_CUDA, "No numba.cuda") @unittest.skipIf(not TEST_MULTIGPU, "No multigpu") def test_from_cuda_array_interface_active_device(self): """torch.as_tensor() tensor device must match active numba context.""" # Zero-copy: both torch/numba default to device 0 and can interop freely numba_ary = numba.cuda.to_device(numpy.arange(6)) torch_ary = torch.as_tensor(numba_ary, device="cuda") self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary)) self.assertEqual( torch_ary.__cuda_array_interface__, numba_ary.__cuda_array_interface__ ) # Implicit-copy: when the Numba and Torch device differ numba_ary = numba.cuda.to_device(numpy.arange(6)) torch_ary = torch.as_tensor(numba_ary, device=torch.device("cuda", 1)) self.assertEqual(torch_ary.get_device(), 1) self.assertEqual(torch_ary.cpu().data.numpy(), numpy.asarray(numba_ary)) if1 = torch_ary.__cuda_array_interface__ if2 = numba_ary.__cuda_array_interface__ self.assertNotEqual(if1["data"], if2["data"]) del if1["data"] del if2["data"] self.assertEqual(if1, if2) if __name__ == "__main__": common.run_tests()
python
github
https://github.com/pytorch/pytorch
test/test_numba_integration.py
import { createCodeFixActionWithoutFixAll, registerCodeFix, setJsonCompilerOptionValue, setJsonCompilerOptionValues, } from "../_namespaces/ts.codefix.js"; import { CodeFixAction, Diagnostics, Expression, factory, getEmitModuleKind, getEmitScriptTarget, getTsConfigObjectLiteralExpression, ModuleKind, ScriptTarget, textChanges, } from "../_namespaces/ts.js"; registerCodeFix({ errorCodes: [ Diagnostics.Top_level_await_expressions_are_only_allowed_when_the_module_option_is_set_to_es2022_esnext_system_node16_node18_node20_nodenext_or_preserve_and_the_target_option_is_set_to_es2017_or_higher.code, Diagnostics.Top_level_await_using_statements_are_only_allowed_when_the_module_option_is_set_to_es2022_esnext_system_node16_node18_node20_nodenext_or_preserve_and_the_target_option_is_set_to_es2017_or_higher.code, Diagnostics.Top_level_for_await_loops_are_only_allowed_when_the_module_option_is_set_to_es2022_esnext_system_node16_node18_node20_nodenext_or_preserve_and_the_target_option_is_set_to_es2017_or_higher.code, ], getCodeActions: function getCodeActionsToFixModuleAndTarget(context) { const compilerOptions = context.program.getCompilerOptions(); const { configFile } = compilerOptions; if (configFile === undefined) { return undefined; } const codeFixes: CodeFixAction[] = []; const moduleKind = getEmitModuleKind(compilerOptions); const moduleOutOfRange = moduleKind >= ModuleKind.ES2015 && moduleKind < ModuleKind.ESNext; if (moduleOutOfRange) { const changes = textChanges.ChangeTracker.with(context, changes => { setJsonCompilerOptionValue(changes, configFile, "module", factory.createStringLiteral("esnext")); }); codeFixes.push(createCodeFixActionWithoutFixAll("fixModuleOption", changes, [Diagnostics.Set_the_module_option_in_your_configuration_file_to_0, "esnext"])); } const target = getEmitScriptTarget(compilerOptions); const targetOutOfRange = target < ScriptTarget.ES2017 || target > ScriptTarget.ESNext; if (targetOutOfRange) { const changes = 
textChanges.ChangeTracker.with(context, tracker => { const configObject = getTsConfigObjectLiteralExpression(configFile); if (!configObject) return; const options: [string, Expression][] = [["target", factory.createStringLiteral("es2017")]]; if (moduleKind === ModuleKind.CommonJS) { // Ensure we preserve the default module kind (commonjs), as targets >= ES2015 have a default module kind of es2015. options.push(["module", factory.createStringLiteral("commonjs")]); } setJsonCompilerOptionValues(tracker, configFile, options); }); codeFixes.push(createCodeFixActionWithoutFixAll("fixTargetOption", changes, [Diagnostics.Set_the_target_option_in_your_configuration_file_to_0, "es2017"])); } return codeFixes.length ? codeFixes : undefined; }, });
typescript
github
https://github.com/microsoft/TypeScript
src/services/codefixes/fixModuleAndTargetOptions.ts
#!/usr/bin/env python # -*- coding: utf-8 -*- "Special module to handle differences between Python 2 and 3 versions" import sys PY3K = sys.version_info >= (3, 0) try: import cPickle as pickle except ImportError: import pickle try: from urllib import urlopen except ImportError: from urllib.request import urlopen try: from hashlib import md5 except ImportError: try: from md5 import md5 except ImportError: md5 = None def hashpath(fn): h = md5() if PY3K: h.update(fn.encode("UTF-8")) else: h.update(fn) return h.hexdigest() # Check if PIL is available (tries importing both pypi version and corrected or manually installed versions). # Necessary for JPEG and GIF support. # TODO: Pillow support try: from PIL import Image except ImportError: try: import Image except ImportError: Image = None try: from HTMLParser import HTMLParser except ImportError: from html.parser import HTMLParser if PY3K: basestring = str unicode = str ord = lambda x: x else: basestring = basestring unicode = unicode ord = ord # shortcut to bytes conversion (b prefix) def b(s): if isinstance(s, basestring): return s.encode("latin1") elif isinstance(s, int): if PY3K: return bytes([s]) # http://bugs.python.org/issue4588 else: return chr(s) def exception(): "Return the current the exception instance currently being handled" # this is needed to support Python 2.5 that lacks "as" syntax return sys.exc_info()[1]
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2015 Stephane Caron <stephane.caron@normalesup.org> # # This file is part of pymanoid. # # pymanoid is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # pymanoid is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # pymanoid. If not, see <http://www.gnu.org/licenses/>. from math import atan2, asin, cos, sin from numpy import array __quat_to_rot__ = array([[ # [0, 0]: a^2 + b^2 - c^2 - d^2 [[+1, 0, 0, 0], [.0, +1, 0, 0], [.0, 0, -1, 0], [.0, 0, 0, -1]], # [0, 1]: 2bc - 2ad [[.0, 0, 0, -2], [.0, 0, +2, 0], [.0, 0, 0, 0], [.0, 0, 0, 0]], # [0, 2]: 2bd + 2ac [[.0, 0, +2, 0], [.0, 0, 0, +2], [.0, 0, 0, 0], [.0, 0, 0, 0]]], [ # [1, 0]: 2bc + 2ad [[.0, 0, 0, +2], [.0, 0, +2, 0], [.0, 0, 0, 0], [.0, 0, 0, 0]], # [1, 1]: a^2 - b^2 + c^2 - d^2 [[+1, 0, 0, 0], [.0, -1, 0, 0], [.0, 0, +1, 0], [.0, 0, 0, -1]], # [1, 2]: 2cd - 2ab [[.0, -2, 0, 0], [.0, 0, 0, 0], [.0, 0, 0, +2], [.0, 0, 0, 0]]], [ # [2, 0]: 2bd - 2ac [[.0, 0, -2, 0], [.0, 0, 0, +2], [.0, 0, 0, 0], [.0, 0, 0, 0]], # [2, 1]: 2cd + 2ab [[0, +2, 0, 0], [0, 0, 0, 0], [0, 0, 0, +2], [0, 0, 0, 0]], # [2, 2]: a^2 - b^2 - c^2 + d^2 [[+1, 0, 0, 0], [.0, -1, 0, 0], [.0, 0, -1, 0], [.0, 0, 0, +1]]]]) quat_to_rot_tensor = __quat_to_rot__.transpose([2, 0, 1, 3]) # quat_to_rot_tensor.shape == (4, 3, 3, 4) # R = dot(quat, dot(quat_to_rot_tensor, quat)) def rpy_from_quat(q): roll = atan2( 2 * q[2] * q[3] + 2 * q[0] * q[1], q[3] ** 2 - q[2] ** 2 - q[1] ** 2 + q[0] ** 2) pitch = -asin( 2 * q[1] * q[3] - 2 * q[0] * q[2]) yaw = atan2( 2 * 
q[1] * q[2] + 2 * q[0] * q[3], q[1] ** 2 + q[0] ** 2 - q[3] ** 2 - q[2] ** 2) return array([roll, pitch, yaw]) def quat_from_rpy(roll, pitch, yaw): cr, cp, cy = cos(roll / 2), cos(pitch / 2), cos(yaw / 2) sr, sp, sy = sin(roll / 2), sin(pitch / 2), sin(yaw / 2) return array([ cr * cp * cy + sr * sp * sy, -cr * sp * sy + cp * cy * sr, cr * cy * sp + sr * cp * sy, cr * cp * sy - sr * cy * sp])
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import sys import csv import glob # This lines increases the csv's line max size csv.field_size_limit(sys.maxsize) def readImagesFileNames(_path): imagesList = [] imgL = glob.glob(_path) for item in imgL: item = item.split('/') item = item[1].split('.') item = item[0] imagesList.append(item) return imagesList def writeTags(_iFileName, _oFileName): oFile = open(_oFileName, 'wb') wr = csv.writer(oFile) with open(_iFileName) as f: reader = csv.reader(f) for row in reader: image_id = str(row).strip('[ \' ]').split(r' ') image_id = image_id[0] for image in evalImages: if image == image_id: wr.writerow(row) break def writeGroundTruth(_iFileName, _oFileName): oFile = open(_oFileName, 'wb') wr = csv.writer(oFile) with open(_iFileName, 'rb') as f: reader = csv.reader(f) for row in reader: image_id = str(row).strip('[ \' ]').split(r' ') image_id = image_id[0] for image in evalImages: if image == image_id: wr.writerow(row) break ###################### #### MAIN PROGRAM #### ###################### #FILES VARS images_path = "images/*" main_id_tag_filename = "id_tag/document_id_tag.csv" eval_id_tag_filename = "id_tag/evaluable_document_id_tag.csv" main_groundtruth_filename = "groundtruth/groundtruth.csv" eval_groundtruth_filename = "groundtruth/evaluable_groundtruth.csv" print("Reading image names...") evalImages = readImagesFileNames(images_path) print("Writing image-tags file...") writeTags(main_id_tag_filename, eval_id_tag_filename) print("Writing groundtruth file...") writeGroundTruth(main_groundtruth_filename, eval_groundtruth_filename)
unknown
codeparrot/codeparrot-clean
test_kind: fsm_workload_test selector: roots: - jstests/concurrency/fsm_workloads/**/*.js - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js exclude_files: # SERVER-14669 Multi-removes that use $where miscount removed documents # Disabled due to MongoDB restrictions and/or workload restrictions # These workloads sometimes trigger 'Could not lock auth data update lock' # errors because the AuthorizationManager currently waits for only five # seconds to acquire the lock for authorization documents # uses >100MB of data, which can overwhelm test hosts # compact can only be run against a standalone mongod # can cause OOM kills on test hosts # cannot createIndex after dropDatabase without sharding first # The WTWriteConflictException failpoint is not supported on mongos. # SERVER-20361 Improve the behaviour of multi-update/delete against a sharded collection - jstests/concurrency/fsm_workloads/query/update/update_where.js # This test runs a large number of inserts, which can cause moveCollection to take a long time # to finish. This can cause the CheckMetadataConsistency hook to hit LockBusy errors. - jstests/concurrency/fsm_workloads/timeseries/timeseries_insert_idle_bucket_expiration.js exclude_with_any_tags: - requires_standalone - does_not_support_causal_consistency # This suite uses secondary read preference, which isn't currently compatible with transactions. - uses_transactions - assumes_balancer_off - requires_replication # Tests which use $currentOp. Running an aggregation with $currentOp and read preference # secondary doesn't make much sense, since there's no guarantee *which* secondary you get results # from. - uses_curop_agg_stage # mongos has no system.profile collection. 
- requires_profiling - assumes_against_mongod_not_mongos - assumes_unsharded_collection # implicitly_retry_on_migration_in_progress.js alters find/aggregate commands # so that the whole result set is returned through a single batch - assumes_no_implicit_cursor_exhaustion executor: archive: hooks: - CheckReplDBHashInBackground - CheckReplDBHash - CheckMetadataConsistencyInBackground - ValidateCollections tests: true config: shell_options: eval: >- await import("jstests/libs/override_methods/implicitly_shard_accessed_collections.js"); await import("jstests/libs/override_methods/implicitly_retry_on_migration_in_progress.js"); global_vars: TestData: runningWithCausalConsistency: true runningWithBalancer: true implicitlyShardOnCreateCollectionOnly: true hooks: - class: CheckIdleCursors - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: class: ShardedClusterFixture mongos_options: set_parameters: enableTestCommands: 1 queryAnalysisSamplerConfigurationRefreshSecs: 1 shard_options: mongod_options: oplogSize: 1024 mongod_options: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 queryAnalysisWriterIntervalSecs: 1 queryAnalysisSamplerConfigurationRefreshSecs: 1 skipDroppingHashedShardKeyIndex: true reshardingMinimumOperationDurationMillis: 0 num_rs_nodes_per_shard: 3 num_shards: 2 num_mongos: 2 enable_balancer: true random_migrations: true
unknown
github
https://github.com/mongodb/mongo
buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml
# AGENTS.md <!-- version: 2.0.0 --> This file provides guidance to AI agents when working with code in the Grafana repository. **Directory-scoped agent files exist for specialized areas — read them when working in those directories:** - `docs/AGENTS.md` — Documentation style guide (for work under `docs/`) - `public/app/features/alerting/unified/CLAUDE.md` — Alerting squad patterns ## Project Overview Grafana is a monitoring and observability platform. Go backend, TypeScript/React frontend, monorepo with Yarn workspaces (frontend) and Go workspaces (backend). ## Principles - Follow existing patterns in the surrounding code - Write tests for new functionality - Keep changes focused — avoid over-engineering - Separate PRs for frontend and backend changes (deployed at different cadences) - Security: prevent XSS, SQL injection, command injection ## Commands ### Build & Run ```bash make run # Backend with hot reload (localhost:3000, admin/admin) make build-backend # Backend only yarn start # Frontend dev server (watches for changes) yarn build # Frontend production build ``` ### Test ```bash # Backend go test -run TestName ./pkg/services/myservice/ # Specific test make test-go-unit # All unit tests make test-go-integration # Integration tests # Frontend yarn test path/to/file # Specific file yarn test -t "pattern" # By name pattern yarn test -u # Update snapshots # E2E yarn e2e:playwright path/to/test.spec.ts # Specific test ``` ### Lint & Format ```bash make lint-go # Go linter yarn lint # ESLint yarn lint:fix # ESLint auto-fix yarn prettier:write # Prettier auto-format yarn typecheck # TypeScript check ``` ### Code Generation ```bash make gen-go # Wire DI (after changing service init) make gen-cue # CUE schemas (after changing kinds/) make gen-apps # App SDK apps make swagger-gen # OpenAPI/Swagger specs make gen-feature-toggles # Feature flags (pkg/services/featuremgmt/) make i18n-extract # i18n strings make update-workspace # Go workspace (after adding modules) ``` 
### Dev Environment ```bash yarn install --immutable # Install frontend deps make devenv sources=postgres,influxdb,loki # Start backing services make devenv-down # Stop backing services make lefthook-install # Pre-commit hooks ``` ## Architecture ### Backend (`pkg/`) | Directory | Purpose | | ----------------- | ----------------------------------------------------------- | | `pkg/api/` | HTTP API handlers and routes | | `pkg/services/` | Business logic by domain (alerting, dashboards, auth, etc.) | | `pkg/server/` | Server init and Wire DI setup (`wire.go`) | | `pkg/tsdb/` | Time series database query backends | | `pkg/plugins/` | Plugin system and loader | | `pkg/infra/` | Logging, metrics, database access | | `pkg/middleware/` | HTTP middleware | | `pkg/setting/` | Configuration management | **Patterns**: Wire DI (regenerate with `make gen-go`), services implement interfaces in same package, business logic in `pkg/services/<domain>/` not in API handlers, database via `sqlstore`, plugin communication via gRPC/protobuf. ### Frontend (`public/app/`) | Directory | Purpose | | ---------------------- | ----------------------------------------------------- | | `public/app/core/` | Shared services, components, utilities | | `public/app/features/` | Feature code by domain (dashboard, alerting, explore) | | `public/app/plugins/` | Built-in plugins (many are Yarn workspaces) | | `public/app/types/` | TypeScript type definitions | | `public/app/store/` | Redux store configuration | **Patterns**: Redux Toolkit with slices (not old Redux), function components with hooks, Emotion CSS-in-JS via `useStyles2`, RTK Query for data fetching, React Testing Library for tests. ### Shared Packages (`packages/`) `@grafana/data` (data structures), `@grafana/ui` (components), `@grafana/runtime` (runtime services), `@grafana/schema` (CUE-generated types), `@grafana/scenes` (dashboard framework). 
### Backend Apps (`apps/`) Standalone Go apps using Grafana App SDK: `apps/dashboard/`, `apps/folder/`, `apps/alerting/`. ### Plugin Workspaces These built-in plugins require separate build steps: `azuremonitor`, `cloud-monitoring`, `grafana-postgresql-datasource`, `loki`, `tempo`, `jaeger`, `mysql`, `parca`, `zipkin`, `grafana-pyroscope-datasource`, `grafana-testdata-datasource`. Build a specific plugin: `yarn workspace @grafana-plugins/<name> dev` ## Key Notes - **Wire DI**: Backend service init changes require `make gen-go`. Wire catches circular deps at compile time. - **CUE schemas**: Dashboard/panel schemas in `kinds/` generate both Go and TS code via `make gen-cue`. - **Feature toggles**: Defined in `pkg/services/featuremgmt/`, auto-generate code. Run `make gen-feature-toggles` after changes. - **Go workspace**: Defined in `go.work`. Run `make update-workspace` when adding Go modules. - **Build tags**: `oss` (default), `enterprise`, `pro`. - **Config**: Defaults in `conf/defaults.ini`, overrides in `conf/custom.ini`. - **Database migrations**: Live in `pkg/services/sqlstore/migrations/`. Test with `make devenv sources=postgres_tests,mysql_tests` then `make test-go-integration-postgres`. - **CI sharding**: Backend tests use `SHARD`/`SHARDS` env vars for parallelization.
unknown
github
https://github.com/grafana/grafana
AGENTS.md
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.security.authenticator; import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.security.auth.Login; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; import java.util.Map; import javax.security.auth.Subject; import javax.security.auth.callback.Callback; import javax.security.auth.callback.NameCallback; import javax.security.auth.callback.PasswordCallback; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.Configuration; import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; import javax.security.sasl.RealmCallback; /** * Base login class that implements methods common to typical SASL mechanisms. 
*/ public abstract class AbstractLogin implements Login { private static final Logger log = LoggerFactory.getLogger(AbstractLogin.class); private String contextName; private Configuration configuration; private LoginContext loginContext; private AuthenticateCallbackHandler loginCallbackHandler; @Override public void configure(Map<String, ?> configs, String contextName, Configuration configuration, AuthenticateCallbackHandler loginCallbackHandler) { this.contextName = contextName; this.configuration = configuration; this.loginCallbackHandler = loginCallbackHandler; } @Override public LoginContext login() throws LoginException { loginContext = new LoginContext(contextName, null, loginCallbackHandler, configuration); loginContext.login(); log.info("Successfully logged in."); return loginContext; } @Override public Subject subject() { return loginContext.getSubject(); } protected String contextName() { return contextName; } protected Configuration configuration() { return configuration; } /** * Callback handler for creating login context. Login callback handlers * should support the callbacks required for the login modules used by * the KafkaServer and KafkaClient contexts. Kafka does not support * callback handlers which require additional user input. 
* */ public static class DefaultLoginCallbackHandler implements AuthenticateCallbackHandler { @Override public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) { } @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { for (Callback callback : callbacks) { if (callback instanceof NameCallback) { NameCallback nc = (NameCallback) callback; nc.setName(nc.getDefaultName()); } else if (callback instanceof PasswordCallback) { String errorMessage = "Could not login: the client is being asked for a password, but the Kafka" + " client code does not currently support obtaining a password from the user."; throw new UnsupportedCallbackException(callback, errorMessage); } else if (callback instanceof RealmCallback) { RealmCallback rc = (RealmCallback) callback; rc.setText(rc.getDefaultText()); } else { throw new UnsupportedCallbackException(callback, "Unrecognized SASL Login callback"); } } } @Override public void close() { } } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/security/authenticator/AbstractLogin.java
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ The I{sxbuiltin} module provides classes that represent XSD I{builtin} schema objects. """ from logging import getLogger from suds import * from suds.xsd import * from suds.sax.date import * from suds.xsd.sxbase import XBuiltin import datetime as dt log = getLogger(__name__) class XString(XBuiltin): """ Represents an (xsd) <xs:string/> node """ pass class XAny(XBuiltin): """ Represents an (xsd) <any/> node """ def __init__(self, schema, name): XBuiltin.__init__(self, schema, name) self.nillable = False def get_child(self, name): child = XAny(self.schema, name) return (child, []) def any(self): return True class XBoolean(XBuiltin): """ Represents an (xsd) boolean builtin type. """ translation = ( { '1':True,'true':True,'0':False,'false':False }, { True:'true',1:'true',False:'false',0:'false' }, ) def translate(self, value, topython=True): if topython: if isinstance(value, str): return XBoolean.translation[0].get(value) else: return None else: if isinstance(value, (bool,int)): return XBoolean.translation[1].get(value) else: return value class XInteger(XBuiltin): """ Represents an (xsd) xs:int builtin type. 
""" def translate(self, value, topython=True): if topython: if isinstance(value, str) and len(value): return int(value) else: return None else: if isinstance(value, int): return str(value) else: return value class XLong(XBuiltin): """ Represents an (xsd) xs:long builtin type. """ def translate(self, value, topython=True): if topython: if isinstance(value, str) and len(value): return int(value) else: return None else: if isinstance(value, int): return str(value) else: return value class XFloat(XBuiltin): """ Represents an (xsd) xs:float builtin type. """ def translate(self, value, topython=True): if topython: if isinstance(value, str) and len(value): return float(value) else: return None else: if isinstance(value, float): return str(value) else: return value class XDate(XBuiltin): """ Represents an (xsd) xs:date builtin type. """ def translate(self, value, topython=True): if topython: if isinstance(value, str) and len(value): return Date(value).date else: return None else: if isinstance(value, dt.date): return str(Date(value)) else: return value class XTime(XBuiltin): """ Represents an (xsd) xs:time builtin type. """ def translate(self, value, topython=True): if topython: if isinstance(value, str) and len(value): return Time(value).time else: return None else: if isinstance(value, dt.date): return str(Time(value)) else: return value class XDateTime(XBuiltin): """ Represents an (xsd) xs:datetime builtin type. 
""" def translate(self, value, topython=True): if topython: if isinstance(value, str) and len(value): return DateTime(value).datetime else: return None else: if isinstance(value, dt.date): return str(DateTime(value)) else: return value class Factory: tags =\ { # any 'anyType' : XAny, # strings 'string' : XString, 'normalizedString' : XString, 'ID' : XString, 'Name' : XString, 'QName' : XString, 'NCName' : XString, 'anySimpleType' : XString, 'anyURI' : XString, 'NOTATION' : XString, 'token' : XString, 'language' : XString, 'IDREFS' : XString, 'ENTITIES' : XString, 'IDREF' : XString, 'ENTITY' : XString, 'NMTOKEN' : XString, 'NMTOKENS' : XString, # binary 'hexBinary' : XString, 'base64Binary' : XString, # integers 'int' : XInteger, 'integer' : XInteger, 'unsignedInt' : XInteger, 'positiveInteger' : XInteger, 'negativeInteger' : XInteger, 'nonPositiveInteger' : XInteger, 'nonNegativeInteger' : XInteger, # longs 'long' : XLong, 'unsignedLong' : XLong, # shorts 'short' : XInteger, 'unsignedShort' : XInteger, 'byte' : XInteger, 'unsignedByte' : XInteger, # floats 'float' : XFloat, 'double' : XFloat, 'decimal' : XFloat, # dates & times 'date' : XDate, 'time' : XTime, 'dateTime': XDateTime, 'duration': XString, 'gYearMonth' : XString, 'gYear' : XString, 'gMonthDay' : XString, 'gDay' : XString, 'gMonth' : XString, # boolean 'boolean' : XBoolean, } @classmethod def maptag(cls, tag, fn): """ Map (override) tag => I{class} mapping. @param tag: An xsd tag name. @type tag: str @param fn: A function or class. @type fn: fn|class. """ cls.tags[tag] = fn @classmethod def create(cls, schema, name): """ Create an object based on the root tag name. @param schema: A schema object. @type schema: L{schema.Schema} @param name: The name. @type name: str @return: The created object. @rtype: L{XBuiltin} """ fn = cls.tags.get(name) if fn is not None: return fn(schema, name) else: return XBuiltin(schema, name)
unknown
codeparrot/codeparrot-clean
""" Copyright 2008-2011 Free Software Foundation, Inc. This file is part of GNU Radio GNU Radio Companion is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GNU Radio Companion is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA """ import expr_utils from .. base.FlowGraph import FlowGraph as _FlowGraph from .. gui.FlowGraph import FlowGraph as _GUIFlowGraph from .. base.odict import odict import re _variable_matcher = re.compile('^(variable\w*)$') _parameter_matcher = re.compile('^(parameter)$') _monitors_searcher = re.compile('(ctrlport_monitor)') _bussink_searcher = re.compile('^(bus_sink)$') _bussrc_searcher = re.compile('^(bus_source)$') _bus_struct_sink_searcher = re.compile('^(bus_structure_sink)$') _bus_struct_src_searcher = re.compile('^(bus_structure_source)$') class FlowGraph(_FlowGraph, _GUIFlowGraph): def __init__(self, **kwargs): _FlowGraph.__init__(self, **kwargs) _GUIFlowGraph.__init__(self) self._eval_cache = dict() def _eval(self, code, namespace, namespace_hash): """ Evaluate the code with the given namespace. Args: code: a string with python code namespace: a dict representing the namespace namespace_hash: a unique hash for the namespace Returns: the resultant object """ if not code: raise Exception, 'Cannot evaluate empty statement.' 
my_hash = hash(code) ^ namespace_hash #cache if does not exist if not self._eval_cache.has_key(my_hash): self._eval_cache[my_hash] = eval(code, namespace, namespace) #return from cache return self._eval_cache[my_hash] def get_hier_block_stream_io(self, direction): """ Get a list of stream io signatures for this flow graph. Args: direction: a string of 'in' or 'out' Returns: a list of dicts with: type, label, vlen, size, optional """ return filter(lambda p: p['type'] != "message", self.get_hier_block_io(direction)) def get_hier_block_message_io(self, direction): """ Get a list of message io signatures for this flow graph. Args: direction: a string of 'in' or 'out' Returns: a list of dicts with: type, label, vlen, size, optional """ return filter(lambda p: p['type'] == "message", self.get_hier_block_io(direction)) def get_hier_block_io(self, direction): """ Get a list of io ports for this flow graph. Args: direction: a string of 'in' or 'out' Returns: a list of dicts with: type, label, vlen, size, optional """ pads = self.get_pad_sources() if direction in ('sink', 'in') else \ self.get_pad_sinks() if direction in ('source', 'out') else [] ports = [] for pad in pads: master = { 'label': str(pad.get_param('label').get_evaluated()), 'type': str(pad.get_param('type').get_evaluated()), 'vlen': str(pad.get_param('vlen').get_value()), 'size': pad.get_param('type').get_opt('size'), 'optional': bool(pad.get_param('optional').get_evaluated()), } num_ports = pad.get_param('num_streams').get_evaluated() if num_ports > 1: for i in xrange(num_ports): clone = master.copy() clone['label'] += str(i) ports.append(clone) else: ports.append(master) return ports def get_pad_sources(self): """ Get a list of pad source blocks sorted by id order. 
Returns: a list of pad source blocks in this flow graph """ pads = filter(lambda b: b.get_key() == 'pad_source', self.get_enabled_blocks()) return sorted(pads, lambda x, y: cmp(x.get_id(), y.get_id())) def get_pad_sinks(self): """ Get a list of pad sink blocks sorted by id order. Returns: a list of pad sink blocks in this flow graph """ pads = filter(lambda b: b.get_key() == 'pad_sink', self.get_enabled_blocks()) return sorted(pads, lambda x, y: cmp(x.get_id(), y.get_id())) def get_pad_port_global_key(self, port): """ Get the key for a port of a pad source/sink to use in connect() This takes into account that pad blocks may have multiple ports Returns: the key (str) """ key_offset = 0 pads = self.get_pad_sources() if port.is_source() else self.get_pad_sinks() for pad in pads: # using the block param 'type' instead of the port domain here # to emphasize that hier block generation is domain agnostic is_message_pad = pad.get_param('type').get_evaluated() == "message" if port.get_parent() == pad: if is_message_pad: key = pad.get_param('label').get_value() else: key = str(key_offset + int(port.get_key())) return key else: # assuming we have either only sources or sinks if not is_message_pad: key_offset += len(pad.get_ports()) return -1 def get_imports(self): """ Get a set of all import statments in this flow graph namespace. Returns: a set of import statements """ imports = sum([block.get_imports() for block in self.get_enabled_blocks()], []) imports = sorted(set(imports)) return imports def get_variables(self): """ Get a list of all variables in this flow graph namespace. Exclude paramterized variables. Returns: a sorted list of variable blocks in order of dependency (indep -> dep) """ variables = filter(lambda b: _variable_matcher.match(b.get_key()), self.get_enabled_blocks()) return expr_utils.sort_objects(variables, lambda v: v.get_id(), lambda v: v.get_var_make()) def get_parameters(self): """ Get a list of all paramterized variables in this flow graph namespace. 
Returns: a list of paramterized variables """ parameters = filter(lambda b: _parameter_matcher.match(b.get_key()), self.get_enabled_blocks()) return parameters def get_monitors(self): """ Get a list of all ControlPort monitors """ monitors = filter(lambda b: _monitors_searcher.search(b.get_key()), self.get_enabled_blocks()) return monitors def get_bussink(self): bussink = filter(lambda b: _bussink_searcher.search(b.get_key()), self.get_enabled_blocks()) for i in bussink: for j in i.get_params(): if j.get_name() == 'On/Off' and j.get_value() == 'on': return True; return False def get_bussrc(self): bussrc = filter(lambda b: _bussrc_searcher.search(b.get_key()), self.get_enabled_blocks()) for i in bussrc: for j in i.get_params(): if j.get_name() == 'On/Off' and j.get_value() == 'on': return True; return False def get_bus_structure_sink(self): bussink = filter(lambda b: _bus_struct_sink_searcher.search(b.get_key()), self.get_enabled_blocks()) return bussink def get_bus_structure_src(self): bussrc = filter(lambda b: _bus_struct_src_searcher.search(b.get_key()), self.get_enabled_blocks()) return bussrc def rewrite(self): """ Flag the namespace to be renewed. """ def reconnect_bus_blocks(): for block in self.get_blocks(): if 'bus' in map(lambda a: a.get_type(), block.get_sources_gui()): for i in range(len(block.get_sources_gui())): if len(block.get_sources_gui()[i].get_connections()) > 0: source = block.get_sources_gui()[i] sink = [] for j in range(len(source.get_connections())): sink.append(source.get_connections()[j].get_sink()); for elt in source.get_connections(): self.remove_element(elt); for j in sink: self.connect(source, j); self._renew_eval_ns = True _FlowGraph.rewrite(self); reconnect_bus_blocks(); def evaluate(self, expr): """ Evaluate the expression. 
Args: expr: the string expression @throw Exception bad expression Returns: the evaluated data """ if self._renew_eval_ns: self._renew_eval_ns = False #reload namespace n = dict() #load imports for imp in self.get_imports(): try: exec imp in n except: pass #load parameters np = dict() for parameter in self.get_parameters(): try: e = eval(parameter.get_param('value').to_code(), n, n) np[parameter.get_id()] = e except: pass n.update(np) #merge param namespace #load variables for variable in self.get_variables(): try: e = eval(variable.get_var_value(), n, n) n[variable.get_id()] = e except: pass #make namespace public self.n = n self.n_hash = hash(str(n)) #evaluate e = self._eval(expr, self.n, self.n_hash) return e
unknown
codeparrot/codeparrot-clean
from __future__ import absolute_import from rpython.rlib import jit from som.vmobjects.abstract_object import AbstractObject class AstMethod(AbstractObject): _immutable_fields_ = ["_signature", "_invokable", "_embedded_block_methods", "_universe", "_holder"] def __init__(self, signature, invokable, embedded_block_methods, universe): AbstractObject.__init__(self) self._signature = signature self._invokable = invokable self._embedded_block_methods = embedded_block_methods self._universe = universe self._holder = None def get_universe(self): return self._universe @staticmethod def is_primitive(): return False @staticmethod def is_invokable(): """ We use this method to identify methods and primitives """ return True def get_signature(self): return self._signature def get_holder(self): return self._holder def set_holder(self, value): self._holder = value for method in self._embedded_block_methods: method.set_holder(value) @jit.elidable_promote('all') def get_number_of_arguments(self): return self.get_signature().get_number_of_signature_arguments() def invoke(self, receiver, args): return self._invokable.invoke(receiver, args) def __str__(self): return ("Method(" + self.get_holder().get_name().get_embedded_string() + ">>" + str(self.get_signature()) + ")") def get_class(self, universe): return universe.methodClass def merge_point_string(self): """ debug info for the jit """ return "%s>>%s" % (self.get_holder().get_name().get_embedded_string(), self.get_signature().get_embedded_string())
unknown
codeparrot/codeparrot-clean
from django.db import models from django.db.models.query_utils import DeferredAttribute class CustomTypedField(models.TextField): def db_type(self, connection): return "custom_field" class CustomDeferredAttribute(DeferredAttribute): def __get__(self, instance, cls=None): self._count_call(instance, "get") return super().__get__(instance, cls) def __set__(self, instance, value): self._count_call(instance, "set") instance.__dict__[self.field.attname] = value def _count_call(self, instance, get_or_set): count_attr = "_%s_%s_count" % (self.field.attname, get_or_set) count = getattr(instance, count_attr, 0) setattr(instance, count_attr, count + 1) class CustomDescriptorField(models.CharField): descriptor_class = CustomDeferredAttribute
python
github
https://github.com/django/django
tests/field_subclassing/fields.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import eventlet from neutron.agent.linux import async_process from neutron.openstack.common import log as logging LOG = logging.getLogger(__name__) class OvsdbMonitor(async_process.AsyncProcess): """Manages an invocation of 'ovsdb-client monitor'.""" def __init__(self, table_name, columns=None, format=None, root_helper=None, respawn_interval=None): cmd = ['ovsdb-client', 'monitor', table_name] if columns: cmd.append(','.join(columns)) if format: cmd.append('--format=%s' % format) super(OvsdbMonitor, self).__init__(cmd, root_helper=root_helper, respawn_interval=respawn_interval) def _read_stdout(self): data = self._process.stdout.readline() if not data: return #TODO(marun) The default root helper outputs exit errors to # stdout due to bug #1219530. This check can be moved to # _read_stderr once the error is correctly output to stderr. if self.root_helper and self.root_helper in data: self._stderr_lines.put(data) LOG.error(_('Error received from ovsdb monitor: %s') % data) else: self._stdout_lines.put(data) LOG.debug(_('Output received from ovsdb monitor: %s') % data) return data def _read_stderr(self): data = super(OvsdbMonitor, self)._read_stderr() if data: LOG.error(_('Error received from ovsdb monitor: %s') % data) # Do not return value to ensure that stderr output will # stop the monitor. 
class SimpleInterfaceMonitor(OvsdbMonitor): """Monitors the Interface table of the local host's ovsdb for changes. The has_updates() method indicates whether changes to the ovsdb Interface table have been detected since the monitor started or since the previous access. """ def __init__(self, root_helper=None, respawn_interval=None): super(SimpleInterfaceMonitor, self).__init__( 'Interface', columns=['name'], format='json', root_helper=root_helper, respawn_interval=respawn_interval, ) self.data_received = False @property def is_active(self): return (self.data_received and self._kill_event and not self._kill_event.ready()) @property def has_updates(self): """Indicate whether the ovsdb Interface table has been updated. True will be returned if the monitor process is not active. This 'failing open' minimizes the risk of falsely indicating the absense of updates at the expense of potential false positives. """ return bool(list(self.iter_stdout())) or not self.is_active def start(self, block=False, timeout=5): super(SimpleInterfaceMonitor, self).start() if block: eventlet.timeout.Timeout(timeout) while not self.is_active: eventlet.sleep() def _kill(self, *args, **kwargs): self.data_received = False super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs) def _read_stdout(self): data = super(SimpleInterfaceMonitor, self)._read_stdout() if data and not self.data_received: self.data_received = True return data
unknown
codeparrot/codeparrot-clean
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from constants import constants from core.domain import activity_domain from core.domain import activity_services from core.domain import collection_domain from core.domain import collection_services from core.domain import exp_services from core.domain import exp_services_test from core.domain import rating_services from core.domain import rights_manager from core.domain import summary_services from core.domain import user_services from core.tests import test_utils import feconf import utils class ExplorationDisplayableSummariesTest( exp_services_test.ExplorationServicesUnitTests): """Test functions for getting displayable exploration summary dicts.""" ALBERT_EMAIL = 'albert@example.com' BOB_EMAIL = 'bob@example.com' ALBERT_NAME = 'albert' BOB_NAME = 'bob' USER_C_NAME = 'c' USER_D_NAME = 'd' USER_C_EMAIL = 'c@example.com' USER_D_EMAIL = 'd@example.com' USER_C_PROFILE_PICTURE = 'c_profile_picture' EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXP_ID_3 = 'eid3' EXP_ID_4 = 'eid4' EXP_ID_5 = 'eid5' EXPECTED_VERSION_1 = 4 EXPECTED_VERSION_2 = 2 def setUp(self): """Populate the database of explorations and their summaries. The sequence of events is: - (1) Albert creates EXP_ID_1. - (2) Bob edits the title of EXP_ID_1. - (3) Albert creates EXP_ID_2. - (4) Albert edits the title of EXP_ID_1. - (5) Albert edits the title of EXP_ID_2. 
- (6) Bob reverts Albert's last edit to EXP_ID_1. - Bob tries to publish EXP_ID_2, and is denied access. - (7) Albert publishes EXP_ID_2. - (8) Albert creates EXP_ID_3 - (9) Albert publishes EXP_ID_3 - (10) Albert deletes EXP_ID_3 - (1) User_3 (has a profile_picture) creates EXP_ID_4. - (2) User_4 edits the title of EXP_ID_4. - (3) User_4 edits the title of EXP_ID_4. """ super(ExplorationDisplayableSummariesTest, self).setUp() self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL) self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) self.albert = user_services.UserActionsInfo(self.albert_id) self.bob = user_services.UserActionsInfo(self.bob_id) self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id) exp_services.update_exploration( self.bob_id, self.EXP_ID_1, [{ 'cmd': 'edit_exploration_property', 'property_name': 'title', 'new_value': 'Exploration 1 title' }], 'Changed title.') self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id) exp_services.update_exploration( self.albert_id, self.EXP_ID_1, [{ 'cmd': 'edit_exploration_property', 'property_name': 'title', 'new_value': 'Exploration 1 Albert title' }], 'Changed title to Albert1 title.') exp_services.update_exploration( self.albert_id, self.EXP_ID_2, [{ 'cmd': 'edit_exploration_property', 'property_name': 'title', 'new_value': 'Exploration 2 Albert title' }], 'Changed title to Albert2 title.') exp_services.revert_exploration(self.bob_id, self.EXP_ID_1, 3, 2) with self.assertRaisesRegexp( Exception, 'This exploration cannot be published' ): rights_manager.publish_exploration(self.bob, self.EXP_ID_2) rights_manager.publish_exploration(self.albert, self.EXP_ID_2) self.save_new_valid_exploration(self.EXP_ID_3, self.albert_id) rights_manager.publish_exploration(self.albert, self.EXP_ID_3) exp_services.delete_exploration(self.albert_id, self.EXP_ID_3) self.user_c_id = 
self.get_user_id_from_email(self.USER_C_EMAIL) self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL) self.signup(self.USER_C_EMAIL, self.USER_C_NAME) self.signup(self.USER_D_EMAIL, self.USER_D_NAME) user_services.update_profile_picture_data_url( self.user_c_id, self.USER_C_PROFILE_PICTURE) self.save_new_valid_exploration(self.EXP_ID_4, self.user_c_id) exp_services.update_exploration( self.user_d_id, self.EXP_ID_4, [{ 'cmd': 'edit_exploration_property', 'property_name': 'title', 'new_value': 'Exploration updated title' }], 'Changed title once.') exp_services.update_exploration( self.user_d_id, self.EXP_ID_4, [{ 'cmd': 'edit_exploration_property', 'property_name': 'title', 'new_value': 'Exploration updated title again' }], 'Changed title twice.') self.save_new_valid_exploration(self.EXP_ID_5, self.bob_id) def test_get_human_readable_contributors_summary(self): contributors_summary = {self.albert_id: 10, self.bob_id: 13} self.assertEqual({ self.ALBERT_NAME: { 'num_commits': 10, }, self.BOB_NAME: { 'num_commits': 13, } }, summary_services.get_human_readable_contributors_summary( contributors_summary)) contributors_summary = {self.user_c_id: 1, self.user_d_id: 2} self.assertEqual({ self.USER_C_NAME: { 'num_commits': 1, }, self.USER_D_NAME: { 'num_commits': 2, } }, summary_services.get_human_readable_contributors_summary( contributors_summary)) def test_get_displayable_exp_summary_dicts_matching_ids(self): # A list of exp_id's are passed in: # EXP_ID_1 -- private exploration owned by Albert # EXP_ID_2 -- pubished exploration owned by Albert # EXP_ID_3 -- deleted exploration # EXP_ID_5 -- private exploration owned by Bob # Should only return [EXP_ID_2] displayable_summaries = ( summary_services.get_displayable_exp_summary_dicts_matching_ids( [self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5])) expected_summary = { 'category': u'A category', 'community_owned': False, 'id': self.EXP_ID_2, 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'num_views': 0, 
'objective': u'An objective', 'ratings': feconf.get_empty_ratings(), 'status': 'public', 'tags': [], 'thumbnail_bg_color': '#a33f40', 'thumbnail_icon_url': '/subjects/Lightbulb.svg', 'title': u'Exploration 2 Albert title', } self.assertIn('last_updated_msec', displayable_summaries[0]) self.assertDictContainsSubset(expected_summary, displayable_summaries[0]) def test_get_public_and_filtered_private_summary_dicts_for_creator(self): # If a new exploration is created by another user (Bob) and not public, # then Albert cannot see it when querying for explorations. displayable_summaries = ( summary_services.get_displayable_exp_summary_dicts_matching_ids( [self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5], user=self.albert)) self.assertEqual(len(displayable_summaries), 2) self.assertEqual(displayable_summaries[0]['id'], self.EXP_ID_1) self.assertEqual(displayable_summaries[1]['id'], self.EXP_ID_2) # However, if Albert is granted editor access to Bob's exploration, # then Albert has access to the corresponding summary. rights_manager.assign_role_for_exploration( self.bob, self.EXP_ID_5, self.albert_id, rights_manager.ROLE_EDITOR) displayable_summaries = ( summary_services.get_displayable_exp_summary_dicts_matching_ids( [self.EXP_ID_1, self.EXP_ID_2, self.EXP_ID_3, self.EXP_ID_5], user=self.albert)) self.assertEqual(len(displayable_summaries), 3) self.assertEqual(displayable_summaries[0]['status'], 'private') self.assertEqual(displayable_summaries[0]['id'], self.EXP_ID_1) self.assertEqual(displayable_summaries[1]['status'], 'public') self.assertEqual(displayable_summaries[1]['id'], self.EXP_ID_2) self.assertEqual(displayable_summaries[2]['status'], 'private') self.assertEqual(displayable_summaries[2]['id'], self.EXP_ID_5) class LibraryGroupsTest(exp_services_test.ExplorationServicesUnitTests): """Test functions for getting summary dicts for library groups.""" def setUp(self): """Populate the database of explorations and their summaries. 
The sequence of events is: - (1) Admin logs in. - (2) Admin access admin page. - (3) Admin reloads exploration with id '2'. - (4) Admin logs out. """ super(LibraryGroupsTest, self).setUp() self.login(self.ADMIN_EMAIL, is_super_admin=True) response = self.testapp.get('/admin') csrf_token = self.get_csrf_token_from_response(response) self.post_json('/adminhandler', { 'action': 'reload_exploration', 'exploration_id': '2' }, csrf_token) self.logout() def test_get_library_groups(self): """The exploration with id '2' is an exploration in the Mathematics category. The call to get_library_groups() should return the exploration as part of the Mathematics & Statistics group. """ library_groups = summary_services.get_library_groups([]) expected_exploration_summary_dict = { 'category': u'Algorithms', 'community_owned': True, 'id': '2', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'num_views': 0, 'objective': u'discover the binary search algorithm', 'ratings': feconf.get_empty_ratings(), 'status': u'public', 'tags': [], 'title': u'The Lazy Magician', 'thumbnail_bg_color': '#d0982a', 'thumbnail_icon_url': '/subjects/Algorithms.svg', } expected_group = { 'categories': ['Algorithms', 'Computing', 'Programming'], 'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING', } self.assertEqual(len(library_groups), 1) self.assertDictContainsSubset(expected_group, library_groups[0]) self.assertEqual( len(library_groups[0]['activity_summary_dicts']), 1) actual_exploration_summary_dict = ( library_groups[0]['activity_summary_dicts'][0]) self.assertDictContainsSubset(expected_exploration_summary_dict, ( actual_exploration_summary_dict)) class FeaturedExplorationDisplayableSummariesTest( test_utils.GenericTestBase): """Test functions for getting displayable featured exploration summary dicts. """ ALBERT_NAME = 'albert' ALBERT_EMAIL = 'albert@example.com' EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' LANGUAGE_CODE_ES = 'es' def setUp(self): """Populate the database of explorations and their summaries. 
The sequence of events is: - (1) Albert creates EXP_ID_1. - (2) Albert creates EXP_ID_2. - (3) Albert publishes EXP_ID_1. - (4) Albert publishes EXP_ID_2. - (5) Admin user is set up. """ super(FeaturedExplorationDisplayableSummariesTest, self).setUp() self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL) self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL) self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.albert = user_services.UserActionsInfo(self.albert_id) self.save_new_valid_exploration( self.EXP_ID_1, self.albert_id, language_code=self.LANGUAGE_CODE_ES) self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id) rights_manager.publish_exploration(self.albert, self.EXP_ID_1) rights_manager.publish_exploration(self.albert, self.EXP_ID_2) self.set_admins([self.ADMIN_USERNAME]) def test_for_featured_explorations(self): """Note that both EXP_ID_1 and EXP_ID_2 are public. However, only EXP_ID_2 is featured, so the call to get_featured_explorations() should only return [EXP_ID_2]. 
""" activity_services.update_featured_activity_references([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_2) ]) featured_activity_summaries = ( summary_services.get_featured_activity_summary_dicts([ constants.DEFAULT_LANGUAGE_CODE])) self.assertEqual(len(featured_activity_summaries), 1) self.assertDictContainsSubset({ 'status': 'public', 'thumbnail_bg_color': '#a33f40', 'community_owned': False, 'tags': [], 'thumbnail_icon_url': '/subjects/Lightbulb.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, 'category': 'A category', 'ratings': feconf.get_empty_ratings(), 'title': 'A title', 'num_views': 0, 'objective': 'An objective' }, featured_activity_summaries[0]) def test_language_code_filter(self): """Note that both EXP_ID_1 is in Spanish and EXP_ID_2 is in English.""" activity_services.update_featured_activity_references([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_1), activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_2) ]) featured_activity_summaries = ( summary_services.get_featured_activity_summary_dicts([ constants.DEFAULT_LANGUAGE_CODE])) self.assertEqual(len(featured_activity_summaries), 1) self.assertDictContainsSubset({ 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, }, featured_activity_summaries[0]) featured_activity_summaries = ( summary_services.get_featured_activity_summary_dicts([ self.LANGUAGE_CODE_ES])) self.assertEqual(len(featured_activity_summaries), 1) self.assertDictContainsSubset({ 'language_code': self.LANGUAGE_CODE_ES, 'id': self.EXP_ID_1, }, featured_activity_summaries[0]) featured_activity_summaries = ( summary_services.get_featured_activity_summary_dicts([ constants.DEFAULT_LANGUAGE_CODE, self.LANGUAGE_CODE_ES])) self.assertEqual(len(featured_activity_summaries), 2) self.assertDictContainsSubset({ 'language_code': self.LANGUAGE_CODE_ES, 'id': self.EXP_ID_1, }, 
featured_activity_summaries[0]) self.assertDictContainsSubset({ 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, }, featured_activity_summaries[1]) featured_activity_summaries = ( summary_services.get_featured_activity_summary_dicts([ 'nonexistent_language_code'])) self.assertEqual(len(featured_activity_summaries), 0) featured_activity_summaries = ( summary_services.get_featured_activity_summary_dicts([])) self.assertEqual(len(featured_activity_summaries), 0) class CollectionLearnerDictTests(test_utils.GenericTestBase): """Test get_learner_collection_dict_by_id.""" EXP_ID = 'exploration_id' EXP_ID_1 = 'exp_id1' COLLECTION_ID = 'A_collection_id' def setUp(self): super(CollectionLearnerDictTests, self).setUp() self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) user_services.create_new_user(self.owner_id, self.OWNER_EMAIL) user_services.create_new_user(self.editor_id, self.EDITOR_EMAIL) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.owner = user_services.UserActionsInfo(self.owner_id) self.editor = user_services.UserActionsInfo(self.editor_id) def test_get_learner_dict_with_deleted_exp_fails_validation(self): self.save_new_valid_collection( self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) exp_services.delete_exploration(self.owner_id, self.EXP_ID) with self.assertRaisesRegexp( utils.ValidationError, 'Expected collection to only reference valid explorations, but ' 'found an exploration with ID: exploration_id'): summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) def test_get_learner_dict_when_referencing_inaccessible_explorations(self): self.save_new_default_collection(self.COLLECTION_ID, self.owner_id) self.save_new_valid_exploration(self.EXP_ID, self.editor_id) 
collection_services.update_collection( self.owner_id, self.COLLECTION_ID, [{ 'cmd': collection_domain.CMD_ADD_COLLECTION_NODE, 'exploration_id': self.EXP_ID }], 'Added another creator\'s private exploration') # A collection cannot access someone else's private exploration. rights_manager.publish_collection(self.owner, self.COLLECTION_ID) with self.assertRaisesRegexp( utils.ValidationError, 'Expected collection to only reference valid explorations, but ' 'found an exploration with ID: exploration_id'): summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) # After the exploration is published, the dict can now be created. rights_manager.publish_exploration(self.editor, self.EXP_ID) summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) def test_get_learner_dict_with_private_exp_fails_validation(self): self.save_new_valid_collection( self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) # Since both the collection and exploration are private, the learner # dict can be created. summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) # A public collection referencing a private exploration is bad, however. rights_manager.publish_collection(self.owner, self.COLLECTION_ID) with self.assertRaisesRegexp( utils.ValidationError, 'Cannot reference a private exploration within a public ' 'collection, exploration ID: exploration_id'): summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) # After the exploration is published, the learner dict can be crated # again. 
rights_manager.publish_exploration(self.owner, self.EXP_ID) summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner) def test_get_learner_dict_with_allowed_private_exps(self): self.save_new_valid_collection( self.COLLECTION_ID, self.owner_id, exploration_id=self.EXP_ID) self.save_new_valid_exploration(self.EXP_ID_1, self.editor_id) collection_services.update_collection( self.owner_id, self.COLLECTION_ID, [{ 'cmd': collection_domain.CMD_ADD_COLLECTION_NODE, 'exploration_id': self.EXP_ID_1 }], 'Added another creator\'s private exploration') rights_manager.publish_collection(self.owner, self.COLLECTION_ID) collection_dict = summary_services.get_learner_collection_dict_by_id( self.COLLECTION_ID, self.owner, allow_invalid_explorations=True) # The author's private exploration will be contained in the public # collection since invalid explorations are being allowed, but the # private exploration of another author will not. collection_node_dicts = collection_dict['nodes'] self.assertEqual( collection_node_dicts[0]['exploration_summary']['id'], self.EXP_ID) self.assertIsNone(collection_node_dicts[1]['exploration_summary']) class TopRatedExplorationDisplayableSummariesTest( test_utils.GenericTestBase): """Test functions for getting displayable top rated exploration summary dicts. """ ALBERT_EMAIL = 'albert@example.com' ALICE_EMAIL = 'alice@example.com' BOB_EMAIL = 'bob@example.com' ALBERT_NAME = 'albert' ALICE_NAME = 'alice' BOB_NAME = 'bob' EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXP_ID_3 = 'eid3' EXP_ID_4 = 'eid4' EXP_ID_5 = 'eid5' EXP_ID_6 = 'eid6' EXP_ID_7 = 'eid7' EXP_ID_8 = 'eid8' EXP_ID_9 = 'eid9' def setUp(self): """Populate the database of explorations and their summaries. The sequence of events is: - (1) Albert creates EXP_ID_1. - (2) Albert creates EXP_ID_2. - (3) Albert creates EXP_ID_3. - (4) Albert creates EXP_ID_4. - (5) Albert creates EXP_ID_5. - (6) Albert creates EXP_ID_6. - (7) Albert creates EXP_ID_7. - (8) Albert creates EXP_ID_8. 
- (9) Albert creates EXP_ID_9. - (10) Albert publishes EXP_ID_1. - (11) Albert publishes EXP_ID_2. - (12) Albert publishes EXP_ID_3. - (13) Albert publishes EXP_ID_4. - (14) Albert publishes EXP_ID_5. - (15) Albert publishes EXP_ID_6. - (16) Albert publishes EXP_ID_7. - (17) Albert publishes EXP_ID_8. - (18) Albert publishes EXP_ID_9. - (19) Admin user is set up. """ super(TopRatedExplorationDisplayableSummariesTest, self).setUp() self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL) self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL) self.alice_id = self.get_user_id_from_email(self.ALICE_EMAIL) self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL) self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.signup(self.ALICE_EMAIL, self.ALICE_NAME) self.signup(self.BOB_EMAIL, self.BOB_NAME) self.albert = user_services.UserActionsInfo(self.albert_id) self.save_new_valid_exploration(self.EXP_ID_1, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_2, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_3, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_4, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_5, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_6, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_7, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_8, self.albert_id) self.save_new_valid_exploration(self.EXP_ID_9, self.albert_id) rights_manager.publish_exploration(self.albert, self.EXP_ID_1) rights_manager.publish_exploration(self.albert, self.EXP_ID_2) rights_manager.publish_exploration(self.albert, self.EXP_ID_3) rights_manager.publish_exploration(self.albert, self.EXP_ID_4) rights_manager.publish_exploration(self.albert, self.EXP_ID_5) rights_manager.publish_exploration(self.albert, self.EXP_ID_6) rights_manager.publish_exploration(self.albert, self.EXP_ID_7) rights_manager.publish_exploration(self.albert, 
self.EXP_ID_8) rights_manager.publish_exploration(self.albert, self.EXP_ID_9) self.set_admins([self.ADMIN_USERNAME]) def test_at_most_eight_top_rated_explorations(self): """Note that at most 8 explorations should be returned. """ rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_2, 5) rating_services.assign_rating_to_exploration( self.alice_id, self.EXP_ID_3, 5) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_3, 4) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_4, 4) rating_services.assign_rating_to_exploration( self.alice_id, self.EXP_ID_5, 4) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_5, 3) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_6, 3) rating_services.assign_rating_to_exploration( self.alice_id, self.EXP_ID_6, 2) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_8, 2) rating_services.assign_rating_to_exploration( self.alice_id, self.EXP_ID_8, 2) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_7, 2) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_9, 2) rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_1, 1) top_rated_exploration_summaries = ( summary_services.get_top_rated_exploration_summary_dicts( [constants.DEFAULT_LANGUAGE_CODE], feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE)) expected_summary = { 'status': u'public', 'thumbnail_bg_color': '#a33f40', 'community_owned': False, 'tags': [], 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'thumbnail_icon_url': '/subjects/Lightbulb.svg', 'id': self.EXP_ID_3, 'category': u'A category', 'ratings': {u'1': 0, u'3': 0, u'2': 0, u'5': 1, u'4': 1}, 'title': u'A title', 'num_views': 0, 'objective': u'An objective' } self.assertDictContainsSubset( expected_summary, top_rated_exploration_summaries[0]) expected_ordering = [ self.EXP_ID_3, self.EXP_ID_2, self.EXP_ID_5, self.EXP_ID_4, self.EXP_ID_6, 
self.EXP_ID_8, self.EXP_ID_7, self.EXP_ID_9] actual_ordering = [exploration['id'] for exploration in top_rated_exploration_summaries] self.assertEqual(expected_ordering, actual_ordering) def test_only_explorations_with_ratings_are_returned(self): """Note that only explorations with ratings will be included """ rating_services.assign_rating_to_exploration( self.bob_id, self.EXP_ID_2, 5) top_rated_exploration_summaries = ( summary_services.get_top_rated_exploration_summary_dicts( [constants.DEFAULT_LANGUAGE_CODE], feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE)) expected_summary = { 'status': u'public', 'thumbnail_bg_color': '#a33f40', 'community_owned': False, 'tags': [], 'thumbnail_icon_url': '/subjects/Lightbulb.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, 'category': u'A category', 'ratings': {u'1': 0, u'3': 0, u'2': 0, u'5': 1, u'4': 0}, 'title': u'A title', 'num_views': 0, 'objective': u'An objective' } self.assertDictContainsSubset( expected_summary, top_rated_exploration_summaries[0]) expected_ordering = [self.EXP_ID_2] actual_ordering = [exploration['id'] for exploration in top_rated_exploration_summaries] self.assertEqual(expected_ordering, actual_ordering) class RecentlyPublishedExplorationDisplayableSummariesTest( test_utils.GenericTestBase): """Test functions for getting displayable recently published exploration summary dicts. """ ALBERT_NAME = 'albert' ALBERT_EMAIL = 'albert@example.com' EXP_ID_1 = 'eid1' EXP_ID_2 = 'eid2' EXP_ID_3 = 'eid3' def setUp(self): """Populate the database of explorations and their summaries. The sequence of events is: - (1) Albert creates EXP_ID_1. - (2) Albert creates EXP_ID_2. - (3) Albert creates EXP_ID_3. - (4) Albert publishes EXP_ID_1. - (5) Albert publishes EXP_ID_2. - (6) Albert publishes EXP_ID_3. - (7) Admin user is set up. 
""" super(RecentlyPublishedExplorationDisplayableSummariesTest, self).setUp() self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL) self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL) self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.albert = user_services.UserActionsInfo(self.albert_id) self.save_new_valid_exploration( self.EXP_ID_1, self.albert_id, end_state_name='End') self.save_new_valid_exploration( self.EXP_ID_2, self.albert_id, end_state_name='End') self.save_new_valid_exploration( self.EXP_ID_3, self.albert_id, end_state_name='End') rights_manager.publish_exploration(self.albert, self.EXP_ID_2) rights_manager.publish_exploration(self.albert, self.EXP_ID_1) rights_manager.publish_exploration(self.albert, self.EXP_ID_3) self.set_admins([self.ADMIN_USERNAME]) def test_for_recently_published_explorations(self): """ Tests for recently published explorations. """ recently_published_exploration_summaries = ( summary_services.get_recently_published_exp_summary_dicts( feconf.RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE)) test_summary_1 = { 'status': 'public', 'thumbnail_bg_color': '#a33f40', 'community_owned': False, 'tags': [], 'thumbnail_icon_url': '/subjects/Lightbulb.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_1, 'category': u'A category', 'ratings': feconf.get_empty_ratings(), 'title': u'A title', 'num_views': 0, 'objective': u'An objective' } test_summary_2 = { 'status': 'public', 'thumbnail_bg_color': '#a33f40', 'community_owned': False, 'tags': [], 'thumbnail_icon_url': '/subjects/Lightbulb.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_2, 'category': u'A category', 'ratings': feconf.get_empty_ratings(), 'title': u'A title', 'num_views': 0, 'objective': u'An objective' } test_summary_3 = { 'status': 'public', 'thumbnail_bg_color': '#a33f40', 'community_owned': False, 'tags': [], 'thumbnail_icon_url': 
'/subjects/Lightbulb.svg', 'language_code': constants.DEFAULT_LANGUAGE_CODE, 'id': self.EXP_ID_3, 'category': u'A category', 'ratings': feconf.get_empty_ratings(), 'title': u'A title', 'num_views': 0, 'objective': u'An objective' } self.assertDictContainsSubset( test_summary_3, recently_published_exploration_summaries[0]) self.assertDictContainsSubset( test_summary_1, recently_published_exploration_summaries[1]) self.assertDictContainsSubset( test_summary_2, recently_published_exploration_summaries[2]) # Test that editing an exploration does not change its # 'recently-published' status. exp_services.update_exploration( self.albert_id, self.EXP_ID_1, [{ 'cmd': 'edit_exploration_property', 'property_name': 'title', 'new_value': 'New title' }], 'Changed title.') recently_published_exploration_summaries = ( summary_services.get_recently_published_exp_summary_dicts( feconf.RECENTLY_PUBLISHED_QUERY_LIMIT_FOR_LIBRARY_PAGE)) self.assertEqual( recently_published_exploration_summaries[1]['title'], 'New title') self.assertDictContainsSubset( test_summary_3, recently_published_exploration_summaries[0]) class ActivityReferenceAccessCheckerTests(test_utils.GenericTestBase): """Tests for requiring that activity references are public.""" EXP_ID_0 = 'exp_id_0' EXP_ID_1 = 'exp_id_1' COL_ID_2 = 'col_id_2' def setUp(self): super(ActivityReferenceAccessCheckerTests, self).setUp() self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.owner = user_services.UserActionsInfo(self.owner_id) def test_requiring_nonexistent_activities_be_public_raises_exception(self): with self.assertRaisesRegexp(Exception, 'non-existent exploration'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, 'fake')]) with self.assertRaisesRegexp(Exception, 'non-existent collection'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( 
constants.ACTIVITY_TYPE_COLLECTION, 'fake')]) def test_requiring_private_activities_to_be_public_raises_exception(self): self.save_new_valid_exploration(self.EXP_ID_0, self.owner_id) self.save_new_valid_exploration(self.EXP_ID_1, self.owner_id) self.save_new_valid_collection( self.COL_ID_2, self.owner_id, exploration_id=self.EXP_ID_0) with self.assertRaisesRegexp(Exception, 'private exploration'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_0)]) with self.assertRaisesRegexp(Exception, 'private collection'): summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_COLLECTION, self.COL_ID_2)]) def test_requiring_public_activities_to_be_public_succeeds(self): self.save_new_valid_exploration(self.EXP_ID_0, self.owner_id) self.save_new_valid_collection( self.COL_ID_2, self.owner_id, exploration_id=self.EXP_ID_0) rights_manager.publish_exploration(self.owner, self.EXP_ID_0) rights_manager.publish_collection(self.owner, self.COL_ID_2) # There are no validation errors. 
summary_services.require_activities_to_be_public([ activity_domain.ActivityReference( constants.ACTIVITY_TYPE_EXPLORATION, self.EXP_ID_0), activity_domain.ActivityReference( constants.ACTIVITY_TYPE_COLLECTION, self.COL_ID_2)]) class CollectionNodeMetadataDictsTest( exp_services_test.ExplorationServicesUnitTests): """Test functions for getting collection node metadata dicts.""" ALBERT_EMAIL = 'albert@example.com' ALBERT_NAME = 'albert' BOB_EMAIL = 'bob@example.com' BOB_NAME = 'bob' EXP_ID1 = 'eid1' EXP_ID2 = 'eid2' EXP_ID3 = 'eid3' EXP_ID4 = 'eid4' EXP_ID5 = 'eid5' INVALID_EXP_ID = 'invalid_exp_id' def setUp(self): super(CollectionNodeMetadataDictsTest, self).setUp() self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL) self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME) self.bob_id = self.get_user_id_from_email(self.BOB_EMAIL) self.signup(self.BOB_EMAIL, self.BOB_NAME) self.albert = user_services.UserActionsInfo(self.albert_id) self.bob = user_services.UserActionsInfo(self.bob_id) self.save_new_valid_exploration(self.EXP_ID1, self.albert_id, title='Exploration 1 Albert title', objective='An objective 1') self.save_new_valid_exploration(self.EXP_ID2, self.albert_id, title='Exploration 2 Albert title', objective='An objective 2') self.save_new_valid_exploration(self.EXP_ID3, self.albert_id, title='Exploration 3 Albert title', objective='An objective 3') self.save_new_valid_exploration(self.EXP_ID4, self.bob_id, title='Exploration 4 Bob title', objective='An objective 4') self.save_new_valid_exploration(self.EXP_ID5, self.albert_id, title='Exploration 5 Albert title', objective='An objective 5') rights_manager.publish_exploration(self.albert, self.EXP_ID1) rights_manager.publish_exploration(self.albert, self.EXP_ID2) rights_manager.publish_exploration(self.albert, self.EXP_ID3) rights_manager.publish_exploration(self.bob, self.EXP_ID4) exp_services.index_explorations_given_ids([ self.EXP_ID1, self.EXP_ID2, self.EXP_ID3, self.EXP_ID4]) def 
test_get_exploration_metadata_dicts(self): metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID1, self.EXP_ID2, self.EXP_ID3], self.albert)) expected_metadata_dicts = [{ 'id': self.EXP_ID1, 'objective': u'An objective 1', 'title': u'Exploration 1 Albert title', }, { 'id': self.EXP_ID2, 'objective': u'An objective 2', 'title': u'Exploration 2 Albert title', }, { 'id': self.EXP_ID3, 'objective': u'An objective 3', 'title': u'Exploration 3 Albert title', }] self.assertEqual(expected_metadata_dicts, metadata_dicts) def test_private_exps_of_another_user_are_not_returned(self): metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID5, self.EXP_ID4], self.bob)) expected_metadata_dicts = [{ 'id': self.EXP_ID4, 'objective': u'An objective 4', 'title': u'Exploration 4 Bob title', }] self.assertEqual(expected_metadata_dicts, metadata_dicts) def test_public_exps_of_another_user_are_returned(self): metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID2, self.EXP_ID3, self.EXP_ID4], self.bob)) expected_metadata_dicts = [{ 'id': self.EXP_ID2, 'objective': u'An objective 2', 'title': u'Exploration 2 Albert title', }, { 'id': self.EXP_ID3, 'objective': u'An objective 3', 'title': u'Exploration 3 Albert title', }, { 'id': self.EXP_ID4, 'objective': u'An objective 4', 'title': u'Exploration 4 Bob title', }] self.assertEqual(expected_metadata_dicts, metadata_dicts) def test_deleted_exps_are_not_returned(self): exp_services.delete_exploration(self.albert_id, self.EXP_ID2) metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID2, self.EXP_ID3, self.EXP_ID4], self.bob)) expected_metadata_dicts = [{ 'id': self.EXP_ID3, 'objective': u'An objective 3', 'title': u'Exploration 3 Albert title', }, { 'id': self.EXP_ID4, 'objective': u'An objective 4', 'title': u'Exploration 4 Bob title', }] self.assertEqual(expected_metadata_dicts, metadata_dicts) def 
test_exp_metadata_dicts_matching_query(self): metadata_dicts, _ = ( summary_services.get_exp_metadata_dicts_matching_query( 'Exploration 1', None, self.albert)) expected_metadata_dicts = [{ 'id': self.EXP_ID1, 'objective': u'An objective 1', 'title': u'Exploration 1 Albert title', }] self.assertEqual(expected_metadata_dicts, metadata_dicts) def test_invalid_exp_ids(self): metadata_dicts = (summary_services.get_exploration_metadata_dicts( [self.EXP_ID3, self.INVALID_EXP_ID], self.albert)) expected_metadata_dicts = [{ 'id': self.EXP_ID3, 'objective': u'An objective 3', 'title': u'Exploration 3 Albert title', }] self.assertEqual(expected_metadata_dicts, metadata_dicts)
unknown
codeparrot/codeparrot-clean
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_USENOEXCEPTCHECK_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_USENOEXCEPTCHECK_H #include "../ClangTidyCheck.h" namespace clang::tidy::modernize { /// Replace dynamic exception specifications, with /// `noexcept` (or user-defined macro) or `noexcept(false)`. /// \code /// void foo() throw(); /// void bar() throw(int); /// \endcode /// Is converted to: /// \code /// void foo() ; /// void bar() noexcept(false); /// \endcode /// /// For the user-facing documentation see: /// https://clang.llvm.org/extra/clang-tidy/checks/modernize/use-noexcept.html class UseNoexceptCheck : public ClangTidyCheck { public: UseNoexceptCheck(StringRef Name, ClangTidyContext *Context); bool isLanguageVersionSupported(const LangOptions &LangOpts) const override { return LangOpts.CPlusPlus11; } void storeOptions(ClangTidyOptions::OptionMap &Opts) override; void registerMatchers(ast_matchers::MatchFinder *Finder) override; void check(const ast_matchers::MatchFinder::MatchResult &Result) override; private: const StringRef NoexceptMacro; const bool UseNoexceptFalse; }; } // namespace clang::tidy::modernize #endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MODERNIZE_USENOEXCEPTCHECK_H
c
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/modernize/UseNoexceptCheck.h
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' ========================================================================= Program: Visualization Toolkit Module: TestNamedColorsIntegration.py Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. ========================================================================= ''' import vtk import vtk.test.Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() class TestSphereWidget(vtk.test.Testing.vtkTest): def testSphereWidget(self): # This example demonstrates how to use the vtkSphereWidget to control the # position of a light. # These are the pre-recorded events Recording = \ "# StreamVersion 1\n\ CharEvent 23 266 0 0 105 1 i\n\ KeyReleaseEvent 23 266 0 0 105 1 i\n\ EnterEvent 69 294 0 0 0 0 i\n\ MouseMoveEvent 69 294 0 0 0 0 i\n\ MouseMoveEvent 68 293 0 0 0 0 i\n\ MouseMoveEvent 67 292 0 0 0 0 i\n\ MouseMoveEvent 66 289 0 0 0 0 i\n\ MouseMoveEvent 66 282 0 0 0 0 i\n\ MouseMoveEvent 66 271 0 0 0 0 i\n\ MouseMoveEvent 69 253 0 0 0 0 i\n\ MouseMoveEvent 71 236 0 0 0 0 i\n\ MouseMoveEvent 74 219 0 0 0 0 i\n\ MouseMoveEvent 76 208 0 0 0 0 i\n\ MouseMoveEvent 78 190 0 0 0 0 i\n\ MouseMoveEvent 78 173 0 0 0 0 i\n\ MouseMoveEvent 77 162 0 0 0 0 i\n\ MouseMoveEvent 77 151 0 0 0 0 i\n\ MouseMoveEvent 77 139 0 0 0 0 i\n\ MouseMoveEvent 76 125 0 0 0 0 i\n\ MouseMoveEvent 73 114 0 0 0 0 i\n\ MouseMoveEvent 73 106 0 0 0 0 i\n\ MouseMoveEvent 73 101 0 0 0 0 i\n\ MouseMoveEvent 72 95 0 0 0 0 i\n\ MouseMoveEvent 72 92 0 0 0 0 i\n\ MouseMoveEvent 70 89 0 0 0 0 i\n\ MouseMoveEvent 69 86 0 0 0 0 i\n\ MouseMoveEvent 67 84 0 0 0 0 i\n\ MouseMoveEvent 65 81 0 0 0 0 i\n\ MouseMoveEvent 60 79 0 0 0 0 i\n\ MouseMoveEvent 59 79 0 0 0 
0 i\n\ MouseMoveEvent 58 79 0 0 0 0 i\n\ MouseMoveEvent 57 78 0 0 0 0 i\n\ MouseMoveEvent 55 78 0 0 0 0 i\n\ MouseMoveEvent 54 77 0 0 0 0 i\n\ LeftButtonPressEvent 54 77 0 0 0 0 i\n\ MouseMoveEvent 61 79 0 0 0 0 i\n\ MouseMoveEvent 67 83 0 0 0 0 i\n\ MouseMoveEvent 72 88 0 0 0 0 i\n\ MouseMoveEvent 77 90 0 0 0 0 i\n\ MouseMoveEvent 78 91 0 0 0 0 i\n\ MouseMoveEvent 80 92 0 0 0 0 i\n\ MouseMoveEvent 84 93 0 0 0 0 i\n\ MouseMoveEvent 85 94 0 0 0 0 i\n\ MouseMoveEvent 88 97 0 0 0 0 i\n\ MouseMoveEvent 90 100 0 0 0 0 i\n\ MouseMoveEvent 92 102 0 0 0 0 i\n\ MouseMoveEvent 94 103 0 0 0 0 i\n\ MouseMoveEvent 97 105 0 0 0 0 i\n\ MouseMoveEvent 101 107 0 0 0 0 i\n\ MouseMoveEvent 102 109 0 0 0 0 i\n\ MouseMoveEvent 104 111 0 0 0 0 i\n\ MouseMoveEvent 108 113 0 0 0 0 i\n\ MouseMoveEvent 112 115 0 0 0 0 i\n\ MouseMoveEvent 118 119 0 0 0 0 i\n\ MouseMoveEvent 118 120 0 0 0 0 i\n\ MouseMoveEvent 118 123 0 0 0 0 i\n\ MouseMoveEvent 120 125 0 0 0 0 i\n\ MouseMoveEvent 122 128 0 0 0 0 i\n\ MouseMoveEvent 123 129 0 0 0 0 i\n\ MouseMoveEvent 125 132 0 0 0 0 i\n\ MouseMoveEvent 125 134 0 0 0 0 i\n\ MouseMoveEvent 127 138 0 0 0 0 i\n\ MouseMoveEvent 127 142 0 0 0 0 i\n\ MouseMoveEvent 127 147 0 0 0 0 i\n\ MouseMoveEvent 126 152 0 0 0 0 i\n\ MouseMoveEvent 126 155 0 0 0 0 i\n\ MouseMoveEvent 125 160 0 0 0 0 i\n\ MouseMoveEvent 125 167 0 0 0 0 i\n\ MouseMoveEvent 125 169 0 0 0 0 i\n\ MouseMoveEvent 125 174 0 0 0 0 i\n\ MouseMoveEvent 122 179 0 0 0 0 i\n\ MouseMoveEvent 120 183 0 0 0 0 i\n\ MouseMoveEvent 116 187 0 0 0 0 i\n\ MouseMoveEvent 113 192 0 0 0 0 i\n\ MouseMoveEvent 113 193 0 0 0 0 i\n\ MouseMoveEvent 111 195 0 0 0 0 i\n\ MouseMoveEvent 108 198 0 0 0 0 i\n\ MouseMoveEvent 106 200 0 0 0 0 i\n\ MouseMoveEvent 104 202 0 0 0 0 i\n\ MouseMoveEvent 103 203 0 0 0 0 i\n\ MouseMoveEvent 99 205 0 0 0 0 i\n\ MouseMoveEvent 97 207 0 0 0 0 i\n\ MouseMoveEvent 94 208 0 0 0 0 i\n\ MouseMoveEvent 91 210 0 0 0 0 i\n\ MouseMoveEvent 89 211 0 0 0 0 i\n\ MouseMoveEvent 86 211 0 0 0 0 i\n\ 
MouseMoveEvent 84 211 0 0 0 0 i\n\ MouseMoveEvent 80 211 0 0 0 0 i\n\ MouseMoveEvent 77 211 0 0 0 0 i\n\ MouseMoveEvent 75 211 0 0 0 0 i\n\ MouseMoveEvent 71 211 0 0 0 0 i\n\ MouseMoveEvent 68 211 0 0 0 0 i\n\ MouseMoveEvent 66 210 0 0 0 0 i\n\ MouseMoveEvent 62 210 0 0 0 0 i\n\ MouseMoveEvent 58 209 0 0 0 0 i\n\ MouseMoveEvent 54 207 0 0 0 0 i\n\ MouseMoveEvent 52 204 0 0 0 0 i\n\ MouseMoveEvent 51 203 0 0 0 0 i\n\ MouseMoveEvent 51 200 0 0 0 0 i\n\ MouseMoveEvent 48 196 0 0 0 0 i\n\ MouseMoveEvent 45 187 0 0 0 0 i\n\ MouseMoveEvent 45 181 0 0 0 0 i\n\ MouseMoveEvent 44 168 0 0 0 0 i\n\ MouseMoveEvent 40 161 0 0 0 0 i\n\ MouseMoveEvent 39 154 0 0 0 0 i\n\ MouseMoveEvent 38 146 0 0 0 0 i\n\ MouseMoveEvent 35 131 0 0 0 0 i\n\ MouseMoveEvent 34 121 0 0 0 0 i\n\ MouseMoveEvent 34 110 0 0 0 0 i\n\ MouseMoveEvent 34 103 0 0 0 0 i\n\ MouseMoveEvent 34 91 0 0 0 0 i\n\ MouseMoveEvent 34 86 0 0 0 0 i\n\ MouseMoveEvent 34 73 0 0 0 0 i\n\ MouseMoveEvent 35 66 0 0 0 0 i\n\ MouseMoveEvent 37 60 0 0 0 0 i\n\ MouseMoveEvent 37 53 0 0 0 0 i\n\ MouseMoveEvent 38 50 0 0 0 0 i\n\ MouseMoveEvent 38 48 0 0 0 0 i\n\ MouseMoveEvent 41 45 0 0 0 0 i\n\ MouseMoveEvent 43 45 0 0 0 0 i\n\ MouseMoveEvent 44 45 0 0 0 0 i\n\ MouseMoveEvent 47 43 0 0 0 0 i\n\ MouseMoveEvent 51 44 0 0 0 0 i\n\ MouseMoveEvent 54 44 0 0 0 0 i\n\ MouseMoveEvent 55 44 0 0 0 0 i\n\ MouseMoveEvent 59 44 0 0 0 0 i\n\ MouseMoveEvent 64 44 0 0 0 0 i\n\ MouseMoveEvent 67 44 0 0 0 0 i\n\ MouseMoveEvent 68 44 0 0 0 0 i\n\ MouseMoveEvent 71 44 0 0 0 0 i\n\ MouseMoveEvent 74 44 0 0 0 0 i\n\ MouseMoveEvent 77 44 0 0 0 0 i\n\ MouseMoveEvent 80 45 0 0 0 0 i\n\ MouseMoveEvent 81 45 0 0 0 0 i\n\ MouseMoveEvent 85 49 0 0 0 0 i\n\ MouseMoveEvent 89 50 0 0 0 0 i\n\ MouseMoveEvent 94 52 0 0 0 0 i\n\ MouseMoveEvent 99 56 0 0 0 0 i\n\ MouseMoveEvent 104 58 0 0 0 0 i\n\ MouseMoveEvent 107 61 0 0 0 0 i\n\ MouseMoveEvent 109 63 0 0 0 0 i\n\ MouseMoveEvent 109 67 0 0 0 0 i\n\ MouseMoveEvent 111 83 0 0 0 0 i\n\ MouseMoveEvent 113 86 0 0 0 0 
i\n\ MouseMoveEvent 113 87 0 0 0 0 i\n\ MouseMoveEvent 113 89 0 0 0 0 i\n\ MouseMoveEvent 112 93 0 0 0 0 i\n\ MouseMoveEvent 112 97 0 0 0 0 i\n\ MouseMoveEvent 111 104 0 0 0 0 i\n\ MouseMoveEvent 112 108 0 0 0 0 i\n\ MouseMoveEvent 116 115 0 0 0 0 i\n\ MouseMoveEvent 116 123 0 0 0 0 i\n\ MouseMoveEvent 116 129 0 0 0 0 i\n\ MouseMoveEvent 119 138 0 0 0 0 i\n\ MouseMoveEvent 122 141 0 0 0 0 i\n\ MouseMoveEvent 127 148 0 0 0 0 i\n\ MouseMoveEvent 128 161 0 0 0 0 i\n\ MouseMoveEvent 131 166 0 0 0 0 i\n\ MouseMoveEvent 134 168 0 0 0 0 i\n\ MouseMoveEvent 135 171 0 0 0 0 i\n\ MouseMoveEvent 134 174 0 0 0 0 i\n\ MouseMoveEvent 132 176 0 0 0 0 i\n\ MouseMoveEvent 132 178 0 0 0 0 i\n\ MouseMoveEvent 129 180 0 0 0 0 i\n\ MouseMoveEvent 127 182 0 0 0 0 i\n\ MouseMoveEvent 124 185 0 0 0 0 i\n\ MouseMoveEvent 122 186 0 0 0 0 i\n\ MouseMoveEvent 118 189 0 0 0 0 i\n\ MouseMoveEvent 114 191 0 0 0 0 i\n\ MouseMoveEvent 114 193 0 0 0 0 i\n\ MouseMoveEvent 112 193 0 0 0 0 i\n\ MouseMoveEvent 111 194 0 0 0 0 i\n\ MouseMoveEvent 110 197 0 0 0 0 i\n\ MouseMoveEvent 110 198 0 0 0 0 i\n\ MouseMoveEvent 109 199 0 0 0 0 i\n\ MouseMoveEvent 108 200 0 0 0 0 i\n\ MouseMoveEvent 108 201 0 0 0 0 i\n\ MouseMoveEvent 108 202 0 0 0 0 i\n\ MouseMoveEvent 108 203 0 0 0 0 i\n\ MouseMoveEvent 104 206 0 0 0 0 i\n\ LeftButtonReleaseEvent 104 206 0 0 0 0 i\n\ MouseMoveEvent 104 205 0 0 0 0 i\n\ MouseMoveEvent 104 204 0 0 0 0 i\n\ MouseMoveEvent 105 205 0 0 0 0 i\n\ MouseMoveEvent 105 206 0 0 0 0 i\n\ " # Start by loading some data. 
# dem = vtk.vtkDEMReader() dem.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem") dem.Update() Scale = 2 lut = vtk.vtkLookupTable() lut.SetHueRange(0.6, 0) lut.SetSaturationRange(1.0, 0) lut.SetValueRange(0.5, 1.0) lo = Scale * dem.GetElevationBounds()[0] hi = Scale * dem.GetElevationBounds()[1] shrink = vtk.vtkImageShrink3D() shrink.SetShrinkFactors(4, 4, 1) shrink.SetInputConnection(dem.GetOutputPort()) shrink.AveragingOn() geom = vtk.vtkImageDataGeometryFilter() geom.SetInputConnection(shrink.GetOutputPort()) geom.ReleaseDataFlagOn() warp = vtk.vtkWarpScalar() warp.SetInputConnection(geom.GetOutputPort()) warp.SetNormal(0, 0, 1) warp.UseNormalOn() warp.SetScaleFactor(Scale) warp.ReleaseDataFlagOn() elevation = vtk.vtkElevationFilter() elevation.SetInputConnection(warp.GetOutputPort()) elevation.SetLowPoint(0, 0, lo) elevation.SetHighPoint(0, 0, hi) elevation.SetScalarRange(lo, hi) elevation.ReleaseDataFlagOn() normals = vtk.vtkPolyDataNormals() normals.SetInputConnection(elevation.GetOutputPort()) normals.SetFeatureAngle(60) normals.ConsistencyOff() normals.SplittingOff() normals.ReleaseDataFlagOn() normals.Update() demMapper = vtk.vtkPolyDataMapper() demMapper.SetInputConnection(normals.GetOutputPort()) demMapper.SetScalarRange(lo, hi) demMapper.SetLookupTable(lut) demMapper.ImmediateModeRenderingOn() demActor = vtk.vtkActor() demActor.SetMapper(demMapper) # Create the RenderWindow, Renderer and both Actors # ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.SetMultiSamples(0) renWin.AddRenderer(ren) iRen = vtk.vtkRenderWindowInteractor() iRen.SetRenderWindow(renWin) iRen.LightFollowCameraOff() # iRen.SetInteractorStyle("") # The callback takes two arguments. # The first being the object that generates the event and # the second argument the event name (which is a string). 
def MoveLight(widget, event_string): light.SetPosition(rep.GetHandlePosition()) # Associate the line widget with the interactor rep = vtk.vtkSphereRepresentation() rep.SetPlaceFactor(4) rep.PlaceWidget(normals.GetOutput().GetBounds()) rep.HandleVisibilityOn() rep.SetRepresentationToWireframe() # rep HandleVisibilityOff # rep HandleTextOff sphereWidget = vtk.vtkSphereWidget2() sphereWidget.SetInteractor(iRen) sphereWidget.SetRepresentation(rep) # sphereWidget.TranslationEnabledOff() # sphereWidget.ScalingEnabledOff() sphereWidget.AddObserver("InteractionEvent", MoveLight) recorder = vtk.vtkInteractorEventRecorder() recorder.SetInteractor(iRen) # recorder.SetFileName("c:/record.log") # recorder.Record() recorder.ReadFromInputStringOn() recorder.SetInputString(Recording) # Add the actors to the renderer, set the background and size # ren.AddActor(demActor) ren.SetBackground(1, 1, 1) renWin.SetSize(300, 300) ren.SetBackground(0.1, 0.2, 0.4) cam1 = ren.GetActiveCamera() cam1.SetViewUp(0, 0, 1) cam1.SetFocalPoint(dem.GetOutput().GetCenter()) cam1.SetPosition(1, 0, 0) ren.ResetCamera() cam1.Elevation(25) cam1.Azimuth(125) cam1.Zoom(1.25) light = vtk.vtkLight() light.SetFocalPoint(rep.GetCenter()) light.SetPosition(rep.GetHandlePosition()) ren.AddLight(light) iRen.Initialize() renWin.Render() # render the image renWin.Render() # Actually probe the data recorder.Play() img_file = "TestSphereWidget.png" vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25) vtk.test.Testing.interact() if __name__ == "__main__": vtk.test.Testing.main([(TestSphereWidget, 'test')])
unknown
codeparrot/codeparrot-clean
# coding: utf-8 # In[1]: import os import pandas as pd import matplotlib.pyplot as plt # In[2]: from keras.applications.vgg16 import VGG16 from keras.models import Model from keras.callbacks import ModelCheckpoint,EarlyStopping from keras.preprocessing.image import ImageDataGenerator # In[3]: from keras.utils import np_utils from keras.models import Sequential from keras.callbacks import EarlyStopping, History, ModelCheckpoint from keras.layers.core import Flatten, Dense, Dropout, Reshape, Lambda from keras.layers.normalization import BatchNormalization # In[16]: from sklearn.preprocessing import LabelEncoder from keras.utils.np_utils import to_categorical from sklearn.metrics import log_loss from sklearn.model_selection import train_test_split # In[8]: import numpy as np # In[9]: train_features = np.load('train_preprocesed.npy') valid_features = np.load('valid_preprocessed.npy') # In[10]: train_dir = "new_train/" valid_dir = "new_valid/" # In[11]: classes = os.listdir(train_dir) # In[12]: # Get the labels train_labels = [] for c in classes: l = [c]*len(os.listdir(train_dir+c+'/')) train_labels.extend(l) # In[25]: len(train_labels) # In[17]: valid_labels = [] for c in classes: l = [c]*len(os.listdir(valid_dir+c+'/')) valid_labels.extend(l) # In[18]: onehot_train = to_categorical(LabelEncoder().fit_transform(train_labels)) # In[19]: onehot_valid = to_categorical(LabelEncoder().fit_transform(valid_labels)) # In[20]: vgg16_base = VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(150, 150,3)) # Note that the preprocessing of InceptionV3 is: # (x / 255 - 0.5) x 2 print('Adding new layers...') output = vgg16_base.get_layer(index = -1).output output = Flatten()(output) # let's add a fully-connected layer output = Dense(4096,activation = "relu")(output) output = BatchNormalization()(output) output = Dropout(0.5)(output) output = Dense(512,activation = "relu")(output) output = BatchNormalization()(output) output = Dropout(0.5)(output) # and a 
logistic layer -- let's say we have 200 classes output = Dense(8, activation='softmax')(output) vgg16_model = Model(vgg16_base.input, output) #InceptionV3_model.summary() # In[ ]: for layer in vgg16_model.layers[:19]: layer.trainable = False # In[21]: vgg16_model.compile(optimizer="adam",loss="categorical_crossentropy",metrics =["accuracy"]) # In[35]: train_datagen = ImageDataGenerator( shear_range=0.1, zoom_range=0.1, rotation_range=10., width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True) val_datagen = ImageDataGenerator() # In[38]: callbacks = EarlyStopping(monitor='val_loss', patience=1, verbose=1, mode='auto') # autosave best Model best_model_file = "./data_augmented_weights.h5" best_model = ModelCheckpoint(best_model_file, monitor='val_acc', verbose = 1, save_best_only = True) # In[39]: history = vgg16_model.fit_generator(train_datagen.flow(train_features, onehot_train, batch_size=10), nb_epoch=5, samples_per_epoch = 3019, validation_data=val_datagen.flow(valid_features,onehot_valid,batch_size=10,shuffle=False), nb_val_samples=758,callbacks = [callbacks,best_model]) # In[34]: #model.load_weights("batch_normalized_weights.h5") # In[ ]: # summarize history for accuracy plt.figure(figsize=(15, 5)) plt.subplot(1, 2, 1) plt.plot(history.history['acc']); plt.plot(history.history['val_acc']); plt.title('model accuracy'); plt.ylabel('accuracy'); plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left'); # summarize history for loss plt.subplot(1, 2, 2) plt.plot(history.history['loss']); plt.plot(history.history['val_loss']); plt.title('model loss'); plt.ylabel('loss'); plt.xlabel('epoch'); plt.legend(['train', 'valid'], loc='upper left'); plt.show() # In[17]: test_features = np.load("test_features.npy") # In[18]: test_preds = model.predict_proba(test_features, verbose=1) # In[19]: test_preds[0:5] # In[21]: submission1 = pd.DataFrame(test_preds, columns= os.listdir(train_dir)) test_files = os.listdir("test_stg1/test_stg1/") 
submission1.insert(0, 'image', test_files) submission1.head() # In[27]: clipped_preds = np.clip(test_preds,(1-0.82)/7,0.82) submission2 = pd.DataFrame(clipped_preds, columns= os.listdir("train/train/")) submission2.insert(0, 'image', test_files) submission2.head() # In[28]: submission2.to_csv("batch_normalized.csv",index = False) # In[ ]:
unknown
codeparrot/codeparrot-clean
"use strict"; module.exports = [ /Can't import the named export 'aa' \(imported as 'aa'\) from default-exporting module/, /Can't import the named export 'named' \(imported as 'named'\) from default-exporting module/ ];
javascript
github
https://github.com/webpack/webpack
test/cases/json/import-named-with-type-json/warnings.js
# -*- coding: utf-8 -*- ## Copyright 2008-2013 Luc Saffre ## This file is part of the Lino project. ## Lino is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 3 of the License, or ## (at your option) any later version. ## Lino is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## You should have received a copy of the GNU General Public License ## along with Lino; if not, see <http://www.gnu.org/licenses/>. """ This module contains some relatively quick tests that don't load any fixtures. To run only this test:: manage.py test contacts.QuickTest """ from __future__ import unicode_literals from pprint import pprint from django.conf import settings from django.utils import translation from djangosite.utils.djangotest import RemoteAuthTestCase from django.test.utils import override_settings #from lino.igen import models #from lino.modlib.contacts.models import Contact, Companies #from lino.modlib.countries.models import Country from north import dbutils from lino import dd Person = dd.resolve_model("contacts.Person") from lino.utils.instantiator import Instantiator, create_and_get from north.dbutils import babelkw from lino.modlib.contacts import models as contacts from lino import mixins Genders = mixins.Genders person = Instantiator(Person).build company = Instantiator("contacts.Company").build class QuickTest(RemoteAuthTestCase): def test01(self): """ Tests some basic funtionality. 
""" #~ self.assertEqual(settings.MIDDLEWARE_CLASSES,1) ee = create_and_get('countries.Country', isocode='EE',**babelkw('name', de="Estland", fr='Estonie', en="Estonia", nl='Estland', et='Eesti', )) be = create_and_get('countries.Country', isocode='BE',**babelkw('name', de="Belgien", fr='Belgique', en="Belgium", nl='Belgie', et='Belgia', )) eupen = create_and_get('countries.City',name=u'Eupen',country=be,zip_code='4700') vigala = create_and_get('countries.City',name=u'Vigala',country=ee) luc = create_and_get(Person, first_name='Luc',last_name='Saffre', gender=Genders.male, country=ee,street='Uus', street_no='1', addr2=u'Vana-Vigala küla', city=vigala,zip_code='78003') settings.SITE.uppercase_last_name = True """ If the following tests raise a "DoesNotExist: Company matching query does not exist" then this may come because Site._site_config has been filled before the database switched from the real db to test db. and not properly reset. """ if settings.SITE.get_language_info('en'): with translation.override('en'): #~ dbutils.set_language('en') self.assertEquals(luc.address, u'''\ Mr Luc SAFFRE Uus 1 Vana-Vigala küla 78003 Vigala Estonia''') if settings.SITE.get_language_info('de'): with translation.override('de'): self.assertEquals(luc.address, u'''\ Herrn Luc SAFFRE Uus 1 Vana-Vigala küla 78003 Vigala Estland''') self.assertEquals(luc.address_html, '''\ <p>Herrn Luc SAFFRE<br />Uus 1<br />Vana-Vigala k&#252;la<br />78003 Vigala<br />Estland</p>''') u = create_and_get(settings.SITE.user_model, username='root',language='',profile=dd.UserProfiles.admin) """ disable SITE.is_imported_partner() otherwise disabled_fields may contain more than just the 'id' field. """ save_iip = settings.SITE.is_imported_partner def f(obj): return False settings.SITE.is_imported_partner = f """ Note that we must specify the language both in the user and in HTTP_ACCEPT_LANGUAGE because... 
""" luc = Person.objects.get(name__exact="Saffre Luc") self.assertEqual(luc.pk,contacts.PARTNER_NUMBERS_START_AT) url = settings.SITE.build_admin_url('api','contacts','Person','%d?query=&an=detail&fmt=json' % luc.pk) #~ url = '/api/contacts/Person/%d?query=&an=detail&fmt=json' % luc.pk if settings.SITE.get_language_info('en'): u.language = 'en' u.save() response = self.client.get(url,REMOTE_USER='root',HTTP_ACCEPT_LANGUAGE='en') result = self.check_json_result(response,'navinfo disable_delete data id title') self.assertEqual(result['data']['country'],"Estonia") self.assertEqual(result['data']['gender'],"Male") if settings.SITE.get_language_info('de'): u.language = 'de' u.save() response = self.client.get(url,REMOTE_USER='root',HTTP_ACCEPT_LANGUAGE='de') result = self.check_json_result( response, 'navinfo disable_delete data id title') self.assertEqual(result['data']['country'],"Estland") self.assertEqual(result['data']['gender'],u"Männlich") #~ self.assertEqual(result['data']['disabled_fields'],['contact_ptr_id','id']) #~ self.assertEqual(result['data']['disabled_fields'],['id']) df = result['data']['disabled_fields'] self.assertEqual(df['id'],True) if settings.SITE.get_language_info('fr'): u.language = 'fr' u.save() response = self.client.get(url,REMOTE_USER='root',HTTP_ACCEPT_LANGUAGE='fr') result = self.check_json_result(response,'navinfo disable_delete data id title') self.assertEqual(result['data']['country'],"Estonie") self.assertEqual(result['data']['gender'],u"Masculin") #~ u.language = lang #~ u.save() # restore is_imported_partner method settings.SITE.is_imported_partner = save_iip #~ def test03(self): """ Test the following situation: - User 1 opens the :menuselection:`Configure --> System--> System Parameters` dialog - User 2 creates a new Person (which increases next_partner_id) - User 1 clicks on `Save`. 
`next_partner_id` may not get overwritten """ # User 1 SiteConfigs = settings.SITE.modules.system.SiteConfigs elem = SiteConfigs.get_row_by_pk(None,settings.SITE.config_id) self.assertEqual(elem.next_partner_id,contacts.PARTNER_NUMBERS_START_AT + 1) elem.next_partner_id = 12345 elem.full_clean() elem.save() #~ print "saved" self.assertEqual(settings.SITE.site_config.next_partner_id,12345) john = create_and_get(Person,first_name='John',last_name='Smith') self.assertEqual(john.pk,12345) self.assertEqual(elem.next_partner_id,12346) self.assertEqual(settings.SITE.site_config.next_partner_id,12346) def unused_test03(self): """ Test the following situation: - User 1 opens the :menuselection:`Configure --> System--> System Parameters` dialog - User 2 creates a new Person (which increases next_partner_id) - User 1 clicks on `Save`. `next_partner_id` may not get overwritten """ url = settings.SITE.build_admin_url('api','system','SiteConfigs','1?an=detail&fmt=json') response = self.client.get(url,REMOTE_USER='root') result = self.check_json_result(response,'navinfo disable_delete data id title') """ `test01` created one Person, so next_partner_id should be at 101: """ data = result['data'] self.assertEqual(data['next_partner_id'],contacts.PARTNER_NUMBERS_START_AT + 1) data['next_partner_id'] = 12345 #~ pprint(data) response = self.client.put(url,data, #~ content_type="application/x-www-form-urlencoded; charset=UTF-8", REMOTE_USER='root') result = self.check_json_result(response,'message rows success data_record') data = result['data_record']['data'] john = create_and_get(Person,first_name='John',last_name='Smith') # fails: self.assertEqual(john.pk,12345) """ I no longer understand how to call test.Client.put() with normal form data... Furthermore this seems to change between 1.4 and 1.5, so I'll wait until all my users have moved to 1.5. """
unknown
codeparrot/codeparrot-clean
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2015 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo 8.0 addon manifest: glue module auto-installed when both
# quality_control_stock and sale_stock are present.
{
    "name": "Quality control - Sale stock",
    "version": "8.0.1.0.0",
    "author": "OdooMRP team, "
              "AvanzOSC, "
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "website": "http://www.odoomrp.com",
    "contributors": [
        # Fixed: the closing '>' of the e-mail address was missing.
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
    ],
    "category": "Quality control",
    "depends": [
        'quality_control_stock',
        'sale_stock',
    ],
    "data": [
        'security/ir.model.access.csv',
    ],
    "installable": True,
    "auto_install": True,
}
unknown
codeparrot/codeparrot-clean
/*!
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
import { useQueryClient } from "@tanstack/react-query";
import { useState } from "react";
import { useTranslation } from "react-i18next";

import { useVariableServiceBulkVariables, useVariableServiceGetVariablesKey } from "openapi/queries";
import { toaster } from "src/components/ui";

type Props = {
  readonly clearSelections: VoidFunction;
  readonly onSuccessConfirm: VoidFunction;
};

// Shape of the bulk-variables response we care about: the optional
// "delete" section with per-key errors and successfully removed keys.
type BulkDeleteResponse = {
  delete?: { errors: Array<unknown>; success: Array<string> };
};

/**
 * Hook wrapping the bulk-variables mutation for deletes: invalidates the
 * cached variable list on completion, surfaces the first per-key error via
 * `error`, and shows a success toast (then clears the selection and confirms)
 * when keys were removed.
 */
export const useBulkDeleteVariables = ({ clearSelections, onSuccessConfirm }: Props) => {
  const queryClient = useQueryClient();
  const [error, setError] = useState<unknown>(undefined);
  const { t: translate } = useTranslation(["common", "admin"]);

  const onSuccess = async (data: BulkDeleteResponse) => {
    // Always refresh the variable list, even if some keys failed.
    await queryClient.invalidateQueries({
      queryKey: [useVariableServiceGetVariablesKey],
    });

    const outcome = data.delete;

    if (!outcome) {
      return;
    }

    const { errors, success } = outcome;

    if (Array.isArray(errors) && errors.length > 0) {
      // Only the first error is surfaced, mirroring API error shape.
      const [firstError] = errors as Array<{ error: string }>;

      setError({
        body: { detail: firstError.error },
      });

      return;
    }

    if (Array.isArray(success) && success.length > 0) {
      toaster.create({
        description: translate("toaster.bulkDelete.success.description", {
          count: success.length,
          keys: success.join(", "),
          resourceName: translate("admin:variables.variable_other"),
        }),
        title: translate("toaster.bulkDelete.success.title"),
        type: "success",
      });
      clearSelections();
      onSuccessConfirm();
    }
  };

  const onError = (caught: unknown) => {
    setError(caught);
  };

  const { isPending, mutate } = useVariableServiceBulkVariables({
    onError,
    onSuccess,
  });

  return { error, isPending, mutate };
};
typescript
github
https://github.com/apache/airflow
airflow-core/src/airflow/ui/src/queries/useBulkDeleteVariables.ts
#!/usr/bin/env python3

###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################


def segment_overlap(a, b, x, y):
    # Closed 1-D intervals [a, b] and [x, y] overlap unless one lies
    # entirely to one side of the other.
    return not (b < x or a > y)


def vector_projection_overlap(p0, p1, p2, p3):
    # Project segment (p2, p3) onto the axis of segment (p0, p1) and test
    # whether the projected interval overlaps [0, |p1 - p0|^2].
    # Points are expected to provide subtract/dot/norm_square
    # (vector-like API) -- presumably Apollo's Vector2d; not shown here.
    axis = p1.subtract(p0)
    axis_len_sq = axis.norm_square()

    t0 = p2.subtract(p0).dot(axis)
    t1 = p3.subtract(p0).dot(axis)
    lo, hi = min(t0, t1), max(t0, t1)

    return segment_overlap(lo, hi, 0.0, axis_len_sq)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = """ --- module: elb_instance short_description: De-registers or registers instances from EC2 ELBs description: - This module de-registers or registers an AWS EC2 instance from the ELBs that it belongs to. - Returns fact "ec2_elbs" which is a list of elbs attached to the instance if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. version_added: "1.2" author: "John Jarvis (@jarv)" options: state: description: - register or deregister the instance required: true choices: ['present', 'absent'] instance_id: description: - EC2 Instance ID required: true ec2_elbs: description: - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. required: false default: None enable_availability_zone: description: - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB. required: false default: yes choices: [ "yes", "no" ] wait: description: - Wait for instance registration or deregistration to complete successfully before returning. required: false default: yes choices: [ "yes", "no" ] validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. required: false default: "yes" choices: ["yes", "no"] aliases: [] version_added: "1.5" wait_timeout: description: - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. 
If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no. required: false default: 0 version_added: "1.6" extends_documentation_fragment: - aws - ec2 """ EXAMPLES = """ # basic pre_task and post_task example pre_tasks: - name: Gathering ec2 facts action: ec2_facts - name: Instance De-register local_action: module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" state: absent roles: - myrole post_tasks: - name: Instance Register local_action: module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" state: present with_items: "{{ ec2_elbs }}" """ import time try: import boto import boto.ec2 import boto.ec2.autoscale import boto.ec2.elb from boto.regioninfo import RegionInfo HAS_BOTO = True except ImportError: HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info) class ElbManager: """Handles EC2 instance ELB registration and de-registration""" def __init__(self, module, instance_id=None, ec2_elbs=None, region=None, **aws_connect_params): self.module = module self.instance_id = instance_id self.region = region self.aws_connect_params = aws_connect_params self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False def deregister(self, wait, timeout): """De-register the instance from all ELBs and wait for the ELB to report it out-of-service""" for lb in self.lbs: initial_state = self._get_instance_health(lb) if initial_state is None: # Instance isn't registered with this load # balancer. Ignore it and try the next one. continue lb.deregister_instances([self.instance_id]) # The ELB is changing state in some way. Either an instance that's # InService is moving to OutOfService, or an instance that's # already OutOfService is being deregistered. 
self.changed = True if wait: self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: initial_state = self._get_instance_health(lb) if enable_availability_zone: self._enable_availailability_zone(lb) lb.register_instances([self.instance_id]) if wait: self._await_elb_instance_state(lb, 'InService', initial_state, timeout) else: # We cannot assume no change was made if we don't wait # to find out self.changed = True def exists(self, lbtest): """ Verify that the named ELB actually exists """ found = False for lb in self.lbs: if lb.name == lbtest: found=True break return found def _enable_availailability_zone(self, lb): """Enable the current instance's availability zone in the provided lb. Returns True if the zone was enabled or False if no change was made. lb: load balancer""" instance = self._get_instance() if instance.placement in lb.availability_zones: return False lb.enable_zones(zones=instance.placement) # If successful, the new zone will have been added to # lb.availability_zones return instance.placement in lb.availability_zones def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): """Wait for an ELB to change state lb: load balancer awaited_state : state to poll for (string)""" wait_timeout = time.time() + timeout while True: instance_state = self._get_instance_health(lb) if not instance_state: msg = ("The instance %s could not be put in service on %s." " Reason: Invalid Instance") self.module.fail_json(msg=msg % (self.instance_id, lb)) if instance_state.state == awaited_state: # Check the current state against the initial state, and only set # changed if they are different. 
if (initial_state is None) or (instance_state.state != initial_state.state): self.changed = True break elif self._is_instance_state_pending(instance_state): # If it's pending, we'll skip further checks and continue waiting pass elif (awaited_state == 'InService' and instance_state.reason_code == "Instance" and time.time() >= wait_timeout): # If the reason_code for the instance being out of service is # "Instance" this indicates a failure state, e.g. the instance # has failed a health check or the ELB does not have the # instance's availability zone enabled. The exact reason why is # described in InstantState.description. msg = ("The instance %s could not be put in service on %s." " Reason: %s") self.module.fail_json(msg=msg % (self.instance_id, lb, instance_state.description)) time.sleep(1) def _is_instance_state_pending(self, instance_state): """ Determines whether the instance_state is "pending", meaning there is an operation under way to bring it in service. """ # This is messy, because AWS provides no way to distinguish between # an instance that is is OutOfService because it's pending vs. OutOfService # because it's failing health checks. So we're forced to analyze the # description, which is likely to be brittle. return (instance_state and 'pending' in instance_state.description) def _get_instance_health(self, lb): """ Check instance health, should return status object or None under certain error conditions. 
""" try: status = lb.get_instance_health([self.instance_id])[0] except boto.exception.BotoServerError as e: if e.error_code == 'InvalidInstance': return None else: raise return status def _get_instance_lbs(self, ec2_elbs=None): """Returns a list of ELBs attached to self.instance_id ec2_elbs: an optional list of elb names that will be used for elb lookup instead of returning what elbs are attached to self.instance_id""" if not ec2_elbs: ec2_elbs = self._get_auto_scaling_group_lbs() try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) elbs = [] marker = None while True: try: newelbs = elb.get_all_load_balancers(marker=marker) marker = newelbs.next_marker elbs.extend(newelbs) if not marker: break except TypeError: # Older version of boto do not allow for params elbs = elb.get_all_load_balancers() break if ec2_elbs: lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) else: lbs = [] for lb in elbs: for info in lb.instances: if self.instance_id == info.id: lbs.append(lb) return lbs def _get_auto_scaling_group_lbs(self): """Returns a list of ELBs associated with self.instance_id indirectly through its auto scaling group membership""" try: asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) if len(asg_instances) > 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") if not asg_instances: asg_elbs = [] else: asg_name = asg_instances[0].group_name asgs = asg.get_all_groups([asg_name]) if len(asg_instances) != 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group.") asg_elbs = asgs[0].load_balancers return asg_elbs def _get_instance(self): """Returns a boto.ec2.InstanceObject for 
self.instance_id""" try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state={'required': True}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False, 'type':'list'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'required': False, 'default': 0, 'type': 'int'} ) ) module = AnsibleModule( argument_spec=argument_spec, ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) if not region: module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") ec2_elbs = module.params['ec2_elbs'] wait = module.params['wait'] enable_availability_zone = module.params['enable_availability_zone'] timeout = module.params['wait_timeout'] if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: module.fail_json(msg="ELBs are required for registration") instance_id = module.params['instance_id'] elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) if ec2_elbs is not None: for elb in ec2_elbs: if not elb_man.exists(elb): msg="ELB %s does not exist" % elb module.fail_json(msg=msg) if module.params['state'] == 'present': elb_man.register(wait, enable_availability_zone, timeout) elif module.params['state'] == 'absent': elb_man.deregister(wait, timeout) ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) module.exit_json(**ec2_facts_result) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
from sqlagg.columns import SimpleColumn

from corehq.apps.reports.sqlreport import SqlData, DatabaseColumn
from custom.common import ALL_OPTION
from dimagi.utils.decorators.memoized import memoized

# Placeholder shown where a value is unknown / not applicable.
EMPTY_FIELD = "---"


class BaseMixin(object):
    """Accessors for the report's hierarchy GET filters.

    An explicit "all" selection is collapsed to an empty list, meaning
    "no filtering on this level".
    """

    def _hierarchy_param(self, name):
        # Multi-valued GET parameter; the ALL_OPTION sentinel in first
        # position means "everything", i.e. no filter.
        selected = self.request.GET.getlist(name, [])
        if selected and selected[0] == ALL_OPTION:
            return []
        return selected

    @property
    def blocks(self):
        return self._hierarchy_param('hierarchy_block')

    @property
    def awcs(self):
        return self._hierarchy_param('hierarchy_awc')

    @property
    def gp(self):
        return self._hierarchy_param('hierarchy_gp')


class UserSqlData(SqlData):
    """Raw user/location rows from the OPM fluff table."""

    table_name = "fluff_OpmUserFluff"
    group_by = ['doc_id', 'name', 'awc', 'awc_code', 'bank_name',
                'ifs_code', 'account_number', 'gp', 'block', 'village', 'gps']

    @property
    def filters(self):
        # Unfiltered: every row in the table.
        return []

    @property
    def columns(self):
        # One simple pass-through column per grouped field.
        return [DatabaseColumn(field, SimpleColumn(field))
                for field in self.group_by]

    def transformed_data(self):
        # Rows augmented in place with a display label "<awc> - (<code>)".
        rows = []
        for row in self.get_data():
            row['awc_with_code'] = "{} - ({})".format(row['awc'], row['awc_code'])
            rows.append(row)
        return rows

    def data_as_hierarchy(self):
        """
        Creates a location hierarchy structured as follows:

        hierarchy = {"Atri": {
            "Sahora": {
                "Sohran Bigha (34)": None}}}
        """
        hierarchy = {}
        for loc in self.transformed_data():
            block = loc['block']
            gp = loc['gp']
            awc = loc['awc_with_code']
            # Rows missing any level of the hierarchy are skipped.
            if not (awc and gp and block):
                continue
            hierarchy.setdefault(block, {}).setdefault(gp, {})[awc] = None
        return hierarchy

    @property
    @memoized
    def data_by_doc_id(self):
        # doc_id -> (awc_code, gp) lookup over all rows.
        mapping = {}
        for row in self.get_data():
            mapping[row['doc_id']] = (row['awc_code'], row['gp'])
        return mapping


@memoized
def user_sql_data():
    # Shared, memoized accessor so the table is only queried once.
    return UserSqlData()


def get_matching_users(awcs=None, gps=None, blocks=None):
    """
    Accepts a list of one or more of `awcs`, `gps`, and `blocks`,
    returns a list of users matching that selection
    each user is represented as a dict with the following keys:
    ['doc_id', 'awc', 'gp', 'block', 'awc_code']
    """
    candidates = [('awc_with_code', awcs), ('gp', gps), ('block', blocks)]
    non_null = [(key, values) for key, values in candidates if values]
    if not non_null:
        raise TypeError("You must pass at least one of awc, gp, or block")
    key, selected = non_null[0]  # get most specific selection
    return [user for user in user_sql_data().transformed_data()
            if user[key] in selected]


def numeric_fn(val):
    # Sortable cell wrapper: unparseable values (including None) sort as -1.
    try:
        sort_val = int(val)
    except (ValueError, TypeError):
        sort_val = -1
    return {'sort_key': sort_val, 'html': val}


def format_bool(bool_or_none):
    # None -> placeholder; truthy -> 'Yes'; falsy -> 'No'.
    if bool_or_none is None:
        return EMPTY_FIELD
    return 'Yes' if bool_or_none else 'No'
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python ''' Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's Smalltalk testing framework. This module contains the core framework classes that form the basis of specific test cases and suites (TestCase, TestSuite etc.), and also a text-based utility class for running the tests and reporting the results (TextTestRunner). Simple usage: import unittest class IntegerArithmenticTestCase(unittest.TestCase): def testAdd(self): ## test method names begin 'test*' self.assertEquals((1 + 2), 3) self.assertEquals(0 + 1, 1) def testMultiply(self): self.assertEquals((0 * 10), 0) self.assertEquals((5 * 8), 40) if __name__ == '__main__': unittest.main() Further information is available in the bundled documentation, and from http://pyunit.sourceforge.net/ Copyright (c) 1999-2003 Steve Purcell This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form. IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
''' __author__ = "Steve Purcell" __email__ = "stephen_purcell at yahoo dot com" __version__ = "#Revision: 1.63 $"[11:-2] import time import sys import traceback import os import types ############################################################################## # Exported classes and functions ############################################################################## __all__ = ['TestResult', 'TestCase', 'TestSuite', 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader'] # Expose obsolete functions for backwards compatibility __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases']) ############################################################################## # Backward compatibility ############################################################################## if sys.version_info[:2] < (2, 2): False, True = 0, 1 def isinstance(obj, clsinfo): import __builtin__ if type(clsinfo) in (tuple, list): for cls in clsinfo: if cls is type: cls = types.ClassType if __builtin__.isinstance(obj, cls): return 1 return 0 else: return __builtin__.isinstance(obj, clsinfo) ############################################################################## # Test framework core ############################################################################## # All classes defined herein are 'new-style' classes, allowing use of 'super()' __metaclass__ = type def _strclass(cls): return "%s.%s" % (cls.__module__, cls.__name__) __unittest = 1 class TestResult: """Holder for test result information. Test results are automatically managed by the TestCase and TestSuite classes, and do not need to be explicitly manipulated by writers of tests. Each instance holds the total number of tests run, and collections of failures and errors that occurred among those test runs. The collections contain tuples of (testcase, exceptioninfo), where exceptioninfo is the formatted traceback of the error that occurred. 
""" def __init__(self): self.failures = [] self.errors = [] self.testsRun = 0 self.shouldStop = 0 def startTest(self, test): "Called when the given test is about to be run" self.testsRun = self.testsRun + 1 def stopTest(self, test): "Called when the given test has been run" pass def addError(self, test, err): """Called when an error has occurred. 'err' is a tuple of values as returned by sys.exc_info(). """ self.errors.append((test, self._exc_info_to_string(err, test))) def addFailure(self, test, err): """Called when an error has occurred. 'err' is a tuple of values as returned by sys.exc_info().""" self.failures.append((test, self._exc_info_to_string(err, test))) def addSuccess(self, test): "Called when a test has completed successfully" pass def wasSuccessful(self): "Tells whether or not this result was a success" return len(self.failures) == len(self.errors) == 0 def stop(self): "Indicates that the tests should be aborted" self.shouldStop = True def _exc_info_to_string(self, err, test): """Converts a sys.exc_info()-style tuple of values into a string.""" exctype, value, tb = err # Skip test runner traceback levels while tb and self._is_relevant_tb_level(tb): tb = tb.tb_next if exctype is test.failureException: # Skip assert*() traceback levels length = self._count_relevant_tb_levels(tb) return ''.join(traceback.format_exception(exctype, value, tb, length)) return ''.join(traceback.format_exception(exctype, value, tb)) def _is_relevant_tb_level(self, tb): return tb.tb_frame.f_globals.has_key('__unittest') def _count_relevant_tb_levels(self, tb): length = 0 while tb and not self._is_relevant_tb_level(tb): length += 1 tb = tb.tb_next return length def __repr__(self): return "<%s run=%i errors=%i failures=%i>" % \ (_strclass(self.__class__), self.testsRun, len(self.errors), len(self.failures)) class TestCase: """A class whose instances are single test cases. By default, the test code itself should be placed in a method named 'runTest'. 
If the fixture may be used for many test cases, create as many test methods as are needed. When instantiating such a TestCase subclass, specify in the constructor arguments the name of the test method that the instance is to execute. Test authors should subclass TestCase for their own tests. Construction and deconstruction of the test's environment ('fixture') can be implemented by overriding the 'setUp' and 'tearDown' methods respectively. If it is necessary to override the __init__ method, the base class __init__ method must always be called. It is important that subclasses should not change the signature of their __init__ method, since instances of the classes are instantiated automatically by parts of the framework in order to be run. """ # This attribute determines which exception will be raised when # the instance's assertion methods fail; test methods raising this # exception will be deemed to have 'failed' rather than 'errored' failureException = AssertionError def __init__(self, methodName='runTest'): """Create an instance of the class that will use the named test method when executed. Raises a ValueError if the instance does not have a method with the specified name. """ try: self._testMethodName = methodName testMethod = getattr(self, methodName) self._testMethodDoc = testMethod.__doc__ except AttributeError: raise ValueError, "no such test method in %s: %s" % \ (self.__class__, methodName) def setUp(self): "Hook method for setting up the test fixture before exercising it." pass def tearDown(self): "Hook method for deconstructing the test fixture after testing it." pass def countTestCases(self): return 1 def defaultTestResult(self): return TestResult() def shortDescription(self): """Returns a one-line description of the test, or None if no description has been provided. The default implementation of this method returns the first line of the specified test method's docstring. 
""" doc = self._testMethodDoc return doc and doc.split("\n")[0].strip() or None def id(self): return "%s.%s" % (_strclass(self.__class__), self._testMethodName) def __str__(self): return "%s (%s)" % (self._testMethodName, _strclass(self.__class__)) def __repr__(self): return "<%s testMethod=%s>" % \ (_strclass(self.__class__), self._testMethodName) def run(self, result=None): if result is None: result = self.defaultTestResult() result.startTest(self) testMethod = getattr(self, self._testMethodName) try: try: self.setUp() except KeyboardInterrupt: raise except: result.addError(self, self._exc_info()) return ok = False try: testMethod() ok = True except self.failureException: result.addFailure(self, self._exc_info()) except KeyboardInterrupt: raise except: result.addError(self, self._exc_info()) try: self.tearDown() except KeyboardInterrupt: raise except: result.addError(self, self._exc_info()) ok = False if ok: result.addSuccess(self) finally: result.stopTest(self) def __call__(self, *args, **kwds): return self.run(*args, **kwds) def debug(self): """Run the test without collecting errors in a TestResult""" self.setUp() getattr(self, self._testMethodName)() self.tearDown() def _exc_info(self): """Return a version of sys.exc_info() with the traceback frame minimised; usually the top level of the traceback frame is not needed. """ exctype, excvalue, tb = sys.exc_info() if sys.platform[:4] == 'java': ## tracebacks look different in Jython return (exctype, excvalue, tb) return (exctype, excvalue, tb) def fail(self, msg=None): """Fail immediately, with the given message.""" raise self.failureException, msg def failIf(self, expr, msg=None): "Fail the test if the expression is true." 
if expr: raise self.failureException, msg def failUnless(self, expr, msg=None): """Fail the test unless the expression is true.""" if not expr: raise self.failureException, msg def failUnlessRaises(self, excClass, callableObj, *args, **kwargs): """Fail unless an exception of class excClass is thrown by callableObj when invoked with arguments args and keyword arguments kwargs. If a different type of exception is thrown, it will not be caught, and the test case will be deemed to have suffered an error, exactly as for an unexpected exception. """ try: callableObj(*args, **kwargs) except excClass: return else: if hasattr(excClass,'__name__'): excName = excClass.__name__ else: excName = str(excClass) raise self.failureException, "%s not raised" % excName def failUnlessEqual(self, first, second, msg=None): """Fail if the two objects are unequal as determined by the '==' operator. """ if not first == second: raise self.failureException, \ (msg or '%r != %r' % (first, second)) def failIfEqual(self, first, second, msg=None): """Fail if the two objects are equal as determined by the '==' operator. """ if first == second: raise self.failureException, \ (msg or '%r == %r' % (first, second)) def failUnlessAlmostEqual(self, first, second, places=7, msg=None): """Fail if the two objects are unequal as determined by their difference rounded to the given number of decimal places (default 7) and comparing to zero. Note that decimal places (from zero) are usually not the same as significant digits (measured from the most signficant digit). """ if round(second-first, places) != 0: raise self.failureException, \ (msg or '%r != %r within %r places' % (first, second, places)) def failIfAlmostEqual(self, first, second, places=7, msg=None): """Fail if the two objects are equal as determined by their difference rounded to the given number of decimal places (default 7) and comparing to zero. 
Note that decimal places (from zero) are usually not the same as significant digits (measured from the most signficant digit). """ if round(second-first, places) == 0: raise self.failureException, \ (msg or '%r == %r within %r places' % (first, second, places)) # Synonyms for assertion methods assertEqual = assertEquals = failUnlessEqual assertNotEqual = assertNotEquals = failIfEqual assertAlmostEqual = assertAlmostEquals = failUnlessAlmostEqual assertNotAlmostEqual = assertNotAlmostEquals = failIfAlmostEqual assertRaises = failUnlessRaises assert_ = assertTrue = failUnless assertFalse = failIf class TestSuite: """A test suite is a composite test consisting of a number of TestCases. For use, create an instance of TestSuite, then add test case instances. When all tests have been added, the suite can be passed to a test runner, such as TextTestRunner. It will run the individual test cases in the order in which they were added, aggregating the results. When subclassing, do not forget to call the base class constructor. 
""" def __init__(self, tests=()): self._tests = [] self.addTests(tests) def __repr__(self): return "<%s tests=%s>" % (_strclass(self.__class__), self._tests) __str__ = __repr__ def __iter__(self): return iter(self._tests) def countTestCases(self): cases = 0 for test in self._tests: cases += test.countTestCases() return cases def addTest(self, test): # sanity checks if not callable(test): raise TypeError("the test to add must be callable") if (isinstance(test, (type, types.ClassType)) and issubclass(test, (TestCase, TestSuite))): raise TypeError("TestCases and TestSuites must be instantiated " "before passing them to addTest()") self._tests.append(test) def addTests(self, tests): if isinstance(tests, basestring): raise TypeError("tests must be an iterable of tests, not a string") for test in tests: self.addTest(test) def run(self, result): for test in self._tests: if result.shouldStop: break test(result) return result def __call__(self, *args, **kwds): return self.run(*args, **kwds) def debug(self): """Run the tests without collecting errors in a TestResult""" for test in self._tests: test.debug() class FunctionTestCase(TestCase): """A test case that wraps a test function. This is useful for slipping pre-existing test functions into the PyUnit framework. Optionally, set-up and tidy-up functions can be supplied. As with TestCase, the tidy-up ('tearDown') function will always be called if the set-up ('setUp') function ran successfully. 
""" def __init__(self, testFunc, setUp=None, tearDown=None, description=None): TestCase.__init__(self) self.__setUpFunc = setUp self.__tearDownFunc = tearDown self.__testFunc = testFunc self.__description = description def setUp(self): if self.__setUpFunc is not None: self.__setUpFunc() def tearDown(self): if self.__tearDownFunc is not None: self.__tearDownFunc() def runTest(self): self.__testFunc() def id(self): return self.__testFunc.__name__ def __str__(self): return "%s (%s)" % (_strclass(self.__class__), self.__testFunc.__name__) def __repr__(self): return "<%s testFunc=%s>" % (_strclass(self.__class__), self.__testFunc) def shortDescription(self): if self.__description is not None: return self.__description doc = self.__testFunc.__doc__ return doc and doc.split("\n")[0].strip() or None ############################################################################## # Locating and loading tests ############################################################################## class TestLoader: """This class is responsible for loading tests according to various criteria and returning them wrapped in a Test """ testMethodPrefix = 'test' sortTestMethodsUsing = cmp suiteClass = TestSuite def loadTestsFromTestCase(self, testCaseClass): """Return a suite of all tests cases contained in testCaseClass""" if issubclass(testCaseClass, TestSuite): raise TypeError("Test cases should not be derived from TestSuite. 
Maybe you meant to derive from TestCase?") testCaseNames = self.getTestCaseNames(testCaseClass) if not testCaseNames and hasattr(testCaseClass, 'runTest'): testCaseNames = ['runTest'] return self.suiteClass(map(testCaseClass, testCaseNames)) def loadTestsFromModule(self, module): """Return a suite of all tests cases contained in the given module""" tests = [] for name in dir(module): obj = getattr(module, name) if (isinstance(obj, (type, types.ClassType)) and issubclass(obj, TestCase)): tests.append(self.loadTestsFromTestCase(obj)) return self.suiteClass(tests) def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. The name may resolve either to a module, a test case class, a test method within a test case class, or a callable object which returns a TestCase or TestSuite instance. The method optionally resolves the names relative to a given module. """ parts = name.split('.') if module is None: parts_copy = parts[:] while parts_copy: try: module = __import__('.'.join(parts_copy)) break except ImportError: del parts_copy[-1] if not parts_copy: raise parts = parts[1:] obj = module for part in parts: parent, obj = obj, getattr(obj, part) if type(obj) == types.ModuleType: return self.loadTestsFromModule(obj) elif (isinstance(obj, (type, types.ClassType)) and issubclass(obj, TestCase)): return self.loadTestsFromTestCase(obj) elif type(obj) == types.UnboundMethodType: return parent(obj.__name__) elif isinstance(obj, TestSuite): return obj elif callable(obj): test = obj() if not isinstance(test, (TestCase, TestSuite)): raise ValueError, \ "calling %s returned %s, not a test" % (obj,test) return test else: raise ValueError, "don't know how to make test from: %s" % obj def loadTestsFromNames(self, names, module=None): """Return a suite of all tests cases found using the given sequence of string specifiers. See 'loadTestsFromName()'. 
""" suites = [self.loadTestsFromName(name, module) for name in names] return self.suiteClass(suites) def getTestCaseNames(self, testCaseClass): """Return a sorted sequence of method names found within testCaseClass """ def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix): return attrname.startswith(prefix) and callable(getattr(testCaseClass, attrname)) testFnNames = filter(isTestMethod, dir(testCaseClass)) for baseclass in testCaseClass.__bases__: for testFnName in self.getTestCaseNames(baseclass): if testFnName not in testFnNames: # handle overridden methods testFnNames.append(testFnName) if self.sortTestMethodsUsing: testFnNames.sort(self.sortTestMethodsUsing) return testFnNames defaultTestLoader = TestLoader() ############################################################################## # Patches for old functions: these functions should be considered obsolete ############################################################################## def _makeLoader(prefix, sortUsing, suiteClass=None): loader = TestLoader() loader.sortTestMethodsUsing = sortUsing loader.testMethodPrefix = prefix if suiteClass: loader.suiteClass = suiteClass return loader def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp): return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass) def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass) def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module) ############################################################################## # Text UI ############################################################################## class _WritelnDecorator: """Used to decorate file-like objects with a handy 'writeln' method""" def __init__(self,stream): self.stream = stream def __getattr__(self, attr): 
return getattr(self.stream,attr) def writeln(self, arg=None): if arg: self.write(arg) self.write('\n') # text-mode streams translate to \r\n if needed class _TextTestResult(TestResult): """A test result class that can print formatted text results to a stream. Used by TextTestRunner. """ separator1 = '=' * 70 separator2 = '-' * 70 def __init__(self, stream, descriptions, verbosity): TestResult.__init__(self) self.stream = stream self.showAll = verbosity > 1 self.dots = verbosity == 1 self.descriptions = descriptions def getDescription(self, test): if self.descriptions: return test.shortDescription() or str(test) else: return str(test) def startTest(self, test): TestResult.startTest(self, test) if self.showAll: self.stream.write(self.getDescription(test)) self.stream.write(" ... ") def addSuccess(self, test): TestResult.addSuccess(self, test) if self.showAll: self.stream.writeln("ok") elif self.dots: self.stream.write('.') def addError(self, test, err): TestResult.addError(self, test, err) if self.showAll: self.stream.writeln("ERROR") elif self.dots: self.stream.write('E') def addFailure(self, test, err): TestResult.addFailure(self, test, err) if self.showAll: self.stream.writeln("FAIL") elif self.dots: self.stream.write('F') def printErrors(self): if self.dots or self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavour, errors): for test, err in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour,self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln("%s" % err) class TextTestRunner: """A test runner class that displays results in textual form. It prints out the names of tests as they are run, errors as they occur, and a summary of the results at the end of the test run. 
""" def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1): self.stream = _WritelnDecorator(stream) self.descriptions = descriptions self.verbosity = verbosity def _makeResult(self): return _TextTestResult(self.stream, self.descriptions, self.verbosity) def run(self, test): "Run the given test case or test suite." result = self._makeResult() startTime = time.time() test(result) stopTime = time.time() timeTaken = stopTime - startTime result.printErrors() self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() if not result.wasSuccessful(): self.stream.write("FAILED (") failed, errored = map(len, (result.failures, result.errors)) if failed: self.stream.write("failures=%d" % failed) if errored: if failed: self.stream.write(", ") self.stream.write("errors=%d" % errored) self.stream.writeln(")") else: self.stream.writeln("OK") return result ############################################################################## # Facilities for running tests from the command line ############################################################################## class TestProgram: """A command-line program that runs a set of tests; this is primarily for making test modules conveniently executable. """ USAGE = """\ Usage: %(progName)s [options] [test] [...] 
Options: -h, --help Show this message -v, --verbose Verbose output -q, --quiet Minimal output Examples: %(progName)s - run default set of tests %(progName)s MyTestSuite - run suite 'MyTestSuite' %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething %(progName)s MyTestCase - run all 'test*' test methods in MyTestCase """ def __init__(self, module='__main__', defaultTest=None, argv=None, testRunner=None, testLoader=defaultTestLoader): if type(module) == type(''): self.module = __import__(module) for part in module.split('.')[1:]: self.module = getattr(self.module, part) else: self.module = module if argv is None: argv = sys.argv self.verbosity = 1 self.defaultTest = defaultTest self.testRunner = testRunner self.testLoader = testLoader self.progName = os.path.basename(argv[0]) self.parseArgs(argv) self.runTests() def usageExit(self, msg=None): if msg: print msg print self.USAGE % self.__dict__ sys.exit(2) def parseArgs(self, argv): import getopt try: options, args = getopt.getopt(argv[1:], 'hHvq', ['help','verbose','quiet']) for opt, value in options: if opt in ('-h','-H','--help'): self.usageExit() if opt in ('-q','--quiet'): self.verbosity = 0 if opt in ('-v','--verbose'): self.verbosity = 2 if len(args) == 0 and self.defaultTest is None: self.test = self.testLoader.loadTestsFromModule(self.module) return if len(args) > 0: self.testNames = args else: self.testNames = (self.defaultTest,) self.createTests() except getopt.error, msg: self.usageExit(msg) def createTests(self): self.test = self.testLoader.loadTestsFromNames(self.testNames, self.module) def runTests(self): if self.testRunner is None: self.testRunner = TextTestRunner(verbosity=self.verbosity) result = self.testRunner.run(self.test) sys.exit(not result.wasSuccessful()) main = TestProgram ############################################################################## # Executing this module from the command line ############################################################################## if 
__name__ == "__main__": main(module=None)
unknown
codeparrot/codeparrot-clean
#Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/__init__.py __version__=''' $Id$ ''' __doc__='''Page Layout and Typography Using Scripts" - higher-level framework for flowing documents''' from reportlab.platypus.flowables import Flowable, Image, Macro, PageBreak, Preformatted, Spacer, XBox, \ CondPageBreak, KeepTogether, TraceInfo, FailOnWrap, FailOnDraw, PTOContainer, \ KeepInFrame, ParagraphAndImage, ImageAndFlowables, ListFlowable, ListItem, FrameBG, \ PageBreakIfNotEmpty from reportlab.platypus.paragraph import Paragraph, cleanBlockQuotedText, ParaLines from reportlab.platypus.paraparser import ParaFrag from reportlab.platypus.tables import Table, TableStyle, CellStyle, LongTable from reportlab.platypus.frames import Frame from reportlab.platypus.doctemplate import BaseDocTemplate, NextPageTemplate, PageTemplate, ActionFlowable, \ SimpleDocTemplate, FrameBreak, PageBegin, Indenter, NotAtTopPageBreak from reportlab.platypus.xpreformatted import XPreformatted
unknown
codeparrot/codeparrot-clean
""" ******************************* K-means clustering (``kmeans``) ******************************* .. index:: single: clustering, kmeans .. index:: agglomerative clustering .. autoclass:: Orange.clustering.kmeans.Clustering(data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None) :members: :exclude-members: __init__ .. automethod:: __init__(data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None) Examples ======== The following code runs k-means clustering and prints out the cluster indexes for the last 10 data instances (:download:`kmeans-run.py <code/kmeans-run.py>`): .. literalinclude:: code/kmeans-run.py The output of this code is:: [1, 1, 2, 1, 1, 1, 2, 1, 1, 2] Invoking a call-back function may be useful when tracing the progress of the clustering. Below is a code that uses an :obj:`inner_callback` to report on the number of instances that have changed the cluster and to report on the clustering score. For the score o be computed at each iteration we have to set :obj:`minscorechange`, but we can leave it at 0 or even set it to a negative value, which allows the score to deteriorate by some amount (:download:`kmeans-run-callback.py <code/kmeans-run-callback.py>`): .. 
literalinclude:: code/kmeans-run-callback.py The convergence on Iris data set is fast:: Iteration: 1, changes: 150, score: 10.9555 Iteration: 2, changes: 12, score: 10.3867 Iteration: 3, changes: 2, score: 10.2034 Iteration: 4, changes: 2, score: 10.0699 Iteration: 5, changes: 2, score: 9.9542 Iteration: 6, changes: 1, score: 9.9168 Iteration: 7, changes: 2, score: 9.8624 Iteration: 8, changes: 0, score: 9.8624 Call-back above is used for reporting of the progress, but may as well call a function that plots a selection data projection with corresponding centroid at a given step of the clustering. This is exactly what we did with the following script (:download:`kmeans-trace.py <code/kmeans-trace.py>`): .. literalinclude:: code/kmeans-trace.py Only the first four scatterplots are shown below. Colors of the data instances indicate the cluster membership. Notice that since the Iris data set includes four attributes, the closest centroid in a particular 2-dimensional projection is not necessary also the centroid of the cluster that the data point belongs to. .. image:: files/kmeans-scatter-001.png .. image:: files/kmeans-scatter-002.png .. image:: files/kmeans-scatter-003.png .. image:: files/kmeans-scatter-004.png k-Means Utility Functions ========================= .. automethod:: Orange.clustering.kmeans.init_random .. automethod:: Orange.clustering.kmeans.init_diversity .. autoclass:: Orange.clustering.kmeans.init_hclustering :members: .. automethod:: Orange.clustering.kmeans.plot_silhouette .. automethod:: Orange.clustering.kmeans.score_distance_to_centroids .. automethod:: Orange.clustering.kmeans.score_silhouette .. automethod:: Orange.clustering.kmeans.score_fast_silhouette Typically, the choice of seeds has a large impact on the k-means clustering, with better initialization methods yielding a clustering that converges faster and finds more optimal centroids. 
The following code compares three different initialization methods (random, diversity-based and hierarchical clustering-based) in terms of how fast they converge (:download:`kmeans-cmp-init.py <code/kmeans-cmp-init.py>`): .. literalinclude:: code/kmeans-cmp-init.py As expected, k-means converges faster with diversity and clustering-based initialization that with random seed selection:: Rnd Div HC iris 12 3 4 housing 14 6 4 vehicle 11 4 3 The following code computes the silhouette score for k=2..7 and plots a silhuette plot for k=3 (:download:`kmeans-silhouette.py <code/kmeans-silhouette.py>`): .. literalinclude:: code/kmeans-silhouette.py The analysis suggests that k=2 is preferred as it yields the maximal silhouette coefficient:: 2 0.629467553352 3 0.504318855054 4 0.407259377854 5 0.358628975081 6 0.353228492088 7 0.366357876944 .. figure:: files/kmeans-silhouette.png Silhouette plot for k=3. """ import math import sys import random from Orange import statc import Orange.clustering.hierarchical import Orange # miscellaneous functions def _modus(dist): #Check bool(dist) - False means no known cases #Check dist.cases > 0 - We cant return some value from the domain without knowing if it is even present #in the data. TOOD: What does this mean for k-means convergence? if bool(dist) and dist.cases > 0: return dist.modus() else: return None def data_center(data): """ Returns a center of the instances in the data set (average across data instances for continuous attributes, most frequent value for discrete attributes). 
""" atts = data.domain.attributes astats = Orange.statistics.basic.Domain(data) center = [astats[a].avg if a.varType == Orange.feature.Type.Continuous \ # else max(enumerate(orange.Distribution(a, data)), key=lambda x:x[1])[0] if a.varType == orange.VarTypes.Discrete else _modus(Orange.statistics.distribution.Distribution(a, data)) if a.varType == Orange.feature.Type.Discrete else None for a in atts] if data.domain.classVar: center.append(0) return Orange.data.Instance(data.domain, center) def minindex(x): """Return the index of the minimum element""" return x.index(min(x)) def avg(x): """Return the average (mean) of a given list""" return (float(sum(x)) / len(x)) if x else 0 # # data distances # # k-means clustering # clustering scoring functions def score_distance_to_centroids(km): """Returns an average distance of data instances to their associated centroids. :param km: a k-means clustering object. :type km: :class:`KMeans` """ return sum(km.distance(km.centroids[km.clusters[i]], d) for i,d in enumerate(km.data)) score_distance_to_centroids.minimize = True def score_conditional_entropy(km): """UNIMPLEMENTED cluster quality measured by conditional entropy""" raise NotImplemented def score_within_cluster_distance(km): """UNIMPLEMENTED weighted average within-cluster pairwise distance""" raise NotImplemented score_within_cluster_distance.minimize = True def score_between_cluster_distance(km): """Sum of distances from elements to 'nearest miss' centroids""" return sum(min(km.distance(c, d) for j,c in enumerate(km.centroids) if j!=km.clusters[i]) for i,d in enumerate(km.data)) from Orange.utils import deprecated_function_name score_betweenClusterDistance = deprecated_function_name(score_between_cluster_distance) def score_silhouette(km, index=None): """Returns an average silhouette score of data instances. :param km: a k-means clustering object. 
:type km: :class:`KMeans` :param index: if given, the functon returns just the silhouette score of that particular data instance. :type index: integer """ if index == None: return avg([score_silhouette(km, i) for i in range(len(km.data))]) cind = km.clusters[index] a = avg([km.distance(km.data[index], ex) for i, ex in enumerate(km.data) if km.clusters[i] == cind and i != index]) b = min([avg([km.distance(km.data[index], ex) for i, ex in enumerate(km.data) if km.clusters[i] == c]) for c in range(len(km.centroids)) if c != cind]) return float(b - a) / max(a, b) if max(a, b) > 0 else 0.0 def score_fast_silhouette(km, index=None): """Same as score_silhouette, but computes an approximation and is faster. :param km: a k-means clustering object. :type km: :class:`KMeans` """ if index == None: return avg([score_fast_silhouette(km, i) for i in range(len(km.data))]) cind = km.clusters[index] a = km.distance(km.data[index], km.centroids[km.clusters[index]]) b = min([km.distance(km.data[index], c) for i,c in enumerate(km.centroids) if i != cind]) return float(b - a) / max(a, b) if max(a, b) > 0 else 0.0 def compute_bic(km): """Compute bayesian information criteria score for given clustering. NEEDS REWRITE!!!""" data = km.data medoids = km.centroids M = len(data.domain.attributes) R = float(len(data)) Ri = [km.clusters.count(i) for i in range(km.k)] numFreePar = (len(km.data.domain.attributes) + 1.) * km.k * math.log(R, 2.) / 2. # sigma**2 s2 = 0. cidx = [i for i, attr in enumerate(data.domain.attributes) if attr.varType in [Orange.feature.Type.Continuous, Orange.feature.Type.Discrete]] for x, midx in izip(data, mapping): medoid = medoids[midx] # medoids has a dummy element at the beginning, so we don't need -1 s2 += sum( [(float(x[i]) - float(medoid[i]))**2 for i in cidx] ) s2 /= (R - K) if s2 < 1e-20: return None, [None]*K # log-lokehood of clusters: l(Dn) # log-likehood of clustering: l(D) ld = 0 bicc = [] for k in range(1, 1+K): ldn = -1. * Ri[k] * ((math.log(2. 
* math.pi, 2) / -2.) - (M * math.log(s2, 2) / 2.) + (K / 2.) + math.log(Ri[k], 2) - math.log(R, 2)) ld += ldn bicc.append(ldn - numFreePar) return ld - numFreePar, bicc # # silhouette plot # def plot_silhouette(km, filename='tmp.png', fast=False): """ Saves a silhuette plot to filename, showing the distributions of silhouette scores in clusters. kmeans is a k-means clustering object. If fast is True use score_fast_silhouette to compute scores instead of score_silhouette. :param km: a k-means clustering object. :type km: :class:`KMeans` :param filename: name of output plot. :type filename: string :param fast: if True use :func:`score_fast_silhouette` to compute scores instead of :func:`score_silhouette` :type fast: boolean. """ import matplotlib.pyplot as plt plt.figure() scoring = score_fast_silhouette if fast else score_silhouette scores = [[] for i in range(km.k)] for i, c in enumerate(km.clusters): scores[c].append(scoring(km, i)) csizes = map(len, scores) cpositions = [sum(csizes[:i]) + (i+1)*3 + csizes[i]/2 for i in range(km.k)] scores = reduce(lambda x,y: x + [0]*3 + sorted(y), scores, []) plt.barh(range(len(scores)), scores, linewidth=0, color='c') plt.yticks(cpositions, map(str, range(km.k))) #plt.title('Silhouette plot') plt.ylabel('Cluster') plt.xlabel('Silhouette value') plt.savefig(filename) # clustering initialization (seeds) # initialization functions should be of the type f(data, k, distfun) def init_random(data, k, _): """A function that can be used for initialization of k-means clustering returns k data instances from the data. This type of initialization is also known as Fory's initialization (Forgy, 1965; He et al., 2004). :param data: data instances. :type data: :class:`orange.ExampleTable` :param k: the number of clusters. :type k: integer """ return data.getitems(random.sample(range(len(data)), k)) def init_diversity(data, k, distfun): """A function that can be used for intialization of k-means clustering. 
Returns a set of centroids where the first one is a data point being the farthest away from the center of the data, and consequent centroids data points of which the minimal distance to the previous set of centroids is maximal. Differs from the initialization proposed by Katsavounidis et al. (1994) only in the selection of the first centroid (where they use a data instance with the highest norm). :param data: data instances. :type data: :class:`orange.ExampleTable` :param k: the number of clusters. :type k: integer :param distfun: a distance function. :type distfun: :class:`Orange.distance.Distance` """ center = data_center(data) # the first seed should be the farthest point from the center seeds = [max([(distfun(d, center), d) for d in data])[1]] # other seeds are added iteratively, and are data points that are farthest from the current set of seeds for i in range(1,k): seeds.append(max([(min([distfun(d, s) for s in seeds]), d) for d in data if d not in seeds])[1]) return seeds class init_hclustering(): """ A class that returns an clustering initialization function that performs hierarhical clustering, uses it to infer k clusters, and computes a list of cluster-based data centers """ def __init__(self, n=100): """ :param n: number of data instances to sample. :type n: integer """ self.n = n def __call__(self, data, k, disfun): """ :param data: data instances. :type data: :class:`orange.ExampleTable` :param k: the number of clusters. :type k: integer :param distfun: a distance function. :type distfun: :class:`Orange.distance.Distance` """ sample = Orange.data.Table(random.sample(data, min(self.n, len(data)))) root = Orange.clustering.hierarchical.clustering(sample) cmap = Orange.clustering.hierarchical.top_clusters(root, k) return [data_center(Orange.data.Table([sample[e] for e in cl])) for cl in cmap] # # k-means clustering, main implementation # class Clustering: """Implements a k-means clustering algorithm: #. Choose the number of clusters, k. #. 
Choose a set of k initial centroids. #. Assign each instances in the data set to the closest centroid. #. For each cluster, compute a new centroid as a center of clustered data instances. #. Repeat the previous two steps, until some convergence criterion is met (e.g., the cluster assignment has not changed). The main advantages of this algorithm are simplicity and low memory requirements. The principal disadvantage is the dependence of results on the selection of initial set of centroids. .. attribute:: k Number of clusters. .. attribute:: data Instances to cluster. .. attribute:: centroids Current set of centroids. .. attribute:: scoring Current clustering score. .. attribute:: iteration Current clustering iteration. .. attribute:: clusters A list of cluster indexes. An i-th element provides an index to a centroid associated with i-th data instance from the input data set. """ def __init__(self, data=None, centroids=3, maxiters=None, minscorechange=None, stopchanges=0, nstart=1, initialization=init_random, distance=Orange.distance.Euclidean, scoring=score_distance_to_centroids, inner_callback=None, outer_callback=None): """ :param data: Data instances to be clustered. If not None, clustering will be executed immediately after initialization unless ``initialize_only=True``. :type data: :class:`~Orange.data.Table` or None :param centroids: either specify a number of clusters or provide a list of examples that will serve as clustering centroids. :type centroids: :obj:`int` or :obj:`list` of :class:`~Orange.data.Instance` :param nstart: If greater than one, nstart runs of the clustering algorithm will be executed, returning the clustering with the best (lowest) score. :type nstart: int :param distance: an example distance constructor, which measures the distance between two instances. :type distance: :class:`~Orange.distance.DistanceConstructor` :param initialization: a function to select centroids given data instances, k and a example distance function. 
This module implements different approaches (:obj:`init_random`, :obj:`init_diversity`, :obj:`init_hclustering`). :param scoring: a function that takes clustering object and returns the clustering score. It could be used, for instance, in procedure that repeats the clustering nstart times, returning the clustering with the lowest score. :param inner_callback: invoked after every clustering iteration. :param outer_callback: invoked after every clustering restart (if nstart is greater than 1). Stopping criteria: :param maxiters: maximum number of clustering iterations :type maxiters: integer :param minscorechange: minimal improvement of the score from previous generation (if lower, the clustering will stop). If None, the score will not be computed between iterations :type minscorechange: float or None :param stopchanges: if the number of instances changing the cluster is lower or equal to stopchanges, stop the clustering. :type stopchanges: integer """ self.data = data self.k = centroids if type(centroids)==int else len(centroids) self.centroids = centroids if type(centroids) == Orange.data.Table else None self.maxiters = maxiters self.minscorechange = minscorechange self.stopchanges = stopchanges self.nstart = nstart self.initialization = initialization self.distance_constructor = distance self.distance = self.distance_constructor(self.data) if self.data else None self.scoring = scoring self.minimize_score = True if hasattr(scoring, 'minimize') else False self.inner_callback = inner_callback self.outer_callback = outer_callback if self.data: self.run() def __call__(self, data = None): """Runs the k-means clustering algorithm, with optional new data.""" if data: self.data = data self.distance = self.distance_constructor(self.data) self.run() def init_centroids(self): """Initialize cluster centroids""" if self.centroids and not self.nstart > 1: # centroids were specified return self.centroids = self.initialization(self.data, self.k, self.distance) def 
compute_centeroid(self, data): """Return a centroid of the data set.""" return data_center(data) def compute_cluster(self): """calculate membership in clusters""" return [minindex([self.distance(s, d) for s in self.centroids]) for d in self.data] def runone(self): """Runs a single clustering iteration, starting with re-computation of centroids, followed by computation of data membership (associating data instances to their nearest centroid).""" self.centroids = [self.compute_centeroid(self.data.getitems( [i for i, c in enumerate(self.clusters) if c == cl])) for cl in range(self.k)] self.clusters = self.compute_cluster() def run(self): """ Runs clustering until the convergence conditions are met. If nstart is greater than one, nstart runs of the clustering algorithm will be executed, returning the clustering with the best (lowest) score. """ self.winner = None for startindx in range(self.nstart): self.init_centroids() self.clusters = old_cluster = self.compute_cluster() if self.minscorechange != None: self.score = old_score = self.scoring(self) self.nchanges = len(self.data) self.iteration = 0 stopcondition = False if self.inner_callback: self.inner_callback(self) while not stopcondition: self.iteration += 1 self.runone() self.nchanges = sum(map(lambda x,y: x!=y, old_cluster, self.clusters)) old_cluster = self.clusters if self.minscorechange != None: self.score = self.scoring(self) scorechange = (self.score - old_score) / old_score if old_score > 0 else self.minscorechange if self.minimize_score: scorechange = -scorechange old_score = self.score stopcondition = (self.nchanges <= self.stopchanges or self.iteration == self.maxiters or (self.minscorechange != None and scorechange <= self.minscorechange)) if self.inner_callback: self.inner_callback(self) if self.scoring and self.minscorechange == None: self.score = self.scoring(self) if self.nstart > 1: if not self.winner or (self.score < self.winner[0] if self.minimize_score else self.score > self.winner[0]): 
self.winner = (self.score, self.clusters, self.centroids) if self.outer_callback: self.outer_callback(self) if self.nstart > 1: self.score, self.clusters, self.centroids = self.winner
unknown
codeparrot/codeparrot-clean
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Debug middleware""" from __future__ import print_function import sys import six import webob.dec from neutron.openstack.common.middleware import base class Debug(base.Middleware): """Helper class that returns debug information. Can be inserted into any WSGI application chain to get information about the request and response. """ @webob.dec.wsgify def __call__(self, req): print(("*" * 40) + " REQUEST ENVIRON") for key, value in req.environ.items(): print(key, "=", value) print() resp = req.get_response(self.application) print(("*" * 40) + " RESPONSE HEADERS") for (key, value) in six.iteritems(resp.headers): print(key, "=", value) print() resp.app_iter = self.print_generator(resp.app_iter) return resp @staticmethod def print_generator(app_iter): """Prints the contents of a wrapper string iterator when iterated.""" print(("*" * 40) + " BODY") for part in app_iter: sys.stdout.write(part) sys.stdout.flush() yield part print()
unknown
codeparrot/codeparrot-clean
<?php /* * This file is part of the Symfony package. * * (c) Fabien Potencier <fabien@symfony.com> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace Symfony\Bundle\WebProfilerBundle\Controller; use Symfony\Component\ExpressionLanguage\ExpressionFunctionProviderInterface; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\Response; use Symfony\Component\HttpKernel\DataCollector\RequestDataCollector; use Symfony\Component\HttpKernel\Exception\NotFoundHttpException; use Symfony\Component\HttpKernel\Profiler\Profiler; use Symfony\Component\Routing\Matcher\TraceableUrlMatcher; use Symfony\Component\Routing\Matcher\UrlMatcherInterface; use Symfony\Component\Routing\RouteCollection; use Symfony\Component\Routing\RouterInterface; use Twig\Environment; /** * @author Fabien Potencier <fabien@symfony.com> * * @internal */ class RouterController { /** * @param ExpressionFunctionProviderInterface[] $expressionLanguageProviders */ public function __construct( private ?Profiler $profiler, private Environment $twig, private ?UrlMatcherInterface $matcher = null, private ?RouteCollection $routes = null, private iterable $expressionLanguageProviders = [], ) { if ($this->matcher instanceof RouterInterface) { $this->routes ??= $this->matcher->getRouteCollection(); } } /** * Renders the profiler panel for the given token. 
* * @throws NotFoundHttpException */ public function panelAction(string $token): Response { if (null === $this->profiler) { throw new NotFoundHttpException('The profiler must be enabled.'); } $this->profiler->disable(); if (null === $this->matcher || null === $this->routes) { return new Response('The Router is not enabled.', 200, ['Content-Type' => 'text/html']); } $profile = $this->profiler->loadProfile($token); /** @var RequestDataCollector $request */ $request = $profile->getCollector('request'); return new Response($this->twig->render('@WebProfiler/Router/panel.html.twig', [ 'request' => $request, 'router' => $profile->getCollector('router'), 'traces' => $this->getTraces($request, $profile->getMethod()), ]), 200, ['Content-Type' => 'text/html']); } /** * Returns the routing traces associated to the given request. */ private function getTraces(RequestDataCollector $request, string $method): array { $traceRequest = new Request( $request->getRequestQuery()->all(), $request->getRequestRequest()->all(), $request->getRequestAttributes()->all(), $request->getRequestCookies(true)->all(), [], $request->getRequestServer(true)->all() ); $context = $this->matcher->getContext(); $context->setMethod($method); $matcher = new TraceableUrlMatcher($this->routes, $context); foreach ($this->expressionLanguageProviders as $provider) { $matcher->addExpressionLanguageProvider($provider); } return $matcher->getTracesForRequest($traceRequest); } }
php
github
https://github.com/symfony/symfony
src/Symfony/Bundle/WebProfilerBundle/Controller/RouterController.php
from __future__ import annotations import json import random import re import string from ipaddress import IPv4Address from pathlib import Path from typing import TYPE_CHECKING, Any, cast from unittest import mock from urllib.parse import urlencode import pytest from pytest_twisted import async_yield_fixture from twisted.internet.defer import Deferred, DeferredList, inlineCallbacks from twisted.internet.endpoints import SSL4ClientEndpoint, SSL4ServerEndpoint from twisted.internet.ssl import Certificate, PrivateCertificate, optionsForClientTLS from twisted.web.client import URI, ResponseFailed from twisted.web.http import H2_ENABLED from twisted.web.http import Request as TxRequest from twisted.web.server import NOT_DONE_YET, Site from twisted.web.static import File from scrapy.exceptions import DownloadCancelledError, DownloadTimeoutError from scrapy.http import JsonRequest, Request, Response from scrapy.settings import Settings from scrapy.spiders import Spider from scrapy.utils.defer import ( deferred_f_from_coro_f, deferred_from_coro, maybe_deferred_to_future, ) from tests.mockserver.http_resources import LeafResource, Status from tests.mockserver.utils import ssl_context_factory if TYPE_CHECKING: from collections.abc import AsyncGenerator, Callable, Coroutine, Generator from scrapy.core.http2.protocol import H2ClientProtocol pytestmark = [ pytest.mark.requires_reactor, pytest.mark.skipif( not H2_ENABLED, reason="HTTP/2 support in Twisted is not enabled" ), ] def generate_random_string(size: int) -> str: return "".join(random.choices(string.ascii_uppercase + string.digits, k=size)) def make_html_body(val: str) -> bytes: response = f"""<html> <h1>Hello from HTTP2<h1> <p>{val}</p> </html>""" return bytes(response, "utf-8") class DummySpider(Spider): name = "dummy" start_urls: list = [] def parse(self, response): print(response) class Data: SMALL_SIZE = 1024 # 1 KB LARGE_SIZE = 1024**2 # 1 MB STR_SMALL = generate_random_string(SMALL_SIZE) STR_LARGE = 
generate_random_string(LARGE_SIZE) EXTRA_SMALL = generate_random_string(1024 * 15) EXTRA_LARGE = generate_random_string((1024**2) * 15) HTML_SMALL = make_html_body(STR_SMALL) HTML_LARGE = make_html_body(STR_LARGE) JSON_SMALL = {"data": STR_SMALL} JSON_LARGE = {"data": STR_LARGE} DATALOSS = b"Dataloss Content" NO_CONTENT_LENGTH = b"This response do not have any content-length header" class GetDataHtmlSmall(LeafResource): def render_GET(self, request: TxRequest): request.setHeader("Content-Type", "text/html; charset=UTF-8") return Data.HTML_SMALL class GetDataHtmlLarge(LeafResource): def render_GET(self, request: TxRequest): request.setHeader("Content-Type", "text/html; charset=UTF-8") return Data.HTML_LARGE class PostDataJsonMixin: @staticmethod def make_response(request: TxRequest, extra_data: str) -> bytes: assert request.content is not None response = { "request-headers": {}, "request-body": json.loads(request.content.read()), "extra-data": extra_data, } for k, v in request.requestHeaders.getAllRawHeaders(): response["request-headers"][str(k, "utf-8")] = str(v[0], "utf-8") response_bytes = bytes(json.dumps(response), "utf-8") request.setHeader("Content-Type", "application/json; charset=UTF-8") request.setHeader("Content-Encoding", "UTF-8") return response_bytes class PostDataJsonSmall(LeafResource, PostDataJsonMixin): def render_POST(self, request: TxRequest): return self.make_response(request, Data.EXTRA_SMALL) class PostDataJsonLarge(LeafResource, PostDataJsonMixin): def render_POST(self, request: TxRequest): return self.make_response(request, Data.EXTRA_LARGE) class Dataloss(LeafResource): def render_GET(self, request: TxRequest): request.setHeader(b"Content-Length", b"1024") self.deferRequest(request, 0, self._delayed_render, request) return NOT_DONE_YET @staticmethod def _delayed_render(request: TxRequest): request.write(Data.DATALOSS) request.finish() class NoContentLengthHeader(LeafResource): def render_GET(self, request: TxRequest): 
request.requestHeaders.removeHeader("Content-Length") self.deferRequest(request, 0, self._delayed_render, request) return NOT_DONE_YET @staticmethod def _delayed_render(request: TxRequest): request.write(Data.NO_CONTENT_LENGTH) request.finish() class TimeoutResponse(LeafResource): def render_GET(self, request: TxRequest): return NOT_DONE_YET class QueryParams(LeafResource): def render_GET(self, request: TxRequest): request.setHeader("Content-Type", "application/json; charset=UTF-8") request.setHeader("Content-Encoding", "UTF-8") query_params: dict[str, str] = {} assert request.args is not None for k, v in request.args.items(): query_params[str(k, "utf-8")] = str(v[0], "utf-8") return bytes(json.dumps(query_params), "utf-8") class RequestHeaders(LeafResource): """Sends all the headers received as a response""" def render_GET(self, request: TxRequest): request.setHeader("Content-Type", "application/json; charset=UTF-8") request.setHeader("Content-Encoding", "UTF-8") headers = {} for k, v in request.requestHeaders.getAllRawHeaders(): headers[str(k, "utf-8")] = str(v[0], "utf-8") return bytes(json.dumps(headers), "utf-8") def make_request_dfd(client: H2ClientProtocol, request: Request) -> Deferred[Response]: return client.request(request, DummySpider()) async def make_request(client: H2ClientProtocol, request: Request) -> Response: return await maybe_deferred_to_future(make_request_dfd(client, request)) class TestHttps2ClientProtocol: scheme = "https" host = "localhost" key_file = Path(__file__).parent / "keys" / "localhost.key" certificate_file = Path(__file__).parent / "keys" / "localhost.crt" @pytest.fixture def site(self, tmp_path): r = File(str(tmp_path)) r.putChild(b"get-data-html-small", GetDataHtmlSmall()) r.putChild(b"get-data-html-large", GetDataHtmlLarge()) r.putChild(b"post-data-json-small", PostDataJsonSmall()) r.putChild(b"post-data-json-large", PostDataJsonLarge()) r.putChild(b"dataloss", Dataloss()) r.putChild(b"no-content-length-header", 
NoContentLengthHeader()) r.putChild(b"status", Status()) r.putChild(b"query-params", QueryParams()) r.putChild(b"timeout", TimeoutResponse()) r.putChild(b"request-headers", RequestHeaders()) return Site(r, timeout=None) @async_yield_fixture async def server_port(self, site: Site) -> AsyncGenerator[int]: from twisted.internet import reactor context_factory = ssl_context_factory( str(self.key_file), str(self.certificate_file) ) server_endpoint = SSL4ServerEndpoint( reactor, 0, context_factory, interface=self.host ) server = await server_endpoint.listen(site) yield server.getHost().port await server.stopListening() @pytest.fixture def client_certificate(self) -> PrivateCertificate: pem = self.key_file.read_text( encoding="utf-8" ) + self.certificate_file.read_text(encoding="utf-8") return PrivateCertificate.loadPEM(pem) @async_yield_fixture async def client( self, server_port: int, client_certificate: PrivateCertificate ) -> AsyncGenerator[H2ClientProtocol]: from twisted.internet import reactor from scrapy.core.http2.protocol import H2ClientFactory # noqa: PLC0415 client_options = optionsForClientTLS( hostname=self.host, trustRoot=client_certificate, acceptableProtocols=[b"h2"], ) uri = URI.fromBytes(bytes(self.get_url(server_port, "/"), "utf-8")) h2_client_factory = H2ClientFactory(uri, Settings(), Deferred()) client_endpoint = SSL4ClientEndpoint( reactor, self.host, server_port, client_options ) client = await client_endpoint.connect(h2_client_factory) yield client if client.connected: client.transport.loseConnection() client.transport.abortConnection() def get_url(self, portno: int, path: str) -> str: """ :param path: Should have / at the starting compulsorily if not empty :return: Complete url """ assert len(path) > 0 assert path[0] == "/" or path[0] == "&" return f"{self.scheme}://{self.host}:{portno}{path}" @staticmethod async def _check_repeat( get_coro: Callable[[], Coroutine[Any, Any, None]], count: int ) -> None: d_list = [] for _ in range(count): d = 
deferred_from_coro(get_coro()) d_list.append(d) await maybe_deferred_to_future(DeferredList(d_list, fireOnOneErrback=True)) async def _check_GET( self, client: H2ClientProtocol, request: Request, expected_body: bytes, expected_status: int, ) -> None: response = await make_request(client, request) assert response.status == expected_status assert response.body == expected_body content_length_header = response.headers.get("Content-Length") assert content_length_header is not None content_length = int(content_length_header) assert len(response.body) == content_length @deferred_f_from_coro_f async def test_GET_small_body( self, server_port: int, client: H2ClientProtocol ) -> None: request = Request(self.get_url(server_port, "/get-data-html-small")) await self._check_GET(client, request, Data.HTML_SMALL, 200) @deferred_f_from_coro_f async def test_GET_large_body( self, server_port: int, client: H2ClientProtocol ) -> None: request = Request(self.get_url(server_port, "/get-data-html-large")) await self._check_GET(client, request, Data.HTML_LARGE, 200) async def _check_GET_x10( self, client: H2ClientProtocol, request: Request, expected_body: bytes, expected_status: int, ) -> None: async def get_coro() -> None: await self._check_GET(client, request, expected_body, expected_status) await self._check_repeat(get_coro, 10) @deferred_f_from_coro_f async def test_GET_small_body_x10( self, server_port: int, client: H2ClientProtocol ) -> None: await self._check_GET_x10( client, Request(self.get_url(server_port, "/get-data-html-small")), Data.HTML_SMALL, 200, ) @deferred_f_from_coro_f async def test_GET_large_body_x10( self, server_port: int, client: H2ClientProtocol ) -> None: await self._check_GET_x10( client, Request(self.get_url(server_port, "/get-data-html-large")), Data.HTML_LARGE, 200, ) @staticmethod async def _check_POST_json( client: H2ClientProtocol, request: Request, expected_request_body: dict[str, str], expected_extra_data: str, expected_status: int, ) -> None: response 
= await make_request(client, request) assert response.status == expected_status content_length_header = response.headers.get("Content-Length") assert content_length_header is not None content_length = int(content_length_header) assert len(response.body) == content_length # Parse the body content_encoding_header = response.headers[b"Content-Encoding"] assert content_encoding_header is not None content_encoding = str(content_encoding_header, "utf-8") body = json.loads(str(response.body, content_encoding)) assert "request-body" in body assert "extra-data" in body assert "request-headers" in body request_body = body["request-body"] assert request_body == expected_request_body extra_data = body["extra-data"] assert extra_data == expected_extra_data # Check if headers were sent successfully request_headers = body["request-headers"] for k, v in request.headers.items(): k_str = str(k, "utf-8") assert k_str in request_headers assert request_headers[k_str] == str(v[0], "utf-8") @deferred_f_from_coro_f async def test_POST_small_json( self, server_port: int, client: H2ClientProtocol ) -> None: request = JsonRequest( url=self.get_url(server_port, "/post-data-json-small"), method="POST", data=Data.JSON_SMALL, ) await self._check_POST_json( client, request, Data.JSON_SMALL, Data.EXTRA_SMALL, 200 ) @deferred_f_from_coro_f async def test_POST_large_json( self, server_port: int, client: H2ClientProtocol ) -> None: request = JsonRequest( url=self.get_url(server_port, "/post-data-json-large"), method="POST", data=Data.JSON_LARGE, ) await self._check_POST_json( client, request, Data.JSON_LARGE, Data.EXTRA_LARGE, 200 ) async def _check_POST_json_x10(self, *args, **kwargs): async def get_coro() -> None: await self._check_POST_json(*args, **kwargs) await self._check_repeat(get_coro, 10) @deferred_f_from_coro_f async def test_POST_small_json_x10( self, server_port: int, client: H2ClientProtocol ) -> None: request = JsonRequest( url=self.get_url(server_port, "/post-data-json-small"), 
method="POST", data=Data.JSON_SMALL, ) await self._check_POST_json_x10( client, request, Data.JSON_SMALL, Data.EXTRA_SMALL, 200 ) @deferred_f_from_coro_f async def test_POST_large_json_x10( self, server_port: int, client: H2ClientProtocol ) -> None: request = JsonRequest( url=self.get_url(server_port, "/post-data-json-large"), method="POST", data=Data.JSON_LARGE, ) await self._check_POST_json_x10( client, request, Data.JSON_LARGE, Data.EXTRA_LARGE, 200 ) @inlineCallbacks def test_invalid_negotiated_protocol( self, server_port: int, client: H2ClientProtocol ) -> Generator[Deferred[Any], Any, None]: with mock.patch( "scrapy.core.http2.protocol.PROTOCOL_NAME", return_value=b"not-h2" ): request = Request(url=self.get_url(server_port, "/status?n=200")) with pytest.raises(ResponseFailed): yield make_request_dfd(client, request) @inlineCallbacks def test_cancel_request( self, server_port: int, client: H2ClientProtocol ) -> Generator[Deferred[Any], Any, None]: request = Request(url=self.get_url(server_port, "/get-data-html-large")) d = make_request_dfd(client, request) d.cancel() response = cast("Response", (yield d)) assert response.status == 499 @deferred_f_from_coro_f async def test_download_maxsize_exceeded( self, caplog: pytest.LogCaptureFixture, server_port: int, client: H2ClientProtocol, ) -> None: request = Request( url=self.get_url(server_port, "/get-data-html-large"), meta={"download_maxsize": 1000}, ) with pytest.raises( DownloadCancelledError, match=r"Expected to receive \d+ bytes which is larger than download max size \(1000\)", ): await make_request(client, request) @inlineCallbacks def test_received_dataloss_response( self, server_port: int, client: H2ClientProtocol ) -> Generator[Deferred[Any], Any, None]: """In case when value of Header Content-Length != len(Received Data) ProtocolError is raised""" from h2.exceptions import InvalidBodyLengthError # noqa: PLC0415 request = Request(url=self.get_url(server_port, "/dataloss")) with 
pytest.raises(ResponseFailed) as exc_info: yield make_request_dfd(client, request) assert len(exc_info.value.reasons) > 0 assert any( isinstance(error, InvalidBodyLengthError) for error in exc_info.value.reasons ) @deferred_f_from_coro_f async def test_missing_content_length_header( self, server_port: int, client: H2ClientProtocol ) -> None: request = Request(url=self.get_url(server_port, "/no-content-length-header")) response = await make_request(client, request) assert response.status == 200 assert response.body == Data.NO_CONTENT_LENGTH assert "Content-Length" not in response.headers async def _check_log_warnsize( self, client: H2ClientProtocol, request: Request, warn_pattern: re.Pattern[str], expected_body: bytes, caplog: pytest.LogCaptureFixture, ) -> None: with caplog.at_level("WARNING", "scrapy.core.http2.stream"): response = await make_request(client, request) assert response.status == 200 assert response.body == expected_body # Check the warning is raised only once for this request assert len(re.findall(warn_pattern, caplog.text)) == 1 @deferred_f_from_coro_f async def test_log_expected_warnsize( self, server_port: int, client: H2ClientProtocol, caplog: pytest.LogCaptureFixture, ) -> None: request = Request( url=self.get_url(server_port, "/get-data-html-large"), meta={"download_warnsize": 1000}, ) warn_pattern = re.compile( rf"Expected to receive \d+ bytes which is larger than " rf"download warn size \(1000\) in request {request}" ) await self._check_log_warnsize( client, request, warn_pattern, Data.HTML_LARGE, caplog ) @deferred_f_from_coro_f async def test_log_received_warnsize( self, server_port: int, client: H2ClientProtocol, caplog: pytest.LogCaptureFixture, ) -> None: request = Request( url=self.get_url(server_port, "/no-content-length-header"), meta={"download_warnsize": 10}, ) warn_pattern = re.compile( rf"Received \d+ bytes which is larger than " rf"download warn size \(10\) in request {request}" ) await self._check_log_warnsize( client, request, 
warn_pattern, Data.NO_CONTENT_LENGTH, caplog ) @deferred_f_from_coro_f async def test_max_concurrent_streams( self, server_port: int, client: H2ClientProtocol ) -> None: """Send 500 requests at one to check if we can handle very large number of request. """ async def get_coro() -> None: await self._check_GET( client, Request(self.get_url(server_port, "/get-data-html-small")), Data.HTML_SMALL, 200, ) await self._check_repeat(get_coro, 500) @inlineCallbacks def test_inactive_stream( self, server_port: int, client: H2ClientProtocol ) -> Generator[Deferred[Any], Any, None]: """Here we send 110 requests considering the MAX_CONCURRENT_STREAMS by default is 100. After sending the first 100 requests we close the connection.""" d_list = [] def assert_inactive_stream(failure): assert failure.check(ResponseFailed) is not None from scrapy.core.http2.stream import InactiveStreamClosed # noqa: PLC0415 assert any( isinstance(e, InactiveStreamClosed) for e in failure.value.reasons ) # Send 100 request (we do not check the result) for _ in range(100): d = make_request_dfd( client, Request(self.get_url(server_port, "/get-data-html-small")) ) d.addBoth(lambda _: None) d_list.append(d) # Now send 10 extra request and save the response deferred in a list for _ in range(10): d = make_request_dfd( client, Request(self.get_url(server_port, "/get-data-html-small")) ) d.addCallback(lambda _: pytest.fail("This request should have failed")) d.addErrback(assert_inactive_stream) d_list.append(d) # Close the connection now to fire all the extra 10 requests errback # with InactiveStreamClosed assert client.transport client.transport.loseConnection() yield DeferredList(d_list, consumeErrors=True, fireOnOneErrback=True) @deferred_f_from_coro_f async def test_invalid_request_type(self, client: H2ClientProtocol): with pytest.raises(TypeError): await make_request(client, "https://InvalidDataTypePassed.com") # type: ignore[arg-type] @deferred_f_from_coro_f async def test_query_parameters( self, 
server_port: int, client: H2ClientProtocol ) -> None: params = { "a": generate_random_string(20), "b": generate_random_string(20), "c": generate_random_string(20), "d": generate_random_string(20), } request = Request( self.get_url(server_port, f"/query-params?{urlencode(params)}") ) response = await make_request(client, request) content_encoding_header = response.headers[b"Content-Encoding"] assert content_encoding_header is not None content_encoding = str(content_encoding_header, "utf-8") data = json.loads(str(response.body, content_encoding)) assert data == params @deferred_f_from_coro_f async def test_status_codes( self, server_port: int, client: H2ClientProtocol ) -> None: for status in [200, 404]: request = Request(self.get_url(server_port, f"/status?n={status}")) response = await make_request(client, request) assert response.status == status @deferred_f_from_coro_f async def test_response_has_correct_certificate_ip_address( self, server_port: int, client: H2ClientProtocol, client_certificate: PrivateCertificate, ) -> None: request = Request(self.get_url(server_port, "/status?n=200")) response = await make_request(client, request) assert isinstance(response.certificate, Certificate) assert response.certificate.original is not None assert response.certificate.getIssuer() == client_certificate.getIssuer() assert response.certificate.getPublicKey().matches( client_certificate.getPublicKey() ) assert isinstance(response.ip_address, IPv4Address) assert str(response.ip_address) == "127.0.0.1" @staticmethod async def _check_invalid_netloc(client: H2ClientProtocol, url: str) -> None: from scrapy.core.http2.stream import InvalidHostname # noqa: PLC0415 request = Request(url) with pytest.raises(InvalidHostname) as exc_info: await make_request(client, request) error_msg = str(exc_info.value) assert "localhost" in error_msg assert "127.0.0.1" in error_msg assert str(request) in error_msg @deferred_f_from_coro_f async def test_invalid_hostname(self, client: 
H2ClientProtocol) -> None: await self._check_invalid_netloc( client, "https://notlocalhost.notlocalhostdomain" ) @deferred_f_from_coro_f async def test_invalid_host_port( self, server_port: int, client: H2ClientProtocol ) -> None: port = server_port + 1 await self._check_invalid_netloc(client, f"https://127.0.0.1:{port}") @deferred_f_from_coro_f async def test_connection_stays_with_invalid_requests( self, server_port: int, client: H2ClientProtocol ): await maybe_deferred_to_future(self.test_invalid_hostname(client)) await maybe_deferred_to_future(self.test_invalid_host_port(server_port, client)) await maybe_deferred_to_future(self.test_GET_small_body(server_port, client)) await maybe_deferred_to_future(self.test_POST_small_json(server_port, client)) @inlineCallbacks def test_connection_timeout( self, server_port: int, client: H2ClientProtocol ) -> Generator[Deferred[Any], Any, None]: request = Request(self.get_url(server_port, "/timeout")) # Update the timer to 1s to test connection timeout client.setTimeout(1) with pytest.raises(ResponseFailed) as exc_info: yield make_request_dfd(client, request) for err in exc_info.value.reasons: from scrapy.core.http2.protocol import H2ClientProtocol # noqa: PLC0415 if isinstance(err, DownloadTimeoutError): assert ( f"Connection was IDLE for more than {H2ClientProtocol.IDLE_TIMEOUT}s" in str(err) ) break else: pytest.fail("No TimeoutError raised.") @deferred_f_from_coro_f async def test_request_headers_received( self, server_port: int, client: H2ClientProtocol ) -> None: request = Request( self.get_url(server_port, "/request-headers"), headers={"header-1": "header value 1", "header-2": "header value 2"}, ) response = await make_request(client, request) assert response.status == 200 response_headers = json.loads(str(response.body, "utf-8")) assert isinstance(response_headers, dict) for k, v in request.headers.items(): k_decoded, v_decoded = str(k, "utf-8"), str(v[0], "utf-8") assert k_decoded in response_headers assert v_decoded 
== response_headers[k_decoded]
python
github
https://github.com/scrapy/scrapy
tests/test_http2_client_protocol.py
#!/bin/sh test_description='detect unwritable repository and fail correctly' . ./test-lib.sh test_expect_success setup ' >file && git add file && test_tick && git commit -m initial && echo >file && git add file ' test_expect_success POSIXPERM,SANITY 'write-tree should notice unwritable repository' ' test_when_finished "chmod 775 .git/objects .git/objects/??" && chmod a-w .git/objects .git/objects/?? && test_must_fail git write-tree 2>out.write-tree ' test_lazy_prereq WRITE_TREE_OUT 'test -e "$TRASH_DIRECTORY"/out.write-tree' test_expect_success WRITE_TREE_OUT 'write-tree output on unwritable repository' ' cat >expect <<-\EOF && error: insufficient permission for adding an object to repository database .git/objects fatal: git-write-tree: error building trees EOF test_cmp expect out.write-tree ' test_expect_success POSIXPERM,SANITY 'commit should notice unwritable repository' ' test_when_finished "chmod 775 .git/objects .git/objects/??" && chmod a-w .git/objects .git/objects/?? && test_must_fail git commit -m second 2>out.commit ' test_lazy_prereq COMMIT_OUT 'test -e "$TRASH_DIRECTORY"/out.commit' test_expect_success COMMIT_OUT 'commit output on unwritable repository' ' cat >expect <<-\EOF && error: insufficient permission for adding an object to repository database .git/objects error: Error building trees EOF test_cmp expect out.commit ' test_expect_success POSIXPERM,SANITY 'update-index should notice unwritable repository' ' test_when_finished "chmod 775 .git/objects .git/objects/??" && echo 6O >file && chmod a-w .git/objects .git/objects/?? 
&& test_must_fail git update-index file 2>out.update-index ' test_lazy_prereq UPDATE_INDEX_OUT 'test -e "$TRASH_DIRECTORY"/out.update-index' test_expect_success UPDATE_INDEX_OUT 'update-index output on unwritable repository' ' cat >expect <<-\EOF && error: insufficient permission for adding an object to repository database .git/objects error: file: failed to insert into database fatal: Unable to process path file EOF test_cmp expect out.update-index ' test_expect_success POSIXPERM,SANITY 'add should notice unwritable repository' ' test_when_finished "chmod 775 .git/objects .git/objects/??" && echo b >file && chmod a-w .git/objects .git/objects/?? && test_must_fail git add file 2>out.add ' test_lazy_prereq ADD_OUT 'test -e "$TRASH_DIRECTORY"/out.add' test_expect_success ADD_OUT 'add output on unwritable repository' ' cat >expect <<-\EOF && error: insufficient permission for adding an object to repository database .git/objects error: file: failed to insert into database error: unable to index file '\''file'\'' fatal: updating files failed EOF test_cmp expect out.add ' test_done
unknown
github
https://github.com/git/git
t/t0004-unwritable.sh
# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. # # This is used for generating API documentation and the types used by the # client/server. See api/README.md for more information. # # Some style notes: # - This file is used by ReDoc, which allows GitHub Flavored Markdown in # descriptions. # - There is no maximum line length, for ease of editing and pretty diffs. # - operationIds are in the format "NounVerb", with a singular noun. swagger: "2.0" schemes: - "http" - "https" produces: - "application/json" - "text/plain" consumes: - "application/json" - "text/plain" basePath: "/v1.48" info: title: "Docker Engine API" version: "1.48" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { "message": "page not found" } ``` # Versioning The API is usually changed in each release, so API calls are versioned to ensure that clients don't break. To lock to a specific version of the API, you prefix the URL with its version, for example, call `/v1.30/info` to use the v1.30 version of the `/info` endpoint. If the API version specified in the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. If you omit the version-prefix, the current version of the API (v1.48) is used. For example, calling `/info` is the same as calling `/v1.48/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. 
Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer daemons. # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) (JSON) string with the following structure: ``` { "username": "string", "password": "string", "serveraddress": "string" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { "identitytoken": "9cbaf023786cd7..." } ``` # The tags on paths define the menu sections in the ReDoc documentation, so # the usage of tags must make sense for that: # - They should be singular, not plural. # - There should not be too many tags, or the menu becomes unwieldy. For # example, it is preferable to add a path to the "System" tag instead of # creating a tag with a single path in it. # - The order of tags in this list defines the order in the menu. tags: # Primary objects - name: "Container" x-displayName: "Containers" description: | Create and manage containers. - name: "Image" x-displayName: "Images" - name: "Network" x-displayName: "Networks" description: | Networks are user-defined networks that containers can be attached to. 
See the [networking documentation](https://docs.docker.com/network/) for more information. - name: "Volume" x-displayName: "Volumes" description: | Create and manage persistent storage that can be attached to containers. - name: "Exec" x-displayName: "Exec" description: | Run new commands inside running containers. Refer to the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | Engines can be clustered together in a swarm. Refer to the [swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. - name: "Node" x-displayName: "Nodes" description: | Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. 
# System things - name: "Plugin" x-displayName: "Plugins" - name: "System" x-displayName: "System" definitions: ImageHistoryResponseItem: type: "object" x-go-name: HistoryResponseItem title: "HistoryResponseItem" description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: type: "string" x-nullable: false Created: type: "integer" format: "int64" x-nullable: false CreatedBy: type: "string" x-nullable: false Tags: type: "array" items: type: "string" Size: type: "integer" format: "int64" x-nullable: false Comment: type: "string" x-nullable: false Port: type: "object" description: "An open port on a container" required: [PrivatePort, Type] properties: IP: type: "string" format: "ip-address" description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" x-nullable: false description: "Port on the container" PublicPort: type: "integer" format: "uint16" description: "Port exposed on the host" Type: type: "string" x-nullable: false enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 Type: "tcp" MountType: description: |- The mount type. Available types: - `bind` a mount of a file or directory from the host into the container. - `cluster` a Swarm cluster volume. - `image` an OCI image. - `npipe` a named pipe from the host into the container. - `tmpfs` a `tmpfs`. - `volume` a docker volume with the given `Name`. type: "string" enum: - "bind" - "cluster" - "image" - "npipe" - "tmpfs" - "volume" example: "volume" MountPoint: type: "object" description: | MountPoint represents a mount point configuration inside the container. This is used for reporting the mountpoints in use by a container. properties: Type: description: | The mount type: - `bind` a mount of a file or directory from the host into the container. - `cluster` a Swarm cluster volume. - `image` an OCI image. 
- `npipe` a named pipe from the host into the container. - `tmpfs` a `tmpfs`. - `volume` a docker volume with the given `Name`. allOf: - $ref: "#/definitions/MountType" example: "volume" Name: description: | Name is the name reference to the underlying data defined by `Source` e.g., the volume name. type: "string" example: "myvolume" Source: description: | Source location of the mount. For volumes, this contains the storage location of the volume (within `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains the source (host) part of the bind-mount. For `tmpfs` mount points, this field is empty. type: "string" example: "/var/lib/docker/volumes/myvolume/_data" Destination: description: | Destination is the path relative to the container root (`/`) where the `Source` is mounted inside the container. type: "string" example: "/usr/share/nginx/html/" Driver: description: | Driver is the volume driver used to create the volume (if it is a volume). type: "string" example: "local" Mode: description: | Mode is a comma separated list of options supplied by the user when creating the bind/volume mount. The default is platform-specific (`"z"` on Linux, empty on Windows). type: "string" example: "z" RW: description: | Whether the mount is mounted writable (read-write). type: "boolean" example: true Propagation: description: | Propagation describes how mounts are propagated from the host into the mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) for details. This field is not used on Windows. 
type: "string" example: "" DeviceMapping: type: "object" description: "A device mapping between the host and container" properties: PathOnHost: type: "string" PathInContainer: type: "string" CgroupPermissions: type: "string" example: PathOnHost: "/dev/deviceName" PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" DeviceRequest: type: "object" description: "A request for devices to be sent to device drivers" properties: Driver: type: "string" example: "nvidia" Count: type: "integer" example: -1 DeviceIDs: type: "array" items: type: "string" example: - "0" - "1" - "GPU-fef8089b-4820-abfc-e83e-94318197576e" Capabilities: description: | A list of capabilities; an OR list of AND lists of capabilities. type: "array" items: type: "array" items: type: "string" example: # gpu AND nvidia AND compute - ["gpu", "nvidia", "compute"] Options: description: | Driver-specific options, specified as a key/value pairs. These options are passed directly to the driver. type: "object" additionalProperties: type: "string" ThrottleDevice: type: "object" properties: Path: description: "Device path" type: "string" Rate: description: "Rate" type: "integer" format: "int64" minimum: 0 Mount: type: "object" properties: Target: description: "Container path." type: "string" Source: description: |- Mount source (e.g. a volume name, a host path). The source cannot be specified when using `Type=tmpfs`. For `Type=bind`, the source path must either exist, or the `CreateMountpoint` must be set to `true` to create the source path on the host if missing. For `Type=npipe`, the pipe must exist prior to creating the container. type: "string" Type: description: | The mount type. Available types: - `bind` Mounts a file or directory from the host into the container. The `Source` must exist prior to creating the container. - `cluster` a Swarm cluster volume - `image` Mounts an image. - `npipe` Mounts a named pipe from the host into the container. The `Source` must exist prior to creating the container. 
- `tmpfs` Create a tmpfs with the given options. The mount `Source` cannot be specified for tmpfs. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. allOf: - $ref: "#/definitions/MountType" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" Consistency: description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." type: "string" BindOptions: description: "Optional configuration for the `bind` type." type: "object" properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." type: "string" enum: - "private" - "rprivate" - "shared" - "rshared" - "slave" - "rslave" NonRecursive: description: "Disable recursive bind mount." type: "boolean" default: false CreateMountpoint: description: "Create mount point on host if missing" type: "boolean" default: false ReadOnlyNonRecursive: description: | Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to `true` in conjunction). Added in v1.44, before that version all read-only mounts were non-recursive by default. To match the previous behaviour this will default to `true` for clients on versions prior to v1.44. type: "boolean" default: false ReadOnlyForceRecursive: description: "Raise an error if the mount cannot be made recursively read-only." type: "boolean" default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" properties: NoCopy: description: "Populate volume with data from the target." type: "boolean" default: false Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" DriverConfig: description: "Map of driver specific options" type: "object" properties: Name: description: "Name of the driver to use to create the volume." type: "string" Options: description: "key/value map of driver specific options." type: "object" additionalProperties: type: "string" Subpath: description: "Source path inside the volume. Must be relative without any back traversals." type: "string" example: "dir-inside-volume/subdirectory" ImageOptions: description: "Optional configuration for the `image` type." type: "object" properties: Subpath: description: "Source path inside the image. Must be relative without any back traversals." type: "string" example: "dir-inside-image/subdirectory" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" properties: SizeBytes: description: "The size for the tmpfs mount in bytes." type: "integer" format: "int64" Mode: description: | The permission mode for the tmpfs mount in an integer. The value must not be in octal format (e.g. 755) but rather the decimal representation of the octal value (e.g. 493). type: "integer" Options: description: | The options to be passed to the tmpfs mount. An array of arrays. Flag options should be provided as 1-length arrays. Other types should be provided as as 2-length arrays, where the first item is the key and the second the value. type: "array" items: type: "array" minItems: 1 maxItems: 2 items: type: "string" example: [["noexec"]] RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. 
type: "object" properties: Name: type: "string" description: | - Empty string means not to restart - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" - "no" - "always" - "unless-stopped" - "on-failure" MaximumRetryCount: type: "integer" description: | If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" type: "object" properties: # Applicable to all platforms CpuShares: description: | An integer value representing this container's relative CPU weight versus other containers. type: "integer" Memory: description: "Memory limit in bytes." type: "integer" format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: description: | Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." 
type: "integer" minimum: 0 maximum: 1000 BlkioWeightDevice: description: | Block IO weight (relative device weight) in the form: ``` [{"Path": "device_path", "Weight": weight}] ``` type: "array" items: type: "object" properties: Path: type: "string" Weight: type: "integer" minimum: 0 BlkioDeviceReadBps: description: | Limit read rate (bytes per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | Limit write rate (bytes per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | Limit read rate (IO per second) from a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | Limit write rate (IO per second) to a device, in the form: ``` [{"Path": "device_path", "Rate": rate}] ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" CpuPeriod: description: "The length of a CPU period in microseconds." type: "integer" format: "int64" CpuQuota: description: | Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: description: | The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: description: | The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: description: | CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: description: | Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." 
type: "array" items: $ref: "#/definitions/DeviceMapping" DeviceCgroupRules: description: "a list of cgroup rules to apply to the container" type: "array" items: type: "string" example: "c 13:* rwm" DeviceRequests: description: | A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" KernelMemoryTCP: description: | Hard limit for kernel TCP buffer memory (in bytes). Depending on the OCI runtime in use, this option may be ignored. It is no longer supported by the default (runc) runtime. This field is omitted when empty. type: "integer" format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" format: "int64" MemorySwap: description: | Total memory limit (memory + swap). Set as `-1` to enable unlimited swap. type: "integer" format: "int64" MemorySwappiness: description: | Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. type: "integer" format: "int64" minimum: 0 maximum: 100 NanoCpus: description: "CPU quota in units of 10<sup>-9</sup> CPUs." type: "integer" format: "int64" OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | A list of resource limits to set in the container. 
For example: ``` {"Name": "nofile", "Soft": 1024, "Hard": 2048} ``` type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" # Applicable to Windows CpuCount: description: | The number of usable CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" IOMaximumIOps: description: "Maximum IOps for the container system drive (Windows only)" type: "integer" format: "int64" IOMaximumBandwidth: description: | Maximum IO in bytes per second for the container system drive (Windows only). type: "integer" format: "int64" Limit: description: | An object describing a limit on resources which can be requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 Pids: description: | Limits the maximum number of PIDs in the container. Set `0` for unlimited. type: "integer" format: "int64" default: 0 example: 100 ResourceObject: description: | An object describing the resources which can be advertised by a node and requested by a task. type: "object" properties: NanoCPUs: type: "integer" format: "int64" example: 4000000000 MemoryBytes: type: "integer" format: "int64" example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: description: | User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`). 
type: "array" items: type: "object" properties: NamedResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "string" DiscreteResourceSpec: type: "object" properties: Kind: type: "string" Value: type: "integer" format: "int64" example: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" HealthConfig: description: | A test to perform to check that the container is healthy. Healthcheck commands should be side-effect free. type: "object" properties: Test: description: | The test to perform. Possible values are: - `[]` inherit healthcheck from image or parent image - `["NONE"]` disable healthcheck - `["CMD", args...]` exec arguments directly - `["CMD-SHELL", command]` run command with system's default shell A non-zero exit code indicates a failed healthcheck: - `0` healthy - `1` unhealthy - `2` reserved (treated as unhealthy) - other values: error running probe type: "array" items: type: "string" Interval: description: | The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" format: "int64" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. If the health check command does not complete within this timeout, the check is considered failed and the health check process is forcibly terminated without a graceful shutdown. type: "integer" format: "int64" Retries: description: | The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit. type: "integer" StartPeriod: description: | Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" format: "int64" StartInterval: description: | The time to wait between checks in nanoseconds during the start period. 
It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" format: "int64" Health: description: | Health stores information about the container's healthcheck results. type: "object" x-nullable: true properties: Status: description: | Status is one of `none`, `starting`, `healthy` or `unhealthy` - "none" Indicates there is no healthcheck - "starting" Starting indicates that the container is not yet ready - "healthy" Healthy indicates that the container is running correctly - "unhealthy" Unhealthy indicates that the container has a problem type: "string" enum: - "none" - "starting" - "healthy" - "unhealthy" example: "healthy" FailingStreak: description: "FailingStreak is the number of consecutive failures" type: "integer" example: 0 Log: type: "array" description: | Log contains the last few results (oldest first) items: $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" x-nullable: true properties: Start: description: | Date and time at which this check started in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "date-time" example: "2020-01-04T10:44:24.496525531Z" End: description: | Date and time at which this check ended in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2020-01-04T10:45:21.364524523Z" ExitCode: description: | ExitCode meanings: - `0` healthy - `1` unhealthy - `2` reserved (considered unhealthy) - other values: error running probe type: "integer" example: 0 Output: description: "Output from last check" type: "string" HostConfig: description: "Container configuration that depends on the host we are running on" allOf: - $ref: "#/definitions/Resources" - type: "object" properties: # Applicable to all platforms Binds: type: "array" description: | A list of volume bindings for this container. 
Each volume binding is a string in one of these forms: - `host-src:container-dest[:options]` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest[:options]` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. `options` is an optional, comma-delimited list of: - `nocopy` disables automatic copying of data from the container path to the volume. The `nocopy` flag only applies to named volumes. - `[ro|rw]` mounts a volume read-only or read-write, respectively. If omitted or set to `rw`, volumes are mounted read-write. - `[z|Z]` applies SELinux labels to allow or deny multiple containers to read and write to the same volume. - `z`: a _shared_ content label is applied to the content. This label indicates that multiple containers can share the volume content, for both reading and writing. - `Z`: a _private unshared_ label is applied to the content. This label indicates that only the current container can use a private volume. Labeling systems such as SELinux require proper labels to be placed on volume content that is mounted into a container. Without a label, the security system can prevent a container's processes from using the content. By default, the labels set by the host operating system are not modified. - `[[r]shared|[r]slave|[r]private]` specifies mount [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). This only applies to bind-mounted volumes, not internal volumes or named volumes. Mount propagation requires the source mount point (the location where the source directory is mounted in the host operating system) to have the correct propagation properties. For shared volumes, the source mount point must be set to `shared`. For slave volumes, the mount must be set to either `shared` or `slave`. 
items: type: "string" ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" example: "" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: description: |- Name of the logging driver used for the container or "none" if logging is disabled. type: "string" enum: - "local" - "json-file" - "syslog" - "journald" - "gelf" - "fluentd" - "awslogs" - "splunk" - "etwlogs" - "none" Config: description: |- Driver-specific configuration options for the logging driver. type: "object" additionalProperties: type: "string" example: "max-file": "5" "max-size": "10m" NetworkMode: type: "string" description: | Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" description: | Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" description: | A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`. items: type: "string" Mounts: description: | Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" ConsoleSize: type: "array" description: | Initial console size, as an `[height, width]` array. x-nullable: true minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 example: [80, 64] Annotations: type: "object" description: | Arbitrary non-identifying metadata attached to container and provided to the runtime when the container is started. 
additionalProperties: type: "string" # Applicable to UNIX platforms CapAdd: type: "array" description: | A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'. items: type: "string" CapDrop: type: "array" description: | A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'. items: type: "string" CgroupnsMode: type: "string" enum: - "private" - "host" description: | cgroup namespace mode for the container. Possible values are: - `"private"`: the container runs in its own private cgroup namespace - `"host"`: use the host system's cgroup namespace If not specified, the daemon default is used, which can either be `"private"` or `"host"`, depending on daemon version, kernel support and configuration. Dns: type: "array" description: "A list of DNS servers for the container to use." items: type: "string" DnsOptions: type: "array" description: "A list of DNS options." items: type: "string" DnsSearch: type: "array" description: "A list of DNS search domains." items: type: "string" ExtraHosts: type: "array" description: | A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | IPC sharing mode for the container. Possible values are: - `"none"`: own private IPC namespace, with /dev/shm not mounted - `"private"`: own private IPC namespace - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - `"container:<name|id>"`: join another (shareable) container's IPC namespace - `"host"`: use the host system's IPC namespace If not specified, daemon default is used, which can either be `"private"` or `"shareable"`, depending on daemon version and configuration. 
Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" description: | A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | Set the PID (Process) Namespace mode for the container. It can be either: - `"container:<name|id>"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" description: |- Gives the container full access to the host. PublishAllPorts: type: "boolean" description: | Allocates an ephemeral host port for all of a container's exposed ports. Ports are de-allocated when the container stops and allocated when the container starts. The allocated port might be changed when restarting the container. The port is selected from the ephemeral port range that depends on the kernel. For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" description: | A list of string values to customize labels for MLS systems, such as SELinux. items: type: "string" StorageOpt: type: "object" description: | Storage driver options for this container, in the form `{"size": "120G"}`. additionalProperties: type: "string" Tmpfs: type: "object" description: | A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: ``` { "/run": "rw,noexec,nosuid,size=65536k" } ``` additionalProperties: type: "string" UTSMode: type: "string" description: "UTS namespace to use for the container." UsernsMode: type: "string" description: | Sets the usernamespace mode for the container when usernamespace remapping option is enabled. 
ShmSize: type: "integer" format: "int64" description: | Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" x-nullable: true description: |- A list of kernel parameters (sysctls) to set in the container. This field is omitted if not set. additionalProperties: type: "string" example: "net.ipv4.ip_forward": "1" Runtime: type: "string" x-nullable: true description: |- Runtime to use with this container. # Applicable to Windows Isolation: type: "string" description: | Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" - "" MaskedPaths: type: "array" description: | The list of paths to be masked inside the container (this overrides the default set of paths). items: type: "string" example: - "/proc/asound" - "/proc/acpi" - "/proc/kcore" - "/proc/keys" - "/proc/latency_stats" - "/proc/timer_list" - "/proc/timer_stats" - "/proc/sched_debug" - "/proc/scsi" - "/sys/firmware" - "/sys/devices/virtual/powercap" ReadonlyPaths: type: "array" description: | The list of paths to be set as read-only inside the container (this overrides the default set of paths). items: type: "string" example: - "/proc/bus" - "/proc/fs" - "/proc/irq" - "/proc/sys" - "/proc/sysrq-trigger" ContainerConfig: description: | Configuration for a container that is portable between hosts. type: "object" properties: Hostname: description: | The hostname to use for the container, as a valid RFC 1123 hostname. type: "string" example: "439f4e91bd1d" Domainname: description: | The domain name to use for the container. type: "string" User: description: |- Commands run as this user inside the container. If omitted, commands run as the user specified in the image the container was started from. Can be either user-name or UID, and optional group-name or GID, separated by a colon (`<user-name|UID>[<:group-name|GID>]`). type: "string" example: "123:456" AttachStdin: description: "Whether to attach to `stdin`." 
type: "boolean" default: false AttachStdout: description: "Whether to attach to `stdout`." type: "boolean" default: true AttachStderr: description: "Whether to attach to `stderr`." type: "boolean" default: true ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" x-nullable: true additionalProperties: type: "object" enum: - {} default: {} example: { "80/tcp": {}, "443/tcp": {} } Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. type: "boolean" default: false OpenStdin: description: "Open `stdin`" type: "boolean" default: false StdinOnce: description: "Close `stdin` after one attached client disconnects" type: "boolean" default: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" example: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" example: ["/bin/sh"] Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" default: false example: false x-nullable: true Image: description: | The name (or reference) of the image to use when creating the container, or which was used when the container was created. type: "string" example: "example-image:1.0" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" example: "/public/" Entrypoint: description: | The entry point for the container as a string or an array of strings. 
If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" example: [] NetworkDisabled: description: "Disable networking for the container." type: "boolean" x-nullable: true MacAddress: description: | MAC address of the container. Deprecated: this field is deprecated in API v1.44 and up. Use EndpointSettings.MacAddress instead. type: "string" x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" x-nullable: true items: type: "string" example: [] Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" example: "SIGTERM" x-nullable: true StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" x-nullable: true items: type: "string" example: ["/bin/sh", "-c"] ImageConfig: description: | Configuration of the image. These fields are used as defaults when starting a container from the image. type: "object" properties: Hostname: description: | The hostname to use for the container, as a valid RFC 1123 hostname. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always empty. It must not be used, and will be removed in API v1.50. type: "string" example: "" Domainname: description: | The domain name to use for the container. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always empty. It must not be used, and will be removed in API v1.50. 
type: "string" example: "" User: description: "The user that commands are run as inside the container." type: "string" example: "web:web" AttachStdin: description: | Whether to attach to `stdin`. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false AttachStdout: description: | Whether to attach to `stdout`. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false AttachStderr: description: | Whether to attach to `stderr`. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false ExposedPorts: description: | An object mapping ports to an empty object in the form: `{"<port>/<tcp|udp|sctp>": {}}` type: "object" x-nullable: true additionalProperties: type: "object" enum: - {} default: {} example: { "80/tcp": {}, "443/tcp": {} } Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false OpenStdin: description: | Open `stdin` <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always false. It must not be used, and will be removed in API v1.50. type: "boolean" default: false example: false StdinOnce: description: | Close `stdin` after one attached client disconnects. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always false. It must not be used, and will be removed in API v1.50. 
type: "boolean" default: false example: false Env: description: | A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value. type: "array" items: type: "string" example: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" example: ["/bin/sh"] Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" default: false example: false x-nullable: true Image: description: | The name (or reference) of the image to use when creating the container, or which was used when the container was created. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always empty. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" Volumes: description: | An object mapping mount point paths inside the container to empty objects. type: "object" additionalProperties: type: "object" enum: - {} default: {} example: "/app/data": {} "/app/config": {} WorkingDir: description: "The working directory for commands to run in." type: "string" example: "/public/" Entrypoint: description: | The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). type: "array" items: type: "string" example: [] NetworkDisabled: description: | Disable networking for the container. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always omitted. It must not be used, and will be removed in API v1.50. 
type: "boolean" default: false example: false x-nullable: true MacAddress: description: | MAC address of the container. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always omitted. It must not be used, and will be removed in API v1.50. type: "string" default: "" example: "" x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" x-nullable: true items: type: "string" example: [] Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" example: "SIGTERM" x-nullable: true StopTimeout: description: | Timeout to stop a container in seconds. <p><br /></p> > **Deprecated**: this field is not part of the image specification and is > always omitted. It must not be used, and will be removed in API v1.50. type: "integer" default: 10 x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" x-nullable: true items: type: "string" example: ["/bin/sh", "-c"] # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. 
example: "Hostname": "" "Domainname": "" "User": "web:web" "AttachStdin": false "AttachStdout": false "AttachStderr": false "ExposedPorts": { "80/tcp": {}, "443/tcp": {} } "Tty": false "OpenStdin": false "StdinOnce": false "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] "Cmd": ["/bin/sh"] "Healthcheck": { "Test": ["string"], "Interval": 0, "Timeout": 0, "Retries": 0, "StartPeriod": 0, "StartInterval": 0 } "ArgsEscaped": true "Image": "" "Volumes": { "/app/data": {}, "/app/config": {} } "WorkingDir": "/public/" "Entrypoint": [] "OnBuild": [] "Labels": { "com.example.some-label": "some-value", "com.example.some-other-label": "some-other-value" } "StopSignal": "SIGTERM" "Shell": ["/bin/sh", "-c"] NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for each of its interfaces. It is used for the networking configs specified in the `docker create` and `docker network connect` commands. type: "object" properties: EndpointsConfig: description: | A mapping of network name to endpoint configuration for that network. The endpoint configuration can be left empty to connect to that network with no particular endpoint configuration. type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" example: # putting an example here, instead of using the example values from # /definitions/EndpointSettings, because EndpointSettings contains # operational data returned when inspecting a container that we don't # accept here. 
EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" MacAddress: "02:42:ac:12:05:02" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" database_nw: {} NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: description: | Name of the default bridge interface when dockerd's --bridge flag is set. type: "string" example: "docker0" SandboxID: description: SandboxID uniquely represents a container's network stack. type: "string" example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" HairpinMode: description: | Indicates if hairpin NAT should be enabled on the virtual interface. Deprecated: This field is never set and will be removed in a future release. type: "boolean" example: false LinkLocalIPv6Address: description: | IPv6 unicast address using the link-local prefix. Deprecated: This field is never set and will be removed in a future release. type: "string" example: "" LinkLocalIPv6PrefixLen: description: | Prefix length of the IPv6 unicast address. Deprecated: This field is never set and will be removed in a future release. type: "integer" example: 0 Ports: $ref: "#/definitions/PortMap" SandboxKey: description: SandboxKey is the full path of the netns handle type: "string" example: "/var/run/docker/netns/8ab54b426c38" SecondaryIPAddresses: description: "Deprecated: This field is never set and will be removed in a future release." type: "array" items: $ref: "#/definitions/Address" x-nullable: true SecondaryIPv6Addresses: description: "Deprecated: This field is never set and will be removed in a future release."
type: "array" items: $ref: "#/definitions/Address" x-nullable: true # TODO properties below are part of DefaultNetworkSettings, which is # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 EndpointID: description: | EndpointID uniquely represents a service endpoint in a Sandbox. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.1" GlobalIPv6Address: description: | Global IPv6 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. 
This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 64 IPAddress: description: | IPv4 address for the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address for this network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "2001:db8:2::100" MacAddress: description: | MAC address for the container on the default "bridge" network. <p><br /></p> > **Deprecated**: This field is only propagated when attached to the > default "bridge" network. Use the information from the "bridge" > network inside the `Networks` map instead, which contains the same > information. This field was deprecated in Docker 1.9 and is scheduled > to be removed in Docker 17.12.0 type: "string" example: "02:42:ac:11:00:04" Networks: description: | Information about all networks that the container is connected to. 
type: "object" additionalProperties: $ref: "#/definitions/EndpointSettings" Address: description: Address represents an IPv4 or IPv6 IP address. type: "object" properties: Addr: description: IP address. type: "string" PrefixLen: description: Mask length of the IP address. type: "integer" PortMap: description: | PortMap describes the mapping of container ports to host ports, using the container's port-number and protocol as key in the format `<port>/<protocol>`, for example, `80/udp`. If a container's port is mapped for multiple protocols, separate entries are added to the mapping table. type: "object" additionalProperties: type: "array" x-nullable: true items: $ref: "#/definitions/PortBinding" example: "443/tcp": - HostIp: "127.0.0.1" HostPort: "4443" "80/tcp": - HostIp: "0.0.0.0" HostPort: "80" - HostIp: "0.0.0.0" HostPort: "8080" "80/udp": - HostIp: "0.0.0.0" HostPort: "80" "53/udp": - HostIp: "0.0.0.0" HostPort: "53" "2377/tcp": null PortBinding: description: | PortBinding represents a binding between a host IP address and a host port. type: "object" properties: HostIp: description: "Host IP address that the container's port is mapped to." type: "string" example: "127.0.0.1" HostPort: description: "Host port number that the container's port is mapped to." type: "string" example: "4443" DriverData: description: | Information about the storage driver used to store the container's and image's filesystem. type: "object" required: [Name, Data] properties: Name: description: "Name of the storage driver." type: "string" x-nullable: false example: "overlay2" Data: description: | Low-level storage metadata, provided as key/value pairs. This information is driver-specific, and depends on the storage-driver in use, and should be used for informational purposes only. 
type: "object" x-nullable: false additionalProperties: type: "string" example: { "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" } FilesystemChange: description: | Change in the container's filesystem. type: "object" required: [Path, Kind] properties: Path: description: | Path to file or directory that has changed. type: "string" x-nullable: false Kind: $ref: "#/definitions/ChangeType" ChangeType: description: | Kind of change Can be one of: - `0`: Modified ("C") - `1`: Added ("A") - `2`: Deleted ("D") type: "integer" format: "uint8" enum: [0, 1, 2] x-nullable: false ImageInspect: description: | Information about an image in the local image cache. type: "object" properties: Id: description: | ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. type: "string" x-nullable: false example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" Descriptor: description: | Descriptor is an OCI descriptor of the image target. In case of a multi-platform image, this descriptor points to the OCI index or a manifest list. This field is only present if the daemon provides a multi-platform image store. WARNING: This is experimental and may change at any time without any backward compatibility. x-nullable: true $ref: "#/definitions/OCIDescriptor" Manifests: description: | Manifests is a list of image manifests available in this image. 
It provides a more detailed view of the platform-specific image manifests or other image-attached data like build attestations. Only available if the daemon provides a multi-platform image store and the `manifests` option is set in the inspect request. WARNING: This is experimental and may change at any time without any backward compatibility. type: "array" x-nullable: true items: $ref: "#/definitions/ImageManifestSummary" RepoTags: description: | List of image names/tags in the local image cache that reference this image. Multiple image tags can refer to the same image, and this list may be empty if no tags reference the image, in which case the image is "untagged". An untagged image can still be referenced by its ID. type: "array" items: type: "string" example: - "example:1.0" - "example:latest" - "example:stable" - "internal.registry.example.com:5000/example:1.0" RepoDigests: description: | List of content-addressable digests of locally available image manifests that the image is referenced from. Multiple manifests can refer to the same image. These digests are usually only available if the image was either pulled from a registry, or if the image was pushed to a registry, which is when the manifest is generated and its digest calculated. type: "array" items: type: "string" example: - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Parent: description: | ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. type: "string" x-nullable: false example: "" Comment: description: | Optional message that was set when committing or importing the image.
type: "string" x-nullable: false example: "" Created: description: | Date and time at which the image was created, formatted in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. This information is only available if present in the image, and omitted otherwise. type: "string" format: "dateTime" x-nullable: true example: "2022-02-04T21:20:12.497794809Z" DockerVersion: description: | The version of Docker that was used to build the image. Depending on how the image was created, this field may be empty. type: "string" x-nullable: false example: "27.0.1" Author: description: | Name of the author that was specified when committing the image, or as specified through MAINTAINER (deprecated) in the Dockerfile. type: "string" x-nullable: false example: "" Config: $ref: "#/definitions/ImageConfig" Architecture: description: | Hardware CPU architecture that the image runs on. type: "string" x-nullable: false example: "arm" Variant: description: | CPU architecture variant (presently ARM-only). type: "string" x-nullable: true example: "v7" Os: description: | Operating System the image is built to run on. type: "string" x-nullable: false example: "linux" OsVersion: description: | Operating System version the image is built to run on (especially for Windows). type: "string" example: "" x-nullable: true Size: description: | Total size of the image including all layers it is composed of. type: "integer" format: "int64" x-nullable: false example: 1239828 GraphDriver: $ref: "#/definitions/DriverData" RootFS: description: | Information about the image's RootFS, including the layer IDs. 
type: "object" required: [Type] properties: Type: type: "string" x-nullable: false example: "layers" Layers: type: "array" items: type: "string" example: - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" Metadata: description: | Additional metadata of the image in the local cache. This information is local to the daemon, and not part of the image itself. type: "object" properties: LastTagTime: description: | Date and time at which the image was last tagged in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. This information is only available if the image was tagged locally, and omitted otherwise. type: "string" format: "dateTime" example: "2022-02-28T14:40:02.623929178Z" x-nullable: true ImageSummary: type: "object" x-go-name: "Summary" required: - Id - ParentId - RepoTags - RepoDigests - Created - Size - SharedSize - Labels - Containers properties: Id: description: | ID is the content-addressable ID of an image. This identifier is a content-addressable digest calculated from the image's configuration (which includes the digests of layers used by the image). Note that this digest differs from the `RepoDigests` below, which holds digests of image manifests that reference the image. type: "string" x-nullable: false example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" ParentId: description: | ID of the parent image. Depending on how the image was created, this field may be empty and is only set for images that were built/created locally. This field is empty if the image was pulled from an image registry. type: "string" x-nullable: false example: "" RepoTags: description: | List of image names/tags in the local image cache that reference this image. 
Multiple image tags can refer to the same image, and this list may be empty if no tags reference the image, in which case the image is "untagged". An untagged image can still be referenced by its ID. type: "array" x-nullable: false items: type: "string" example: - "example:1.0" - "example:latest" - "example:stable" - "internal.registry.example.com:5000/example:1.0" RepoDigests: description: | List of content-addressable digests of locally available image manifests that the image is referenced from. Multiple manifests can refer to the same image. These digests are usually only available if the image was either pulled from a registry, or if the image was pushed to a registry, which is when the manifest is generated and its digest calculated. type: "array" x-nullable: false items: type: "string" example: - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Created: description: | Date and time at which the image was created as a Unix timestamp (number of seconds since EPOCH). type: "integer" x-nullable: false example: 1644009612 Size: description: | Total size of the image including all layers it is composed of. type: "integer" format: "int64" x-nullable: false example: 172064416 SharedSize: description: | Total size of image layers that are shared between this image and other images. This size is not calculated by default. `-1` indicates that the value has not been set / calculated. type: "integer" format: "int64" x-nullable: false example: 1239828 Labels: description: "User-defined key/value metadata." type: "object" x-nullable: false additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Containers: description: | Number of containers using this image. Includes both stopped and running containers.
This size is not calculated by default, and depends on which API endpoint is used. `-1` indicates that the value has not been set / calculated. x-nullable: false type: "integer" example: 2 Manifests: description: | Manifests is a list of manifests available in this image. It provides a more detailed view of the platform-specific image manifests or other image-attached data like build attestations. WARNING: This is experimental and may change at any time without any backward compatibility. type: "array" x-nullable: false x-omitempty: true items: $ref: "#/definitions/ImageManifestSummary" Descriptor: description: | Descriptor is an OCI descriptor of the image target. In case of a multi-platform image, this descriptor points to the OCI index or a manifest list. This field is only present if the daemon provides a multi-platform image store. WARNING: This is experimental and may change at any time without any backward compatibility. x-nullable: true $ref: "#/definitions/OCIDescriptor" AuthConfig: type: "object" properties: username: type: "string" password: type: "string" email: description: | Email is an optional value associated with the username. > **Deprecated**: This field is deprecated since docker 1.11 (API v1.23) and will be removed in a future release. type: "string" serveraddress: type: "string" example: username: "hannibal" password: "xxxx" serveraddress: "https://index.docker.io/v1/" ProcessConfig: type: "object" properties: privileged: type: "boolean" user: type: "string" tty: type: "boolean" entrypoint: type: "string" arguments: type: "array" items: type: "string" Volume: type: "object" required: [Name, Driver, Mountpoint, Labels, Scope, Options] properties: Name: type: "string" description: "Name of the volume." x-nullable: false example: "tardis" Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false example: "custom" Mountpoint: type: "string" description: "Mount path of the volume on the host." 
x-nullable: false example: "/var/lib/docker/volumes/tardis" CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." example: "2016-06-07T20:31:11.853781916Z" Status: type: "object" description: | Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{"key":"value","key2":"value2"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature. additionalProperties: type: "object" example: hello: "world" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Scope: type: "string" description: | The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. default: "local" x-nullable: false enum: ["local", "global"] example: "local" ClusterVolume: $ref: "#/definitions/ClusterVolume" Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" example: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" UsageData: type: "object" x-nullable: true x-go-name: "UsageData" required: [Size, RefCount] description: | Usage details about the volume. This information is used by the `GET /system/df` endpoint, and omitted in other endpoints. properties: Size: type: "integer" format: "int64" default: -1 description: | Amount of disk space used by the volume (in bytes). This information is only available for volumes created with the `"local"` volume driver. For volumes created with other volume drivers, this field is set to `-1` ("not available") x-nullable: false RefCount: type: "integer" format: "int64" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. 
x-nullable: false VolumeCreateOptions: description: "Volume configuration" type: "object" title: "VolumeConfig" x-go-name: "CreateOptions" properties: Name: description: | The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false example: "tardis" Driver: description: "Name of the volume driver to use." type: "string" default: "local" x-nullable: false example: "custom" DriverOpts: description: | A mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" additionalProperties: type: "string" example: device: "tmpfs" o: "size=100m,uid=1000" type: "tmpfs" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" ClusterVolumeSpec: $ref: "#/definitions/ClusterVolumeSpec" VolumeListResponse: type: "object" title: "VolumeListResponse" x-go-name: "ListResponse" description: "Volume list response" properties: Volumes: type: "array" description: "List of volumes" items: $ref: "#/definitions/Volume" Warnings: type: "array" description: | Warnings that occurred when fetching the list of volumes. items: type: "string" example: [] Network: type: "object" properties: Name: description: | Name of the network. type: "string" example: "my_network" Id: description: | ID that uniquely identifies a network on a single machine. type: "string" example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: description: | Date and time at which the network was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-10-19T04:33:30.360899459Z" Scope: description: | The level at which the network exists (e.g. 
`swarm` for cluster-wide or `local` for machine level) type: "string" example: "local" Driver: description: | The name of the driver used to create the network (e.g. `bridge`, `overlay`). type: "string" example: "overlay" EnableIPv4: description: | Whether the network was created with IPv4 enabled. type: "boolean" example: true EnableIPv6: description: | Whether the network was created with IPv6 enabled. type: "boolean" example: false IPAM: $ref: "#/definitions/IPAM" Internal: description: | Whether the network is created to only allow internal networking connectivity. type: "boolean" default: false example: false Attachable: description: | Whether a global / swarm scope network is manually attachable by regular containers from workers in swarm mode. type: "boolean" default: false example: false Ingress: description: | Whether the network is providing the routing-mesh for the swarm cluster. type: "boolean" default: false example: false ConfigFrom: $ref: "#/definitions/ConfigReference" ConfigOnly: description: | Whether the network is a config-only network. Config-only networks are placeholder networks for network configurations to be used by other networks. Config-only networks cannot be used directly to run containers or services. type: "boolean" default: false Containers: description: | Contains endpoints attached to the network. type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" example: 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: Name: "test" EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: "02:42:ac:13:00:02" IPv4Address: "172.19.0.2/16" IPv6Address: "" Options: description: | Network-specific options uses when creating the network. 
type: "object" additionalProperties: type: "string" example: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Peers: description: | List of peer nodes for an overlay network. This field is only present for overlay networks, and omitted for other network types. type: "array" items: $ref: "#/definitions/PeerInfo" x-nullable: true # TODO: Add Services (only present when "verbose" is set). ConfigReference: description: | The config-only network source to provide the configuration for this network. type: "object" properties: Network: description: | The name of the config-only network that provides the network's configuration. The specified network must be an existing config-only network. Only network names are allowed, not network IDs. type: "string" example: "config_only_network_01" IPAM: type: "object" properties: Driver: description: "Name of the IPAM driver to use." type: "string" default: "default" example: "default" Config: description: | List of IPAM configuration options, specified as a map: ``` {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} ``` type: "array" items: $ref: "#/definitions/IPAMConfig" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" example: foo: "bar" IPAMConfig: type: "object" properties: Subnet: type: "string" example: "172.20.0.0/16" IPRange: type: "string" example: "172.20.10.0/24" Gateway: type: "string" example: "172.20.10.11" AuxiliaryAddresses: type: "object" additionalProperties: type: "string" NetworkContainer: type: "object" properties: Name: type: "string" example: "container_1" EndpointID: type: "string" example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: type: "string" example: "02:42:ac:13:00:02" IPv4Address: type: "string" example: "172.19.0.2/16" IPv6Address: type: "string" example: "" PeerInfo: description: | PeerInfo represents one peer of an overlay network. type: "object" properties: Name: description: ID of the peer-node in the Swarm cluster. type: "string" example: "6869d7c1732b" IP: description: IP-address of the peer-node in the Swarm cluster. type: "string" example: "10.133.77.91" NetworkCreateResponse: description: "OK response to NetworkCreate operation" type: "object" title: "NetworkCreateResponse" x-go-name: "CreateResponse" required: [Id, Warning] properties: Id: description: "The ID of the created network." type: "string" x-nullable: false example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" Warning: description: "Warnings encountered when creating the container" type: "string" x-nullable: false example: "" BuildInfo: type: "object" properties: id: type: "string" stream: type: "string" error: type: "string" x-nullable: true description: |- errors encountered during the operation. > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" x-nullable: true description: |- Progress is a pre-formatted presentation of progressDetail. 
> **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" aux: $ref: "#/definitions/ImageID" BuildCache: type: "object" description: | BuildCache contains information about a build cache record. properties: ID: type: "string" description: | Unique ID of the build cache record. example: "ndlpt0hhvkqcdfkputsk4cq9c" Parents: description: | List of parent build cache record IDs. type: "array" items: type: "string" x-nullable: true example: ["hw53o5aio51xtltp5xjp8v7fx"] Type: type: "string" description: | Cache record type. example: "regular" # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 enum: - "internal" - "frontend" - "source.local" - "source.git.checkout" - "exec.cachemount" - "regular" Description: type: "string" description: | Description of the build-step that produced the build cache. example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: type: "boolean" description: | Indicates if the build cache is in use. example: false Shared: type: "boolean" description: | Indicates if the build cache is shared. example: true Size: description: | Amount of disk space used by the build cache (in bytes). type: "integer" example: 51 CreatedAt: description: | Date and time at which the build cache was created in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: description: | Date and time at which the build cache was last used in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
type: "string" format: "dateTime" x-nullable: true example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" example: 26 ImageID: type: "object" description: "Image ID or Digest" properties: ID: type: "string" example: ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: id: type: "string" error: type: "string" x-nullable: true description: |- errors encountered during the operation. > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" x-nullable: true description: |- Progress is a pre-formatted presentation of progressDetail. > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" PushImageInfo: type: "object" properties: error: type: "string" x-nullable: true description: |- errors encountered during the operation. > **Deprecated**: This field is deprecated since API v1.4, and will be omitted in a future API version. Use the information in errorDetail instead. errorDetail: $ref: "#/definitions/ErrorDetail" status: type: "string" progress: type: "string" x-nullable: true description: |- Progress is a pre-formatted presentation of progressDetail. > **Deprecated**: This field is deprecated since API v1.8, and will be omitted in a future API version. Use the information in progressDetail instead. progressDetail: $ref: "#/definitions/ProgressDetail" ErrorDetail: type: "object" properties: code: type: "integer" message: type: "string" ProgressDetail: type: "object" properties: current: type: "integer" total: type: "integer" ErrorResponse: description: "Represents an error." 
type: "object" required: ["message"] properties: message: description: "The error message." type: "string" x-nullable: false example: message: "Something went wrong." IDResponse: description: "Response to an API call that returns just an Id" type: "object" x-go-name: "IDResponse" required: ["Id"] properties: Id: description: "The id of the newly created object." type: "string" x-nullable: false EndpointSettings: description: "Configuration for a network endpoint." type: "object" properties: # Configurations IPAMConfig: $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" example: - "container_1" - "container_2" MacAddress: description: | MAC address for the endpoint on this network. The network driver might ignore this parameter. type: "string" example: "02:42:ac:11:00:04" Aliases: type: "array" items: type: "string" example: - "server_x" - "server_y" DriverOpts: description: | DriverOpts is a mapping of driver options and values. These options are passed directly to the driver and are driver specific. type: "object" x-nullable: true additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" GwPriority: description: | This property determines which endpoint will provide the default gateway for a container. The endpoint with the highest priority will be used. If multiple endpoints have the same priority, endpoints are lexicographically sorted based on their network name, and the one that sorts first is picked. type: "integer" format: "int64" example: - 10 # Operational data NetworkID: description: | Unique ID of the network. type: "string" example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: description: | Unique ID for the service endpoint in a Sandbox. type: "string" example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: description: | Gateway address for this network. 
type: "string" example: "172.17.0.1" IPAddress: description: | IPv4 address. type: "string" example: "172.17.0.4" IPPrefixLen: description: | Mask length of the IPv4 address. type: "integer" example: 16 IPv6Gateway: description: | IPv6 gateway address. type: "string" example: "2001:db8:2::100" GlobalIPv6Address: description: | Global IPv6 address. type: "string" example: "2001:db8::5689" GlobalIPv6PrefixLen: description: | Mask length of the global IPv6 address. type: "integer" format: "int64" example: 64 DNSNames: description: | List of all DNS names an endpoint has on a specific network. This list is based on the container name, network aliases, container short ID, and hostname. These DNS names are non-fully qualified but can contain several dots. You can get fully qualified DNS names by appending `.<network-name>`. For instance, if container name is `my.ctr` and the network is named `testnet`, `DNSNames` will contain `my.ctr` and the FQDN will be `my.ctr.testnet`. type: array items: type: string example: ["foobar", "server_x", "server_y", "my.ctr"] EndpointIPAMConfig: description: | EndpointIPAMConfig represents an endpoint's IPAM configuration. type: "object" x-nullable: true properties: IPv4Address: type: "string" example: "172.20.30.33" IPv6Address: type: "string" example: "2001:db8:abcd::3033" LinkLocalIPs: type: "array" items: type: "string" example: - "169.254.34.68" - "fe80::3468" PluginMount: type: "object" x-nullable: false required: [Name, Description, Settable, Source, Destination, Type, Options] properties: Name: type: "string" x-nullable: false example: "some-mount" Description: type: "string" x-nullable: false example: "This is a mount that's used by the plugin." 
Settable: type: "array" items: type: "string" Source: type: "string" example: "/var/lib/docker/plugins/" Destination: type: "string" x-nullable: false example: "/mnt/state" Type: type: "string" x-nullable: false example: "bind" Options: type: "array" items: type: "string" example: - "rbind" - "rw" PluginDevice: type: "object" required: [Name, Description, Settable, Path] x-nullable: false properties: Name: type: "string" x-nullable: false Description: type: "string" x-nullable: false Settable: type: "array" items: type: "string" Path: type: "string" example: "/dev/fuse" PluginEnv: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" Description: x-nullable: false type: "string" Settable: type: "array" items: type: "string" Value: type: "string" PluginInterfaceType: type: "object" x-nullable: false required: [Prefix, Capability, Version] properties: Prefix: type: "string" x-nullable: false Capability: type: "string" x-nullable: false Version: type: "string" x-nullable: false PluginPrivilege: description: | Describes a permission the user has to accept upon installing the plugin. type: "object" x-go-name: "PluginPrivilege" properties: Name: type: "string" example: "network" Description: type: "string" Value: type: "array" items: type: "string" example: - "host" Plugin: description: "A plugin for the Engine API" type: "object" required: [Settings, Enabled, Config, Name] properties: Id: type: "string" example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" Name: type: "string" x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: description: True if the plugin is running. False if the plugin is not running, only installed. type: "boolean" x-nullable: false example: true Settings: description: "Settings that can be modified by users." 
type: "object" x-nullable: false required: [Args, Devices, Env, Mounts] properties: Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: type: "string" example: - "DEBUG=0" Args: type: "array" items: type: "string" Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PluginReference: description: "plugin remote reference used to push/pull the plugin" type: "string" x-nullable: false example: "localhost:5000/tiborvass/sample-volume-plugin:latest" Config: description: "The config of a plugin." type: "object" x-nullable: false required: - Description - Documentation - Interface - Entrypoint - WorkDir - Network - Linux - PidHost - PropagatedMount - IpcHost - Mounts - Env - Args properties: DockerVersion: description: "Docker Version used to create the plugin" type: "string" x-nullable: false example: "17.06.0-ce" Description: type: "string" x-nullable: false example: "A sample volume plugin for Docker" Documentation: type: "string" x-nullable: false example: "https://docs.docker.com/engine/extend/plugins/" Interface: description: "The interface between Docker and the plugin" x-nullable: false type: "object" required: [Types, Socket] properties: Types: type: "array" items: $ref: "#/definitions/PluginInterfaceType" example: - "docker.volumedriver/1.0" Socket: type: "string" x-nullable: false example: "plugins.sock" ProtocolScheme: type: "string" example: "some.protocol/v1.0" description: "Protocol to use for clients connecting to the plugin." 
enum: - "" - "moby.plugins.http/v1" Entrypoint: type: "array" items: type: "string" example: - "/usr/bin/sample-volume-plugin" - "/data" WorkDir: type: "string" x-nullable: false example: "/bin/" User: type: "object" x-nullable: false properties: UID: type: "integer" format: "uint32" example: 1000 GID: type: "integer" format: "uint32" example: 1000 Network: type: "object" x-nullable: false required: [Type] properties: Type: x-nullable: false type: "string" example: "host" Linux: type: "object" x-nullable: false required: [Capabilities, AllowAllDevices, Devices] properties: Capabilities: type: "array" items: type: "string" example: - "CAP_SYS_ADMIN" - "CAP_SYSLOG" AllowAllDevices: type: "boolean" x-nullable: false example: false Devices: type: "array" items: $ref: "#/definitions/PluginDevice" PropagatedMount: type: "string" x-nullable: false example: "/mnt/volumes" IpcHost: type: "boolean" x-nullable: false example: false PidHost: type: "boolean" x-nullable: false example: false Mounts: type: "array" items: $ref: "#/definitions/PluginMount" Env: type: "array" items: $ref: "#/definitions/PluginEnv" example: - Name: "DEBUG" Description: "If set, prints debug messages" Settable: null Value: "0" Args: type: "object" x-nullable: false required: [Name, Description, Settable, Value] properties: Name: x-nullable: false type: "string" example: "args" Description: x-nullable: false type: "string" example: "command line arguments" Settable: type: "array" items: type: "string" Value: type: "array" items: type: "string" rootfs: type: "object" properties: type: type: "string" example: "layers" diff_ids: type: "array" items: type: "string" example: - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ObjectVersion: description: | The version number of the object such as node, service, etc. This is needed to avoid conflicting writes. 
The client must send the version number along with the modified specification when updating these objects. This approach ensures safe concurrency and determinism in that the change on the object may not be applied if the version number has changed from the last read. In other words, if two update requests specify the same base version, only one of the requests can succeed. As a result, two separate update requests that happen at the same time will not unintentionally overwrite each other. type: "object" properties: Index: type: "integer" format: "uint64" example: 373531 NodeSpec: type: "object" properties: Name: description: "Name for the node." type: "string" example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Role: description: "Role of the node." type: "string" enum: - "worker" - "manager" example: "manager" Availability: description: "Availability of the node." type: "string" enum: - "active" - "pause" - "drain" example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" Node: type: "object" properties: ID: type: "string" example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the node was added to the swarm in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the node was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: $ref: "#/definitions/NodeDescription" Status: $ref: "#/definitions/NodeStatus" ManagerStatus: $ref: "#/definitions/ManagerStatus" NodeDescription: description: | NodeDescription encapsulates the properties of the Node as reported by the agent. 
type: "object" properties: Hostname: type: "string" example: "bf3067039e47" Platform: $ref: "#/definitions/Platform" Resources: $ref: "#/definitions/ResourceObject" Engine: $ref: "#/definitions/EngineDescription" TLSInfo: $ref: "#/definitions/TLSInfo" Platform: description: | Platform represents the platform (Arch/OS). type: "object" properties: Architecture: description: | Architecture represents the hardware architecture (for example, `x86_64`). type: "string" example: "x86_64" OS: description: | OS represents the Operating System (for example, `linux` or `windows`). type: "string" example: "linux" EngineDescription: description: "EngineDescription provides information about an engine." type: "object" properties: EngineVersion: type: "string" example: "17.06.0" Labels: type: "object" additionalProperties: type: "string" example: foo: "bar" Plugins: type: "array" items: type: "object" properties: Type: type: "string" Name: type: "string" example: - Type: "Log" Name: "awslogs" - Type: "Log" Name: "fluentd" - Type: "Log" Name: "gcplogs" - Type: "Log" Name: "gelf" - Type: "Log" Name: "journald" - Type: "Log" Name: "json-file" - Type: "Log" Name: "splunk" - Type: "Log" Name: "syslog" - Type: "Network" Name: "bridge" - Type: "Network" Name: "host" - Type: "Network" Name: "ipvlan" - Type: "Network" Name: "macvlan" - Type: "Network" Name: "null" - Type: "Network" Name: "overlay" - Type: "Volume" Name: "local" - Type: "Volume" Name: "localhost:5000/vieux/sshfs:latest" - Type: "Volume" Name: "vieux/sshfs:latest" TLSInfo: description: | Information about the issuer of leaf TLS certificates and the trusted root CA certificate. type: "object" properties: TrustRoot: description: | The root CA certificate(s) that are used to validate leaf TLS certificates. type: "string" CertIssuerSubject: description: The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: description: | The base64-url-safe-encoded raw public key bytes of the issuer. 
type: "string" example: TrustRoot: | -----BEGIN CERTIFICATE----- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" NodeStatus: description: | NodeStatus represents the status of a node. It provides the current status of the node, as seen by the manager. type: "object" properties: State: $ref: "#/definitions/NodeState" Message: type: "string" example: "" Addr: description: "IP address of the node." type: "string" example: "172.17.0.2" NodeState: description: "NodeState represents the state of a node." type: "string" enum: - "unknown" - "down" - "ready" - "disconnected" example: "ready" ManagerStatus: description: | ManagerStatus represents the status of a manager. It provides the current status of a node's manager component, if the node is a manager. x-nullable: true type: "object" properties: Leader: type: "boolean" default: false example: true Reachability: $ref: "#/definitions/Reachability" Addr: description: | The IP address and port at which the manager is reachable. type: "string" example: "10.0.0.46:2377" Reachability: description: "Reachability represents the reachability of a node." type: "string" enum: - "unknown" - "unreachable" - "reachable" example: "reachable" SwarmSpec: description: "User modifiable swarm configuration." type: "object" properties: Name: description: "Name of the swarm." 
type: "string" example: "default" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.corp.type: "production" com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" x-nullable: true properties: TaskHistoryRetentionLimit: description: | The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 Raft: description: "Raft configuration." type: "object" properties: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" format: "uint64" example: 10000 KeepOldSnapshots: description: | The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: description: | The number of log entries to keep around to sync up slow followers after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" x-nullable: true properties: HeartbeatPeriod: description: | The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 CAConfig: description: "CA configuration." 
type: "object" x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" example: 7776000000000000 ExternalCAs: description: | Configuration for forwarding signing requests to an external certificate authority. type: "array" items: type: "object" properties: Protocol: description: | Protocol for communication with the external CA (currently only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: description: | URL where certificate signing requests should be sent. type: "string" Options: description: | An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: description: | The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided). type: "string" SigningCACert: description: | The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format. type: "string" SigningCAKey: description: | The desired signing CA key for all swarm node TLS leaf certificates, in PEM format. type: "string" ForceRotate: description: | An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." type: "object" properties: AutoLockManagers: description: | If set, generate a key and use it to lock data stored on the managers. type: "boolean" example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | The log driver to use for tasks created in the orchestrator if unspecified by a service. Updating this value only affects new tasks. 
Existing tasks continue to use their previously configured log driver until recreated. type: "object" properties: Name: description: | The log driver to use as a default for new tasks. type: "string" example: "json-file" Options: description: | Driver-specific options for the selected log driver, specified as key/value pairs. type: "object" additionalProperties: type: "string" example: "max-file": "10" "max-size": "100m" # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: description: | ClusterInfo represents information about the swarm as is returned by the "/info" endpoint. Join-tokens are not included. x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: description: | Date and time at which the swarm was initialised in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: description: | Date and time at which the swarm was last updated in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: description: | Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. If no port is set or is set to 0, the default port (4789) is used. type: "integer" format: "uint32" default: 4789 example: 4789 DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. 
type: "array" items: type: "string" format: "CIDR" example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. type: "integer" format: "uint32" maximum: 29 default: 24 example: 24 JoinTokens: description: | JoinTokens contains the tokens workers and managers need to join the swarm. type: "object" properties: Worker: description: | The token workers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" Manager: description: | The token managers can use to join the swarm. type: "string" example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" Swarm: type: "object" allOf: - $ref: "#/definitions/ClusterInfo" - type: "object" properties: JoinTokens: $ref: "#/definitions/JoinTokens" TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" description: | Plugin spec for the service. *(Experimental release only.)* <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. properties: Name: description: "The name or 'alias' to use for the plugin." type: "string" Remote: description: "The plugin image reference to use." type: "string" Disabled: description: "Disable the plugin once scheduled." type: "boolean" PluginPrivilege: type: "array" items: $ref: "#/definitions/PluginPrivilege" ContainerSpec: type: "object" description: | Container spec for the service. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. 
properties: Image: description: "The image name to use for the container" type: "string" Labels: description: "User-defined key/value data." type: "object" additionalProperties: type: "string" Command: description: "The command to be run in the image." type: "array" items: type: "string" Args: description: "Arguments to the command." type: "array" items: type: "string" Hostname: description: | The hostname to use for the container, as a valid [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: description: | A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" Dir: description: "The working directory for commands to run in." type: "string" User: description: "The user inside the container." type: "string" Groups: type: "array" description: | A list of additional groups that the container process will run as. items: type: "string" Privileges: type: "object" description: "Security options for the container" properties: CredentialSpec: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: Config: type: "string" example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. The specified config must also be present in the Configs field with the Runtime property set. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to `C:\ProgramData\Docker\` on Windows. For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. 
Registry: type: "string" description: | Load credential spec from this value in the Windows registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` <p><br /></p> > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" properties: Disable: type: "boolean" description: "Disable SELinux" User: type: "string" description: "SELinux user label" Role: type: "string" description: "SELinux role label" Type: type: "string" description: "SELinux type label" Level: type: "string" description: "SELinux level label" Seccomp: type: "object" description: "Options for configuring seccomp on the container" properties: Mode: type: "string" enum: - "default" - "unconfined" - "custom" Profile: description: "The custom seccomp profile as a json object" type: "string" AppArmor: type: "object" description: "Options for configuring AppArmor on the container" properties: Mode: type: "string" enum: - "default" - "disabled" NoNewPrivileges: type: "boolean" description: "Configuration of the no_new_privs bit in the container" TTY: description: "Whether a pseudo-TTY should be allocated." type: "boolean" OpenStdin: description: "Open `stdin`" type: "boolean" ReadOnly: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: description: | Specification for mounts to be added to containers created as part of the service. type: "array" items: $ref: "#/definitions/Mount" StopSignal: description: "Signal to stop the container." type: "string" StopGracePeriod: description: | Amount of time to wait for the container to terminate before forcefully killing it. 
type: "integer" format: "int64" HealthCheck: $ref: "#/definitions/HealthConfig" Hosts: type: "array" description: | A list of hostname/IP mappings to add to the container's `hosts` file. The format of extra hosts is specified in the [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) man page: IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: description: | Specification for DNS related configurations in resolver configuration file (`resolv.conf`). type: "object" properties: Nameservers: description: "The IP addresses of the name servers." type: "array" items: type: "string" Search: description: "A search list for host-name lookup." type: "array" items: type: "string" Options: description: | A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: description: | Secrets contains references to zero or more secrets that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" SecretID: description: | SecretID represents the ID of the specific secret that we're referencing. type: "string" SecretName: description: | SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" OomScoreAdj: type: "integer" format: "int64" description: | An integer value containing the score given to the container in order to tune OOM killer preferences. 
example: 0 Configs: description: | Configs contains references to zero or more configs that will be exposed to the service. type: "array" items: type: "object" properties: File: description: | File represents a specific target that is backed by a file. <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: description: | Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." type: "string" GID: description: "GID represents the file GID." type: "string" Mode: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" Runtime: description: | Runtime represents a target that is not mounted into the container but is used by the task <p><br /><p> > **Note**: `Configs.File` and `Configs.Runtime` are mutually > exclusive type: "object" ConfigID: description: | ConfigID represents the ID of the specific config that we're referencing. type: "string" ConfigName: description: | ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. type: "string" Isolation: type: "string" description: | Isolation technology of the containers running the service. (Windows only) enum: - "default" - "process" - "hyperv" - "" Init: description: | Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: description: | Set kernel namespaced parameters (sysctls) in the container. The Sysctls option on services accepts the same sysctls as are supported on containers. 
Note that while the same sysctls are supported, no guarantees or checks are made about their suitability for a clustered environment, and it's up to the user to determine whether a given sysctl will work properly in a Service. type: "object" additionalProperties: type: "string" # This option is not used by Windows containers CapabilityAdd: type: "array" description: | A list of kernel capabilities to add to the default set for the container. items: type: "string" example: - "CAP_NET_RAW" - "CAP_SYS_ADMIN" - "CAP_SYS_CHROOT" - "CAP_SYSLOG" CapabilityDrop: type: "array" description: | A list of kernel capabilities to drop from the default set for the container. items: type: "string" example: - "CAP_NET_RAW" Ulimits: description: | A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" type: "array" items: type: "object" properties: Name: description: "Name of ulimit" type: "string" Soft: description: "Soft limit" type: "integer" Hard: description: "Hard limit" type: "integer" NetworkAttachmentSpec: description: | Read-only spec type for non-swarm containers attached to swarm overlay networks. <p><br /></p> > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are > mutually exclusive. PluginSpec is only used when the Runtime field > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime > field is set to `attachment`. type: "object" properties: ContainerID: description: "ID of the container represented by this task" type: "string" Resources: description: | Resource requirements which apply to each individual container created as part of the service. type: "object" properties: Limits: description: "Define resources limits." $ref: "#/definitions/Limit" Reservations: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: description: | Specification for the restart policy which applies to containers created as part of this service. 
type: "object" properties: Condition: description: "Condition for restart." type: "string" enum: - "none" - "on-failure" - "any" Delay: description: "Delay between restart attempts." type: "integer" format: "int64" MaxAttempts: description: | Maximum attempts to restart a given container before giving up (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: description: | Window is the time window used to evaluate the restart policy (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 Placement: type: "object" properties: Constraints: description: | An array of constraint expressions to limit the set of nodes where a task can be scheduled. Constraint expressions can either use a _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find nodes that satisfy every expression (AND match). Constraints can match node or Docker Engine labels as follows: node attribute | matches | example ---------------------|--------------------------------|----------------------------------------------- `node.id` | Node ID | `node.id==2ivku8v2gvtg4` `node.hostname` | Node hostname | `node.hostname!=node-2` `node.role` | Node role (`manager`/`worker`) | `node.role==manager` `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational purposes by using the [`node update endpoint`](#operation/NodeUpdate). 
type: "array" items: type: "string" example: - "node.hostname!=node3.corp.example.com" - "node.role!=manager" - "node.labels.type==production" - "node.platform.os==linux" - "node.platform.arch==x86_64" Preferences: description: | Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence. type: "array" items: type: "object" properties: Spread: type: "object" properties: SpreadDescriptor: description: | label descriptor, such as `engine.labels.az`. type: "string" example: - Spread: SpreadDescriptor: "node.labels.datacenter" - Spread: SpreadDescriptor: "node.labels.rack" MaxReplicas: description: | Maximum number of replicas per node (default value is 0, which is unlimited) type: "integer" format: "int64" default: 0 Platforms: description: | Platforms stores all the platforms that the service's image can run on. This field is used in the platform filter for scheduling. If empty, then the platform filter is off, meaning there are no scheduling restrictions. type: "array" items: $ref: "#/definitions/Platform" ForceUpdate: description: | A counter that triggers an update even if no relevant parameters have been changed. type: "integer" format: "uint64" Runtime: description: | Runtime is the type of runtime specified for the task executor. type: "string" Networks: description: "Specifies which networks the service should attach to." type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" LogDriver: description: | Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified. 
type: "object" properties: Name: type: "string" Options: type: "object" additionalProperties: type: "string" TaskState: type: "string" enum: - "new" - "allocated" - "pending" - "assigned" - "accepted" - "preparing" - "ready" - "starting" - "running" - "complete" - "shutdown" - "failed" - "rejected" - "remove" - "orphaned" ContainerStatus: type: "object" description: "represents the status of a container." properties: ContainerID: type: "string" PID: type: "integer" ExitCode: type: "integer" PortStatus: type: "object" description: "represents the port status of a task's host ports whose service has published host ports" properties: Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" TaskStatus: type: "object" description: "represents the status of a task." properties: Timestamp: type: "string" format: "dateTime" State: $ref: "#/definitions/TaskState" Message: type: "string" Err: type: "string" ContainerStatus: $ref: "#/definitions/ContainerStatus" PortStatus: $ref: "#/definitions/PortStatus" Task: type: "object" properties: ID: description: "The ID of the task." type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Name: description: "Name of the task." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Spec: $ref: "#/definitions/TaskSpec" ServiceID: description: "The ID of the service this task is part of." type: "string" Slot: type: "integer" NodeID: description: "The ID of the node that this task is on." type: "string" AssignedGenericResources: $ref: "#/definitions/GenericResources" Status: $ref: "#/definitions/TaskStatus" DesiredState: $ref: "#/definitions/TaskState" JobIteration: description: | If the Service this Task belongs to is a job-mode service, contains the JobIteration of the Service this Task was created for. 
Absent if the Task was created for a Replicated or Global Service. $ref: "#/definitions/ObjectVersion" example: ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.10/16" AssignedGenericResources: - DiscreteResourceSpec: Kind: "SSD" Value: 3 - NamedResourceSpec: Kind: "GPU" Value: "UUID1" - NamedResourceSpec: Kind: "GPU" Value: "UUID2" ServiceSpec: description: "User modifiable configuration for a service." type: object properties: Name: description: "Name of the service." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" TaskTemplate: $ref: "#/definitions/TaskSpec" Mode: description: "Scheduling mode for the service." 
type: "object" properties: Replicated: type: "object" properties: Replicas: type: "integer" format: "int64" Global: type: "object" ReplicatedJob: description: | The mode used for services with a finite number of tasks that run to a completed state. type: "object" properties: MaxConcurrent: description: | The maximum number of replicas to run simultaneously. type: "integer" format: "int64" default: 1 TotalCompletions: description: | The total number of replicas desired to reach the Completed state. If unset, will default to the value of `MaxConcurrent` type: "integer" format: "int64" GlobalJob: description: | The mode used for services which run a task to the completed state on each valid node. type: "object" UpdateConfig: description: "Specification for the update strategy of the service." type: "object" properties: Parallelism: description: | Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: "Amount of time between updates, in nanoseconds." type: "integer" format: "int64" FailureAction: description: | Action to take if an updated task fails to run, or stops running during the update. type: "string" enum: - "continue" - "pause" - "rollback" Monitor: description: | Amount of time to monitor each updated task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" RollbackConfig: description: "Specification for the rollback strategy of the service." 
type: "object" properties: Parallelism: description: | Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism). type: "integer" format: "int64" Delay: description: | Amount of time between rollback iterations, in nanoseconds. type: "integer" format: "int64" FailureAction: description: | Action to take if a rolled back task fails to run, or stops running during the rollback. type: "string" enum: - "continue" - "pause" Monitor: description: | Amount of time to monitor each rolled back task for failures, in nanoseconds. type: "integer" format: "int64" MaxFailureRatio: description: | The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1. type: "number" default: 0 Order: description: | The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down. type: "string" enum: - "stop-first" - "start-first" Networks: description: | Specifies which networks the service should attach to. Deprecated: This field is deprecated since v1.44. The Networks field in TaskSpec should be used instead. type: "array" items: $ref: "#/definitions/NetworkAttachmentConfig" EndpointSpec: $ref: "#/definitions/EndpointSpec" EndpointPortConfig: type: "object" properties: Name: type: "string" Protocol: type: "string" enum: - "tcp" - "udp" - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" PublishMode: description: | The mode in which port is published. <p><br /></p> - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publishes the port directly on the swarm node where that service is running. 
type: "string" enum: - "ingress" - "host" default: "ingress" example: "ingress" EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" properties: Mode: description: | The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: description: | List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used. type: "array" items: $ref: "#/definitions/EndpointPortConfig" Service: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ServiceSpec" Endpoint: type: "object" properties: Spec: $ref: "#/definitions/EndpointSpec" Ports: type: "array" items: $ref: "#/definitions/EndpointPortConfig" VirtualIPs: type: "array" items: type: "object" properties: NetworkID: type: "string" Addr: type: "string" UpdateStatus: description: "The status of a service update." type: "object" properties: State: type: "string" enum: - "updating" - "paused" - "completed" StartedAt: type: "string" format: "dateTime" CompletedAt: type: "string" format: "dateTime" Message: type: "string" ServiceStatus: description: | The status of the service's tasks. Provided only when requested as part of a ServiceList operation. type: "object" properties: RunningTasks: description: | The number of tasks for the service currently in the Running state. type: "integer" format: "uint64" example: 7 DesiredTasks: description: | The number of tasks for the service desired to be running. For replicated services, this is the replica count from the service spec. For global services, this is computed by taking count of all tasks for the service with a Desired State other than Shutdown. 
type: "integer" format: "uint64" example: 10 CompletedTasks: description: | The number of tasks for a job that are in the Completed state. This field must be cross-referenced with the service type, as the value of 0 may mean the service is not in a job mode, or it may mean the job-mode service has no tasks yet Completed. type: "integer" format: "uint64" JobStatus: description: | The status of the service when it is in one of ReplicatedJob or GlobalJob modes. Absent on Replicated and Global mode services. The JobIteration is an ObjectVersion, but unlike the Service's version, does not need to be sent with an update request. type: "object" properties: JobIteration: description: | JobIteration is a value increased each time a Job is executed, successfully or otherwise. "Executed", in this case, means the job as a whole has been started, not that an individual Task has been launched. A job is "Executed" when its ServiceSpec is updated. JobIteration can be used to disambiguate Tasks belonging to different executions of a job. Though JobIteration will increase with each subsequent execution, it may not necessarily increase by 1, and so JobIteration should not be used to count the number of times a job has been executed. $ref: "#/definitions/ObjectVersion" LastExecution: description: | The last time, as observed by the server, that this job was started. 
type: "string" format: "dateTime" example: ID: "9mnpnzenvg8p8tdbtq4wvbkcz" Version: Index: 19 CreatedAt: "2016-06-07T21:05:51.880065305Z" UpdatedAt: "2016-06-07T21:07:29.962229872Z" Spec: Name: "hopeful_cori" TaskTemplate: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Endpoint: Spec: Mode: "vip" Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 Ports: - Protocol: "tcp" TargetPort: 6379 PublishedPort: 30001 VirtualIPs: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.2/16" - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" ImageDeleteResponseItem: type: "object" x-go-name: "DeleteResponse" properties: Untagged: description: "The image ID of an image that was untagged" type: "string" Deleted: description: "The image ID of an image that was deleted" type: "string" ServiceCreateResponse: type: "object" description: | contains the information returned to a client on the creation of a new service. properties: ID: description: "The ID of the created service." type: "string" x-nullable: false example: "ak7w3gjqoa3kuz8xcpnyy0pvl" Warnings: description: | Optional warning message. FIXME(thaJeztah): this should have "omitempty" in the generated type. 
type: "array" x-nullable: true items: type: "string" example: - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ServiceUpdateResponse: type: "object" properties: Warnings: description: "Optional warning messages" type: "array" items: type: "string" example: Warnings: - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerInspectResponse: type: "object" title: "ContainerInspectResponse" x-go-name: "InspectResponse" properties: Id: description: |- The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). type: "string" x-go-name: "ID" minLength: 64 maxLength: 64 pattern: "^[0-9a-fA-F]{64}$" example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" Created: description: |- Date and time at which the container was created, formatted in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" x-nullable: true example: "2025-02-17T17:43:39.64001363Z" Path: description: |- The path to the command being run type: "string" example: "/bin/sh" Args: description: "The arguments to the command being run" type: "array" items: type: "string" example: - "-c" - "exit 9" State: $ref: "#/definitions/ContainerState" Image: description: |- The ID (digest) of the image that this container was created from. type: "string" example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" ResolvConfPath: description: |- Location of the `/etc/resolv.conf` generated for the container on the host. This file is managed through the docker daemon, and should not be accessed or modified by other tools. type: "string" example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" HostnamePath: description: |- Location of the `/etc/hostname` generated for the container on the host. 
This file is managed through the docker daemon, and should not be accessed or modified by other tools. type: "string" example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" HostsPath: description: |- Location of the `/etc/hosts` generated for the container on the host. This file is managed through the docker daemon, and should not be accessed or modified by other tools. type: "string" example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" LogPath: description: |- Location of the file used to buffer the container's logs. Depending on the logging-driver used for the container, this field may be omitted. This file is managed through the docker daemon, and should not be accessed or modified by other tools. type: "string" x-nullable: true example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" Name: description: |- The name associated with this container. For historic reasons, the name may be prefixed with a forward-slash (`/`). type: "string" example: "/funny_chatelet" RestartCount: description: |- Number of times the container was restarted since it was created, or since daemon was started. type: "integer" example: 0 Driver: description: |- The storage-driver used for the container's filesystem (graph-driver or snapshotter). type: "string" example: "overlayfs" Platform: description: |- The platform (operating system) for which the container was created. This field was introduced for the experimental "LCOW" (Linux Containers On Windows) features, which has been removed. In most cases, this field is equal to the host's operating system (`linux` or `windows`). 
type: "string" example: "linux" ImageManifestDescriptor: $ref: "#/definitions/OCIDescriptor" description: |- OCI descriptor of the platform-specific manifest of the image the container was created from. Note: Only available if the daemon provides a multi-platform image store. MountLabel: description: |- SELinux mount label set for the container. type: "string" example: "" ProcessLabel: description: |- SELinux process label set for the container. type: "string" example: "" AppArmorProfile: description: |- The AppArmor profile set for the container. type: "string" example: "" ExecIDs: description: |- IDs of exec instances that are running in the container. type: "array" items: type: "string" x-nullable: true example: - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: $ref: "#/definitions/DriverData" SizeRw: description: |- The size of files that have been created or changed by this container. This field is omitted by default, and only set when size is requested in the API request. type: "integer" format: "int64" x-nullable: true example: "122880" SizeRootFs: description: |- The total size of all files in the read-only layers from the image that the container uses. These layers can be shared between containers. This field is omitted by default, and only set when size is requested in the API request. type: "integer" format: "int64" x-nullable: true example: "1653948416" Mounts: description: |- List of mounts used by the container. type: "array" items: $ref: "#/definitions/MountPoint" Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkSettings" ContainerSummary: type: "object" properties: Id: description: |- The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). 
type: "string" x-go-name: "ID" minLength: 64 maxLength: 64 pattern: "^[0-9a-fA-F]{64}$" example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" Names: description: |- The names associated with this container. Most containers have a single name, but when using legacy "links", the container can have multiple names. For historic reasons, names are prefixed with a forward-slash (`/`). type: "array" items: type: "string" example: - "/funny_chatelet" Image: description: |- The name or ID of the image used to create the container. This field shows the image reference as was specified when creating the container, which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), short form (e.g., `ubuntu:latest`), or the ID(-prefix) of the image (e.g., `72297848456d`). The content of this field can be updated at runtime if the image used to create the container is untagged, in which case the field is updated to contain the image ID (digest) it was resolved to in its canonical, non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). type: "string" example: "docker.io/library/ubuntu:latest" ImageID: description: |- The ID (digest) of the image that this container was created from. type: "string" example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" ImageManifestDescriptor: $ref: "#/definitions/OCIDescriptor" x-nullable: true description: | OCI descriptor of the platform-specific manifest of the image the container was created from. Note: Only available if the daemon provides a multi-platform image store. This field is not populated in the `GET /system/df` endpoint. 
Command: description: "Command to run when starting the container" type: "string" example: "/bin/bash" Created: description: |- Date and time at which the container was created as a Unix timestamp (number of seconds since EPOCH). type: "integer" format: "int64" example: "1739811096" Ports: description: |- Port-mappings for the container. type: "array" items: $ref: "#/definitions/Port" SizeRw: description: |- The size of files that have been created or changed by this container. This field is omitted by default, and only set when size is requested in the API request. type: "integer" format: "int64" x-nullable: true example: "122880" SizeRootFs: description: |- The total size of all files in the read-only layers from the image that the container uses. These layers can be shared between containers. This field is omitted by default, and only set when size is requested in the API request. type: "integer" format: "int64" x-nullable: true example: "1653948416" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" State: description: | The state of this container. type: "string" enum: - "created" - "running" - "paused" - "restarting" - "exited" - "removing" - "dead" example: "running" Status: description: |- Additional human-readable status of this container (e.g. `Exit 0`) type: "string" example: "Up 4 days" HostConfig: type: "object" description: |- Summary of host-specific runtime information of the container. This is a reduced set of information in the container's "HostConfig" as available in the container "inspect" response. properties: NetworkMode: description: |- Networking mode (`host`, `none`, `container:<id>`) or name of the primary network the container is using. This field is primarily for backward compatibility. 
The container can be connected to multiple networks for which information can be found in the `NetworkSettings.Networks` field, which enumerates settings per network. type: "string" example: "mynetwork" Annotations: description: |- Arbitrary key-value metadata attached to the container. type: "object" x-nullable: true additionalProperties: type: "string" example: io.kubernetes.docker.type: "container" io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" NetworkSettings: description: |- Summary of the container's network settings type: "object" properties: Networks: type: "object" description: |- Summary of network-settings for each network the container is attached to. additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" description: |- List of mounts used by the container. items: $ref: "#/definitions/MountPoint" Driver: description: "Driver represents a driver (network, logging, secrets)." type: "object" required: [Name] properties: Name: description: "Name of the driver." type: "string" x-nullable: false example: "some-driver" Options: description: "Key/value map of driver-specific options." type: "object" x-nullable: false additionalProperties: type: "string" example: OptionA: "value for driver-specific option A" OptionB: "value for driver-specific option B" SecretSpec: type: "object" properties: Name: description: "User-defined name of the secret." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" Data: description: | Data is the data to store as a secret, formatted as a standard base64-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. It must be empty if the Driver field is set, in which case the data is loaded from an external secret store. 
The maximum allowed size is 500KB, as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. type: "string" example: "" Driver: description: | Name of the secrets driver used to fetch the secret's value from an external secret store. $ref: "#/definitions/Driver" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. $ref: "#/definitions/Driver" Secret: type: "object" properties: ID: type: "string" example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" ConfigSpec: type: "object" properties: Name: description: "User-defined name of the config." type: "string" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" Data: description: | Data is the data to store as a config, formatted as a standard base64-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-4)) string. The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). type: "string" Templating: description: | Templating driver, if applicable Templating controls whether and how to evaluate the config payload as a template. If no driver is set, no templating is used. 
$ref: "#/definitions/Driver" Config: type: "object" properties: ID: type: "string" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ConfigSpec" ContainerState: description: | ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" x-nullable: true properties: Status: description: | String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead". type: "string" enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] example: "running" Running: description: | Whether this container is running. Note that a running container can be _paused_. The `Running` and `Paused` booleans are not mutually exclusive: When pausing a container (on Linux), the freezer cgroup is used to suspend all processes in the container. Freezing the process requires the process to be running. As a result, paused containers are both `Running` _and_ `Paused`. Use the `Status` field instead to determine if a container's state is "running". type: "boolean" example: true Paused: description: "Whether this container is paused." type: "boolean" example: false Restarting: description: "Whether this container is restarting." type: "boolean" example: false OOMKilled: description: | Whether a process within this container has been killed because it ran out of memory since the container was last started. type: "boolean" example: false Dead: type: "boolean" example: false Pid: description: "The process ID of this container" type: "integer" example: 1234 ExitCode: description: "The last exit code of this container" type: "integer" example: 0 Error: type: "string" StartedAt: description: "The time when this container was last started." 
type: "string" example: "2020-01-06T09:06:59.461876391Z" FinishedAt: description: "The time when this container last exited." type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: $ref: "#/definitions/Health" ContainerCreateResponse: description: "OK response to ContainerCreate operation" type: "object" title: "ContainerCreateResponse" x-go-name: "CreateResponse" required: [Id, Warnings] properties: Id: description: "The ID of the created container" type: "string" x-nullable: false example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Warnings: description: "Warnings encountered when creating the container" type: "array" x-nullable: false items: type: "string" example: [] ContainerUpdateResponse: type: "object" title: "ContainerUpdateResponse" x-go-name: "UpdateResponse" description: |- Response for a successful container-update. properties: Warnings: type: "array" description: |- Warnings encountered when updating the container. items: type: "string" example: ["Published ports are discarded when using host network mode"] ContainerStatsResponse: description: | Statistics sample for a container. type: "object" x-go-name: "StatsResponse" title: "ContainerStatsResponse" properties: name: description: "Name of the container" type: "string" x-nullable: true example: "boring_wozniak" id: description: "ID of the container" type: "string" x-nullable: true example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" read: description: | Date and time at which this sample was collected. The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds. type: "string" format: "date-time" example: "2025-01-16T13:55:22.165243637Z" preread: description: | Date and time at which this first sample was collected. This field is not propagated if the "one-shot" option is set. If the "one-shot" option is set, this field may be omitted, empty, or set to a default date (`0001-01-01T00:00:00Z`). 
The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds. type: "string" format: "date-time" example: "2025-01-16T13:55:21.160452595Z" pids_stats: $ref: "#/definitions/ContainerPidsStats" blkio_stats: $ref: "#/definitions/ContainerBlkioStats" num_procs: description: | The number of processors on the system. This field is Windows-specific and always zero for Linux containers. type: "integer" format: "uint32" example: 16 storage_stats: $ref: "#/definitions/ContainerStorageStats" cpu_stats: $ref: "#/definitions/ContainerCPUStats" precpu_stats: $ref: "#/definitions/ContainerCPUStats" memory_stats: $ref: "#/definitions/ContainerMemoryStats" networks: description: | Network statistics for the container per interface. This field is omitted if the container has no networking enabled. x-nullable: true additionalProperties: $ref: "#/definitions/ContainerNetworkStats" example: eth0: rx_bytes: 5338 rx_dropped: 0 rx_errors: 0 rx_packets: 36 tx_bytes: 648 tx_dropped: 0 tx_errors: 0 tx_packets: 8 eth5: rx_bytes: 4641 rx_dropped: 0 rx_errors: 0 rx_packets: 26 tx_bytes: 690 tx_dropped: 0 tx_errors: 0 tx_packets: 9 ContainerBlkioStats: description: | BlkioStats stores all IO service stats for data read and write. This type is Linux-specific and holds many fields that are specific to cgroups v1. On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`. This type is only populated on Linux and omitted for Windows containers. type: "object" x-go-name: "BlkioStats" x-nullable: true properties: io_service_bytes_recursive: type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" io_serviced_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. 
x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" io_queue_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" io_service_time_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" io_wait_time_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" io_merged_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" io_time_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" sectors_recursive: description: | This field is only available when using Linux containers with cgroups v1. It is omitted or `null` when using cgroups v2. x-nullable: true type: "array" items: $ref: "#/definitions/ContainerBlkioStatEntry" example: io_service_bytes_recursive: [ {"major": 254, "minor": 0, "op": "read", "value": 7593984}, {"major": 254, "minor": 0, "op": "write", "value": 100} ] io_serviced_recursive: null io_queue_recursive: null io_service_time_recursive: null io_wait_time_recursive: null io_merged_recursive: null io_time_recursive: null sectors_recursive: null ContainerBlkioStatEntry: description: | Blkio stats entry. 
This type is Linux-specific and omitted for Windows containers. type: "object" x-go-name: "BlkioStatEntry" x-nullable: true properties: major: type: "integer" format: "uint64" example: 254 minor: type: "integer" format: "uint64" example: 0 op: type: "string" example: "read" value: type: "integer" format: "uint64" example: 7593984 ContainerCPUStats: description: | CPU related info of the container type: "object" x-go-name: "CPUStats" x-nullable: true properties: cpu_usage: $ref: "#/definitions/ContainerCPUUsage" system_cpu_usage: description: | System Usage. This field is Linux-specific and omitted for Windows containers. type: "integer" format: "uint64" x-nullable: true example: 5 online_cpus: description: | Number of online CPUs. This field is Linux-specific and omitted for Windows containers. type: "integer" format: "uint32" x-nullable: true example: 5 throttling_data: $ref: "#/definitions/ContainerThrottlingData" ContainerCPUUsage: description: | All CPU stats aggregated since container inception. type: "object" x-go-name: "CPUUsage" x-nullable: true properties: total_usage: description: | Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). type: "integer" format: "uint64" example: 29912000 percpu_usage: description: | Total CPU time (in nanoseconds) consumed per core (Linux). This field is Linux-specific when using cgroups v1. It is omitted when using cgroups v2 and Windows containers. type: "array" x-nullable: true items: type: "integer" format: "uint64" example: 29912000 usage_in_kernelmode: description: | Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux), or time spent (in 100's of nanoseconds) by all container processes in kernel mode (Windows). Not populated for Windows containers using Hyper-V isolation. 
type: "integer" format: "uint64" example: 21994000 usage_in_usermode: description: | Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux), or time spent (in 100's of nanoseconds) by all container processes in user mode (Windows). Not populated for Windows containers using Hyper-V isolation. type: "integer" format: "uint64" example: 7918000 ContainerPidsStats: description: | PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). This type is Linux-specific and omitted for Windows containers. type: "object" x-go-name: "PidsStats" x-nullable: true properties: current: description: | Current is the number of PIDs in the cgroup. type: "integer" format: "uint64" x-nullable: true example: 5 limit: description: | Limit is the hard limit on the number of pids in the cgroup. A "Limit" of 0 means that there is no limit. type: "integer" format: "uint64" x-nullable: true example: "18446744073709551615" ContainerThrottlingData: description: | CPU throttling stats of the container. This type is Linux-specific and omitted for Windows containers. type: "object" x-go-name: "ThrottlingData" x-nullable: true properties: periods: description: | Number of periods with throttling active. type: "integer" format: "uint64" example: 0 throttled_periods: description: | Number of periods when the container hit its throttling limit. type: "integer" format: "uint64" example: 0 throttled_time: description: | Aggregated time (in nanoseconds) the container was throttled for. type: "integer" format: "uint64" example: 0 ContainerMemoryStats: description: | Aggregates all memory stats since container inception on Linux. Windows returns stats for commit and private working set only. type: "object" x-go-name: "MemoryStats" properties: usage: description: | Current `res_counter` usage for memory. This field is Linux-specific and omitted for Windows containers. 
type: "integer" format: "uint64" x-nullable: true example: 0 max_usage: description: | Maximum usage ever recorded. This field is Linux-specific and only supported on cgroups v1. It is omitted when using cgroups v2 and for Windows containers. type: "integer" format: "uint64" x-nullable: true example: 0 stats: description: | All the stats exported via memory.stat. The fields in this object differ between cgroups v1 and v2. On cgroups v1, fields such as `cache`, `rss`, `mapped_file` are available. On cgroups v2, fields such as `file`, `anon`, `inactive_file` are available. This field is Linux-specific and omitted for Windows containers. type: "object" additionalProperties: type: "integer" format: "uint64" x-nullable: true example: { "active_anon": 1572864, "active_file": 5115904, "anon": 1572864, "anon_thp": 0, "file": 7626752, "file_dirty": 0, "file_mapped": 2723840, "file_writeback": 0, "inactive_anon": 0, "inactive_file": 2510848, "kernel_stack": 16384, "pgactivate": 0, "pgdeactivate": 0, "pgfault": 2042, "pglazyfree": 0, "pglazyfreed": 0, "pgmajfault": 45, "pgrefill": 0, "pgscan": 0, "pgsteal": 0, "shmem": 0, "slab": 1180928, "slab_reclaimable": 725576, "slab_unreclaimable": 455352, "sock": 0, "thp_collapse_alloc": 0, "thp_fault_alloc": 1, "unevictable": 0, "workingset_activate": 0, "workingset_nodereclaim": 0, "workingset_refault": 0 } failcnt: description: | Number of times memory usage hits limits. This field is Linux-specific and only supported on cgroups v1. It is omitted when using cgroups v2 and for Windows containers. type: "integer" format: "uint64" x-nullable: true example: 0 limit: description: | This field is Linux-specific and omitted for Windows containers. type: "integer" format: "uint64" x-nullable: true example: 8217579520 commitbytes: description: | Committed bytes. This field is Windows-specific and omitted for Linux containers. type: "integer" format: "uint64" x-nullable: true example: 0 commitpeakbytes: description: | Peak committed bytes. 
This field is Windows-specific and omitted for Linux containers. type: "integer" format: "uint64" x-nullable: true example: 0 privateworkingset: description: | Private working set. This field is Windows-specific and omitted for Linux containers. type: "integer" format: "uint64" x-nullable: true example: 0 ContainerNetworkStats: description: | Aggregates the network stats of one container type: "object" x-go-name: "NetworkStats" x-nullable: true properties: rx_bytes: description: | Bytes received. Windows and Linux. type: "integer" format: "uint64" example: 5338 rx_packets: description: | Packets received. Windows and Linux. type: "integer" format: "uint64" example: 36 rx_errors: description: | Received errors. Not used on Windows. This field is Linux-specific and always zero for Windows containers. type: "integer" format: "uint64" example: 0 rx_dropped: description: | Incoming packets dropped. Windows and Linux. type: "integer" format: "uint64" example: 0 tx_bytes: description: | Bytes sent. Windows and Linux. type: "integer" format: "uint64" example: 1200 tx_packets: description: | Packets sent. Windows and Linux. type: "integer" format: "uint64" example: 12 tx_errors: description: | Sent errors. Not used on Windows. This field is Linux-specific and always zero for Windows containers. type: "integer" format: "uint64" example: 0 tx_dropped: description: | Outgoing packets dropped. Windows and Linux. type: "integer" format: "uint64" example: 0 endpoint_id: description: | Endpoint ID. Not used on Linux. This field is Windows-specific and omitted for Linux containers. type: "string" x-nullable: true instance_id: description: | Instance ID. Not used on Linux. This field is Windows-specific and omitted for Linux containers. type: "string" x-nullable: true ContainerStorageStats: description: | StorageStats is the disk I/O stats for read/write on Windows. This type is Windows-specific and omitted for Linux containers. 
type: "object" x-go-name: "StorageStats" x-nullable: true properties: read_count_normalized: type: "integer" format: "uint64" x-nullable: true example: 7593984 read_size_bytes: type: "integer" format: "uint64" x-nullable: true example: 7593984 write_count_normalized: type: "integer" format: "uint64" x-nullable: true example: 7593984 write_size_bytes: type: "integer" format: "uint64" x-nullable: true example: 7593984 ContainerTopResponse: type: "object" x-go-name: "TopResponse" title: "ContainerTopResponse" description: |- Container "top" response. properties: Titles: description: "The ps column titles" type: "array" items: type: "string" example: Titles: - "UID" - "PID" - "PPID" - "C" - "STIME" - "TTY" - "TIME" - "CMD" Processes: description: |- Each process running in the container, where each process is an array of values corresponding to the titles. type: "array" items: type: "array" items: type: "string" example: Processes: - - "root" - "13642" - "882" - "0" - "17:03" - "pts/0" - "00:00:00" - "/bin/bash" - - "root" - "13735" - "13642" - "0" - "17:06" - "pts/0" - "00:00:00" - "sleep 10" ContainerWaitResponse: description: "OK response to ContainerWait operation" type: "object" x-go-name: "WaitResponse" title: "ContainerWaitResponse" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" format: "int64" x-nullable: false Error: $ref: "#/definitions/ContainerWaitExitError" ContainerWaitExitError: description: "container waiting error, if any" type: "object" x-go-name: "WaitExitError" properties: Message: description: "Details of an error" type: "string" SystemVersion: type: "object" description: | Response of Engine API: GET "/version" properties: Platform: type: "object" required: [Name] properties: Name: type: "string" Components: type: "array" description: | Information about system components items: type: "object" x-go-name: ComponentVersion required: [Name, Version] properties: Name: description: | Name of the 
component type: "string" example: "Engine" Version: description: | Version of the component type: "string" x-nullable: false example: "27.0.1" Details: description: | Key/value pairs of strings with additional information about the component. These values are intended for informational purposes only, and their content is not defined, and not part of the API specification. These messages can be printed by the client as information to the user. type: "object" x-nullable: true Version: description: "The version of the daemon" type: "string" example: "27.0.1" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" example: "1.47" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" example: "1.24" GitCommit: description: | The Git commit of the source code that was used to build the daemon type: "string" example: "48a66213fe" GoVersion: description: | The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" example: "go1.22.7" Os: description: | The operating system that the daemon is running on ("linux" or "windows") type: "string" example: "linux" Arch: description: | Architecture of the daemon, as returned by the Go runtime (`GOARCH`). A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "amd64" KernelVersion: description: | The kernel version (`uname -r`) that the daemon is running on. This field is omitted when empty. type: "string" example: "6.8.0-31-generic" Experimental: description: | Indicates if the daemon is started with experimental features enabled. This field is omitted when empty / false. type: "boolean" example: true BuildTime: description: | The date and time that the daemon was compiled. 
type: "string" example: "2020-06-22T15:49:27.000000000+00:00" SystemInfo: type: "object" properties: ID: description: | Unique identifier of the daemon. <p><br /></p> > **Note**: The format of the ID itself is not part of the API, and > should not be considered stable. type: "string" example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" Containers: description: "Total number of containers on the host." type: "integer" example: 14 ContainersRunning: description: | Number of containers with status `"running"`. type: "integer" example: 3 ContainersPaused: description: | Number of containers with status `"paused"`. type: "integer" example: 1 ContainersStopped: description: | Number of containers with status `"stopped"`. type: "integer" example: 10 Images: description: | Total number of images on the host. Both _tagged_ and _untagged_ (dangling) images are counted. type: "integer" example: 508 Driver: description: "Name of the storage driver in use." type: "string" example: "overlay2" DriverStatus: description: | Information specific to the storage driver, provided as "label" / "value" pairs. This information is provided by the storage driver, and formatted in a way consistent with the output of `docker info` on the command line. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "array" items: type: "array" items: type: "string" example: - ["Backing Filesystem", "extfs"] - ["Supports d_type", "true"] - ["Native Overlay Diff", "true"] DockerRootDir: description: | Root directory of persistent Docker state. Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` on Windows. type: "string" example: "/var/lib/docker" Plugins: $ref: "#/definitions/PluginsInfo" MemoryLimit: description: "Indicates if the host has memory limit support enabled." 
type: "boolean" example: true SwapLimit: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true KernelMemoryTCP: description: | Indicates if the host has kernel memory TCP limit support enabled. This field is omitted if not supported. Kernel memory TCP limits are not supported when using cgroups v2, which does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. type: "boolean" example: true CpuCfsPeriod: description: | Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host. type: "boolean" example: true CpuCfsQuota: description: | Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host. type: "boolean" example: true CPUShares: description: | Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: description: | Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) type: "boolean" example: true PidsLimit: description: "Indicates if the host kernel has PID limit support enabled." type: "boolean" example: true OomKillDisable: description: "Indicates if OOM killer disable is supported on the host." type: "boolean" IPv4Forwarding: description: "Indicates IPv4 forwarding is enabled." type: "boolean" example: true BridgeNfIptables: description: | Indicates if `bridge-nf-call-iptables` is available on the host when the daemon was started. <p><br /></p> > **Deprecated**: netfilter module is now loaded on-demand and no longer > during daemon startup, making this field obsolete. This field is always > `false` and will be removed in API v1.50. type: "boolean" example: false BridgeNfIp6tables: description: | Indicates if `bridge-nf-call-ip6tables` is available on the host. <p><br /></p> > **Deprecated**: netfilter module is now loaded on-demand, and no longer > during daemon startup, making this field obsolete. 
This field is always > `false` and will be removed in API v1.50. type: "boolean" example: false Debug: description: | Indicates if the daemon is running in debug-mode / with debug-level logging enabled. type: "boolean" example: true NFd: description: | The total number of file Descriptors in use by the daemon process. This information is only returned if debug-mode is enabled. type: "integer" example: 64 NGoroutines: description: | The number of goroutines that currently exist. This information is only returned if debug-mode is enabled. type: "integer" example: 174 SystemTime: description: | Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" example: "2017-08-08T20:28:29.06202363Z" LoggingDriver: description: | The logging driver to use as a default for new containers. type: "string" CgroupDriver: description: | The driver to use for managing cgroups. type: "string" enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" CgroupVersion: description: | The version of the cgroup. type: "string" enum: ["1", "2"] default: "1" example: "1" NEventsListener: description: "Number of event listeners subscribed." type: "integer" example: 30 KernelVersion: description: | Kernel version of the host. On Linux, this information is obtained from `uname`. On Windows this information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd> registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. 
type: "string" example: "6.8.0-31-generic" OperatingSystem: description: | Name of the host's operating system, for example: "Ubuntu 24.04 LTS" or "Windows Server 2016 Datacenter" type: "string" example: "Ubuntu 24.04 LTS" OSVersion: description: | Version of the host's operating system <p><br /></p> > **Note**: The information returned in this field, including its > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" example: "24.04" OSType: description: | Generic type of the operating system of the host, as returned by the Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "linux" Architecture: description: | Hardware architecture of the host, as returned by the operating system. This is equivalent to the output of `uname -m` on Linux. Unlike `Arch` (from `/version`), this reports the machine's native architecture, which can differ from the Go runtime architecture when running a binary compiled for a different architecture (for example, a 32-bit binary running on 64-bit hardware). type: "string" example: "x86_64" NCPU: description: | The number of logical CPUs usable by the daemon. The number of available CPUs is checked by querying the operating system when the daemon starts. Changes to operating system CPU allocation after the daemon is started are not reflected. type: "integer" example: 4 MemTotal: description: | Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 IndexServerAddress: description: | Address / URL of the index server that is used for image search, and as a default for user authentication for Docker Hub and Docker Cloud. 
default: "https://index.docker.io/v1/" type: "string" example: "https://index.docker.io/v1/" RegistryConfig: $ref: "#/definitions/RegistryServiceConfig" GenericResources: $ref: "#/definitions/GenericResources" HttpProxy: description: | HTTP-proxy configured for the daemon. This value is obtained from the [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" HttpsProxy: description: | HTTPS-proxy configured for the daemon. This value is obtained from the [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL are masked in the API response. Containers do not automatically inherit this configuration. type: "string" example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" NoProxy: description: | Comma-separated list of domain extensions for which no proxy should be used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. Containers do not automatically inherit this configuration. type: "string" example: "*.local, 169.254/16" Name: description: "Hostname of the host." type: "string" example: "node5.corp.example.com" Labels: description: | User-defined labels (key/value metadata) as set on the daemon. <p><br /></p> > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, > set through the daemon configuration, and _node_ labels, set from a > manager node in the Swarm. Node labels are not included in this > field. Node labels can be retrieved using the `/nodes/(id)` endpoint > on a manager node in the Swarm. 
type: "array" items: type: "string" example: ["storage=ssd", "production"] ExperimentalBuild: description: | Indicates if experimental features are enabled on the daemon. type: "boolean" example: true ServerVersion: description: | Version string of the daemon. type: "string" example: "27.0.1" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) runtimes configured on the daemon. Keys hold the "name" used to reference the runtime. The Docker daemon relies on an OCI compliant runtime (invoked via the `containerd` daemon) as its interface to the Linux kernel namespaces, cgroups, and SELinux. The default runtime is `runc`, and automatically configured. Additional runtimes can be configured by the user and will be listed here. type: "object" additionalProperties: $ref: "#/definitions/Runtime" default: runc: path: "runc" example: runc: path: "runc" runc-master: path: "/go/bin/runc" custom: path: "/usr/local/bin/my-oci-runtime" runtimeArgs: ["--debug", "--systemd-cgroup=false"] DefaultRuntime: description: | Name of the default OCI runtime that is used when starting containers. The default can be overridden per-container at create time. type: "string" default: "runc" example: "runc" Swarm: $ref: "#/definitions/SwarmInfo" LiveRestoreEnabled: description: | Indicates if live restore is enabled. If enabled, containers are kept running when the daemon is shutdown or upon daemon start if running containers are detected. type: "boolean" default: false example: false Isolation: description: | Represents the isolation technology to use as a default for containers. The supported values are platform-specific. If no isolation value is specified on daemon start, on Windows client, the default is `hyperv`, and on Windows server, the default is `process`. This option is currently not used on other platforms. 
default: "default" type: "string" enum: - "default" - "hyperv" - "process" - "" InitBinary: description: | Name and, optional, path of the `docker-init` binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "docker-init" ContainerdCommit: $ref: "#/definitions/Commit" RuncCommit: $ref: "#/definitions/Commit" InitCommit: $ref: "#/definitions/Commit" SecurityOptions: description: | List of security features that are enabled on the daemon, such as apparmor, seccomp, SELinux, user-namespaces (userns), rootless and no-new-privileges. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value pairs. type: "array" items: type: "string" example: - "name=apparmor" - "name=seccomp,profile=default" - "name=selinux" - "name=userns" - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. If a commercial license has been applied to the daemon, information such as number of nodes, and expiration are included. type: "string" example: "Community Engine" DefaultAddressPools: description: | List of custom default address pools for local networks, which can be specified in the daemon.json file or dockerd option. Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 10.10.[0-255].0/24 address pools. type: "array" items: type: "object" properties: Base: description: "The network address in CIDR format" type: "string" example: "10.10.0.0/16" Size: description: "The network pool size" type: "integer" example: "24" Warnings: description: | List of warnings / informational messages about missing features, or issues related to the daemon configuration. These messages can be printed by the client as information to the user. 
type: "array" items: type: "string" example: - "WARNING: No memory limit support" CDISpecDirs: description: | List of directories where (Container Device Interface) CDI specifications are located. These specifications define vendor-specific modifications to an OCI runtime specification for a container being created. An empty list indicates that CDI device injection is disabled. Note that using CDI device injection requires the daemon to have experimental features enabled. For non-experimental daemons an empty list will always be returned. type: "array" items: type: "string" example: - "/etc/cdi" - "/var/run/cdi" Containerd: $ref: "#/definitions/ContainerdInfo" ContainerdInfo: description: | Information for connecting to the containerd instance that is used by the daemon. This is included for debugging purposes only. type: "object" x-nullable: true properties: Address: description: "The address of the containerd socket." type: "string" example: "/run/containerd/containerd.sock" Namespaces: description: | The namespaces that the daemon uses for running containers and plugins in containerd. These namespaces can be configured in the daemon configuration, and are considered to be used exclusively by the daemon. Tampering with the containerd instance may cause unexpected behavior. As these namespaces are considered to be exclusively accessed by the daemon, it is not recommended to change these values, or to change them to a value that is used by other systems, such as cri-containerd. type: "object" properties: Containers: description: | The default containerd namespace used for containers managed by the daemon. The default namespace for containers is "moby", but will be suffixed with the `<uid>.<gid>` of the remapped `root` if user-namespaces are enabled and the containerd image-store is used. type: "string" default: "moby" example: "moby" Plugins: description: | The default containerd namespace used for plugins managed by the daemon. 
The default namespace for plugins is "plugins.moby", but will be suffixed with the `<uid>.<gid>` of the remapped `root` if user-namespaces are enabled and the containerd image-store is used. type: "string" default: "plugins.moby" example: "plugins.moby" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: description: | Available plugins per type. <p><br /></p> > **Note**: Only unmanaged (V1) plugins are included in this list. > V1 plugins are "lazily" loaded, and are not returned in this list > if there is no resource using the plugin. type: "object" properties: Volume: description: "Names of available volume-drivers, and volume-driver plugins." type: "array" items: type: "string" example: ["local"] Network: description: "Names of available network-drivers, and network-driver plugins." type: "array" items: type: "string" example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] Authorization: description: "Names of available authorization plugins." type: "array" items: type: "string" example: ["img-authz-plugin", "hbm"] Log: description: "Names of available logging-drivers, and logging-driver plugins." type: "array" items: type: "string" example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "splunk", "syslog"] RegistryServiceConfig: description: | RegistryServiceConfig stores daemon registry services configuration. type: "object" x-nullable: true properties: AllowNondistributableArtifactsCIDRs: description: | List of IP ranges to which nondistributable artifacts can be pushed, using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632). <p><br /></p> > **Deprecated**: Pushing nondistributable artifacts is now always enabled > and this field is always `null`. This field will be removed in API v1.49. 
type: "array" items: type: "string" example: [] AllowNondistributableArtifactsHostnames: description: | List of registry hostnames to which nondistributable artifacts can be pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. <p><br /></p> > **Deprecated**: Pushing nondistributable artifacts is now always enabled > and this field is always `null`. This field will be removed in API v1.49. type: "array" items: type: "string" example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. By default, local registries (`::1/128` and `127.0.0.0/8`) are configured as insecure. All other registries are secure. Communicating with an insecure registry is not possible if the daemon assumes that registry is secure. This configuration overrides this behavior, allowing insecure communication with registries whose resolved IP address is within the subnet described by the CIDR syntax. Registries can also be marked insecure by hostname. Those registries are listed under `IndexConfigs` and have their `Secure` field set to `false`. > **Warning**: Using this option can be useful when running a local > registry, but introduces security vulnerabilities. This option > should therefore ONLY be used for testing purposes. For increased > security, users should add their CA to their system's list of trusted > CAs instead of enabling this option. 
type: "array" items: type: "string" example: ["::1/128", "127.0.0.0/8"] IndexConfigs: type: "object" additionalProperties: $ref: "#/definitions/IndexInfo" example: "127.0.0.1:5000": "Name": "127.0.0.1:5000" "Mirrors": [] "Secure": false "Official": false "[2001:db8:a0b:12f0::1]:80": "Name": "[2001:db8:a0b:12f0::1]:80" "Mirrors": [] "Secure": false "Official": false "docker.io": Name: "docker.io" Mirrors: ["https://hub-mirror.corp.example.com:5000/"] Secure: true Official: true "registry.internal.corp.example.com:3000": Name: "registry.internal.corp.example.com:3000" Mirrors: [] Secure: false Official: false Mirrors: description: | List of registry URLs that act as a mirror for the official (`docker.io`) registry. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://[2001:db8:a0b:12f0::1]/" IndexInfo: description: IndexInfo contains information about a registry. type: "object" x-nullable: true properties: Name: description: | Name of the registry, such as "docker.io". type: "string" example: "docker.io" Mirrors: description: | List of mirrors, expressed as URIs. type: "array" items: type: "string" example: - "https://hub-mirror.corp.example.com:5000/" - "https://registry-2.docker.io/" - "https://registry-3.docker.io/" Secure: description: | Indicates if the registry is part of the list of insecure registries. If `false`, the registry is insecure. Insecure registries accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from unknown CAs) communication. > **Warning**: Insecure registries can be useful when running a local > registry. However, because its use creates security vulnerabilities > it should ONLY be enabled for testing purposes. For increased > security, users should add their CA to their system's list of > trusted CAs instead of enabling this option. 
type: "boolean" example: true Official: description: | Indicates whether this is an official registry (i.e., Docker Hub / docker.io) type: "boolean" example: true Runtime: description: | Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) runtime. The runtime is invoked by the daemon via the `containerd` daemon. OCI runtimes act as an interface to the Linux kernel namespaces, cgroups, and SELinux. type: "object" properties: path: description: | Name and, optional, path, of the OCI executable binary. If the path is omitted, the daemon searches the host's `$PATH` for the binary and uses the first result. type: "string" example: "/usr/local/bin/my-oci-runtime" runtimeArgs: description: | List of command-line arguments to pass to the runtime when invoked. type: "array" x-nullable: true items: type: "string" example: ["--debug", "--systemd-cgroup=false"] status: description: | Information specific to the runtime. While this API specification does not define data provided by runtimes, the following well-known properties may be provided by runtimes: `org.opencontainers.runtime-spec.features`: features structure as defined in the [OCI Runtime Specification](https://github.com/opencontainers/runtime-spec/blob/main/features.md), in a JSON string representation. <p><br /></p> > **Note**: The information returned in this field, including the > formatting of values and labels, should not be considered stable, > and may change without notice. type: "object" x-nullable: true additionalProperties: type: "string" example: "org.opencontainers.runtime-spec.features": "{\"ociVersionMin\":\"1.0.0\",\"ociVersionMax\":\"1.1.0\",\"...\":\"...\"}" Commit: description: | Commit holds the Git-commit (SHA1) that a binary was built from, as reported in the version-string of external tools, such as `containerd`, or `runC`. type: "object" properties: ID: description: "Actual commit ID of external tool." 
type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" Expected: description: | Commit ID of external tool expected by dockerd as set at build time. **Deprecated**: This field is deprecated and will be omitted in API v1.49. type: "string" example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | Represents generic information about swarm. type: "object" properties: NodeID: description: "Unique identifier for this node in the swarm." type: "string" default: "" example: "k67qz4598weg5unwwffg6z1m1" NodeAddr: description: | IP address at which this node can be reached by other nodes in the swarm. type: "string" default: "" example: "10.0.0.46" LocalNodeState: $ref: "#/definitions/LocalNodeState" ControlAvailable: type: "boolean" default: false example: true Error: type: "string" default: "" RemoteManagers: description: | List of IDs and addresses of other managers in the swarm. type: "array" default: null x-nullable: true items: $ref: "#/definitions/PeerNode" example: - NodeID: "71izy0goik036k48jg985xnds" Addr: "10.0.0.158:2377" - NodeID: "79y6h1o4gv8n120drcprv5nmc" Addr: "10.0.0.159:2377" - NodeID: "k67qz4598weg5unwwffg6z1m1" Addr: "10.0.0.46:2377" Nodes: description: "Total number of nodes in the swarm." type: "integer" x-nullable: true example: 4 Managers: description: "Total number of managers in the swarm." type: "integer" x-nullable: true example: 3 Cluster: $ref: "#/definitions/ClusterInfo" LocalNodeState: description: "Current local status of this node." type: "string" default: "" enum: - "" - "inactive" - "pending" - "active" - "error" - "locked" example: "active" PeerNode: description: "Represents a peer-node in the swarm" type: "object" properties: NodeID: description: "Unique identifier for this node in the swarm." type: "string" Addr: description: | IP address and ports at which this node can be reached. 
type: "string" NetworkAttachmentConfig: description: | Specifies how a service should be attached to a particular network. type: "object" properties: Target: description: | The target network for attachment. Must be a network name or ID. type: "string" Aliases: description: | Discoverable alternate names for the service on this network. type: "array" items: type: "string" DriverOpts: description: | Driver attachment options for the network target. type: "object" additionalProperties: type: "string" EventActor: description: | Actor describes something that generates events, like a container, network, or a volume. type: "object" properties: ID: description: "The ID of the object emitting the event" type: "string" example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" Attributes: description: | Various key/value attributes of the object, depending on its type. type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-label-value" image: "alpine:latest" name: "my-container" EventMessage: description: | EventMessage represents the information an event contains. type: "object" title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" type: "string" enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] example: "container" Action: description: "The type of event" type: "string" example: "create" Actor: $ref: "#/definitions/EventActor" scope: description: | Scope of the event. Engine events are `local` scope. Cluster (Swarm) events are `swarm` scope. 
type: "string" enum: ["local", "swarm"] time: description: "Timestamp of event" type: "integer" format: "int64" example: 1629574695 timeNano: description: "Timestamp of event, with nanosecond accuracy" type: "integer" format: "int64" example: 1629574695515050031 OCIDescriptor: type: "object" x-go-name: Descriptor description: | A descriptor struct containing digest, media type, and size, as defined in the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). properties: mediaType: description: | The media type of the object this schema refers to. type: "string" example: "application/vnd.oci.image.manifest.v1+json" digest: description: | The digest of the targeted content. type: "string" example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" size: description: | The size in bytes of the blob. type: "integer" format: "int64" example: 424 urls: description: |- List of URLs from which this object MAY be downloaded. type: "array" items: type: "string" format: "uri" x-nullable: true annotations: description: |- Arbitrary metadata relating to the targeted content. type: "object" x-nullable: true additionalProperties: type: "string" example: "com.docker.official-images.bashbrew.arch": "amd64" "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" "org.opencontainers.image.base.name": "scratch" "org.opencontainers.image.created": "2025-01-27T00:00:00Z" "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" "org.opencontainers.image.version": "24.04" data: type: string x-nullable: true description: |- Data is an embedding of the targeted content. This is encoded as a base64 string when marshalled to JSON (automatically, by encoding/json). 
If present, Data can be used directly to avoid fetching the targeted content. example: null platform: $ref: "#/definitions/OCIPlatform" artifactType: description: |- ArtifactType is the IANA media type of this artifact. type: "string" x-nullable: true example: null OCIPlatform: type: "object" x-go-name: Platform x-nullable: true description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). properties: architecture: description: | The CPU architecture, for example `amd64` or `ppc64`. type: "string" example: "arm" os: description: | The operating system, for example `linux` or `windows`. type: "string" example: "windows" os.version: description: | Optional field specifying the operating system version, for example on Windows `10.0.19041.1165`. type: "string" example: "10.0.19041.1165" os.features: description: | Optional field specifying an array of strings, each listing a required OS feature (for example on Windows `win32k`). type: "array" items: type: "string" example: - "win32k" variant: description: | Optional field specifying a variant of the CPU, for example `v7` to specify ARMv7 when architecture is `arm`. type: "string" example: "v7" DistributionInspect: type: "object" x-go-name: DistributionInspect title: "DistributionInspectResponse" required: [Descriptor, Platforms] description: | Describes the result obtained from contacting the registry to retrieve image metadata. properties: Descriptor: $ref: "#/definitions/OCIDescriptor" Platforms: type: "array" description: | An array containing all platforms supported by the image. items: $ref: "#/definitions/OCIPlatform" ClusterVolume: type: "object" description: | Options and information specific to, and only present on, Swarm CSI cluster volumes. properties: ID: type: "string" description: | The Swarm ID of this volume. 
Because cluster volumes are Swarm objects, they have an ID, unlike non-cluster volumes. This ID can be used to refer to the Volume instead of the name. Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" UpdatedAt: type: "string" format: "dateTime" Spec: $ref: "#/definitions/ClusterVolumeSpec" Info: type: "object" description: | Information about the global status of the volume. properties: CapacityBytes: type: "integer" format: "int64" description: | The capacity of the volume in bytes. A value of 0 indicates that the capacity is unknown. VolumeContext: type: "object" description: | A map of strings to strings returned from the storage plugin when the volume is created. additionalProperties: type: "string" VolumeID: type: "string" description: | The ID of the volume as returned by the CSI storage plugin. This is distinct from the volume's ID as provided by Docker. This ID is never used by the user when communicating with Docker to refer to this volume. If the ID is blank, then the Volume has not been successfully created in the plugin yet. AccessibleTopology: type: "array" description: | The topology this volume is actually accessible from. items: $ref: "#/definitions/Topology" PublishStatus: type: "array" description: | The status of the volume as it pertains to its publishing and use on specific nodes items: type: "object" properties: NodeID: type: "string" description: | The ID of the Swarm node the volume is published on. State: type: "string" description: | The published state of the volume. * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. * `published` The volume is published successfully to the node. * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. 
* `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. enum: - "pending-publish" - "published" - "pending-node-unpublish" - "pending-controller-unpublish" PublishContext: type: "object" description: | A map of strings to strings returned by the CSI controller plugin when a volume is published. additionalProperties: type: "string" ClusterVolumeSpec: type: "object" description: | Cluster-specific options used to create the volume. properties: Group: type: "string" description: | Group defines the volume group of this volume. Volumes belonging to the same group can be referred to by group name when creating Services. Referring to a volume by group instructs Swarm to treat volumes in that group interchangeably for the purpose of scheduling. Volumes with an empty string for a group technically all belong to the same, emptystring group. AccessMode: type: "object" description: | Defines how the volume is used by tasks. properties: Scope: type: "string" description: | The set of nodes this volume can be used on at one time. - `single` The volume may only be scheduled to one node at a time. - `multi` the volume may be scheduled to any supported number of nodes at a time. default: "single" enum: ["single", "multi"] x-nullable: false Sharing: type: "string" description: | The number and way that different tasks can use this volume at one time. - `none` The volume may only be used by one task at a time. - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - `all` The volume may have any number of readers and writers. default: "none" enum: ["none", "readonly", "onewriter", "all"] x-nullable: false MountVolume: type: "object" description: | Options for using this volume as a Mount-type volume. 
Either MountVolume or BlockVolume, but not both, must be present. properties: FsType: type: "string" description: | Specifies the filesystem type for the mount volume. Optional. MountFlags: type: "array" description: | Flags to pass when mounting the volume. Optional. items: type: "string" BlockVolume: type: "object" description: | Options for using this volume as a Block-type volume. Intentionally empty. Secrets: type: "array" description: | Swarm Secrets that are passed to the CSI storage plugin when operating on this volume. items: type: "object" description: | One cluster volume secret entry. Defines a key-value pair that is passed to the plugin. properties: Key: type: "string" description: | Key is the name of the key of the key-value pair passed to the plugin. Secret: type: "string" description: | Secret is the swarm Secret object from which to read data. This can be a Secret name or ID. The Secret data is retrieved by swarm and used as the value of the key-value pair passed to the plugin. AccessibilityRequirements: type: "object" description: | Requirements for the accessible topology of the volume. These fields are optional. For an in-depth description of what these fields mean, see the CSI specification. properties: Requisite: type: "array" description: | A list of required topologies, at least one of which the volume must be accessible from. items: $ref: "#/definitions/Topology" Preferred: type: "array" description: | A list of topologies that the volume should attempt to be provisioned in. items: $ref: "#/definitions/Topology" CapacityRange: type: "object" description: | The desired capacity that the volume should be created with. If empty, the plugin will decide the capacity. properties: RequiredBytes: type: "integer" format: "int64" description: | The volume must be at least this big. The value of 0 indicates an unspecified minimum LimitBytes: type: "integer" format: "int64" description: | The volume must not be bigger than this. 
The value of 0 indicates an unspecified maximum. Availability: type: "string" description: | The availability of the volume for use in tasks. - `active` The volume is fully available for scheduling on the cluster - `pause` No new workloads should use the volume, but existing workloads are not stopped. - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. default: "active" x-nullable: false enum: - "active" - "pause" - "drain" Topology: description: | A map of topological domains to topological segments. For in depth details, see documentation for the Topology object in the CSI specification. type: "object" additionalProperties: type: "string" ImageManifestSummary: x-go-name: "ManifestSummary" description: | ImageManifestSummary represents a summary of an image manifest. type: "object" required: ["ID", "Descriptor", "Available", "Size", "Kind"] properties: ID: description: | ID is the content-addressable ID of an image and is the same as the digest of the image manifest. type: "string" example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" Descriptor: $ref: "#/definitions/OCIDescriptor" Available: description: Indicates whether all the child content (image config, layers) is fully available locally. type: "boolean" example: true Size: type: "object" x-nullable: false required: ["Content", "Total"] properties: Total: type: "integer" format: "int64" example: 8213251 description: | Total is the total size (in bytes) of all the locally present data (both distributable and non-distributable) that's related to this manifest and its children. This equal to the sum of [Content] size AND all the sizes in the [Size] struct present in the Kind-specific data struct. For example, for an image kind (Kind == "image") this would include the size of the image content and unpacked image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). 
Content: description: | Content is the size (in bytes) of all the locally present content in the content store (e.g. image config, layers) referenced by this manifest and its children. This only includes blobs in the content store. type: "integer" format: "int64" example: 3987495 Kind: type: "string" example: "image" enum: - "image" - "attestation" - "unknown" description: | The kind of the manifest. kind | description -------------|----------------------------------------------------------- image | Image manifest that can be used to start a container. attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. ImageData: description: | The image data for the image manifest. This field is only populated when Kind is "image". type: "object" x-nullable: true x-omitempty: true required: ["Platform", "Containers", "Size", "UnpackedSize"] properties: Platform: $ref: "#/definitions/OCIPlatform" description: | OCI platform of the image. This will be the platform specified in the manifest descriptor from the index/manifest list. If it's not available, it will be obtained from the image config. Containers: description: | The IDs of the containers that are using this image. type: "array" items: type: "string" example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] Size: type: "object" x-nullable: false required: ["Unpacked"] properties: Unpacked: type: "integer" format: "int64" example: 3987495 description: | Unpacked is the size (in bytes) of the locally unpacked (uncompressed) image content that's directly usable by the containers running this image. It's independent of the distributable content - e.g. the image might still have an unpacked data that's still used by some container even when the distributable/compressed content is already gone. AttestationData: description: | The image data for the attestation manifest. 
This field is only populated when Kind is "attestation". type: "object" x-nullable: true x-omitempty: true required: ["For"] properties: For: description: | The digest of the image manifest that this attestation is for. type: "string" example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" paths: /containers/json: get: summary: "List containers" description: | Returns a list of containers. For details on the format, see the [inspect endpoint](#operation/ContainerInspect). Note that it uses a different, smaller representation of a container than inspecting a single container. For example, the list of linked containers is not propagated. operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" description: | Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" description: | Return this number of most recently created containers, including non-running ones. type: "integer" - name: "size" in: "query" description: | Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. 
Available filters: - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) - `before`=(`<container id>` or `<container name>`) - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `exited=<int>` containers with exit code of `<int>` - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - `id=<ID>` a container's ID - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - `is-task=`(`true`|`false`) - `label=key` or `label="key=value"` of a container label - `name=<name>` a container's name - `network`=(`<network id>` or `<network name>`) - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) - `since`=(`<container id>` or `<container name>`) - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - `volume`=(`<volume name>` or `<mount point destination>`) type: "string" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/ContainerSummary" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/create: post: summary: "Create a container" operationId: "ContainerCreate" consumes: - "application/json" - "application/octet-stream" produces: - "application/json" parameters: - name: "name" in: "query" description: | Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "platform" in: "query" description: | Platform in the format `os[/arch[/variant]]` used for image lookup. When specified, the daemon checks if the requested image is present in the local image cache with the given OS and Architecture, and otherwise returns a `404` status. If the option is not set, the host's native OS and Architecture are used to look up the image in the image cache. 
However, if no platform is passed and the given image does exist in the local image cache, but its OS or architecture does not match, the container is created with the available image, and a warning is added to the `Warnings` field in the response, for example; WARNING: The requested image's platform (linux/arm64/v8) does not match the detected host platform (linux/amd64) and no specific platform was requested type: "string" default: "" - name: "body" in: "body" description: "Container to create" schema: allOf: - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" User: "" AttachStdin: false AttachStdout: true AttachStderr: true Tty: false OpenStdin: false StdinOnce: false Env: - "FOO=bar" - "BAZ=quux" Cmd: - "date" Entrypoint: "" Image: "ubuntu" Labels: com.example.vendor: "Acme" com.example.license: "GPL" com.example.version: "1.0" Volumes: /volumes/data: {} WorkingDir: "" NetworkDisabled: false MacAddress: "12:34:56:78:9a:bc" ExposedPorts: 22/tcp: {} StopSignal: "SIGTERM" StopTimeout: 10 HostConfig: Binds: - "/tmp:/tmp" Links: - "redis3:redis" Memory: 0 MemorySwap: 0 MemoryReservation: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 CpuPeriod: 100000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpuQuota: 50000 CpusetCpus: "0,1" CpusetMems: "0,1" MaximumIOps: 0 MaximumIOBps: 0 BlkioWeight: 300 BlkioWeightDevice: - {} BlkioDeviceReadBps: - {} BlkioDeviceReadIOps: - {} BlkioDeviceWriteBps: - {} BlkioDeviceWriteIOps: - {} DeviceRequests: - Driver: "nvidia" Count: -1 DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] Capabilities: [["gpu", "nvidia", "compute"]] Options: property1: "string" property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 PidMode: "" PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" PublishAllPorts: false Privileged: false ReadonlyRootfs: 
false Dns: - "8.8.8.8" DnsOptions: - "" DnsSearch: - "" VolumesFrom: - "parent" - "other:ro" CapAdd: - "NET_ADMIN" CapDrop: - "MKNOD" GroupAdd: - "newgroup" RestartPolicy: Name: "" MaximumRetryCount: 0 AutoRemove: true NetworkMode: "bridge" Devices: [] Ulimits: - {} LogConfig: Type: "json-file" Config: {} SecurityOpt: [] StorageOpt: {} CgroupParent: "" VolumeDriver: "" ShmSize: 67108864 NetworkingConfig: EndpointsConfig: isolated_nw: IPAMConfig: IPv4Address: "172.20.30.33" IPv6Address: "2001:db8:abcd::3033" LinkLocalIPs: - "169.254.34.68" - "fe80::3468" Links: - "container_1" - "container_2" Aliases: - "server_x" - "server_y" database_nw: {} required: true responses: 201: description: "Container created successfully" schema: $ref: "#/definitions/ContainerCreateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /containers/{id}/json: get: summary: "Inspect a container" description: "Return low-level information about a container." 
operationId: "ContainerInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/ContainerInspectResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "size" in: "query" type: "boolean" default: false description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" tags: ["Container"] /containers/{id}/top: get: summary: "List processes running inside a container" description: | On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows. operationId: "ContainerTop" responses: 200: description: "no error" schema: $ref: "#/definitions/ContainerTopResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "ps_args" in: "query" description: "The arguments to pass to `ps`. For example, `aux`" type: "string" default: "-ef" tags: ["Container"] /containers/{id}/logs: get: summary: "Get container logs" description: | Get `stdout` and `stderr` logs from a container. Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. produces: - "application/vnd.docker.raw-stream" - "application/vnd.docker.multiplexed-stream" operationId: "ContainerLogs" responses: 200: description: | logs returned as a stream in response body. For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not set Content-Type. schema: type: "string" format: "binary" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "follow" in: "query" description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "until" in: "query" description: "Only return logs before this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] /containers/{id}/changes: get: summary: "Get changes on a container’s filesystem" description: | Returns which files in a container's filesystem have been added, deleted, or modified. 
The `Kind` of modification can be one of: - `0`: Modified ("C") - `1`: Added ("A") - `2`: Deleted ("D") operationId: "ContainerChanges" produces: ["application/json"] responses: 200: description: "The list of changes" schema: type: "array" items: $ref: "#/definitions/FilesystemChange" examples: application/json: - Path: "/dev" Kind: 0 - Path: "/dev/kmsg" Kind: 1 - Path: "/test" Kind: 1 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/export: get: summary: "Export a container" description: "Export the contents of a container as a tarball." operationId: "ContainerExport" produces: - "application/octet-stream" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/stats: get: summary: "Get container stats based on resource usage" description: | This endpoint returns a live stream of a container’s resource usage statistics. The `precpu_stats` is the CPU statistic of the *previous* read, and is used to calculate the CPU usage percentage. It is not an exact copy of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. 
On a cgroup v2 host, the following fields are not set * `blkio_stats`: all fields other than `io_service_bytes_recursive` * `cpu_stats`: `cpu_usage.percpu_usage` * `memory_stats`: `max_usage` and `failcnt` Also, `memory_stats.stats` fields are incompatible with cgroup v1. To calculate the values shown by the `stats` command of the docker cli tool the following formulas can be used: * used_memory = `memory_stats.usage - memory_stats.stats.cache` (cgroups v1) * used_memory = `memory_stats.usage - memory_stats.stats.inactive_file` (cgroups v2) * available_memory = `memory_stats.limit` * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ContainerStatsResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "stream" in: "query" description: | Stream the output. If false, the stats will be output once and then it will disconnect. type: "boolean" default: true - name: "one-shot" in: "query" description: | Only get a single stat instead of waiting for 2 cycles. Must be used with `stream=false`. type: "boolean" default: false tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" description: "Resize the TTY for a container." 
operationId: "ContainerResize" consumes: - "application/octet-stream" produces: - "text/plain" responses: 200: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "cannot resize container" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "h" in: "query" required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: post: summary: "Start a container" operationId: "ContainerStart" responses: 204: description: "no error" 304: description: "container already started" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. 
type: "string" tags: ["Container"] /containers/{id}/stop: post: summary: "Stop a container" operationId: "ContainerStop" responses: 204: description: "no error" 304: description: "container already stopped" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: | Signal to send to the container as an integer or string (e.g. `SIGINT`). type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/restart: post: summary: "Restart a container" operationId: "ContainerRestart" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: | Signal to send to the container as an integer or string (e.g. `SIGINT`). type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" type: "integer" tags: ["Container"] /containers/{id}/kill: post: summary: "Kill a container" description: | Send a POSIX signal to a container, defaulting to killing the container. 
operationId: "ContainerKill" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is not running" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "signal" in: "query" description: | Signal to send to the container as an integer or string (e.g. `SIGINT`). type: "string" default: "SIGKILL" tags: ["Container"] /containers/{id}/update: post: summary: "Update a container" description: | Change various configuration options of a container without having to recreate it. operationId: "ContainerUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "The container has been updated." 
schema: $ref: "#/definitions/ContainerUpdateResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "update" in: "body" required: true schema: allOf: - $ref: "#/definitions/Resources" - type: "object" properties: RestartPolicy: $ref: "#/definitions/RestartPolicy" example: BlkioWeight: 300 CpuShares: 512 CpuPeriod: 100000 CpuQuota: 50000 CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 CpusetCpus: "0,1" CpusetMems: "0" Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" tags: ["Container"] /containers/{id}/rename: post: summary: "Rename a container" operationId: "ContainerRename" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "name already in use" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "name" in: "query" required: true description: "New name for the container" type: "string" tags: ["Container"] /containers/{id}/pause: post: summary: "Pause a container" description: | Use the freezer cgroup to suspend all processes in a container. Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the freezer cgroup the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
operationId: "ContainerPause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/unpause: post: summary: "Unpause a container" description: "Resume a container which has been paused." operationId: "ContainerUnpause" responses: 204: description: "no error" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" tags: ["Container"] /containers/{id}/attach: post: summary: "Attach to a container" description: | Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. ### Hijacking This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. This is the response from the daemon for an attach request: ``` HTTP/1.1 200 OK Content-Type: application/vnd.docker.raw-stream [STREAM] ``` After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. 
For example, the client sends this request to upgrade the connection: ``` POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 Upgrade: tcp Connection: Upgrade ``` The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Content-Type: application/vnd.docker.raw-stream Connection: Upgrade Upgrade: tcp [STREAM] ``` ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream and the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). It is encoded on the first eight bytes like this: ```go header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} ``` `STREAM_TYPE` can be: - 0: `stdin` (is written on `stdout`) - 1: `stdout` - 2: `stderr` `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. The simplest way to implement this protocol is the following: 1. Read 8 bytes. 2. Choose `stdout` or `stderr` depending on the first byte. 3. Extract the frame size from the last four bytes. 4. Read the extracted size and output it on the correct output. 5. Goto 1. ### Stream format when using a TTY When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. 
operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" - "application/vnd.docker.multiplexed-stream" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" - name: "logs" in: "query" description: | Replay previous logs from the container. This is useful for attaching to a container that has started and you want to output everything since the container started. If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. type: "boolean" default: false - name: "stream" in: "query" description: | Stream attached streams from the time the request was made onwards. 
type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/attach/ws: get: summary: "Attach to a container via a websocket" operationId: "ContainerAttachWebsocket" responses: 101: description: "no error, hints proxy about hijacking" 200: description: "no error, no upgrade header found" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "detachKeys" in: "query" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" description: "Return logs" type: "boolean" default: false - name: "stream" in: "query" description: "Return stream" type: "boolean" default: false - name: "stdin" in: "query" description: "Attach to `stdin`" type: "boolean" default: false - name: "stdout" in: "query" description: "Attach to `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Attach to `stderr`" type: "boolean" default: false tags: ["Container"] /containers/{id}/wait: post: summary: "Wait for a container" description: "Block until a container stops, then returns the exit code." operationId: "ContainerWait" produces: ["application/json"] responses: 200: description: "The container has exited." 
schema: $ref: "#/definitions/ContainerWaitResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "condition" in: "query" description: | Wait until a container state reaches the given condition. Defaults to `not-running` if omitted or empty. type: "string" enum: - "not-running" - "next-exit" - "removed" default: "not-running" tags: ["Container"] /containers/{id}: delete: summary: "Remove a container" operationId: "ContainerDelete" responses: 204: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "conflict" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: | You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "v" in: "query" description: "Remove anonymous volumes associated with the container." type: "boolean" default: false - name: "force" in: "query" description: "If the container is running, kill it before removing it." type: "boolean" default: false - name: "link" in: "query" description: "Remove the specified link associated with the container." 
type: "boolean" default: false tags: ["Container"] /containers/{id}/archive: head: summary: "Get information about files in a container" description: | A response header `X-Docker-Container-Path-Stat` is returned, containing a base64-encoded JSON object with some filesystem header information about the path. operationId: "ContainerArchiveInfo" responses: 200: description: "no error" headers: X-Docker-Container-Path-Stat: type: "string" description: | A base64-encoded JSON object with some filesystem header information about the path 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." type: "string" tags: ["Container"] get: summary: "Get an archive of a filesystem resource in a container" description: "Get a tar archive of a resource in the filesystem of container id." operationId: "ContainerArchive" produces: ["application/x-tar"] responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Resource in the container’s filesystem to archive." 
type: "string" tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" description: | Upload a tar archive to be extracted to a path in the filesystem of container id. `path` parameter is asserted to be a directory. If it exists as a file, 400 error will be returned with message "not a directory". operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: 200: description: "The content was extracted successfully" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "not a directory" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such container or path does not exist inside the container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the container" type: "string" - name: "path" in: "query" required: true description: "Path to a directory in the container to extract the archive’s contents into. " type: "string" - name: "noOverwriteDirNonDir" in: "query" description: | If `1`, `true`, or `True` then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa. type: "string" - name: "copyUIDGID" in: "query" description: | If `1`, `true`, then it will copy UID/GID maps to the dest file or dir type: "string" - name: "inputStream" in: "body" required: true description: | The input stream must be a tar archive compressed with one of the following algorithms: `identity` (no compression), `gzip`, `bzip2`, or `xz`. 
schema: type: "string" format: "binary" tags: ["Container"] /containers/prune: post: summary: "Delete stopped containers" produces: - "application/json" operationId: "ContainerPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. type: "string" responses: 200: description: "No error" schema: type: "object" title: "ContainerPruneResponse" properties: ContainersDeleted: description: "Container IDs that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Container"] /images/json: get: summary: "List Images" description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." operationId: "ImageList" produces: - "application/json" responses: 200: description: "Summary image data for the images matching the query" schema: type: "array" items: $ref: "#/definitions/ImageSummary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "all" in: "query" description: "Show all images. Only images from a final layer (no children) are shown by default." type: "boolean" default: false - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters: - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `dangling=true` - `label=key` or `label="key=value"` of an image label - `reference`=(`<image-name>[:<tag>]`) - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) - `until=<timestamp>` type: "string" - name: "shared-size" in: "query" description: "Compute and show shared size as a `SharedSize` field on each image." type: "boolean" default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false - name: "manifests" in: "query" description: "Include `Manifests` in the image summary." type: "boolean" default: false tags: ["Image"] /build: post: summary: "Build an image" description: | Build an image from a tar archive with a `Dockerfile` in it. The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. The build is canceled if the client drops the connection by quitting or being killed. operationId: "ImageBuild" consumes: - "application/octet-stream" produces: - "application/json" parameters: - name: "inputStream" in: "body" description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." schema: type: "string" format: "binary" - name: "dockerfile" in: "query" description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." 
type: "string" default: "Dockerfile" - name: "t" in: "query" description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." type: "string" - name: "extrahosts" in: "query" description: "Extra hosts to add to /etc/hosts" type: "string" - name: "remote" in: "query" description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." type: "string" - name: "q" in: "query" description: "Suppress verbose build output." type: "boolean" default: false - name: "nocache" in: "query" description: "Do not use the cache when building the image." type: "boolean" default: false - name: "cachefrom" in: "query" description: "JSON array of images used for build cache resolution." type: "string" - name: "pull" in: "query" description: "Attempt to pull the image even if an older image exists locally." type: "string" - name: "rm" in: "query" description: "Remove intermediate containers after a successful build." type: "boolean" default: true - name: "forcerm" in: "query" description: "Always remove intermediate containers, even upon failure." type: "boolean" default: false - name: "memory" in: "query" description: "Set memory limit for build." type: "integer" - name: "memswap" in: "query" description: "Total memory (memory + swap). Set as `-1` to disable swap." type: "integer" - name: "cpushares" in: "query" description: "CPU shares (relative weight)." 
type: "integer" - name: "cpusetcpus" in: "query" description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." type: "string" - name: "cpuperiod" in: "query" description: "The length of a CPU period in microseconds." type: "integer" - name: "cpuquota" in: "query" description: "Microseconds of CPU time that the container can get in a CPU period." type: "integer" - name: "buildargs" in: "query" description: > JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) type: "string" - name: "shmsize" in: "query" description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." type: "integer" - name: "squash" in: "query" description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" type: "boolean" - name: "labels" in: "query" description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." type: "string" - name: "networkmode" in: "query" description: | Sets the networking mode for the run commands during build. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name or ID to which this container should connect to. 
type: "string" - name: "Content-type" in: "header" type: "string" enum: - "application/x-tar" default: "application/x-tar" - name: "X-Registry-Config" in: "header" description: | This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: ``` { "docker.example.com": { "username": "janedoe", "password": "hunter2" }, "https://index.docker.io/v1/": { "username": "mobydock", "password": "conta1n3rize14" } } ``` Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. type: "string" - name: "platform" in: "query" description: "Platform in the format os[/arch[/variant]]" type: "string" default: "" - name: "target" in: "query" description: "Target build stage" type: "string" default: "" - name: "outputs" in: "query" description: | BuildKit output configuration in the format of a stringified JSON array of objects. Each object must have two top-level properties: `Type` and `Attrs`. The `Type` property must be set to 'moby'. The `Attrs` property is a map of attributes for the BuildKit output configuration. See https://docs.docker.com/build/exporters/oci-docker/ for more information. Example: ``` [{"Type":"moby","Attrs":{"type":"image","force-compression":"true","compression":"zstd"}}] ``` type: "string" default: "" - name: "version" in: "query" type: "string" default: "1" enum: ["1", "2"] description: | Version of the builder backend to use. 
- `1` is the first generation classic (deprecated) builder in the Docker daemon (default) - `2` is [BuildKit](https://github.com/moby/buildkit) responses: 200: description: "no error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /build/prune: post: summary: "Delete builder cache" produces: - "application/json" operationId: "BuildPrune" parameters: - name: "keep-storage" in: "query" description: | Amount of disk space in bytes to keep for cache > **Deprecated**: This parameter is deprecated and has been renamed to "reserved-space". > It is kept for backward compatibility and will be removed in API v1.52. type: "integer" format: "int64" - name: "reserved-space" in: "query" description: "Amount of disk space in bytes to keep for cache" type: "integer" format: "int64" - name: "max-used-space" in: "query" description: "Maximum amount of disk space allowed to keep for cache" type: "integer" format: "int64" - name: "min-free-space" in: "query" description: "Target amount of free disk space after pruning" type: "integer" format: "int64" - name: "all" in: "query" type: "boolean" description: "Remove all types of build cache" - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: - `until=<timestamp>` remove cache older than `<timestamp>`. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon's local time. 
- `id=<id>` - `parent=<id>` - `type=<string>` - `description=<string>` - `inuse` - `shared` - `private` responses: 200: description: "No error" schema: type: "object" title: "BuildPruneResponse" properties: CachesDeleted: type: "array" items: description: "ID of build cache object" type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /images/create: post: summary: "Create an image" description: "Pull or import an image." operationId: "ImageCreate" consumes: - "text/plain" - "application/octet-stream" produces: - "application/json" responses: 200: description: "no error" 404: description: "repository does not exist or no read access" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "fromImage" in: "query" description: | Name of the image to pull. If the name includes a tag or digest, specific behavior applies: - If only `fromImage` includes a tag, that tag is used. - If both `fromImage` and `tag` are provided, `tag` takes precedence. - If `fromImage` includes a digest, the image is pulled by digest, and `tag` is ignored. - If neither a tag nor digest is specified, all tags are pulled. type: "string" - name: "fromSrc" in: "query" description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." type: "string" - name: "repo" in: "query" description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." type: "string" - name: "tag" in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." 
type: "string" - name: "message" in: "query" description: "Set commit message for imported image." type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" schema: type: "string" required: false - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "changes" in: "query" description: | Apply `Dockerfile` instructions to the image that is created, for example: `changes=ENV DEBUG=true`. Note that `ENV DEBUG=true` should be URI component encoded. Supported `Dockerfile` instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` type: "array" items: type: "string" - name: "platform" in: "query" description: | Platform in the format os[/arch[/variant]]. When used in combination with the `fromImage` option, the daemon checks if the given image is present in the local image cache with the given OS and Architecture, and otherwise attempts to pull the image. If the option is not set, the host's native OS and Architecture are used. If the given image does not exist in the local image cache, the daemon attempts to pull the image with the host's native OS and Architecture. If the given image does exists in the local image cache, but its OS or architecture does not match, a warning is produced. When used with the `fromSrc` option to import an image from an archive, this option sets the platform information for the imported image. If the option is not set, the host's native OS and Architecture are used for the imported image. type: "string" default: "" tags: ["Image"] /images/{name}/json: get: summary: "Inspect an image" description: "Return low-level information about an image." 
operationId: "ImageInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/ImageInspect" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true - name: "manifests" in: "query" description: "Include Manifests in the image summary." type: "boolean" default: false required: false tags: ["Image"] /images/{name}/history: get: summary: "Get the history of an image" description: "Return parent layers of an image." operationId: "ImageHistory" produces: ["application/json"] responses: 200: description: "List of image layers" schema: type: "array" items: $ref: "#/definitions/ImageHistoryResponseItem" examples: application/json: - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" Created: 1398108230 CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" Tags: - "ubuntu:lucid" - "ubuntu:10.04" Size: 182964289 Comment: "" - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" Created: 1398108222 CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" Tags: [] Size: 0 Comment: "" - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" Created: 1371157430 CreatedBy: "" Tags: - "scratch12:latest" - "scratch:latest" Size: 0 Comment: "Imported from -" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: 
"platform" type: "string" in: "query" description: | JSON-encoded OCI platform to select the platform-variant. If omitted, it defaults to any locally available platform, prioritizing the daemon's host platform. If the daemon provides a multi-platform image store, this selects the platform-variant to show the history for. If the image is a single-platform image, or if the multi-platform image does not provide a variant matching the given platform, an error is returned. Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/{name}/push: post: summary: "Push an image" description: | Push an image to a registry. If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" consumes: - "application/octet-stream" responses: 200: description: "No error" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | Name of the image to push. For example, `registry.example.com/myimage`. The image must be present in the local image store with the same name. The name should be provided without tag; if a tag is provided, it is ignored. For example, `registry.example.com/myimage:latest` is considered equivalent to `registry.example.com/myimage`. Use the `tag` parameter to specify the tag to push. type: "string" required: true - name: "tag" in: "query" description: | Tag of the image to push. For example, `latest`. If no tag is provided, all tags of the given image that are present in the local image store are pushed. type: "string" - name: "platform" type: "string" in: "query" description: | JSON-encoded OCI platform to select the platform-variant to push. 
If not provided, all available variants will attempt to be pushed. If the daemon provides a multi-platform image store, this selects the platform-variant to push to the registry. If the image is a single-platform image, or if the multi-platform image does not provide a variant matching the given platform, an error is returned. Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration. Refer to the [authentication section](#section/Authentication) for details. type: "string" required: true tags: ["Image"] /images/{name}/tag: post: summary: "Tag an image" description: | Create a tag that refers to a source image. This creates an additional reference (tag) to the source image. The tag can include a different repository name and/or tag. If the repository or tag already exists, it will be overwritten. operationId: "ImageTag" responses: 201: description: "No error" 400: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID to tag." type: "string" required: true - name: "repo" in: "query" description: "The repository to tag in. For example, `someuser/someimage`." type: "string" - name: "tag" in: "query" description: "The name of the new tag." type: "string" tags: ["Image"] /images/{name}: delete: summary: "Remove an image" description: | Remove an image, along with any untagged parent images that were referenced by that image. Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. 
operationId: "ImageDelete" produces: ["application/json"] responses: 200: description: "The image was deleted successfully" schema: type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" examples: application/json: - Untagged: "3e2f21a89f" - Deleted: "3e2f21a89f" - Deleted: "53b4f83ac9" 404: description: "No such image" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Conflict" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "force" in: "query" description: "Remove the image even if it is being used by stopped containers or has other tags" type: "boolean" default: false - name: "noprune" in: "query" description: "Do not delete untagged parent images" type: "boolean" default: false tags: ["Image"] /images/search: get: summary: "Search images" description: "Search for an image on Docker Hub." operationId: "ImageSearch" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: type: "object" title: "ImageSearchResponseItem" properties: description: type: "string" is_official: type: "boolean" is_automated: description: | Whether this repository has automated builds enabled. <p><br /></p> > **Deprecated**: This field is deprecated and will always be "false". type: "boolean" example: false name: type: "string" star_count: type: "integer" examples: application/json: - description: "A minimal Docker image based on Alpine Linux with a complete package index and only 5 MB in size!" is_official: true is_automated: false name: "alpine" star_count: 10093 - description: "Busybox base image." is_official: true is_automated: false name: "Busybox base image." star_count: 3037 - description: "The PostgreSQL object-relational database system provides reliability and data integrity." 
is_official: true is_automated: false name: "postgres" star_count: 12408 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "term" in: "query" description: "Term to search" type: "string" required: true - name: "limit" in: "query" description: "Maximum number of results to return" type: "integer" - name: "filters" in: "query" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - `is-official=(true|false)` - `stars=<number>` Matches images that has at least 'number' stars. type: "string" tags: ["Image"] /images/prune: post: summary: "Delete unused images" produces: - "application/json" operationId: "ImagePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `dangling=<boolean>` When set to `true` (or `1`), prune only unused *and* untagged images. When set to `false` (or `0`), all unused images are pruned. - `until=<string>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" type: "array" items: $ref: "#/definitions/ImageDeleteResponseItem" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Image"] /auth: post: summary: "Check auth configuration" description: | Validate credentials for a registry and, if available, get an identity token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "An identity token was generated successfully." schema: type: "object" title: "SystemAuthResponse" required: [Status] properties: Status: description: "The status of the authentication" type: "string" x-nullable: false IdentityToken: description: "An opaque token used to authenticate a user after a successful login" type: "string" x-nullable: false examples: application/json: Status: "Login Succeeded" IdentityToken: "9cbaf023786cd7..." 204: description: "No error" 401: description: "Auth error" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "authConfig" in: "body" description: "Authentication to check" schema: $ref: "#/definitions/AuthConfig" tags: ["System"] /info: get: summary: "Get system information" operationId: "SystemInfo" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /version: get: summary: "Get version" description: "Returns the version of Docker that is running and various information about the system that Docker is running on." 
operationId: "SystemVersion" produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /_ping: get: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." operationId: "SystemPing" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "OK" headers: Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: | Default version of docker image builder The default on Linux is version "2" (BuildKit), but the daemon can be configured to recommend version "1" (classic Builder). Windows does not yet support BuildKit for native Windows images, and uses "1" (classic builder) as a default. This value is a recommendation as advertised by the daemon, and it is up to the client to choose which builder to use. default: "2" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Swarm: type: "string" enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] description: | Contains information about Swarm status of the daemon, and if the daemon is acting as a manager or worker node. default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" headers: Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" tags: ["System"] head: summary: "Ping" description: "This is a dummy endpoint you can use to test if the server is accessible." 
operationId: "SystemPingHead" produces: ["text/plain"] responses: 200: description: "no error" schema: type: "string" example: "(empty)" headers: Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: type: "string" description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" Swarm: type: "string" enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] description: | Contains information about Swarm status of the daemon, and if the daemon is acting as a manager or worker node. default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" Pragma: type: "string" default: "no-cache" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["System"] /commit: post: summary: "Create a new image from a container" operationId: "ImageCommit" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IDResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "containerConfig" in: "body" description: "The container configuration" schema: $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" type: "string" - name: "repo" in: "query" description: "Repository name for the created image" type: "string" - name: "tag" in: "query" description: "Tag name for the create image" type: "string" - name: "comment" in: "query" description: "Commit message" type: "string" - name: "author" in: "query" description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)" type: "string" - name: "pause" in: 
"query" description: "Whether to pause the container before committing" type: "boolean" default: true - name: "changes" in: "query" description: "`Dockerfile` instructions to apply while committing" type: "string" tags: ["Image"] /events: get: summary: "Monitor events" description: | Stream real-time events from the server. Various objects within Docker report events when something happens to them. Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` The Docker daemon reports these events: `reload` Services report these events: `create`, `update`, and `remove` Nodes report these events: `create`, `update`, and `remove` Secrets report these events: `create`, `update`, and `remove` Configs report these events: `create`, `update`, and `remove` The Builder reports `prune` events operationId: "SystemEvents" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/EventMessage" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "since" in: "query" description: "Show events created since this timestamp then stream new events." type: "string" - name: "until" in: "query" description: "Show events created until this timestamp then stop streaming." 
type: "string" - name: "filters" in: "query" description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - `config=<string>` config name or ID - `container=<string>` container name or ID - `daemon=<string>` daemon name or ID - `event=<string>` event type - `image=<string>` image name or ID - `label=<string>` image or container label - `network=<string>` network name or ID - `node=<string>` node ID - `plugin`=<string> plugin name or ID - `scope`=<string> local or swarm - `secret=<string>` secret name or ID - `service=<string>` service name or ID - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - `volume=<string>` volume name type: "string" tags: ["System"] /system/df: get: summary: "Get data usage information" operationId: "SystemDataUsage" responses: 200: description: "no error" schema: type: "object" title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" format: "int64" Images: type: "array" items: $ref: "#/definitions/ImageSummary" Containers: type: "array" items: $ref: "#/definitions/ContainerSummary" Volumes: type: "array" items: $ref: "#/definitions/Volume" BuildCache: type: "array" items: $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" ParentId: "" RepoTags: - "busybox:latest" RepoDigests: - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" Created: 1466724217 Size: 1092588 SharedSize: 0 Labels: {} Containers: 1 Containers: - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" Names: - "/top" Image: "busybox" ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" Command: "top" Created: 1472592424 Ports: [] SizeRootFs: 1092588 Labels: {} State: "exited" Status: "Exited (0) 56 minutes ago" HostConfig: 
NetworkMode: "default" NetworkSettings: Networks: bridge: IPAMConfig: null Links: null Aliases: null NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" Gateway: "172.18.0.1" IPAddress: "172.18.0.2" IPPrefixLen: 16 IPv6Gateway: "" GlobalIPv6Address: "" GlobalIPv6PrefixLen: 0 MacAddress: "02:42:ac:12:00:02" Mounts: [] Volumes: - Name: "my-volume" Driver: "local" Mountpoint: "/var/lib/docker/volumes/my-volume/_data" Labels: null Scope: "local" Options: null UsageData: Size: 10920104 RefCount: 2 BuildCache: - ID: "hw53o5aio51xtltp5xjp8v7fx" Parents: [] Type: "regular" Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" InUse: false Shared: true Size: 0 CreatedAt: "2021-06-28T13:31:01.474619385Z" LastUsedAt: "2021-07-07T22:02:32.738075951Z" UsageCount: 26 - ID: "ndlpt0hhvkqcdfkputsk4cq9c" Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] Type: "regular" Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" InUse: false Shared: true Size: 51 CreatedAt: "2021-06-28T13:31:03.002625487Z" LastUsedAt: "2021-07-07T22:02:32.773909517Z" UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "type" in: "query" description: | Object types, for which to compute and return data. type: "array" collectionFormat: multi items: type: "string" enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: summary: "Export an image" description: | Get a tarball containing all images and metadata for a repository. If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. 
If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. ### Image tarball format An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). Additionally, includes the manifest.json file associated with a backwards compatible docker save format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. ```json { "hello-world": { "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" } } ``` operationId: "ImageGet" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or ID" type: "string" required: true - name: "platform" type: "string" in: "query" description: | JSON encoded OCI platform describing a platform which will be used to select a platform-specific image to be saved if the image is multi-platform. If not provided, the full multi-platform image will be saved. Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/get: get: summary: "Export several images" description: | Get a tarball containing all images and metadata for several image repositories. For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. For details on the format, see the [export image endpoint](#operation/ImageGet). 
operationId: "ImageGetAll" produces: - "application/x-tar" responses: 200: description: "no error" schema: type: "string" format: "binary" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "names" in: "query" description: "Image names to filter by" type: "array" items: type: "string" - name: "platform" type: "string" in: "query" description: | JSON encoded OCI platform describing a platform which will be used to select a platform-specific image to be saved if the image is multi-platform. If not provided, the full multi-platform image will be saved. Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/load: post: summary: "Import images" description: | Load a set of images and tags into a repository. For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" produces: - "application/json" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "imagesTarball" in: "body" description: "Tar archive containing images" schema: type: "string" format: "binary" - name: "quiet" in: "query" description: "Suppress progress details during load." type: "boolean" default: false - name: "platform" type: "string" in: "query" description: | JSON encoded OCI platform describing a platform which will be used to select a platform-specific image to be load if the image is multi-platform. If not provided, the full multi-platform image will be loaded. Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /containers/{id}/exec: post: summary: "Create an exec instance" description: "Run a command inside a running container." 
operationId: "ContainerExec" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IDResponse" 404: description: "no such container" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such container: c2ada9df5af8" 409: description: "container is paused" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execConfig" in: "body" description: "Exec configuration" schema: type: "object" title: "ExecConfig" properties: AttachStdin: type: "boolean" description: "Attach to `stdin` of the exec command." AttachStdout: type: "boolean" description: "Attach to `stdout` of the exec command." AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." ConsoleSize: type: "array" description: "Initial console size, as an `[height, width]` array." x-nullable: true minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 example: [80, 64] DetachKeys: type: "string" description: | Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: description: | A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" Cmd: type: "array" description: "Command to run, as a string or array of strings." items: type: "string" Privileged: type: "boolean" description: "Runs the exec process with extended privileges." default: false User: type: "string" description: | The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`. WorkingDir: type: "string" description: | The working directory for the exec process inside the container. 
example: AttachStdin: false AttachStdout: true AttachStderr: true DetachKeys: "ctrl-p,ctrl-q" Tty: false Cmd: - "date" Env: - "FOO=bar" - "BAZ=quux" required: true - name: "id" in: "path" description: "ID or name of container" type: "string" required: true tags: ["Exec"] /exec/{id}/start: post: summary: "Start an exec instance" description: | Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command. operationId: "ExecStart" consumes: - "application/json" produces: - "application/vnd.docker.raw-stream" - "application/vnd.docker.multiplexed-stream" responses: 200: description: "No error" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Container is stopped or paused" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "execStartConfig" in: "body" schema: type: "object" title: "ExecStartConfig" properties: Detach: type: "boolean" description: "Detach from the command." example: false Tty: type: "boolean" description: "Allocate a pseudo-TTY." example: true ConsoleSize: type: "array" description: "Initial console size, as an `[height, width]` array." x-nullable: true minItems: 2 maxItems: 2 items: type: "integer" minimum: 0 example: [80, 64] - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /exec/{id}/resize: post: summary: "Resize an exec instance" description: | Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance. 
operationId: "ExecResize" responses: 200: description: "No error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" - name: "h" in: "query" required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] /exec/{id}/json: get: summary: "Inspect an exec instance" description: "Return low-level information about an exec instance." operationId: "ExecInspect" produces: - "application/json" responses: 200: description: "No error" schema: type: "object" title: "ExecInspectResponse" properties: CanRemove: type: "boolean" DetachKeys: type: "string" ID: type: "string" Running: type: "boolean" ExitCode: type: "integer" ProcessConfig: $ref: "#/definitions/ProcessConfig" OpenStdin: type: "boolean" OpenStderr: type: "boolean" OpenStdout: type: "boolean" ContainerID: type: "string" Pid: type: "integer" description: "The system process ID for the exec process." 
examples: application/json: CanRemove: false ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" DetachKeys: "" ExitCode: 2 ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" OpenStderr: true OpenStdin: true OpenStdout: true ProcessConfig: arguments: - "-c" - "exit 2" entrypoint: "sh" privileged: false tty: true user: "1000" Running: false Pid: 42000 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Exec instance ID" required: true type: "string" tags: ["Exec"] /volumes: get: summary: "List volumes" operationId: "VolumeList" produces: ["application/json"] responses: 200: description: "Summary volume data that matches the query" schema: $ref: "#/definitions/VolumeListResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the volumes list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all volumes that are not in use by a container. When set to `false` (or `0`), only volumes that are in use by one or more containers are returned. - `driver=<volume-driver-name>` Matches volumes based on their driver. - `label=<key>` or `label=<key>:<value>` Matches volumes based on the presence of a `label` alone or a `label` and a value. - `name=<volume-name>` Matches all or part of a volume name. 
type: "string" format: "json" tags: ["Volume"] /volumes/create: post: summary: "Create a volume" operationId: "VolumeCreate" consumes: ["application/json"] produces: ["application/json"] responses: 201: description: "The volume was created successfully" schema: $ref: "#/definitions/Volume" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "volumeConfig" in: "body" required: true description: "Volume configuration" schema: $ref: "#/definitions/VolumeCreateOptions" tags: ["Volume"] /volumes/{name}: get: summary: "Inspect a volume" operationId: "VolumeInspect" produces: ["application/json"] responses: 200: description: "No error" schema: $ref: "#/definitions/Volume" 404: description: "No such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" tags: ["Volume"] put: summary: "Update a volume. Valid only for Swarm cluster volumes" operationId: "VolumeUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such volume" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "The name or ID of the volume" type: "string" required: true - name: "body" in: "body" schema: # though the schema for this is an object that contains only a # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object # means that if, later on, we support things like changing the # labels, we can do so without duplicating that information to the # ClusterVolumeSpec. 
type: "object" description: "Volume configuration" properties: Spec: $ref: "#/definitions/ClusterVolumeSpec" description: | The spec of the volume to update. Currently, only Availability may change. All other fields must remain unchanged. - name: "version" in: "query" description: | The version number of the volume being updated. This is required to avoid conflicting writes. Found in the volume's `ClusterVolume` field. type: "integer" format: "int64" required: true tags: ["Volume"] delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." operationId: "VolumeDelete" responses: 204: description: "The volume was removed" 404: description: "No such volume or volume driver" schema: $ref: "#/definitions/ErrorResponse" 409: description: "Volume is in use and cannot be removed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" required: true description: "Volume name or ID" type: "string" - name: "force" in: "query" description: "Force the removal of the volume" type: "boolean" default: false tags: ["Volume"] /volumes/prune: post: summary: "Delete unused volumes" produces: - "application/json" operationId: "VolumePrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" type: "array" items: type: "string" SpaceReclaimed: description: "Disk space reclaimed in bytes" type: "integer" format: "int64" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Volume"] /networks: get: summary: "List networks" description: | Returns a list of networks. For details on the format, see the [network inspect endpoint](#operation/NetworkInspect). Note that it uses a different, smaller representation of a network than inspecting a single network. For example, the list of containers attached to the network is not propagated in API versions 1.28 and up. operationId: "NetworkList" produces: - "application/json" responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Network" examples: application/json: - Name: "bridge" Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" Created: "2016-10-19T06:21:00.416543526Z" Scope: "local" Driver: "bridge" EnableIPv4: true EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: - Subnet: "172.17.0.0/16" Options: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" - Name: "none" Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: "null" EnableIPv4: false EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} - Name: "host" Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" Created: "0001-01-01T00:00:00Z" Scope: "local" Driver: 
"host" EnableIPv4: false EnableIPv6: false Internal: false Attachable: false Ingress: false IPAM: Driver: "default" Config: [] Containers: {} Options: {} 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: - `dangling=<boolean>` When set to `true` (or `1`), returns all networks that are not in use by a container. When set to `false` (or `0`), only networks that are in use by one or more containers are returned. - `driver=<driver-name>` Matches a network's driver. - `id=<network-id>` Matches all or part of a network ID. - `label=<key>` or `label=<key>=<value>` Matches all or part of a network label. - `name=<network-name>` Matches all or part of a network name. - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
type: "string" tags: ["Network"] /networks/{id}: get: summary: "Inspect a network" operationId: "NetworkInspect" produces: - "application/json" responses: 200: description: "No error" schema: $ref: "#/definitions/Network" 404: description: "Network not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "verbose" in: "query" description: "Detailed inspect output for troubleshooting" type: "boolean" default: false - name: "scope" in: "query" description: "Filter the network by scope (swarm, global, or local)" type: "string" tags: ["Network"] delete: summary: "Remove a network" operationId: "NetworkDelete" responses: 204: description: "No error" 403: description: "operation not supported for pre-defined networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such network" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" tags: ["Network"] /networks/create: post: summary: "Create a network" operationId: "NetworkCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "Network created successfully" schema: $ref: "#/definitions/NetworkCreateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: | Forbidden operation. This happens when trying to create a network named after a pre-defined network, or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. 
schema: $ref: "#/definitions/ErrorResponse" 404: description: "plugin not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "networkConfig" in: "body" description: "Network configuration" required: true schema: type: "object" title: "NetworkCreateRequest" required: ["Name"] properties: Name: description: "The network's name." type: "string" example: "my_network" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" example: "bridge" Scope: description: | The level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). type: "string" Internal: description: "Restrict external access to the network." type: "boolean" Attachable: description: | Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" example: true Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" example: false ConfigOnly: description: | Creates a config-only network. Config-only networks are placeholder networks for network configurations to be used by other networks. Config-only networks cannot be used directly to run containers or services. type: "boolean" default: false example: false ConfigFrom: description: | Specifies the source which will provide the configuration for this network. The specified network must be an existing config-only network; see ConfigOnly. $ref: "#/definitions/ConfigReference" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv4: description: "Enable IPv4 on the network." type: "boolean" example: true EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" example: true Options: description: "Network specific options to be used by the drivers." 
type: "object" additionalProperties: type: "string" example: com.docker.network.bridge.default_bridge: "true" com.docker.network.bridge.enable_icc: "true" com.docker.network.bridge.enable_ip_masquerade: "true" com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" com.docker.network.bridge.name: "docker0" com.docker.network.driver.mtu: "1500" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" example: com.example.some-label: "some-value" com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: post: summary: "Connect a container to a network" description: "The network must be either a local-scoped network or a swarm-scoped network with the `attachable` option set. A network cannot be re-attached to a running container" operationId: "NetworkConnect" consumes: - "application/json" responses: 200: description: "No error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "Operation forbidden" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkConnectRequest" properties: Container: type: "string" description: "The ID or name of the container to connect to the network." 
EndpointConfig: $ref: "#/definitions/EndpointSettings" example: Container: "3613f73ba0e4" EndpointConfig: IPAMConfig: IPv4Address: "172.24.56.89" IPv6Address: "2001:db8::5689" MacAddress: "02:42:ac:12:05:02" Priority: 100 tags: ["Network"] /networks/{id}/disconnect: post: summary: "Disconnect a container from a network" operationId: "NetworkDisconnect" consumes: - "application/json" responses: 200: description: "No error" 403: description: "Operation not supported for swarm scoped networks" schema: $ref: "#/definitions/ErrorResponse" 404: description: "Network or container not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "Network ID or name" required: true type: "string" - name: "container" in: "body" required: true schema: type: "object" title: "NetworkDisconnectRequest" properties: Container: type: "string" description: | The ID or name of the container to disconnect from the network. Force: type: "boolean" description: | Force the container to disconnect from the network. tags: ["Network"] /networks/prune: post: summary: "Delete unused networks" produces: - "application/json" operationId: "NetworkPrune" parameters: - name: "filters" in: "query" description: | Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. 
type: "string" responses: 200: description: "No error" schema: type: "object" title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" type: "array" items: type: "string" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Network"] /plugins: get: summary: "List plugins" operationId: "PluginList" description: "Returns information about installed plugins." produces: ["application/json"] responses: 200: description: "No error" schema: type: "array" items: $ref: "#/definitions/Plugin" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: - `capability=<capability name>` - `enable=<true>|<false>` tags: ["Plugin"] /plugins/privileges: get: summary: "Get plugin privileges" operationId: "GetPluginPrivileges" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: - "Plugin" /plugins/pull: post: summary: "Install a plugin" operationId: "PluginPull" description: | Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). 
produces: - "application/json" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "remote" in: "query" description: | Remote reference for plugin to install. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "name" in: "query" description: | Local name for the pulled plugin. The `:latest` tag is optional, and is used as the default if omitted. required: false type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/{name}/json: get: summary: "Inspect a plugin" operationId: "PluginInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" tags: ["Plugin"] /plugins/{name}: delete: summary: "Remove a plugin" operationId: "PluginDelete" responses: 200: description: "no error" schema: $ref: "#/definitions/Plugin" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Disable the plugin before removing. This may result in issues if the plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] /plugins/{name}/enable: post: summary: "Enable a plugin" operationId: "PluginEnable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "timeout" in: "query" description: "Set the HTTP client timeout (in seconds)" type: "integer" default: 0 tags: ["Plugin"] /plugins/{name}/disable: post: summary: "Disable a plugin" operationId: "PluginDisable" responses: 200: description: "no error" 404: description: "plugin is not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "force" in: "query" description: | Force disable a plugin even if still in use. required: false type: "boolean" tags: ["Plugin"] /plugins/{name}/upgrade: post: summary: "Upgrade a plugin" operationId: "PluginUpgrade" responses: 204: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. 
required: true type: "string" - name: "remote" in: "query" description: | Remote reference to upgrade to. The `:latest` tag is optional, and is used as the default if omitted. required: true type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration to use when pulling a plugin from a registry. Refer to the [authentication section](#section/Authentication) for details. type: "string" - name: "body" in: "body" schema: type: "array" items: $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" Value: - "host" - Name: "mount" Description: "" Value: - "/data" - Name: "device" Description: "" Value: - "/dev/cpu_dma_latency" tags: ["Plugin"] /plugins/create: post: summary: "Create a plugin" operationId: "PluginCreate" consumes: - "application/x-tar" responses: 204: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "query" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "tarContext" in: "body" description: "Path to tar containing plugin rootfs and manifest" schema: type: "string" format: "binary" tags: ["Plugin"] /plugins/{name}/push: post: summary: "Push a plugin" operationId: "PluginPush" description: | Push a plugin to the registry. parameters: - name: "name" in: "path" description: | The name of the plugin. The `:latest` tag is optional, and is the default if omitted. required: true type: "string" responses: 200: description: "no error" 404: description: "plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /plugins/{name}/set: post: summary: "Configure a plugin" operationId: "PluginSet" consumes: - "application/json" parameters: - name: "name" in: "path" description: | The name of the plugin. 
The `:latest` tag is optional, and is the default if omitted. required: true type: "string" - name: "body" in: "body" schema: type: "array" items: type: "string" example: ["DEBUG=1"] responses: 204: description: "No error" 404: description: "Plugin not installed" schema: $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Plugin"] /nodes: get: summary: "List nodes" operationId: "NodeList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Node" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" description: | Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). Available filters: - `id=<node id>` - `label=<engine label>` - `membership=`(`accepted`|`pending`)` - `name=<node name>` - `node.label=<node label>` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] /nodes/{id}: get: summary: "Inspect a node" operationId: "NodeInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Node" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the node" type: "string" required: true tags: ["Node"] delete: summary: "Delete a node" operationId: "NodeDelete" responses: 200: description: "no error" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name 
of the node" type: "string" required: true - name: "force" in: "query" description: "Force remove a node from the swarm" default: false type: "boolean" tags: ["Node"] /nodes/{id}/update: post: summary: "Update a node" operationId: "NodeUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such node" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID of the node" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" description: | The version number of the node object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true tags: ["Node"] /swarm: get: summary: "Inspect swarm" operationId: "SwarmInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/init: post: summary: "Initialize a new swarm" operationId: "SwarmInit" produces: - "application/json" - "text/plain" responses: 200: description: "no error" schema: description: "The node ID" type: "string" example: "7v2t30z9blmxuhnyo6s4cpenp" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmInitRequest" properties: 
ListenAddr: description: | Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" DataPathPort: description: | DataPathPort specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. if no port is set or is set to 0, default port 4789 will be used. type: "integer" format: "uint32" DefaultAddrPool: description: | Default Address Pool specifies default subnet pools for global scope networks. type: "array" items: type: "string" example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" SubnetSize: description: | SubnetSize specifies the subnet size of the networks created from the default subnet pool. 
type: "integer" format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathPort: 4789 DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} Raft: {} Dispatcher: {} CAConfig: {} EncryptionConfig: AutoLockManagers: false tags: ["Swarm"] /swarm/join: post: summary: "Join an existing swarm" operationId: "SwarmJoin" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is already part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmJoinRequest" properties: ListenAddr: description: | Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: description: | Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`, or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope network drivers will publish towards other nodes in order to reach the containers running on this node. 
Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. type: "string" RemoteAddrs: description: | Addresses of manager nodes already participating in the swarm. type: "array" items: type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" DataPathAddr: "192.168.1.1" RemoteAddrs: - "node1:2377" JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" tags: ["Swarm"] /swarm/leave: post: summary: "Leave a swarm" operationId: "SwarmLeave" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "force" description: | Force leave swarm, even if this is the last manager or that it will break the cluster. in: "query" type: "boolean" default: false tags: ["Swarm"] /swarm/update: post: summary: "Update a swarm" operationId: "SwarmUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: $ref: "#/definitions/SwarmSpec" - name: "version" in: "query" description: | The version number of the swarm object being updated. This is required to avoid conflicting writes. type: "integer" format: "int64" required: true - name: "rotateWorkerToken" in: "query" description: "Rotate the worker join token." type: "boolean" default: false - name: "rotateManagerToken" in: "query" description: "Rotate the manager join token." 
type: "boolean" default: false - name: "rotateManagerUnlockKey" in: "query" description: "Rotate the manager unlock key." type: "boolean" default: false tags: ["Swarm"] /swarm/unlockkey: get: summary: "Get the unlock key" operationId: "SwarmUnlockkey" consumes: - "application/json" responses: 200: description: "no error" schema: type: "object" title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /swarm/unlock: post: summary: "Unlock a locked manager" operationId: "SwarmUnlock" consumes: - "application/json" produces: - "application/json" parameters: - name: "body" in: "body" required: true schema: type: "object" title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." type: "string" example: UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" responses: 200: description: "no error" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" tags: ["Swarm"] /services: get: summary: "List services" operationId: "ServiceList" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Service" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. 
Available filters: - `id=<service id>` - `label=<service label>` - `mode=["replicated"|"global"]` - `name=<service name>` - name: "status" in: "query" type: "boolean" description: | Include service status, with count of running and desired tasks. tags: ["Service"] /services/create: post: summary: "Create a service" operationId: "ServiceCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/ServiceCreateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 403: description: "network is not eligible for services" schema: $ref: "#/definitions/ErrorResponse" 409: description: "name conflicts with an existing service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "web" TaskTemplate: ContainerSpec: Image: "nginx:alpine" Mounts: - ReadOnly: true Source: "web-data" Target: "/usr/share/nginx/html" Type: "volume" VolumeOptions: DriverConfig: {} Labels: com.example.something: "something-value" Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] User: "33" DNSConfig: Nameservers: ["8.8.8.8"] Search: ["example.org"] Options: ["timeout:3"] Secrets: - File: Name: "www.example.org.key" UID: "33" GID: "33" Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" OomScoreAdj: 0 LogDriver: Name: "json-file" Options: max-file: "3" max-size: "10M" Placement: {} Resources: Limits: MemoryBytes: 104857600 Reservations: {} RestartPolicy: Condition: "on-failure" Delay: 10000000000 MaxAttempts: 10 Mode: Replicated: Replicas: 4 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 
MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Ports: - Protocol: "tcp" PublishedPort: 8080 TargetPort: 80 Labels: foo: "bar" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}: get: summary: "Inspect a service" operationId: "ServiceInspect" responses: 200: description: "no error" schema: $ref: "#/definitions/Service" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "insertDefaults" in: "query" description: "Fill empty fields with default values." type: "boolean" default: false tags: ["Service"] delete: summary: "Delete a service" operationId: "ServiceDelete" responses: 200: description: "no error" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." 
required: true type: "string" tags: ["Service"] /services/{id}/update: post: summary: "Update a service" operationId: "ServiceUpdate" consumes: ["application/json"] produces: ["application/json"] responses: 200: description: "no error" schema: $ref: "#/definitions/ServiceUpdateResponse" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID or name of service." required: true type: "string" - name: "body" in: "body" required: true schema: allOf: - $ref: "#/definitions/ServiceSpec" - type: "object" example: Name: "top" TaskTemplate: ContainerSpec: Image: "busybox" Args: - "top" OomScoreAdj: 0 Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ForceUpdate: 0 Mode: Replicated: Replicas: 1 UpdateConfig: Parallelism: 2 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 RollbackConfig: Parallelism: 1 Delay: 1000000000 FailureAction: "pause" Monitor: 15000000000 MaxFailureRatio: 0.15 EndpointSpec: Mode: "vip" - name: "version" in: "query" description: | The version number of the service object being updated. This is required to avoid conflicting writes. This version number should be the value as currently set on the service *before* the update. You can find the current version by calling `GET /services/{id}` required: true type: "integer" - name: "registryAuthFrom" in: "query" description: | If the `X-Registry-Auth` header is not specified, this parameter indicates where to find registry authorization credentials. 
type: "string" enum: ["spec", "previous-spec"] default: "spec" - name: "rollback" in: "query" description: | Set to this parameter to `previous` to cause a server-side rollback to the previous service spec. The supplied spec will be ignored in this case. type: "string" - name: "X-Registry-Auth" in: "header" description: | A base64url-encoded auth configuration for pulling from private registries. Refer to the [authentication section](#section/Authentication) for details. type: "string" tags: ["Service"] /services/{id}/logs: get: summary: "Get service logs" description: | Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. produces: - "application/vnd.docker.raw-stream" - "application/vnd.docker.multiplexed-stream" operationId: "ServiceLogs" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such service" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such service: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID or name of the service" type: "string" - name: "details" in: "query" description: "Show service context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Service"] /tasks: get: summary: "List tasks" operationId: "TaskList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Task" example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" Version: Index: 71 CreatedAt: "2016-06-07T21:07:31.171892745Z" UpdatedAt: "2016-06-07T21:07:31.376370513Z" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:31.290032978Z" State: "running" Message: "started" ContainerStatus: ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" PID: 677 DesiredState: "running" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - 
"10.255.0.10/16" - ID: "1yljwbmlr8er2waf8orvqpwms" Version: Index: 30 CreatedAt: "2016-06-07T21:07:30.019104782Z" UpdatedAt: "2016-06-07T21:07:30.231958098Z" Name: "hopeful_cori" Spec: ContainerSpec: Image: "redis" Resources: Limits: {} Reservations: {} RestartPolicy: Condition: "any" MaxAttempts: 0 Placement: {} ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" Slot: 1 NodeID: "60gvrl6tm78dmak4yl7srz94v" Status: Timestamp: "2016-06-07T21:07:30.202183143Z" State: "shutdown" Message: "shutdown" ContainerStatus: ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" DesiredState: "shutdown" NetworksAttachments: - Network: ID: "4qvuz4ko70xaltuqbt8956gd1" Version: Index: 18 CreatedAt: "2016-06-07T20:31:11.912919752Z" UpdatedAt: "2016-06-07T21:07:29.955277358Z" Spec: Name: "ingress" Labels: com.docker.swarm.internal: "true" DriverConfiguration: {} IPAMOptions: Driver: {} Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" DriverState: Name: "overlay" Options: com.docker.network.driver.overlay.vxlanid_list: "256" IPAMOptions: Driver: Name: "default" Configs: - Subnet: "10.255.0.0/16" Gateway: "10.255.0.1" Addresses: - "10.255.0.5/16" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: - `desired-state=(running | shutdown | accepted)` - `id=<task id>` - `label=key` or `label="key=value"` - `name=<task name>` - `node=<node id or name>` - `service=<service name>` tags: ["Task"] /tasks/{id}: get: summary: "Inspect a task" operationId: "TaskInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Task" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "ID of the task" required: true type: "string" tags: ["Task"] /tasks/{id}/logs: get: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" produces: - "application/vnd.docker.raw-stream" - "application/vnd.docker.multiplexed-stream" responses: 200: description: "logs returned as a stream in response body" schema: type: "string" format: "binary" 404: description: "no such task" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such task: c2ada9df5af8" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true description: "ID of the task" type: "string" - name: "details" in: "query" description: "Show task context and extra details provided to logs." type: "boolean" default: false - name: "follow" in: "query" description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" in: "query" description: "Return logs from `stdout`" type: "boolean" default: false - name: "stderr" in: "query" description: "Return logs from `stderr`" type: "boolean" default: false - name: "since" in: "query" description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" type: "boolean" default: false - name: "tail" in: "query" description: | Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] /secrets: get: summary: "List secrets" operationId: "SecretList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Secret" example: - ID: "blt1owaxmitz71s9v5zh81zun" Version: Index: 85 CreatedAt: "2017-07-20T13:55:28.678958722Z" UpdatedAt: "2017-07-20T13:55:28.678958722Z" Spec: Name: "mysql-passwd" Labels: some.label: "some.value" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. 
Available filters: - `id=<secret id>` - `label=<key> or label=<key>=value` - `name=<secret name>` - `names=<secret name>` tags: ["Secret"] /secrets/create: post: summary: "Create a secret" operationId: "SecretCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IDResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/SecretSpec" - type: "object" example: Name: "app-key.crt" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: summary: "Inspect a secret" operationId: "SecretInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Secret" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" Labels: foo: "bar" Driver: Name: "secret-bucket" Options: OptionA: "value for driver option A" OptionB: "value for driver option B" 404: description: "secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] delete: summary: "Delete a secret" operationId: "SecretDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: 
"secret not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the secret" tags: ["Secret"] /secrets/{id}/update: post: summary: "Update a Secret" operationId: "SecretUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such secret" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the secret" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/SecretSpec" description: | The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" description: | The version number of the secret object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true tags: ["Secret"] /configs: get: summary: "List configs" operationId: "ConfigList" produces: - "application/json" responses: 200: description: "no error" schema: type: "array" items: $ref: "#/definitions/Config" example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "server.conf" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "filters" in: "query" type: "string" description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: - `id=<config id>` - `label=<key> or label=<key>=value` - `name=<config name>` - `names=<config name>` tags: ["Config"] /configs/create: post: summary: "Create a config" operationId: "ConfigCreate" consumes: - "application/json" produces: - "application/json" responses: 201: description: "no error" schema: $ref: "#/definitions/IDResponse" 409: description: "name conflicts with an existing object" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "body" in: "body" schema: allOf: - $ref: "#/definitions/ConfigSpec" - type: "object" example: Name: "server.conf" Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" tags: ["Config"] /configs/{id}: get: summary: "Inspect a config" operationId: "ConfigInspect" produces: - "application/json" responses: 200: description: "no error" schema: $ref: "#/definitions/Config" examples: application/json: ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 CreatedAt: "2016-11-05T01:20:17.327670065Z" UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: 
"app-dev.crt" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] delete: summary: "Delete a config" operationId: "ConfigDelete" produces: - "application/json" responses: 204: description: "no error" 404: description: "config not found" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" required: true type: "string" description: "ID of the config" tags: ["Config"] /configs/{id}/update: post: summary: "Update a Config" operationId: "ConfigUpdate" responses: 200: description: "no error" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 404: description: "no such config" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" 503: description: "node is not part of a swarm" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" description: "The ID or name of the config" type: "string" required: true - name: "body" in: "body" schema: $ref: "#/definitions/ConfigSpec" description: | The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values. - name: "version" in: "query" description: | The version number of the config object being updated. This is required to avoid conflicting writes. 
type: "integer" format: "int64" required: true tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry" description: | Return image digest and platform information by contacting the registry. operationId: "DistributionInspect" produces: - "application/json" responses: 200: description: "descriptor and platform information" schema: $ref: "#/definitions/DistributionInspect" 401: description: "Failed authentication or no image found" schema: $ref: "#/definitions/ErrorResponse" examples: application/json: message: "No such image: someimage (tag: latest)" 500: description: "Server error" schema: $ref: "#/definitions/ErrorResponse" parameters: - name: "name" in: "path" description: "Image name or id" type: "string" required: true tags: ["Distribution"] /session: post: summary: "Initialize interactive session" description: | Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities. ### Hijacking This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection. For example, the client sends this request to upgrade the connection: ``` POST /session HTTP/1.1 Upgrade: h2c Connection: Upgrade ``` The Docker daemon responds with a `101 UPGRADED` response follow with the raw stream: ``` HTTP/1.1 101 UPGRADED Connection: Upgrade Upgrade: h2c ``` operationId: "Session" produces: - "application/vnd.docker.raw-stream" responses: 101: description: "no error, hijacking successful" 400: description: "bad parameter" schema: $ref: "#/definitions/ErrorResponse" 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" tags: ["Session"]
unknown
github
https://github.com/moby/moby
api/docs/v1.48.yaml
// Copyright IBM Corp. 2016, 2025 // SPDX-License-Identifier: BUSL-1.1 package command import ( "encoding/json" "fmt" "path" "sort" "strconv" "strings" "github.com/hashicorp/cli" "github.com/posener/complete" ) var ( _ cli.Command = (*KVMetadataGetCommand)(nil) _ cli.CommandAutocomplete = (*KVMetadataGetCommand)(nil) ) type KVMetadataGetCommand struct { *BaseCommand flagMount string } func (c *KVMetadataGetCommand) Synopsis() string { return "Retrieves key metadata from the KV store" } func (c *KVMetadataGetCommand) Help() string { helpText := ` Usage: vault kv metadata get [options] KEY Retrieves the metadata from Vault's key-value store at the given key name. If no key exists with that name, an error is returned. $ vault kv metadata get -mount=secret foo The deprecated path-like syntax can also be used, but this should be avoided for KV v2, as the fact that it is not actually the full API path to the secret (secret/metadata/foo) can cause confusion: $ vault kv metadata get secret/foo Additional flags and more advanced use cases are detailed below. ` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *KVMetadataGetCommand) Flags() *FlagSets { set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) // Common Options f := set.NewFlagSet("Common Options") f.StringVar(&StringVar{ Name: "mount", Target: &c.flagMount, Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value Usage: `Specifies the path where the KV backend is mounted. If specified, the next argument will be interpreted as the secret path. 
If this flag is not specified, the next argument will be interpreted as the combined mount path and secret path, with /metadata/ automatically appended between KV v2 secrets.`, }) return set } func (c *KVMetadataGetCommand) AutocompleteArgs() complete.Predictor { return nil } func (c *KVMetadataGetCommand) AutocompleteFlags() complete.Flags { return c.Flags().Completions() } func (c *KVMetadataGetCommand) Run(args []string) int { f := c.Flags() if err := f.Parse(args); err != nil { c.UI.Error(err.Error()) return 1 } args = f.Args() switch { case len(args) < 1: c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) return 1 case len(args) > 1: c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) return 1 } client, err := c.Client() if err != nil { c.UI.Error(err.Error()) return 2 } // If true, we're working with "-mount=secret foo" syntax. // If false, we're using "secret/foo" syntax. mountFlagSyntax := c.flagMount != "" var ( mountPath string partialPath string v2 bool ) // Parse the paths and grab the KV version if mountFlagSyntax { // In this case, this arg is the secret path (e.g. "foo"). partialPath = sanitizePath(args[0]) mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) if err != nil { c.UI.Error(err.Error()) return 2 } if v2 { partialPath = path.Join(mountPath, partialPath) } } else { // In this case, this arg is a path-like combination of mountPath/secretPath. // (e.g. 
"secret/foo") partialPath = sanitizePath(args[0]) mountPath, v2, err = isKVv2(partialPath, client) if err != nil { c.UI.Error(err.Error()) return 2 } } if !v2 { c.UI.Error("Metadata not supported on KV Version 1") return 1 } fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) secret, err := client.Logical().Read(fullPath) if err != nil { c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) return 2 } if secret == nil { c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) return 2 } if c.flagField != "" { return PrintRawField(c.UI, secret, c.flagField) } // If we have wrap info print the secret normally. if secret.WrapInfo != nil || c.flagFormat != "table" { return OutputSecret(c.UI, secret) } versionsRaw, ok := secret.Data["versions"] if !ok || versionsRaw == nil { c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) OutputSecret(c.UI, secret) return 2 } versions := versionsRaw.(map[string]interface{}) delete(secret.Data, "versions") outputPath(c.UI, fullPath, "Metadata Path") c.UI.Info(getHeaderForMap("Metadata", secret.Data)) OutputSecret(c.UI, secret) versionKeys := []int{} for k := range versions { i, err := strconv.Atoi(k) if err != nil { c.UI.Error(fmt.Sprintf("Error parsing version %s", k)) return 2 } versionKeys = append(versionKeys, i) } // Sort the versions by key and display them in order. 
sort.Ints(versionKeys) for _, v := range versionKeys { version, err := expandVersionAttribution(versions[strconv.Itoa(v)]) if err != nil { c.UI.Error(fmt.Sprintf("Error parsing version %d: %s", v, err.Error())) return 2 } c.UI.Info("\n" + getHeaderForMap(fmt.Sprintf("Version %d", v), version)) OutputData(c.UI, version) } return 0 } func expandVersionAttribution(versionData interface{}) (map[string]interface{}, error) { version, ok := versionData.(map[string]interface{}) if !ok { return nil, fmt.Errorf("version is not a map") } attributionKeys := []string{"created_by", "deleted_by"} for _, key := range attributionKeys { if version[key] == nil { continue } attr, err := json.Marshal(version[key]) if err != nil { return nil, fmt.Errorf("failed to parse attribution data for %q", key) } version[key] = string(attr) } return version, nil }
go
github
https://github.com/hashicorp/vault
command/kv_metadata_get.go
import os import bdb import types from Tkinter import * from WindowList import ListedToplevel from ScrolledList import ScrolledList class Idb(bdb.Bdb): def __init__(self, gui): self.gui = gui bdb.Bdb.__init__(self) def user_line(self, frame): if self.in_rpc_code(frame): self.set_step() return message = self.__frame2message(frame) self.gui.interaction(message, frame) def user_exception(self, frame, info): if self.in_rpc_code(frame): self.set_step() return message = self.__frame2message(frame) self.gui.interaction(message, frame, info) def in_rpc_code(self, frame): if frame.f_code.co_filename.count('rpc.py'): return True else: prev_frame = frame.f_back if prev_frame.f_code.co_filename.count('Debugger.py'): # (that test will catch both Debugger.py and RemoteDebugger.py) return False return self.in_rpc_code(prev_frame) def __frame2message(self, frame): code = frame.f_code filename = code.co_filename lineno = frame.f_lineno basename = os.path.basename(filename) message = "%s:%s" % (basename, lineno) if code.co_name != "?": message = "%s: %s()" % (message, code.co_name) return message class Debugger: vstack = vsource = vlocals = vglobals = None def __init__(self, pyshell, idb=None): if idb is None: idb = Idb(self) self.pyshell = pyshell self.idb = idb self.frame = None self.make_gui() self.interacting = 0 def run(self, *args): try: self.interacting = 1 return self.idb.run(*args) finally: self.interacting = 0 def close(self, event=None): if self.interacting: self.top.bell() return if self.stackviewer: self.stackviewer.close(); self.stackviewer = None # Clean up pyshell if user clicked debugger control close widget. # (Causes a harmless extra cycle through close_debugger() if user # toggled debugger from pyshell Debug menu) self.pyshell.close_debugger() # Now close the debugger control window.... 
self.top.destroy() def make_gui(self): pyshell = self.pyshell self.flist = pyshell.flist self.root = root = pyshell.root self.top = top = ListedToplevel(root) self.top.wm_title("Debug Control") self.top.wm_iconname("Debug") top.wm_protocol("WM_DELETE_WINDOW", self.close) self.top.bind("<Escape>", self.close) # self.bframe = bframe = Frame(top) self.bframe.pack(anchor="w") self.buttons = bl = [] # self.bcont = b = Button(bframe, text="Go", command=self.cont) bl.append(b) self.bstep = b = Button(bframe, text="Step", command=self.step) bl.append(b) self.bnext = b = Button(bframe, text="Over", command=self.next) bl.append(b) self.bret = b = Button(bframe, text="Out", command=self.ret) bl.append(b) self.bret = b = Button(bframe, text="Quit", command=self.quit) bl.append(b) # for b in bl: b.configure(state="disabled") b.pack(side="left") # self.cframe = cframe = Frame(bframe) self.cframe.pack(side="left") # if not self.vstack: self.__class__.vstack = BooleanVar(top) self.vstack.set(1) self.bstack = Checkbutton(cframe, text="Stack", command=self.show_stack, variable=self.vstack) self.bstack.grid(row=0, column=0) if not self.vsource: self.__class__.vsource = BooleanVar(top) self.bsource = Checkbutton(cframe, text="Source", command=self.show_source, variable=self.vsource) self.bsource.grid(row=0, column=1) if not self.vlocals: self.__class__.vlocals = BooleanVar(top) self.vlocals.set(1) self.blocals = Checkbutton(cframe, text="Locals", command=self.show_locals, variable=self.vlocals) self.blocals.grid(row=1, column=0) if not self.vglobals: self.__class__.vglobals = BooleanVar(top) self.bglobals = Checkbutton(cframe, text="Globals", command=self.show_globals, variable=self.vglobals) self.bglobals.grid(row=1, column=1) # self.status = Label(top, anchor="w") self.status.pack(anchor="w") self.error = Label(top, anchor="w") self.error.pack(anchor="w", fill="x") self.errorbg = self.error.cget("background") # self.fstack = Frame(top, height=1) self.fstack.pack(expand=1, 
fill="both") self.flocals = Frame(top) self.flocals.pack(expand=1, fill="both") self.fglobals = Frame(top, height=1) self.fglobals.pack(expand=1, fill="both") # if self.vstack.get(): self.show_stack() if self.vlocals.get(): self.show_locals() if self.vglobals.get(): self.show_globals() def interaction(self, message, frame, info=None): self.frame = frame self.status.configure(text=message) # if info: type, value, tb = info try: m1 = type.__name__ except AttributeError: m1 = "%s" % str(type) if value is not None: try: m1 = "%s: %s" % (m1, str(value)) except: pass bg = "yellow" else: m1 = "" tb = None bg = self.errorbg self.error.configure(text=m1, background=bg) # sv = self.stackviewer if sv: stack, i = self.idb.get_stack(self.frame, tb) sv.load_stack(stack, i) # self.show_variables(1) # if self.vsource.get(): self.sync_source_line() # for b in self.buttons: b.configure(state="normal") # self.top.wakeup() self.root.mainloop() # for b in self.buttons: b.configure(state="disabled") self.status.configure(text="") self.error.configure(text="", background=self.errorbg) self.frame = None def sync_source_line(self): frame = self.frame if not frame: return filename, lineno = self.__frame2fileline(frame) if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename): self.flist.gotofileline(filename, lineno) def __frame2fileline(self, frame): code = frame.f_code filename = code.co_filename lineno = frame.f_lineno return filename, lineno def cont(self): self.idb.set_continue() self.root.quit() def step(self): self.idb.set_step() self.root.quit() def next(self): self.idb.set_next(self.frame) self.root.quit() def ret(self): self.idb.set_return(self.frame) self.root.quit() def quit(self): self.idb.set_quit() self.root.quit() stackviewer = None def show_stack(self): if not self.stackviewer and self.vstack.get(): self.stackviewer = sv = StackViewer(self.fstack, self.flist, self) if self.frame: stack, i = self.idb.get_stack(self.frame, None) sv.load_stack(stack, i) else: sv = 
self.stackviewer if sv and not self.vstack.get(): self.stackviewer = None sv.close() self.fstack['height'] = 1 def show_source(self): if self.vsource.get(): self.sync_source_line() def show_frame(self, (frame, lineno)): self.frame = frame self.show_variables() localsviewer = None globalsviewer = None def show_locals(self): lv = self.localsviewer if self.vlocals.get(): if not lv: self.localsviewer = NamespaceViewer(self.flocals, "Locals") else: if lv: self.localsviewer = None lv.close() self.flocals['height'] = 1 self.show_variables() def show_globals(self): gv = self.globalsviewer if self.vglobals.get(): if not gv: self.globalsviewer = NamespaceViewer(self.fglobals, "Globals") else: if gv: self.globalsviewer = None gv.close() self.fglobals['height'] = 1 self.show_variables() def show_variables(self, force=0): lv = self.localsviewer gv = self.globalsviewer frame = self.frame if not frame: ldict = gdict = None else: ldict = frame.f_locals gdict = frame.f_globals if lv and gv and ldict is gdict: ldict = None if lv: lv.load_dict(ldict, force, self.pyshell.interp.rpcclt) if gv: gv.load_dict(gdict, force, self.pyshell.interp.rpcclt) def set_breakpoint_here(self, filename, lineno): self.idb.set_break(filename, lineno) def clear_breakpoint_here(self, filename, lineno): self.idb.clear_break(filename, lineno) def clear_file_breaks(self, filename): self.idb.clear_all_file_breaks(filename) def load_breakpoints(self): "Load PyShellEditorWindow breakpoints into subprocess debugger" pyshell_edit_windows = self.pyshell.flist.inversedict.keys() for editwin in pyshell_edit_windows: filename = editwin.io.filename try: for lineno in editwin.breakpoints: self.set_breakpoint_here(filename, lineno) except AttributeError: continue class StackViewer(ScrolledList): def __init__(self, master, flist, gui): ScrolledList.__init__(self, master, width=80) self.flist = flist self.gui = gui self.stack = [] def load_stack(self, stack, index=None): self.stack = stack self.clear() for i in 
range(len(stack)): frame, lineno = stack[i] try: modname = frame.f_globals["__name__"] except: modname = "?" code = frame.f_code filename = code.co_filename funcname = code.co_name import linecache sourceline = linecache.getline(filename, lineno) import string sourceline = string.strip(sourceline) if funcname in ("?", "", None): item = "%s, line %d: %s" % (modname, lineno, sourceline) else: item = "%s.%s(), line %d: %s" % (modname, funcname, lineno, sourceline) if i == index: item = "> " + item self.append(item) if index is not None: self.select(index) def popup_event(self, event): "override base method" if self.stack: return ScrolledList.popup_event(self, event) def fill_menu(self): "override base method" menu = self.menu menu.add_command(label="Go to source line", command=self.goto_source_line) menu.add_command(label="Show stack frame", command=self.show_stack_frame) def on_select(self, index): "override base method" if 0 <= index < len(self.stack): self.gui.show_frame(self.stack[index]) def on_double(self, index): "override base method" self.show_source(index) def goto_source_line(self): index = self.listbox.index("active") self.show_source(index) def show_stack_frame(self): index = self.listbox.index("active") if 0 <= index < len(self.stack): self.gui.show_frame(self.stack[index]) def show_source(self, index): if not (0 <= index < len(self.stack)): return frame, lineno = self.stack[index] code = frame.f_code filename = code.co_filename if os.path.isfile(filename): edit = self.flist.open(filename) if edit: edit.gotoline(lineno) class NamespaceViewer: def __init__(self, master, title, dict=None): width = 0 height = 40 if dict: height = 20*len(dict) # XXX 20 == observed height of Entry widget self.master = master self.title = title import repr self.repr = repr.Repr() self.repr.maxstring = 60 self.repr.maxother = 60 self.frame = frame = Frame(master) self.frame.pack(expand=1, fill="both") self.label = Label(frame, text=title, borderwidth=2, relief="groove") 
self.label.pack(fill="x") self.vbar = vbar = Scrollbar(frame, name="vbar") vbar.pack(side="right", fill="y") self.canvas = canvas = Canvas(frame, height=min(300, max(40, height)), scrollregion=(0, 0, width, height)) canvas.pack(side="left", fill="both", expand=1) vbar["command"] = canvas.yview canvas["yscrollcommand"] = vbar.set self.subframe = subframe = Frame(canvas) self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw") self.load_dict(dict) dict = -1 def load_dict(self, dict, force=0, rpc_client=None): if dict is self.dict and not force: return subframe = self.subframe frame = self.frame for c in subframe.children.values(): c.destroy() self.dict = None if not dict: l = Label(subframe, text="None") l.grid(row=0, column=0) else: names = dict.keys() names.sort() row = 0 for name in names: value = dict[name] svalue = self.repr.repr(value) # repr(value) # Strip extra quotes caused by calling repr on the (already) # repr'd value sent across the RPC interface: if rpc_client: svalue = svalue[1:-1] l = Label(subframe, text=name) l.grid(row=row, column=0, sticky="nw") l = Entry(subframe, width=0, borderwidth=0) l.insert(0, svalue) l.grid(row=row, column=1, sticky="nw") row = row+1 self.dict = dict # XXX Could we use a <Configure> callback for the following? subframe.update_idletasks() # Alas! width = subframe.winfo_reqwidth() height = subframe.winfo_reqheight() canvas = self.canvas self.canvas["scrollregion"] = (0, 0, width, height) if height > 300: canvas["height"] = 300 frame.pack(expand=1) else: canvas["height"] = height frame.pack(expand=0) def close(self): self.frame.destroy()
unknown
codeparrot/codeparrot-clean
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for chord_pipelines.""" # internal imports import tensorflow as tf from magenta.common import testing_lib as common_testing_lib from magenta.music import chords_lib from magenta.music import constants from magenta.music import sequences_lib from magenta.music import testing_lib from magenta.pipelines import chord_pipelines from magenta.protobuf import music_pb2 NO_CHORD = constants.NO_CHORD class ChordPipelinesTest(tf.test.TestCase): def _unit_transform_test(self, unit, input_instance, expected_outputs): outputs = unit.transform(input_instance) self.assertTrue(isinstance(outputs, list)) common_testing_lib.assert_set_equality(self, expected_outputs, outputs) self.assertEqual(unit.input_type, type(input_instance)) if outputs: self.assertEqual(unit.output_type, type(outputs[0])) def testChordsExtractor(self): note_sequence = common_testing_lib.parse_test_proto( music_pb2.NoteSequence, """ time_signatures: { numerator: 4 denominator: 4} tempos: { qpm: 60}""") testing_lib.add_chords_to_sequence( note_sequence, [('C', 2), ('Am', 4), ('F', 5)]) quantized_sequence = sequences_lib.quantize_note_sequence( note_sequence, steps_per_quarter=1) quantized_sequence.total_quantized_steps = 8 expected_events = [[NO_CHORD, NO_CHORD, 'C', 'C', 'Am', 'F', 'F', 'F']] expected_chord_progressions = [] for events_list in expected_events: chords = chords_lib.ChordProgression( events_list, 
steps_per_quarter=1, steps_per_bar=4) expected_chord_progressions.append(chords) unit = chord_pipelines.ChordsExtractor(all_transpositions=False) self._unit_transform_test(unit, quantized_sequence, expected_chord_progressions) if __name__ == '__main__': tf.test.main()
unknown
codeparrot/codeparrot-clean
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from tacker.plugins.common import constants from tacker.vnfm.policy_actions import abstract_action from tacker.vnfm import utils as vnfm_utils LOG = logging.getLogger(__name__) class VNFActionAutoscaling(abstract_action.AbstractPolicyAction): def get_type(self): return 'autoscaling' def get_name(self): return 'autoscaling' def get_description(self): return 'Tacker VNF auto-scaling policy' def execute_action(self, plugin, context, vnf_dict, args): vnf_id = vnf_dict['id'] vnfm_utils.log_events(context, vnf_dict, constants.RES_EVT_MONITOR, "ActionAutoscalingHeat invoked") plugin.create_vnf_scale(context, vnf_id, args)
unknown
codeparrot/codeparrot-clean
import pytest from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException @pytest.mark.parametrize('group_names,is_active', [ (['oo_masters_to_config'], True), (['oo_nodes_to_config'], True), (['oo_etcd_to_config'], True), (['oo_masters_to_config', 'oo_nodes_to_config'], True), (['oo_masters_to_config', 'oo_etcd_to_config'], True), ([], False), (['lb'], False), (['nfs'], False), ]) def test_is_active(group_names, is_active): task_vars = dict( group_names=group_names, ) assert DiskAvailability(None, task_vars).is_active() == is_active @pytest.mark.parametrize('desc, ansible_mounts, expect_chunks', [ ( 'empty ansible_mounts', [], ['determine mount point', 'none'], ), ( 'missing relevant mount paths', [{'mount': '/mnt'}], ['determine mount point', '/mnt'], ), ( 'missing size_available', [{'mount': '/var'}, {'mount': '/usr'}, {'mount': '/tmp'}], ['missing', 'size_available'], ), ]) def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks): task_vars = dict( group_names=['oo_masters_to_config'], ansible_mounts=ansible_mounts, ) with pytest.raises(OpenShiftCheckException) as excinfo: DiskAvailability(fake_execute_module, task_vars).run() for chunk in expect_chunks: assert chunk in str(excinfo.value) @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [ ( ['oo_masters_to_config'], 0, [{ 'mount': '/', 'size_available': 40 * 10**9 + 1, }], ), ( ['oo_nodes_to_config'], 0, [{ 'mount': '/', 'size_available': 15 * 10**9 + 1, }], ), ( ['oo_etcd_to_config'], 0, [{ 'mount': '/', 'size_available': 20 * 10**9 + 1, }], ), ( ['oo_etcd_to_config'], 1, # configure lower threshold [{ 'mount': '/', 'size_available': 1 * 10**9 + 1, # way smaller than recommended }], ), ( ['oo_etcd_to_config'], 0, [{ # not enough space on / ... 'mount': '/', 'size_available': 2 * 10**9, }, { # ... 
but enough on /var 'mount': '/var', 'size_available': 20 * 10**9 + 1, }], ), ( ['oo_masters_to_config'], 0, [{ 'mount': '/', 'size_available': 2 * 10**9, }, { # not enough directly on /var 'mount': '/var', 'size_available': 10 * 10**9 + 1, }, { # but subdir mounts add up to enough 'mount': '/var/lib/docker', 'size_available': 20 * 10**9 + 1, }, { 'mount': '/var/lib/origin', 'size_available': 20 * 10**9 + 1, }], ), ]) def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts): task_vars = dict( group_names=group_names, openshift_check_min_host_disk_gb=configured_min, ansible_mounts=ansible_mounts, ) check = DiskAvailability(fake_execute_module, task_vars) check.run() assert not check.failures @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [ ( 'test with no space available', ['oo_masters_to_config'], 0, [{ 'mount': '/', 'size_available': 1, }], ['0.0 GB'], ), ( 'test with a higher configured required value', ['oo_masters_to_config'], 100, # set a higher threshold [{ 'mount': '/', 'size_available': 50 * 10**9, # would normally be enough... }], ['100.0 GB'], ), ( 'test with 1GB available, but "0" GB space requirement', ['oo_nodes_to_config'], 0, [{ 'mount': '/', 'size_available': 1 * 10**9, }], ['1.0 GB'], ), ( 'test with no space available, but "0" GB space requirement', ['oo_etcd_to_config'], 0, [{ 'mount': '/', 'size_available': 1, }], ['0.0 GB'], ), ( 'test with enough space for a node, but not for a master', ['oo_nodes_to_config', 'oo_masters_to_config'], 0, [{ 'mount': '/', 'size_available': 15 * 10**9 + 1, }], ['15.0 GB'], ), ( 'test failure with enough space on "/", but not enough on "/var"', ['oo_etcd_to_config'], 0, [{ # enough space on / ... 'mount': '/', 'size_available': 20 * 10**9 + 1, }, { # .. 
but not enough on /var 'mount': '/var', 'size_available': 0, }], ['0.0 GB'], ), ], ids=lambda argval: argval[0]) def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, expect_chunks): task_vars = dict( group_names=group_names, openshift_check_min_host_disk_gb=configured_min, ansible_mounts=ansible_mounts, ) check = DiskAvailability(fake_execute_module, task_vars) check.run() assert check.failures for chunk in 'below recommended'.split() + expect_chunks: assert chunk in str(check.failures[0]) @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [ ( 'test without enough space for master under "upgrade" context', ['oo_nodes_to_config', 'oo_masters_to_config'], "upgrade", [{ 'mount': '/', 'size_available': 1 * 10**9 + 1, 'size_total': 21 * 10**9 + 1, }], True, ["1.0 GB"], ), ( 'test with enough space for master under "upgrade" context', ['oo_nodes_to_config', 'oo_masters_to_config'], "upgrade", [{ 'mount': '/', 'size_available': 10 * 10**9 + 1, 'size_total': 21 * 10**9 + 1, }], False, [], ), ( 'test with not enough space for master, and non-upgrade context', ['oo_nodes_to_config', 'oo_masters_to_config'], "health", [{ 'mount': '/', # not enough space for a master, # "health" context should not lower requirement 'size_available': 20 * 10**9 + 1, }], True, ["20.0 GB", "below minimum"], ), ], ids=lambda argval: argval[0]) def test_min_required_space_changes_with_upgrade_context(name, group_names, context, ansible_mounts, failed, extra_words): task_vars = dict( r_openshift_health_checker_playbook_context=context, group_names=group_names, ansible_mounts=ansible_mounts, ) check = DiskAvailability(fake_execute_module, task_vars) check.run() assert bool(check.failures) == failed for word in extra_words: assert word in str(check.failures[0]) def fake_execute_module(*args): raise AssertionError('this function should not be called')
unknown
codeparrot/codeparrot-clean
# coding=utf-8 # Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import os from sickbeard.metadata import generic from sickbeard.metadata import kodi_12plus from sickrage.helper.common import replace_extension from sickrage.helper.encoding import ek class KODIMetadata(kodi_12plus.KODI_12PlusMetadata): """ Metadata generation class for KODI (legacy). 
The following file structure is used: show_root/tvshow.nfo (show metadata) show_root/fanart.jpg (fanart) show_root/folder.jpg (poster) show_root/folder.jpg (banner) show_root/Season ##/filename.ext (*) show_root/Season ##/filename.nfo (episode metadata) show_root/Season ##/filename.tbn (episode thumb) show_root/season##.tbn (season posters) show_root/season-all.tbn (season all poster) """ def __init__(self, show_metadata=False, episode_metadata=False, fanart=False, poster=False, banner=False, episode_thumbnails=False, season_posters=False, season_banners=False, season_all_poster=False, season_all_banner=False): generic.GenericMetadata.__init__(self, show_metadata, episode_metadata, fanart, poster, banner, episode_thumbnails, season_posters, season_banners, season_all_poster, season_all_banner) self.name = 'KODI' self.poster_name = self.banner_name = "folder.jpg" self.season_all_poster_name = "season-all.tbn" # web-ui metadata template self.eg_show_metadata = "tvshow.nfo" self.eg_episode_metadata = "Season##\\<i>filename</i>.nfo" self.eg_fanart = "fanart.jpg" self.eg_poster = "folder.jpg" self.eg_banner = "folder.jpg" self.eg_episode_thumbnails = "Season##\\<i>filename</i>.tbn" self.eg_season_posters = "season##.tbn" self.eg_season_banners = "<i>not supported</i>" self.eg_season_all_poster = "season-all.tbn" self.eg_season_all_banner = "<i>not supported</i>" # Override with empty methods for unsupported features def create_season_banners(self, ep_obj): pass def create_season_all_banner(self, show_obj): pass @staticmethod def get_episode_thumb_path(ep_obj): """ Returns the path where the episode thumbnail should be stored. Defaults to the same path as the episode file but with a .tbn extension. 
ep_obj: a TVEpisode instance for which to create the thumbnail """ if ek(os.path.isfile, ep_obj.location): tbn_filename = replace_extension(ep_obj.location, 'tbn') else: return None return tbn_filename @staticmethod def get_season_poster_path(show_obj, season): """ Returns the full path to the file for a given season poster. show_obj: a TVShow instance for which to generate the path season: a season number to be used for the path. Note that season 0 means specials. """ # Our specials thumbnail is, well, special if season == 0: season_poster_filename = 'season-specials' else: season_poster_filename = 'season' + str(season).zfill(2) return ek(os.path.join, show_obj.location, season_poster_filename + '.tbn') # present a standard "interface" from the module metadata_class = KODIMetadata
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python #========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ from __future__ import print_function usage = """usage: BuildHeaderTest.py <module_name> <module_source_path> <module_binary_path> <maximum_number_of_headers> This script generates a a source file designed to check the headers in each module. The generated HeaderTest can be found in the module binary 'test' directory in a file <module_name>HeaderTest#.cxx. This contains a null main(), but includes all the classes in the module. The primary purpose of this test is to make sure there are not missing module dependencies. It also tests for syntax and missing #include's. """ # Headers to not test because of dependecy issues, etc. BANNED_HEADERS = set() HEADER = """/* * This file is part of the statismo library. * * Author: Marcel Luethi (marcel.luethi@unibas.ch) * * Copyright (c) 2011 University of Basel * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the project's author nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // This file has been generated by BuildHeaderTest.py // To regenerate, build the StatismoHeaderTests target. // This is a test to include each header file. """ TRAILER = """ #include <cstdlib> // needed for EXIT_SUCCESS macro int main ( int , char* [] ) { return EXIT_SUCCESS; } """ import glob import os import sys if len(sys.argv) < 6: print(usage) sys.exit(1) def main(): module_name = sys.argv[1] module_source_path = sys.argv[2] module_binary_path = sys.argv[3] maximum_number_of_headers = int(sys.argv[4]) test_num = int(sys.argv[5]) # Get all the header files. 
include_dir = os.path.join(module_source_path, 'include') h_files = glob.glob(os.path.join(include_dir, '*.h')) h_files = [os.path.basename(h) for h in h_files] added_header_idx = maximum_number_of_headers * (test_num - 1) test_source_path = os.path.join(module_binary_path, 'tests') if not os.path.exists(test_source_path): os.makedirs(test_source_path) test_source_file = os.path.join(test_source_path, str(module_name) + 'HeaderTest' + str(test_num) + '.cxx') test_src = open(test_source_file, 'w') try: test_src.write(HEADER) if added_header_idx + maximum_number_of_headers > len(h_files): max_idx = added_header_idx + len(h_files) % maximum_number_of_headers else: max_idx = added_header_idx + maximum_number_of_headers for i in range(added_header_idx, max_idx): # Use the .hxx if possible. hxx_file = h_files[i][:-1] + 'hxx' # Files that include VTK headers need to link to VTK. if h_files[i] in BANNED_HEADERS or h_files[i].lower().find('vtk') != -1: to_include = '// #include "' + h_files[i] + '" // Banned in BuildHeaderTest.py\n' elif os.path.exists(os.path.join(module_source_path, 'include', hxx_file)): to_include = '#include "' + hxx_file + '"\n' else: to_include = '#include "' + h_files[i] + '"\n' test_src.write(to_include) test_src.write(TRAILER) finally: test_src.close() return 0 if __name__ == "__main__": ret = main() sys.exit(ret)
unknown
codeparrot/codeparrot-clean
import numpy as np import pandas as pd class TimeLogicalOps: def setup(self): N = 10_000 left, right, lmask, rmask = np.random.randint(0, 2, size=(4, N)).astype("bool") self.left = pd.arrays.BooleanArray(left, lmask) self.right = pd.arrays.BooleanArray(right, rmask) def time_or_scalar(self): self.left | True self.left | False def time_or_array(self): self.left | self.right def time_and_scalar(self): self.left & True self.left & False def time_and_array(self): self.left & self.right def time_xor_scalar(self): self.left ^ True self.left ^ False def time_xor_array(self): self.left ^ self.right
python
github
https://github.com/pandas-dev/pandas
asv_bench/benchmarks/boolean.py
# frozen_string_literal: true module ActiveRecord class PredicateBuilder class RangeHandler # :nodoc: RangeWithBinds = Struct.new(:begin, :end, :exclude_end?) def initialize(predicate_builder) @predicate_builder = predicate_builder end def call(attribute, value) begin_bind = predicate_builder.build_bind_attribute(attribute.name, value.begin) end_bind = predicate_builder.build_bind_attribute(attribute.name, value.end) attribute.between(RangeWithBinds.new(begin_bind, end_bind, value.exclude_end?)) end private attr_reader :predicate_builder end end end
ruby
github
https://github.com/rails/rails
activerecord/lib/active_record/relation/predicate_builder/range_handler.rb
""" Module responsible for execution of NDFrame.describe() method. Method NDFrame.describe() delegates actual execution to function describe_ndframe(). """ from __future__ import annotations from abc import ( ABC, abstractmethod, ) from typing import ( TYPE_CHECKING, cast, ) import numpy as np from pandas._typing import ( DtypeObj, NDFrameT, npt, ) from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import ( is_bool_dtype, is_numeric_dtype, ) from pandas.core.dtypes.dtypes import ( ArrowDtype, DatetimeTZDtype, ExtensionDtype, ) from pandas.core.arrays.floating import Float64Dtype from pandas.core.reshape.concat import concat from pandas.io.formats.format import format_percentiles if TYPE_CHECKING: from collections.abc import ( Callable, Hashable, Sequence, ) from pandas import ( DataFrame, Series, ) def describe_ndframe( *, obj: NDFrameT, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, percentiles: Sequence[float] | np.ndarray | None, ) -> NDFrameT: """Describe series or dataframe. Called from pandas.core.generic.NDFrame.describe() Parameters ---------- obj: DataFrame or Series Either dataframe or series to be described. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. Returns ------- Dataframe or series description. 
""" percentiles = _refine_percentiles(percentiles) describer: NDFrameDescriberAbstract if obj.ndim == 1: describer = SeriesDescriber( obj=cast("Series", obj), ) else: describer = DataFrameDescriber( obj=cast("DataFrame", obj), include=include, exclude=exclude, ) result = describer.describe(percentiles=percentiles) return cast(NDFrameT, result) class NDFrameDescriberAbstract(ABC): """Abstract class for describing dataframe or series. Parameters ---------- obj : Series or DataFrame Object to be described. """ def __init__(self, obj: DataFrame | Series) -> None: self.obj = obj @abstractmethod def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series: """Do describe either series or dataframe. Parameters ---------- percentiles : list-like of numbers The percentiles to include in the output. """ class SeriesDescriber(NDFrameDescriberAbstract): """Class responsible for creating series description.""" obj: Series def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series: describe_func = select_describe_func( self.obj, ) return describe_func(self.obj, percentiles) class DataFrameDescriber(NDFrameDescriberAbstract): """Class responsible for creating dataobj description. Parameters ---------- obj : DataFrame DataFrame to be described. include : 'all', list-like of dtypes or None A white list of data types to include in the result. exclude : list-like of dtypes or None A black list of data types to omit from the result. 
""" obj: DataFrame def __init__( self, obj: DataFrame, *, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, ) -> None: self.include = include self.exclude = exclude if obj.ndim == 2 and obj.columns.size == 0: raise ValueError("Cannot describe a DataFrame without columns") super().__init__(obj) def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame: data = self._select_data() ldesc: list[Series] = [] for _, series in data.items(): describe_func = select_describe_func(series) ldesc.append(describe_func(series, percentiles)) col_names = reorder_columns(ldesc) d = concat( [x.reindex(col_names) for x in ldesc], axis=1, ignore_index=True, sort=False, ) d.columns = data.columns.copy() return d def _select_data(self) -> DataFrame: """Select columns to be described.""" if (self.include is None) and (self.exclude is None): # when some numerics are found, keep only numerics default_include: list[npt.DTypeLike] = [np.number, "datetime"] data = self.obj.select_dtypes(include=default_include) if len(data.columns) == 0: data = self.obj elif self.include == "all": if self.exclude is not None: msg = "exclude must be None when include is 'all'" raise ValueError(msg) data = self.obj else: data = self.obj.select_dtypes( include=self.include, exclude=self.exclude, ) if len(data.columns) == 0: msg = "No columns match the specified include or exclude data types" raise ValueError(msg) return data def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: """Set a convenient order for rows for display.""" names: list[Hashable] = [] seen_names: set[Hashable] = set() ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: if name not in seen_names: seen_names.add(name) names.append(name) return names def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: """Describe series containing numerical data. Parameters ---------- series : Series Series to be described. 
percentiles : list-like of numbers The percentiles to include in the output. """ from pandas import Series formatted_percentiles = format_percentiles(percentiles) if len(percentiles) == 0: quantiles = [] else: quantiles = series.quantile(percentiles).tolist() stat_index = ["count", "mean", "std", "min", *formatted_percentiles, "max"] d = [ series.count(), series.mean(), series.std(), series.min(), *quantiles, series.max(), ] # GH#48340 - always return float on non-complex numeric data dtype: DtypeObj | None if isinstance(series.dtype, ExtensionDtype): if isinstance(series.dtype, ArrowDtype): if series.dtype.kind == "m": # GH53001: describe timedeltas with object dtype dtype = None else: import pyarrow as pa dtype = ArrowDtype(pa.float64()) else: dtype = Float64Dtype() elif series.dtype.kind in "iufb": # i.e. numeric but exclude complex dtype dtype = np.dtype("float") else: dtype = None return Series(d, index=stat_index, name=series.name, dtype=dtype) def describe_categorical_1d( data: Series, percentiles_ignored: Sequence[float], ) -> Series: """Describe series containing categorical data. Parameters ---------- data : Series Series to be described. percentiles_ignored : list-like of numbers Ignored, but in place to unify interface. """ names = ["count", "unique", "top", "freq"] objcounts = data.value_counts() count_unique = len(objcounts[objcounts != 0]) if count_unique > 0: top, freq = objcounts.index[0], objcounts.iloc[0] dtype = None else: # If the DataFrame is empty, set 'top' and 'freq' to None # to maintain output shape consistency top, freq = np.nan, np.nan dtype = "object" result = [data.count(), count_unique, top, freq] from pandas import Series return Series(result, index=names, name=data.name, dtype=dtype) def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series: """Describe series containing datetime64 dtype. Parameters ---------- data : Series Series to be described. 
percentiles : list-like of numbers The percentiles to include in the output. """ # GH-30164 from pandas import Series formatted_percentiles = format_percentiles(percentiles) stat_index = ["count", "mean", "min", *formatted_percentiles, "max"] d = [ data.count(), data.mean(), data.min(), *data.quantile(percentiles).tolist(), data.max(), ] return Series(d, index=stat_index, name=data.name) def select_describe_func( data: Series, ) -> Callable: """Select proper function for describing series based on data type. Parameters ---------- data : Series Series to be described. """ if is_bool_dtype(data.dtype): return describe_categorical_1d elif is_numeric_dtype(data): return describe_numeric_1d elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype): return describe_timestamp_1d elif data.dtype.kind == "m": return describe_numeric_1d else: return describe_categorical_1d def _refine_percentiles( percentiles: Sequence[float] | np.ndarray | None, ) -> npt.NDArray[np.float64]: """ Ensure that percentiles are unique and sorted. Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. """ if percentiles is None: return np.array([0.25, 0.5, 0.75]) percentiles = np.asarray(percentiles) # get them all to be in [0, 1] validate_percentile(percentiles) # sort and check for duplicates unique_pcts = np.unique(percentiles) assert percentiles is not None if len(unique_pcts) < len(percentiles): raise ValueError("percentiles cannot contain duplicates") return unique_pcts
python
github
https://github.com/pandas-dev/pandas
pandas/core/methods/describe.py
package images import ( "testing" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "gotest.tools/v3/assert" ) func TestOnlyPlatformWithFallback(t *testing.T) { p := ocispec.Platform{ OS: "linux", Architecture: "arm", Variant: "v8", } // Check no variant assert.Assert(t, OnlyPlatformWithFallback(p).Match(ocispec.Platform{ OS: p.OS, Architecture: p.Architecture, })) // check with variant assert.Assert(t, OnlyPlatformWithFallback(p).Match(ocispec.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, })) // Make sure non-matches are false. assert.Assert(t, !OnlyPlatformWithFallback(p).Match(ocispec.Platform{ OS: p.OS, Architecture: "amd64", })) }
go
github
https://github.com/moby/moby
daemon/images/images_test.go
from flask import (render_template, current_app, Blueprint, redirect, url_for, request, flash, session) from flask.ext.login import login_user, logout_user from flask.ext.principal import ( Identity, AnonymousIdentity, identity_changed ) from webapp.extensions import oid, facebook, twitter from webapp.models import db, User from webapp.forms import LoginForm, RegisterForm, OpenIDForm main_blueprint = Blueprint( 'main', __name__, template_folder='../templates/main' ) @main_blueprint.route('/') def index(): return redirect(url_for('blog.home')) @main_blueprint.route('/login', methods=['GET', 'POST']) @oid.loginhandler def login(): form = LoginForm() openid_form = OpenIDForm() if openid_form.validate_on_submit(): return oid.try_login( openid_form.openid.data, ask_for=['nickname', 'email'], ask_for_optional=['fullname'] ) if form.validate_on_submit(): user = User.query.filter_by(username=form.username.data).one() login_user(user, remember=form.remember.data) identity_changed.send( current_app._get_current_object(), identity=Identity(user.id) ) flash("You have been logged in.", category="success") return redirect(url_for('blog.home')) openid_errors = oid.fetch_error() if openid_errors: flash(openid_errors, category="danger") return render_template('login.html', form=form, openid_form=openid_form) @main_blueprint.route('/facebook') def facebook_login(): return facebook.authorize( callback=url_for( '.facebook_authorized', next=request.referrer or None, _external=True ) ) @main_blueprint.route('/facebook/authorized') @facebook.authorized_handler def facebook_authorized(resp): if resp is None: return 'Access denied: reason=%s error=%s' % ( request.args['error_reason'], request.args['error_description'] ) session['facebook_oauth_token'] = (resp['access_token'], '') me = facebook.get('/me') user = User.query.filter_by( username=me.data['first_name'] + " " + me.data['last_name'] ).first() if not user: user = User(me.data['first_name'] + " " + me.data['last_name']) 
db.session.add(user) db.session.commit() login_user(user) flash("You have been logged in.", category="success") return redirect(request.args.get('next') or url_for('blog.home')) @main_blueprint.route('/twitter-login') def twitter_login(): return twitter.authorize( callback=url_for( '.twitter_authorized', next=request.referrer or None, _external=True ) ) @main_blueprint.route('/twitter-login/authorized') @twitter.authorized_handler def twitter_authorized(resp): if resp is None: return 'Access denied: reason=%s error=%s' % ( request.args['error_reason'], request.args['error_description'] ) session['twitter_oauth_token'] = resp['oauth_token'] + \ resp['oauth_token_secret'] user = User.query.filter_by(username=resp['screen_name']).first() if not user: user = User(resp['screen_name'], '') db.session.add(user) db.session.commit() login_user(user) flash("You have been logged in.", category="success") return redirect(request.args.get('next') or url_for('blog.home')) @main_blueprint.route('/logout', methods=['GET', 'POST']) def logout(): logout_user() identity_changed.send( current_app._get_current_object(), identity=AnonymousIdentity() ) flash("You have been logged out.", category="success") return redirect(url_for('.login')) @main_blueprint.route('/register', methods=['GET', 'POST']) @oid.loginhandler def register(): form = RegisterForm() openid_form = OpenIDForm() if openid_form.validate_on_submit(): return oid.try_login( openid_form.openid.data, ask_for=['nickname', 'email'], ask_for_optional=['fullname'] ) if form.validate_on_submit(): new_user = User(form.username.data) new_user.set_password(form.password.data) db.session.add(new_user) db.session.commit() flash("Your user has been created, please login.", category="success") return redirect(url_for('.login')) openid_errors = oid.fetch_error() if openid_errors: flash(openid_errors, category="danger") return render_template('register.html', form=form, openid_form=openid_form)
unknown
codeparrot/codeparrot-clean
import json import pickle from django.contrib.gis.gdal import ( CoordTransform, GDALException, OGRGeometry, OGRGeomType, SpatialReference, ) from django.contrib.gis.gdal.geometries import CircularString, CurvePolygon from django.contrib.gis.geos import GEOSException from django.template import Context from django.template.engine import Engine from django.test import SimpleTestCase from ..test_data import TestDataMixin class OGRGeomTest(SimpleTestCase, TestDataMixin): "This tests the OGR Geometry." def test_geomtype(self): "Testing OGRGeomType object." # OGRGeomType should initialize on all these inputs. OGRGeomType(1) OGRGeomType(7) OGRGeomType("point") OGRGeomType("GeometrycollectioN") OGRGeomType("LINearrING") OGRGeomType("Unknown") # Should throw TypeError on this input with self.assertRaises(GDALException): OGRGeomType(23) with self.assertRaises(GDALException): OGRGeomType("fooD") with self.assertRaises(GDALException): OGRGeomType(4001) # Equivalence can take strings, ints, and other OGRGeomTypes self.assertEqual(OGRGeomType(1), OGRGeomType(1)) self.assertEqual(OGRGeomType(7), "GeometryCollection") self.assertEqual(OGRGeomType("point"), "POINT") self.assertNotEqual(OGRGeomType("point"), 2) self.assertEqual(OGRGeomType("unknown"), 0) self.assertEqual(OGRGeomType(6), "MULtiPolyGON") self.assertEqual(OGRGeomType(1), OGRGeomType("point")) self.assertNotEqual(OGRGeomType("POINT"), OGRGeomType(6)) # Testing the Django field name equivalent property. self.assertEqual("PointField", OGRGeomType("Point").django) self.assertEqual("GeometryField", OGRGeomType("Geometry").django) self.assertEqual("GeometryField", OGRGeomType("Unknown").django) self.assertIsNone(OGRGeomType("none").django) # 'Geometry' initialization implies an unknown geometry type. 
gt = OGRGeomType("Geometry") self.assertEqual(0, gt.num) self.assertEqual("Unknown", gt.name) def test_geom_type_repr(self): self.assertEqual(repr(OGRGeomType("point")), "<OGRGeomType: Point>") def test_geomtype_25d(self): "Testing OGRGeomType object with 25D types." wkb25bit = OGRGeomType.wkb25bit self.assertEqual(OGRGeomType(wkb25bit + 1), "Point25D") self.assertEqual(OGRGeomType("MultiLineString25D"), (5 + wkb25bit)) self.assertEqual( "GeometryCollectionField", OGRGeomType("GeometryCollection25D").django ) def test_wkt(self): "Testing WKT output." for g in self.geometries.wkt_out: geom = OGRGeometry(g.wkt) self.assertEqual(g.wkt, geom.wkt) def test_ewkt(self): "Testing EWKT input/output." for ewkt_val in ("POINT (1 2 3)", "LINEARRING (0 0,1 1,2 1,0 0)"): # First with ewkt output when no SRID in EWKT self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt) # No test consumption with an SRID specified. ewkt_val = "SRID=4326;%s" % ewkt_val geom = OGRGeometry(ewkt_val) self.assertEqual(ewkt_val, geom.ewkt) self.assertEqual(4326, geom.srs.srid) def test_gml(self): "Testing GML output." for g in self.geometries.wkt_out: geom = OGRGeometry(g.wkt) exp_gml = g.gml self.assertEqual(exp_gml, geom.gml) def test_hex(self): "Testing HEX input/output." for g in self.geometries.hex_wkt: geom1 = OGRGeometry(g.wkt) self.assertEqual(g.hex.encode(), geom1.hex) # Constructing w/HEX geom2 = OGRGeometry(g.hex) self.assertEqual(geom1, geom2) def test_wkb(self): "Testing WKB input/output." for g in self.geometries.hex_wkt: geom1 = OGRGeometry(g.wkt) wkb = geom1.wkb self.assertEqual(wkb.hex().upper(), g.hex) # Constructing w/WKB. geom2 = OGRGeometry(wkb) self.assertEqual(geom1, geom2) def test_json(self): "Testing GeoJSON input/output." 
for g in self.geometries.json_geoms: geom = OGRGeometry(g.wkt) if not hasattr(g, "not_equal"): # Loading jsons to prevent decimal differences self.assertEqual(json.loads(g.json), json.loads(geom.json)) self.assertEqual(json.loads(g.json), json.loads(geom.geojson)) self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json)) # Test input with some garbage content (but valid json) (#15529) geom = OGRGeometry( '{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}' ) self.assertIsInstance(geom, OGRGeometry) def test_points(self): "Testing Point objects." OGRGeometry("POINT(0 0)") for p in self.geometries.points: if not hasattr(p, "z"): # No 3D pnt = OGRGeometry(p.wkt) self.assertEqual(1, pnt.geom_type) self.assertEqual("POINT", pnt.geom_name) self.assertEqual(p.x, pnt.x) self.assertEqual(p.y, pnt.y) self.assertEqual((p.x, p.y), pnt.tuple) def test_multipoints(self): "Testing MultiPoint objects." for mp in self.geometries.multipoints: mgeom1 = OGRGeometry(mp.wkt) # First one from WKT self.assertEqual(4, mgeom1.geom_type) self.assertEqual("MULTIPOINT", mgeom1.geom_name) mgeom2 = OGRGeometry("MULTIPOINT") # Creating empty multipoint mgeom3 = OGRGeometry("MULTIPOINT") for g in mgeom1: mgeom2.add(g) # adding each point from the multipoints mgeom3.add(g.wkt) # should take WKT as well self.assertEqual(mgeom1, mgeom2) # they should equal self.assertEqual(mgeom1, mgeom3) self.assertEqual(mp.coords, mgeom2.coords) self.assertEqual(mp.n_p, mgeom2.point_count) def test_linestring(self): "Testing LineString objects." prev = OGRGeometry("POINT(0 0)") for ls in self.geometries.linestrings: linestr = OGRGeometry(ls.wkt) self.assertEqual(2, linestr.geom_type) self.assertEqual("LINESTRING", linestr.geom_name) self.assertEqual(ls.n_p, linestr.point_count) self.assertEqual(ls.coords, linestr.tuple) self.assertEqual(linestr, OGRGeometry(ls.wkt)) self.assertNotEqual(linestr, prev) msg = "Index out of range when accessing points of a line string: %s." 
with self.assertRaisesMessage(IndexError, msg % len(linestr)): linestr.__getitem__(len(linestr)) prev = linestr # Testing the x, y properties. x = [tmpx for tmpx, tmpy in ls.coords] y = [tmpy for tmpx, tmpy in ls.coords] self.assertEqual(x, linestr.x) self.assertEqual(y, linestr.y) def test_multilinestring(self): "Testing MultiLineString objects." prev = OGRGeometry("POINT(0 0)") for mls in self.geometries.multilinestrings: mlinestr = OGRGeometry(mls.wkt) self.assertEqual(5, mlinestr.geom_type) self.assertEqual("MULTILINESTRING", mlinestr.geom_name) self.assertEqual(mls.n_p, mlinestr.point_count) self.assertEqual(mls.coords, mlinestr.tuple) self.assertEqual(mlinestr, OGRGeometry(mls.wkt)) self.assertNotEqual(mlinestr, prev) prev = mlinestr for ls in mlinestr: self.assertEqual(2, ls.geom_type) self.assertEqual("LINESTRING", ls.geom_name) msg = "Index out of range when accessing geometry in a collection: %s." with self.assertRaisesMessage(IndexError, msg % len(mlinestr)): mlinestr.__getitem__(len(mlinestr)) def test_linearring(self): "Testing LinearRing objects." prev = OGRGeometry("POINT(0 0)") for rr in self.geometries.linearrings: lr = OGRGeometry(rr.wkt) # self.assertEqual(101, lr.geom_type.num) self.assertEqual("LINEARRING", lr.geom_name) self.assertEqual(rr.n_p, len(lr)) self.assertEqual(lr, OGRGeometry(rr.wkt)) self.assertNotEqual(lr, prev) prev = lr def test_polygons(self): "Testing Polygon objects." # Testing `from_bbox` class method bbox = (-180, -90, 180, 90) p = OGRGeometry.from_bbox(bbox) self.assertEqual(bbox, p.extent) prev = OGRGeometry("POINT(0 0)") for p in self.geometries.polygons: poly = OGRGeometry(p.wkt) self.assertEqual(3, poly.geom_type) self.assertEqual("POLYGON", poly.geom_name) self.assertEqual(p.n_p, poly.point_count) self.assertEqual(p.n_i + 1, len(poly)) msg = "Index out of range when accessing rings of a polygon: %s." with self.assertRaisesMessage(IndexError, msg % len(poly)): poly.__getitem__(len(poly)) # Testing area & centroid. 
self.assertAlmostEqual(p.area, poly.area, 9) x, y = poly.centroid.tuple self.assertAlmostEqual(p.centroid[0], x, 9) self.assertAlmostEqual(p.centroid[1], y, 9) # Testing equivalence self.assertEqual(poly, OGRGeometry(p.wkt)) self.assertNotEqual(poly, prev) if p.ext_ring_cs: ring = poly[0] self.assertEqual(p.ext_ring_cs, ring.tuple) self.assertEqual(p.ext_ring_cs, poly[0].tuple) self.assertEqual(len(p.ext_ring_cs), ring.point_count) for r in poly: self.assertEqual("LINEARRING", r.geom_name) def test_polygons_templates(self): # Accessing Polygon attributes in templates should work. engine = Engine() template = engine.from_string("{{ polygons.0.wkt }}") polygons = [OGRGeometry(p.wkt) for p in self.geometries.multipolygons[:2]] content = template.render(Context({"polygons": polygons})) self.assertIn("MULTIPOLYGON (((100", content) def test_closepolygons(self): "Testing closing Polygon objects." # Both rings in this geometry are not closed. poly = OGRGeometry("POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))") self.assertEqual(8, poly.point_count) with self.assertRaises(GDALException): poly.centroid poly.close_rings() self.assertEqual( 10, poly.point_count ) # Two closing points should've been added self.assertEqual(OGRGeometry("POINT(2.5 2.5)"), poly.centroid) def test_multipolygons(self): "Testing MultiPolygon objects." OGRGeometry("POINT(0 0)") for mp in self.geometries.multipolygons: mpoly = OGRGeometry(mp.wkt) self.assertEqual(6, mpoly.geom_type) self.assertEqual("MULTIPOLYGON", mpoly.geom_name) if mp.valid: self.assertEqual(mp.n_p, mpoly.point_count) self.assertEqual(mp.num_geom, len(mpoly)) msg = "Index out of range when accessing geometry in a collection: %s." 
with self.assertRaisesMessage(IndexError, msg % len(mpoly)): mpoly.__getitem__(len(mpoly)) for p in mpoly: self.assertEqual("POLYGON", p.geom_name) self.assertEqual(3, p.geom_type) self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt) def test_srs(self): "Testing OGR Geometries with Spatial Reference objects." for mp in self.geometries.multipolygons: # Creating a geometry w/spatial reference sr = SpatialReference("WGS84") mpoly = OGRGeometry(mp.wkt, sr) self.assertEqual(sr.wkt, mpoly.srs.wkt) # Ensuring that SRS is propagated to clones. klone = mpoly.clone() self.assertEqual(sr.wkt, klone.srs.wkt) # Ensuring all children geometries (polygons and their rings) all # return the assigned spatial reference as well. for poly in mpoly: self.assertEqual(sr.wkt, poly.srs.wkt) for ring in poly: self.assertEqual(sr.wkt, ring.srs.wkt) # Ensuring SRS propagate in topological ops. a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr) b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr) diff = a.difference(b) union = a.union(b) self.assertEqual(sr.wkt, diff.srs.wkt) self.assertEqual(sr.srid, union.srs.srid) # Instantiating w/an integer SRID mpoly = OGRGeometry(mp.wkt, 4326) self.assertEqual(4326, mpoly.srid) mpoly.srs = SpatialReference(4269) self.assertEqual(4269, mpoly.srid) self.assertEqual("NAD83", mpoly.srs.name) # Incrementing through the multipolygon after the spatial reference # has been re-assigned. for poly in mpoly: self.assertEqual(mpoly.srs.wkt, poly.srs.wkt) poly.srs = 32140 for ring in poly: # Changing each ring in the polygon self.assertEqual(32140, ring.srs.srid) self.assertEqual("NAD83 / Texas South Central", ring.srs.name) ring.srs = str(SpatialReference(4326)) # back to WGS84 self.assertEqual(4326, ring.srs.srid) # Using the `srid` property. ring.srid = 4322 self.assertEqual("WGS 72", ring.srs.name) self.assertEqual(4322, ring.srid) # srs/srid may be assigned their own values, even when srs is None. 
mpoly = OGRGeometry(mp.wkt, srs=None) mpoly.srs = mpoly.srs mpoly.srid = mpoly.srid def test_srs_transform(self): "Testing transform()." orig = OGRGeometry("POINT (-104.609 38.255)", 4326) trans = OGRGeometry("POINT (992385.4472045 481455.4944650)", 2774) # Using an srid, a SpatialReference object, and a CoordTransform object # or transformations. t1, t2, t3 = orig.clone(), orig.clone(), orig.clone() t1.transform(trans.srid) t2.transform(SpatialReference("EPSG:2774")) ct = CoordTransform(SpatialReference("WGS84"), SpatialReference(2774)) t3.transform(ct) # Testing use of the `clone` keyword. k1 = orig.clone() k2 = k1.transform(trans.srid, clone=True) self.assertEqual(k1, orig) self.assertNotEqual(k1, k2) # Different PROJ versions use different transformations, all are # correct as having a 1 meter accuracy. prec = -1 for p in (t1, t2, t3, k2): self.assertAlmostEqual(trans.x, p.x, prec) self.assertAlmostEqual(trans.y, p.y, prec) def test_transform_dim(self): "Testing coordinate dimension is the same on transformed geometries." ls_orig = OGRGeometry("LINESTRING(-104.609 38.255)", 4326) ls_trans = OGRGeometry("LINESTRING(992385.4472045 481455.4944650)", 2774) # Different PROJ versions use different transformations, all are # correct as having a 1 meter accuracy. prec = -1 ls_orig.transform(ls_trans.srs) # Making sure the coordinate dimension is still 2D. self.assertEqual(2, ls_orig.coord_dim) self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec) self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec) def test_difference(self): "Testing difference()." 
for i in range(len(self.geometries.topology_geoms)): a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a) b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b) d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt) d2 = a.difference(b) self.assertTrue(d1.geos.equals(d2.geos)) self.assertTrue( d1.geos.equals((a - b).geos) ) # __sub__ is difference operator a -= b # testing __isub__ self.assertTrue(d1.geos.equals(a.geos)) def test_intersection(self): "Testing intersects() and intersection()." for i in range(len(self.geometries.topology_geoms)): a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a) b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b) i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt) self.assertTrue(a.intersects(b)) i2 = a.intersection(b) self.assertTrue(i1.geos.equals(i2.geos)) self.assertTrue( i1.geos.equals((a & b).geos) ) # __and__ is intersection operator a &= b # testing __iand__ self.assertTrue(i1.geos.equals(a.geos)) def test_symdifference(self): "Testing sym_difference()." for i in range(len(self.geometries.topology_geoms)): a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a) b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b) d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt) d2 = a.sym_difference(b) self.assertTrue(d1.geos.equals(d2.geos)) self.assertTrue( d1.geos.equals((a ^ b).geos) ) # __xor__ is symmetric difference operator a ^= b # testing __ixor__ self.assertTrue(d1.geos.equals(a.geos)) def test_union(self): "Testing union()." for i in range(len(self.geometries.topology_geoms)): a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a) b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b) u1 = OGRGeometry(self.geometries.union_geoms[i].wkt) u2 = a.union(b) self.assertTrue(u1.geos.equals(u2.geos)) self.assertTrue(u1.geos.equals((a | b).geos)) # __or__ is union operator a |= b # testing __ior__ self.assertTrue(u1.geos.equals(a.geos)) def test_add(self): "Testing GeometryCollection.add()." 
# Can't insert a Point into a MultiPolygon. mp = OGRGeometry("MultiPolygon") pnt = OGRGeometry("POINT(5 23)") with self.assertRaises(GDALException): mp.add(pnt) # GeometryCollection.add may take an OGRGeometry (if another collection # of the same type all child geoms will be added individually) or WKT. for mp in self.geometries.multipolygons: mpoly = OGRGeometry(mp.wkt) mp1 = OGRGeometry("MultiPolygon") mp2 = OGRGeometry("MultiPolygon") mp3 = OGRGeometry("MultiPolygon") for poly in mpoly: mp1.add(poly) # Adding a geometry at a time mp2.add(poly.wkt) # Adding WKT mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once. for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp) def test_extent(self): "Testing `extent` property." # The xmin, ymin, xmax, ymax of the MultiPoint should be returned. mp = OGRGeometry("MULTIPOINT(5 23, 0 0, 10 50)") self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent) # Testing on the 'real world' Polygon. poly = OGRGeometry(self.geometries.polygons[3].wkt) ring = poly.shell x, y = ring.x, ring.y xmin, ymin = min(x), min(y) xmax, ymax = max(x), max(y) self.assertEqual((xmin, ymin, xmax, ymax), poly.extent) def test_25D(self): "Testing 2.5D geometries." pnt_25d = OGRGeometry("POINT(1 2 3)") self.assertEqual("Point25D", pnt_25d.geom_type.name) self.assertEqual(3.0, pnt_25d.z) self.assertEqual(3, pnt_25d.coord_dim) ls_25d = OGRGeometry("LINESTRING(1 1 1,2 2 2,3 3 3)") self.assertEqual("LineString25D", ls_25d.geom_type.name) self.assertEqual([1.0, 2.0, 3.0], ls_25d.z) self.assertEqual(3, ls_25d.coord_dim) def test_pickle(self): "Testing pickle support." g1 = OGRGeometry("LINESTRING(1 1 1,2 2 2,3 3 3)", "WGS84") g2 = pickle.loads(pickle.dumps(g1)) self.assertEqual(g1, g2) self.assertEqual(4326, g2.srs.srid) self.assertEqual(g1.srs.wkt, g2.srs.wkt) def test_ogrgeometry_transform_workaround(self): "Testing coordinate dimensions on geometries after transformation." 
# A bug in GDAL versions prior to 1.7 changes the coordinate # dimension of a geometry after it has been transformed. # This test ensures that the bug workarounds employed within # `OGRGeometry.transform` indeed work. wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))" wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))" srid = 4326 # For both the 2D and 3D MultiLineString, ensure _both_ the dimension # of the collection and the component LineString have the expected # coordinate dimension after transform. geom = OGRGeometry(wkt_2d, srid) geom.transform(srid) self.assertEqual(2, geom.coord_dim) self.assertEqual(2, geom[0].coord_dim) self.assertEqual(wkt_2d, geom.wkt) geom = OGRGeometry(wkt_3d, srid) geom.transform(srid) self.assertEqual(3, geom.coord_dim) self.assertEqual(3, geom[0].coord_dim) self.assertEqual(wkt_3d, geom.wkt) # Testing binary predicates, `assertIs` is used to check that bool is # returned. def test_equivalence_regression(self): "Testing equivalence methods with non-OGRGeometry instances." 
self.assertIsNotNone(OGRGeometry("POINT(0 0)")) self.assertNotEqual(OGRGeometry("LINESTRING(0 0, 1 1)"), 3) def test_contains(self): self.assertIs( OGRGeometry("POINT(0 0)").contains(OGRGeometry("POINT(0 0)")), True ) self.assertIs( OGRGeometry("POINT(0 0)").contains(OGRGeometry("POINT(0 1)")), False ) def test_crosses(self): self.assertIs( OGRGeometry("LINESTRING(0 0, 1 1)").crosses( OGRGeometry("LINESTRING(0 1, 1 0)") ), True, ) self.assertIs( OGRGeometry("LINESTRING(0 0, 0 1)").crosses( OGRGeometry("LINESTRING(1 0, 1 1)") ), False, ) def test_disjoint(self): self.assertIs( OGRGeometry("LINESTRING(0 0, 1 1)").disjoint( OGRGeometry("LINESTRING(0 1, 1 0)") ), False, ) self.assertIs( OGRGeometry("LINESTRING(0 0, 0 1)").disjoint( OGRGeometry("LINESTRING(1 0, 1 1)") ), True, ) def test_equals(self): self.assertIs( OGRGeometry("POINT(0 0)").contains(OGRGeometry("POINT(0 0)")), True ) self.assertIs( OGRGeometry("POINT(0 0)").contains(OGRGeometry("POINT(0 1)")), False ) def test_intersects(self): self.assertIs( OGRGeometry("LINESTRING(0 0, 1 1)").intersects( OGRGeometry("LINESTRING(0 1, 1 0)") ), True, ) self.assertIs( OGRGeometry("LINESTRING(0 0, 0 1)").intersects( OGRGeometry("LINESTRING(1 0, 1 1)") ), False, ) def test_overlaps(self): self.assertIs( OGRGeometry("POLYGON ((0 0, 0 2, 2 2, 2 0, 0 0))").overlaps( OGRGeometry("POLYGON ((1 1, 1 5, 5 5, 5 1, 1 1))") ), True, ) self.assertIs( OGRGeometry("POINT(0 0)").overlaps(OGRGeometry("POINT(0 1)")), False ) def test_touches(self): self.assertIs( OGRGeometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))").touches( OGRGeometry("LINESTRING(0 2, 2 0)") ), True, ) self.assertIs( OGRGeometry("POINT(0 0)").touches(OGRGeometry("POINT(0 1)")), False ) def test_within(self): self.assertIs( OGRGeometry("POINT(0.5 0.5)").within( OGRGeometry("POLYGON ((0 0, 0 1, 1 1, 1 0, 0 0))") ), True, ) self.assertIs( OGRGeometry("POINT(0 0)").within(OGRGeometry("POINT(0 1)")), False ) def test_from_gml(self): self.assertEqual( OGRGeometry("POINT(0 0)"), 
OGRGeometry.from_gml( '<gml:Point gml:id="p21" ' 'srsName="http://www.opengis.net/def/crs/EPSG/0/4326">' ' <gml:pos srsDimension="2">0 0</gml:pos>' "</gml:Point>" ), ) def test_empty(self): self.assertIs(OGRGeometry("POINT (0 0)").empty, False) self.assertIs(OGRGeometry("POINT EMPTY").empty, True) def test_empty_point_to_geos(self): p = OGRGeometry("POINT EMPTY", srs=4326) self.assertEqual(p.geos.ewkt, p.ewkt) def test_geometry_types(self): tests = [ ("Point", 1, True), ("LineString", 2, True), ("Polygon", 3, True), ("MultiPoint", 4, True), ("Multilinestring", 5, True), ("MultiPolygon", 6, True), ("GeometryCollection", 7, True), ("CircularString", 8, True), ("CompoundCurve", 9, True), ("CurvePolygon", 10, True), ("MultiCurve", 11, True), ("MultiSurface", 12, True), # 13 (Curve) and 14 (Surface) are abstract types. ("PolyhedralSurface", 15, False), ("TIN", 16, False), ("Triangle", 17, False), ("Linearring", 2, True), # Types 1 - 7 with Z dimension have 2.5D enums. ("Point Z", -2147483647, True), # 1001 ("LineString Z", -2147483646, True), # 1002 ("Polygon Z", -2147483645, True), # 1003 ("MultiPoint Z", -2147483644, True), # 1004 ("Multilinestring Z", -2147483643, True), # 1005 ("MultiPolygon Z", -2147483642, True), # 1006 ("GeometryCollection Z", -2147483641, True), # 1007 ("CircularString Z", 1008, True), ("CompoundCurve Z", 1009, True), ("CurvePolygon Z", 1010, True), ("MultiCurve Z", 1011, True), ("MultiSurface Z", 1012, True), ("PolyhedralSurface Z", 1015, False), ("TIN Z", 1016, False), ("Triangle Z", 1017, False), ("Point M", 2001, True), ("LineString M", 2002, True), ("Polygon M", 2003, True), ("MultiPoint M", 2004, True), ("MultiLineString M", 2005, True), ("MultiPolygon M", 2006, True), ("GeometryCollection M", 2007, True), ("CircularString M", 2008, True), ("CompoundCurve M", 2009, True), ("CurvePolygon M", 2010, True), ("MultiCurve M", 2011, True), ("MultiSurface M", 2012, True), ("PolyhedralSurface M", 2015, False), ("TIN M", 2016, False), ("Triangle M", 
2017, False), ("Point ZM", 3001, True), ("LineString ZM", 3002, True), ("Polygon ZM", 3003, True), ("MultiPoint ZM", 3004, True), ("MultiLineString ZM", 3005, True), ("MultiPolygon ZM", 3006, True), ("GeometryCollection ZM", 3007, True), ("CircularString ZM", 3008, True), ("CompoundCurve ZM", 3009, True), ("CurvePolygon ZM", 3010, True), ("MultiCurve ZM", 3011, True), ("MultiSurface ZM", 3012, True), ("PolyhedralSurface ZM", 3015, False), ("TIN ZM", 3016, False), ("Triangle ZM", 3017, False), ] for test in tests: geom_type, num, supported = test with self.subTest(geom_type=geom_type, num=num, supported=supported): if supported: g = OGRGeometry(f"{geom_type} EMPTY") self.assertEqual(g.geom_type.num, num) else: type_ = geom_type.replace(" ", "") msg = f"Unsupported geometry type: {type_}" with self.assertRaisesMessage(TypeError, msg): OGRGeometry(f"{geom_type} EMPTY") def test_is_3d_and_set_3d(self): geom = OGRGeometry("POINT (1 2)") self.assertIs(geom.is_3d, False) geom.set_3d(True) self.assertIs(geom.is_3d, True) self.assertEqual(geom.wkt, "POINT (1 2 0)") geom.set_3d(False) self.assertIs(geom.is_3d, False) self.assertEqual(geom.wkt, "POINT (1 2)") msg = "Input to 'set_3d' must be a boolean, got 'None'" with self.assertRaisesMessage(ValueError, msg): geom.set_3d(None) def test_wkt_and_wkb_output(self): tests = [ # 2D ("POINT (1 2)", "0101000000000000000000f03f0000000000000040"), ( "LINESTRING (30 10,10 30)", "0102000000020000000000000000003e400000000000002" "44000000000000024400000000000003e40", ), ( "POLYGON ((30 10,40 40,20 40,30 10))", "010300000001000000040000000000000000003e400000000000002440000000000000" "44400000000000004440000000000000344000000000000044400000000000003e4000" "00000000002440", ), ( "MULTIPOINT (10 40,40 30)", "0104000000020000000101000000000000000000244000000000000044400101000000" "00000000000044400000000000003e40", ), ( "MULTILINESTRING ((10 10,20 20),(40 40,30 30,40 20))", 
"0105000000020000000102000000020000000000000000002440000000000000244000" "0000000000344000000000000034400102000000030000000000000000004440000000" "00000044400000000000003e400000000000003e400000000000004440000000000000" "3440", ), ( "MULTIPOLYGON (((30 20,45 40,10 40,30 20)),((15 5,40 10,10 20,15 5)))", "010600000002000000010300000001000000040000000000000000003e400000000000" "0034400000000000804640000000000000444000000000000024400000000000004440" "0000000000003e40000000000000344001030000000100000004000000000000000000" "2e40000000000000144000000000000044400000000000002440000000000000244000" "000000000034400000000000002e400000000000001440", ), ( "GEOMETRYCOLLECTION (POINT (40 10))", "010700000001000000010100000000000000000044400000000000002440", ), # 3D ( "POINT (1 2 3)", "0101000080000000000000f03f00000000000000400000000000000840", ), ( "LINESTRING (30 10 3,10 30 3)", "0102000080020000000000000000003e40000000000000244000000000000008400000" "0000000024400000000000003e400000000000000840", ), ( "POLYGON ((30 10 3,40 40 3,30 10 3))", "010300008001000000030000000000000000003e400000000000002440000000000000" "08400000000000004440000000000000444000000000000008400000000000003e4000" "000000000024400000000000000840", ), ( "MULTIPOINT (10 40 3,40 30 3)", "0104000080020000000101000080000000000000244000000000000044400000000000" "000840010100008000000000000044400000000000003e400000000000000840", ), ( "MULTILINESTRING ((10 10 3,20 20 3))", "0105000080010000000102000080020000000000000000002440000000000000244000" "00000000000840000000000000344000000000000034400000000000000840", ), ( "MULTIPOLYGON (((30 20 3,45 40 3,30 20 3)))", "010600008001000000010300008001000000030000000000000000003e400000000000" "0034400000000000000840000000000080464000000000000044400000000000000840" "0000000000003e4000000000000034400000000000000840", ), ( "GEOMETRYCOLLECTION (POINT (40 10 3))", "0107000080010000000101000080000000000000444000000000000024400000000000" "000840", ), ] for geom, wkb in tests: with 
self.subTest(geom=geom): g = OGRGeometry(geom) self.assertEqual(g.wkt, geom) self.assertEqual(g.wkb.hex(), wkb) def test_measure_is_measure_and_set_measure(self): geom = OGRGeometry("POINT (1 2 3)") self.assertIs(geom.is_measured, False) geom.set_measured(True) self.assertIs(geom.is_measured, True) self.assertEqual(geom.wkt, "POINT ZM (1 2 3 0)") geom.set_measured(False) self.assertIs(geom.is_measured, False) self.assertEqual(geom.wkt, "POINT (1 2 3)") msg = "Input to 'set_measured' must be a boolean, got 'None'" with self.assertRaisesMessage(ValueError, msg): geom.set_measured(None) def test_point_m_coordinate(self): geom = OGRGeometry("POINT ZM (1 2 3 4)") self.assertEqual(geom.m, 4) geom = OGRGeometry("POINT (1 2 3 4)") self.assertEqual(geom.m, 4) geom = OGRGeometry("POINT M (1 2 3)") self.assertEqual(geom.m, 3) geom = OGRGeometry("POINT Z (1 2 3)") self.assertEqual(geom.m, None) def test_point_m_tuple(self): geom = OGRGeometry("POINT ZM (1 2 3 4)") self.assertEqual(geom.tuple, (geom.x, geom.y, geom.z, geom.m)) geom = OGRGeometry("POINT M (1 2 3)") self.assertEqual(geom.tuple, (geom.x, geom.y, geom.m)) geom = OGRGeometry("POINT Z (1 2 3)") self.assertEqual(geom.tuple, (geom.x, geom.y, geom.z)) geom = OGRGeometry("POINT (1 2 3)") self.assertEqual(geom.tuple, (geom.x, geom.y, geom.z)) def test_point_m_wkt_wkb(self): wkt = "POINT ZM (1 2 3 4)" geom = OGRGeometry(wkt) self.assertEqual(geom.wkt, wkt) self.assertEqual( geom.wkb.hex(), "01b90b0000000000000000f03f00000000000000" "4000000000000008400000000000001040", ) wkt = "POINT M (1 2 3)" geom = OGRGeometry(wkt) self.assertEqual(geom.wkt, wkt) self.assertEqual( geom.wkb.hex(), "01d1070000000000000000f03f00000000000000400000000000000840", ) def test_point_m_dimension_types(self): geom = OGRGeometry("POINT ZM (1 2 3 4)") self.assertEqual(geom.geom_type.name, "PointZM") self.assertEqual(geom.geom_type.num, 3001) geom = OGRGeometry("POINT M (1 2 3)") self.assertEqual(geom.geom_type.name, "PointM") 
self.assertEqual(geom.geom_type.num, 2001) def test_point_m_dimension_geos(self): """GEOSGeometry does not yet support the M dimension.""" geom = OGRGeometry("POINT ZM (1 2 3 4)") self.assertEqual(geom.geos.wkt, "POINT Z (1 2 3)") geom = OGRGeometry("POINT M (1 2 3)") self.assertEqual(geom.geos.wkt, "POINT (1 2)") def test_centroid(self): point = OGRGeometry("POINT (1 2 3)") self.assertEqual(point.centroid.wkt, "POINT (1 2)") linestring = OGRGeometry("LINESTRING (0 0 0, 1 1 1, 2 2 2)") self.assertEqual(linestring.centroid.wkt, "POINT (1 1)") polygon = OGRGeometry("POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))") self.assertEqual(polygon.centroid.wkt, "POINT (5 5)") multipoint = OGRGeometry("MULTIPOINT (0 0,10 10)") self.assertEqual(multipoint.centroid.wkt, "POINT (5 5)") multilinestring = OGRGeometry( "MULTILINESTRING ((0 0,0 10,0 20),(10 0,10 10,10 20))" ) self.assertEqual(multilinestring.centroid.wkt, "POINT (5 10)") multipolygon = OGRGeometry( "MULTIPOLYGON(((0 0, 10 0, 10 10, 0 10, 0 0))," "((20 20, 20 30, 30 30, 30 20, 20 20)))" ) self.assertEqual(multipolygon.centroid.wkt, "POINT (15 15)") geometrycollection = OGRGeometry( "GEOMETRYCOLLECTION (POINT (110 260),LINESTRING (110 0,110 60))" ) self.assertEqual(geometrycollection.centroid.wkt, "POINT (110 30)") def test_linestring_m_dimension(self): geom = OGRGeometry("LINESTRING(0 1 2 10, 1 2 3 11, 2 3 4 12)") self.assertIs(geom.is_measured, True) self.assertEqual(geom.m, [10.0, 11.0, 12.0]) self.assertEqual(geom[0], (0.0, 1.0, 2.0, 10.0)) geom = OGRGeometry("LINESTRING M (0 1 10, 1 2 11)") self.assertIs(geom.is_measured, True) self.assertEqual(geom.m, [10.0, 11.0]) self.assertEqual(geom[0], (0.0, 1.0, 10.0)) geom.set_measured(False) self.assertIs(geom.is_measured, False) self.assertIs(geom.m, None) def test_polygon_m_dimension(self): geom = OGRGeometry("POLYGON Z ((0 0 0, 10 0 0, 10 10 0, 0 10 0, 0 0 0))") self.assertIs(geom.is_measured, False) self.assertEqual( geom.shell.wkt, "LINEARRING (0 0 0,10 0 0,10 10 0,0 10 0,0 
0 0)" ) geom = OGRGeometry("POLYGON M ((0 0 0, 10 0 0, 10 10 0, 0 10 0, 0 0 0))") self.assertIs(geom.is_measured, True) self.assertEqual( geom.shell.wkt, "LINEARRING M (0 0 0,10 0 0,10 10 0,0 10 0,0 0 0)" ) geom = OGRGeometry( "POLYGON ZM ((0 0 0 1, 10 0 0 1, 10 10 0 1, 0 10 0 1, 0 0 0 1))" ) self.assertIs(geom.is_measured, True) self.assertEqual( geom.shell.wkt, "LINEARRING ZM (0 0 0 1,10 0 0 1,10 10 0 1,0 10 0 1,0 0 0 1)", ) geom.set_measured(False) self.assertEqual(geom.wkt, "POLYGON ((0 0 0,10 0 0,10 10 0,0 10 0,0 0 0))") self.assertEqual( geom.shell.wkt, "LINEARRING (0 0 0,10 0 0,10 10 0,0 10 0,0 0 0)" ) def test_multi_geometries_m_dimension(self): tests = [ "MULTIPOINT M ((10 40 10), (40 30 10), (20 20 10))", "MULTIPOINT ZM ((10 40 0 10), (40 30 1 10), (20 20 1 10))", "MULTILINESTRING M ((10 10 1, 20 20 2),(40 40 1, 30 30 2))", "MULTILINESTRING ZM ((10 10 0 1, 20 20 0 2),(40 40 1, 30 30 0 2))", ( "MULTIPOLYGON ZM (((30 20 1 0, 45 40 1 0, 30 20 1 0))," "((15 5 0 0, 40 10 0 0, 15 5 0 0)))" ), ( "GEOMETRYCOLLECTION M (POINT M (40 10 0)," "LINESTRING M (10 10 0, 20 20 0, 10 40 0))" ), ( "GEOMETRYCOLLECTION ZM (POINT ZM (40 10 0 1)," "LINESTRING ZM (10 10 1 0, 20 20 1 0, 10 40 1 0))" ), ] for geom_input in tests: with self.subTest(geom_input=geom_input): geom = OGRGeometry(geom_input) self.assertIs(geom.is_measured, True) def test_has_curve(self): for geom in self.geometries.curved_geoms: with self.subTest(wkt=geom.wkt): geom = OGRGeometry(geom.wkt) self.assertIs(geom.has_curve, True) msg = f"GEOS does not support {geom.__class__.__qualname__}." 
with self.assertRaisesMessage(GEOSException, msg): geom.geos geom = OGRGeometry("POINT (0 1)") self.assertIs(geom.has_curve, False) def test_get_linear_geometry(self): geom = OGRGeometry("CIRCULARSTRING (-0.797 0.466,-0.481 0.62,-0.419 0.473)") linear = geom.get_linear_geometry() self.assertEqual(linear.geom_name, "LINESTRING") self.assertIs(linear.has_curve, False) def test_get_linear_geometry_no_conversion_possible(self): wkt = "POINT (0 0)" geom = OGRGeometry(wkt) geom2 = geom.get_linear_geometry() self.assertEqual(geom2.wkt, wkt) def test_get_curve_geometry(self): linear_string = OGRGeometry( "LINESTRING (-0.797 0.466,-0.797500910583869 0.479079607685707," "-0.797096828208069 0.49216256476959,-0.795789684575482 0.505186328593822," "-0.793585728444384 0.518088639471983,-0.79049549575663 0.530807818319715," "-0.786533759270668 0.543283061509385,-0.781719457941079 0.555454731539925," "-0.776075606381369 0.567264642132187,-0.769629184843353 0.578656336386302," "-0.76241101023902 0.589575356672327,-0.754455588821145 0.599969504963013," "-0.745800951227352 0.609789092364991,-0.736488470675795 0.618987176654798," "-0.726562665181888 0.627519786684672,-0.716070984741265 0.635346132585369," "-0.705063584496685 0.642428800760598,-0.693593084972889 0.648733932741749," "-0.681714320525941 0.654231387047048,-0.669484077209319 0.658894883272069," "-0.656960821309923 0.662702127722269,-0.644204419852031 0.665634919987354," "-0.631275854404748 0.667679239947688,-0.618236929561618 0.668825314797118," "-0.60514997748578 0.669067665761503,-0.592077559933017 0.66840513428977," "-0.579082169177269 0.666840887592428,-0.566225929268313 0.664382403500809," "-0.553570299049824 0.661041434719465,-0.541175778357228 0.656833952642756," "-0.529101618800212 0.651780071004197,-0.5174055405123 0.645903949723276," "-0.506143456221622 0.639233679409784,-0.495369203961872 0.631801147077652," "-0.485134289701335 0.623641883709865,-0.475487641120239 0.614794894404014," "-0.46647537371355 
0.605302471909454,-0.458140570337321 0.595209994448282," "-0.450523075252448 0.58456570878613,-0.443659303650563 0.573420499590156," "-0.437582067572208 0.561827646176397,-0.432320419050072 0.549842567809747," "-0.427899511226613 0.537522558773986,-0.424340478110267 0.524926514478182," "-0.421660333544978 0.512114649909193,-0.419871889876113 0.499148211775737," "-0.418983696701434 0.486089185720561,-0.419 0.473)" ) curve = linear_string.get_curve_geometry() self.assertEqual(curve.geom_name, "CIRCULARSTRING") self.assertEqual( curve.wkt, "CIRCULARSTRING (-0.797 0.466,-0.618236929561618 " "0.668825314797118,-0.419 0.473)", ) def test_get_curve_geometry_no_conversion_possible(self): geom = OGRGeometry("LINESTRING (0 0, 1 0, 2 0)") geom2 = geom.get_curve_geometry() self.assertEqual(geom2.wkt, geom.wkt) def test_curved_geometries(self): for geom in self.geometries.curved_geoms: with self.subTest(wkt=geom.wkt, geom_name=geom.name): g = OGRGeometry(geom.wkt) self.assertEqual(geom.name, g.geom_type.name) self.assertEqual(geom.num, g.geom_type.num) msg = f"GEOS does not support {g.__class__.__qualname__}." with self.assertRaisesMessage(GEOSException, msg): g.geos def test_circularstring_has_linestring_features(self): geom = OGRGeometry("CIRCULARSTRING ZM (1 5 0 1, 6 2 0 2, 7 3 0 3)") self.assertIsInstance(geom, CircularString) self.assertEqual(geom.x, [1, 6, 7]) self.assertEqual(geom.y, [5, 2, 3]) self.assertEqual(geom.z, [0, 0, 0]) self.assertEqual(geom.m, [1, 2, 3]) self.assertEqual( geom.tuple, ((1.0, 5.0, 0.0, 1.0), (6.0, 2.0, 0.0, 2.0), (7.0, 3.0, 0.0, 3.0)), ) self.assertEqual(geom[0], (1, 5, 0, 1)) self.assertEqual(len(geom), 3) def test_curvepolygon_has_polygon_features(self): geom = OGRGeometry( "CURVEPOLYGON ZM (CIRCULARSTRING ZM (0 0 0 0, 4 0 0 0, 4 4 0 0, 0 4 0 0, " "0 0 0 0), (1 1 0 0, 3 3 0 0, 3 1 0 0, 1 1 0 0))" ) self.assertIsInstance(geom, CurvePolygon) self.assertIsInstance(geom.shell, CircularString)
python
github
https://github.com/django/django
tests/gis_tests/gdal_tests/test_geom.py
/** * @license * Copyright Google LLC All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.dev/license */ import {ChangeDetectionStrategy, Component, input} from '@angular/core'; import {SerializedInjectedService} from '../../../../../../../../../protocol'; import {ResolutionPathComponent} from './resolution-path/resolution-path.component'; import {MatTooltip} from '@angular/material/tooltip'; import {MatExpansionModule} from '@angular/material/expansion'; @Component({ selector: 'ng-dependency-viewer', templateUrl: './dependency-viewer.component.html', styleUrl: './dependency-viewer.component.scss', imports: [MatExpansionModule, MatTooltip, ResolutionPathComponent], changeDetection: ChangeDetectionStrategy.OnPush, }) export class DependencyViewerComponent { readonly dependency = input.required<SerializedInjectedService>(); }
typescript
github
https://github.com/angular/angular
devtools/projects/ng-devtools/src/lib/devtools-tabs/directive-explorer/property-tab/property-view/property-view-body/dependency-viewer/dependency-viewer.component.ts
# Copyright 2013-2015 MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test the mongo_client module.""" import contextlib import datetime import os import socket import struct import sys import time import traceback import warnings sys.path[0:0] = [""] from bson import BSON from bson.codec_options import CodecOptions from bson.py3compat import thread, u from bson.son import SON from bson.tz_util import utc from pymongo import auth, message from pymongo.cursor import CursorType from pymongo.database import Database from pymongo.errors import (AutoReconnect, ConfigurationError, ConnectionFailure, InvalidName, OperationFailure, CursorNotFound, NetworkTimeout, InvalidURI) from pymongo.mongo_client import MongoClient from pymongo.pool import SocketInfo from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import (any_server_selector, writable_server_selector) from pymongo.server_type import SERVER_TYPE from pymongo.write_concern import WriteConcern from test import (client_context, client_knobs, host, pair, port, SkipTest, unittest, IntegrationTest, db_pwd, db_user, MockClientTest) from test.pymongo_mocks import MockClient from test.utils import (assertRaisesExactly, delay, remove_all_users, server_is_master_with_slave, get_pool, one, connected, wait_until, rs_or_single_client, rs_or_single_client_noauth, lazy_client_trial, NTHREADS) class ClientUnitTest(unittest.TestCase): """MongoClient tests that don't require a server.""" @classmethod def 
setUpClass(cls): cls.client = MongoClient(host, port, connect=False, serverSelectionTimeoutMS=100) def test_keyword_arg_defaults(self): client = MongoClient(socketTimeoutMS=None, connectTimeoutMS=20000, waitQueueTimeoutMS=None, waitQueueMultiple=None, socketKeepAlive=False, replicaSet=None, read_preference=ReadPreference.PRIMARY, ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=0, # ssl.CERT_NONE ssl_ca_certs=None, connect=False, serverSelectionTimeoutMS=12000) options = client._MongoClient__options pool_opts = options.pool_options self.assertEqual(None, pool_opts.socket_timeout) # socket.Socket.settimeout takes a float in seconds self.assertEqual(20.0, pool_opts.connect_timeout) self.assertEqual(None, pool_opts.wait_queue_timeout) self.assertEqual(None, pool_opts.wait_queue_multiple) self.assertFalse(pool_opts.socket_keepalive) self.assertEqual(None, pool_opts.ssl_context) self.assertEqual(None, options.replica_set_name) self.assertEqual(ReadPreference.PRIMARY, client.read_preference) self.assertAlmostEqual(12, client.server_selection_timeout) def test_types(self): self.assertRaises(TypeError, MongoClient, 1) self.assertRaises(TypeError, MongoClient, 1.14) self.assertRaises(TypeError, MongoClient, "localhost", "27017") self.assertRaises(TypeError, MongoClient, "localhost", 1.14) self.assertRaises(TypeError, MongoClient, "localhost", []) self.assertRaises(ConfigurationError, MongoClient, []) def test_max_pool_size_zero(self): with self.assertRaises(ValueError): MongoClient(maxPoolSize=0) def test_get_db(self): def make_db(base, name): return base[name] self.assertRaises(InvalidName, make_db, self.client, "") self.assertRaises(InvalidName, make_db, self.client, "te$t") self.assertRaises(InvalidName, make_db, self.client, "te.t") self.assertRaises(InvalidName, make_db, self.client, "te\\t") self.assertRaises(InvalidName, make_db, self.client, "te/t") self.assertRaises(InvalidName, make_db, self.client, "te st") 
self.assertTrue(isinstance(self.client.test, Database)) self.assertEqual(self.client.test, self.client["test"]) self.assertEqual(self.client.test, Database(self.client, "test")) def test_get_database(self): codec_options = CodecOptions(tz_aware=True) write_concern = WriteConcern(w=2, j=True) db = self.client.get_database( 'foo', codec_options, ReadPreference.SECONDARY, write_concern) self.assertEqual('foo', db.name) self.assertEqual(codec_options, db.codec_options) self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) def test_getattr(self): self.assertTrue(isinstance(self.client['_does_not_exist'], Database)) with self.assertRaises(AttributeError) as context: self.client._does_not_exist # Message should be: # "AttributeError: MongoClient has no attribute '_does_not_exist'. To # access the _does_not_exist database, use client['_does_not_exist']". self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) def test_iteration(self): def iterate(): [a for a in self.client] self.assertRaises(TypeError, iterate) def test_get_default_database(self): c = MongoClient("mongodb://%s:%d/foo" % (host, port), connect=False) self.assertEqual(Database(c, 'foo'), c.get_default_database()) def test_get_default_database_error(self): # URI with no database. c = MongoClient("mongodb://%s:%d/" % (host, port), connect=False) self.assertRaises(ConfigurationError, c.get_default_database) def test_get_default_database_with_authsource(self): # Ensure we distinguish database name from authSource. uri = "mongodb://%s:%d/foo?authSource=src" % (host, port) c = MongoClient(uri, connect=False) self.assertEqual(Database(c, 'foo'), c.get_default_database()) class TestClient(IntegrationTest): def test_constants(self): # Set bad defaults. 
MongoClient.HOST = "somedomainthatdoesntexist.org" MongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): connected(MongoClient(serverSelectionTimeoutMS=10)) # Override the defaults. No error. connected(MongoClient(host, port)) # Set good defaults. MongoClient.HOST = host MongoClient.PORT = port # No error. connected(MongoClient()) def test_init_disconnected(self): c = rs_or_single_client(connect=False) self.assertIsInstance(c.is_primary, bool) self.assertIsInstance(c.is_mongos, bool) self.assertIsInstance(c.max_pool_size, int) self.assertIsInstance(c.nodes, frozenset) self.assertEqual(c.codec_options, CodecOptions()) self.assertIsInstance(c.max_bson_size, int) self.assertIsInstance(c.max_write_batch_size, int) self.assertFalse(c.primary) self.assertFalse(c.secondaries) c.pymongo_test.command('ismaster') # Auto-connect. if client_context.is_rs: # The primary's host and port are from the replica set config. self.assertIsNotNone(c.address) else: self.assertEqual(c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" c = MongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" c = MongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) def test_equality(self): c = connected(rs_or_single_client()) self.assertEqual(client_context.rs_or_standalone_client, c) # Explicitly test inequality self.assertFalse(client_context.rs_or_standalone_client != c) def test_host_w_port(self): with self.assertRaises(ValueError): connected(MongoClient("%s:1234567" % host, connectTimeoutMS=1, serverSelectionTimeoutMS=10)) def test_repr(self): # Used to test 'eval' below. 
import bson client = MongoClient( 'mongodb://localhost:27017,localhost:27018/?replicaSet=replset' '&connectTimeoutMS=12345', connect=False, document_class=SON) the_repr = repr(client) self.assertIn('MongoClient(host=', the_repr) self.assertIn( "document_class=bson.son.SON, " "tz_aware=False, " "connect=False, ", the_repr) self.assertIn("connecttimeoutms='12345'", the_repr) self.assertIn("replicaset=", the_repr) self.assertEqual(eval(the_repr), client) @client_context.require_replica_set def test_repr_replica_set(self): self.assertIn("MongoClient(host=[", repr(self.client)) self.assertIn(pair, repr(self.client)) def test_getters(self): self.assertEqual(client_context.client.address, (host, port)) self.assertEqual(client_context.nodes, self.client.nodes) def test_database_names(self): self.client.pymongo_test.test.insert_one({"dummy": u("object")}) self.client.pymongo_test_mike.test.insert_one({"dummy": u("object")}) dbs = self.client.database_names() self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_mike" in dbs) def test_drop_database(self): self.assertRaises(TypeError, self.client.drop_database, 5) self.assertRaises(TypeError, self.client.drop_database, None) self.client.pymongo_test.test.insert_one({"dummy": u("object")}) self.client.pymongo_test2.test.insert_one({"dummy": u("object")}) dbs = self.client.database_names() self.assertIn("pymongo_test", dbs) self.assertIn("pymongo_test2", dbs) self.client.drop_database("pymongo_test") self.client.drop_database(self.client.pymongo_test2) raise SkipTest("This test often fails due to SERVER-2329") dbs = self.client.database_names() self.assertNotIn("pymongo_test", dbs) self.assertNotIn("pymongo_test2", dbs) def test_close(self): coll = self.client.pymongo_test.bar self.client.close() self.client.close() coll.count() self.client.close() self.client.close() coll.count() def test_bad_uri(self): with self.assertRaises(InvalidURI): MongoClient("http://localhost") @client_context.require_auth def 
test_auth_from_uri(self): self.client.admin.add_user("admin", "pass", roles=["root"]) self.addCleanup(self.client.admin.remove_user, 'admin') self.addCleanup(remove_all_users, self.client.pymongo_test) self.client.pymongo_test.add_user( "user", "pass", roles=['userAdmin', 'readWrite']) with self.assertRaises(OperationFailure): connected(rs_or_single_client( "mongodb://a:b@%s:%d" % (host, port))) # No error. connected(rs_or_single_client_noauth( "mongodb://admin:pass@%s:%d" % (host, port))) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) with self.assertRaises(OperationFailure): connected(rs_or_single_client(uri)) # No error. connected(rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test" % (host, port))) # Auth with lazy connection. rs_or_single_client( "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False).pymongo_test.test.find_one() # Wrong password. bad_client = rs_or_single_client( "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False) self.assertRaises(OperationFailure, bad_client.pymongo_test.test.find_one) @client_context.require_auth def test_multiple_logins(self): self.client.pymongo_test.add_user('user1', 'pass', roles=['readWrite']) self.client.pymongo_test.add_user('user2', 'pass', roles=['readWrite']) self.addCleanup(remove_all_users, self.client.pymongo_test) client = rs_or_single_client_noauth( "mongodb://user1:pass@%s:%d/pymongo_test" % (host, port)) client.pymongo_test.test.find_one() with self.assertRaises(OperationFailure): # Can't log in to the same database with multiple users. 
client.pymongo_test.authenticate('user2', 'pass') client.pymongo_test.test.find_one() client.pymongo_test.logout() with self.assertRaises(OperationFailure): client.pymongo_test.test.find_one() client.pymongo_test.authenticate('user2', 'pass') client.pymongo_test.test.find_one() with self.assertRaises(OperationFailure): client.pymongo_test.authenticate('user1', 'pass') client.pymongo_test.test.find_one() @client_context.require_auth def test_lazy_auth_raises_operation_failure(self): lazy_client = rs_or_single_client( "mongodb://user:wrong@%s/pymongo_test" % host, connect=False) assertRaisesExactly( OperationFailure, lazy_client.test.collection.find_one) def test_unix_socket(self): if not hasattr(socket, "AF_UNIX"): raise SkipTest("UNIX-sockets are not supported on this system") mongodb_socket = '/tmp/mongodb-27017.sock' encoded_socket = '%2Ftmp%2Fmongodb-27017.sock' if not os.access(mongodb_socket, os.R_OK): raise SkipTest("Socket file is not accessible") if client_context.auth_enabled: uri = "mongodb://%s:%s@%s" % (db_user, db_pwd, encoded_socket) else: uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. client = MongoClient(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.database_names() self.assertTrue("pymongo_test" in dbs) # Confirm it fails with a missing socket. self.assertRaises( ConnectionFailure, connected, MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100)) def test_fork(self): # Test using a client before and after a fork. if sys.platform == "win32": raise SkipTest("Can't fork on windows") try: import multiprocessing except ImportError: raise SkipTest("No multiprocessing module") db = self.client.pymongo_test # Ensure a socket is opened before the fork. 
db.test.find_one() def f(pipe): try: kill_cursors_executor = self.client._kill_cursors_executor servers = self.client._topology.select_servers( any_server_selector) # In child, only the thread that called fork() is alive. # The first operation should revive the rest. db.test.find_one() wait_until( lambda: all(s._monitor._executor._thread.is_alive() for s in servers), "restart monitor threads") wait_until(lambda: kill_cursors_executor._thread.is_alive(), "restart kill-cursors executor") except: traceback.print_exc() # Aid debugging. pipe.send(True) parent_pipe, child_pipe = multiprocessing.Pipe() p = multiprocessing.Process(target=f, args=(child_pipe,)) p.start() p.join(10) child_pipe.close() # Pipe will only have data if the child process failed. try: parent_pipe.recv() self.fail() except EOFError: pass def test_document_class(self): c = self.client db = c.pymongo_test db.test.insert_one({"x": 1}) self.assertEqual(dict, c.codec_options.document_class) self.assertTrue(isinstance(db.test.find_one(), dict)) self.assertFalse(isinstance(db.test.find_one(), SON)) c = rs_or_single_client(document_class=SON) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) self.assertTrue(isinstance(db.test.find_one(), SON)) def test_timeouts(self): client = rs_or_single_client(connectTimeoutMS=10500) self.assertEqual(10.5, get_pool(client).opts.connect_timeout) client = rs_or_single_client(socketTimeoutMS=10500) self.assertEqual(10.5, get_pool(client).opts.socket_timeout) def test_socket_timeout_ms_validation(self): c = rs_or_single_client(socketTimeoutMS=10 * 1000) self.assertEqual(10, get_pool(c).opts.socket_timeout) c = connected(rs_or_single_client(socketTimeoutMS=None)) self.assertEqual(None, get_pool(c).opts.socket_timeout) self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=0) self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=-1) self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=1e10) 
self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS='foo') def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) no_timeout.pymongo_test.drop_collection("test") no_timeout.pymongo_test.test.insert_one({"x": 1}) # A $where clause that takes a second longer than the timeout where_func = delay(timeout_sec + 1) def get_x(db): doc = next(db.test.find().where(where_func)) return doc["x"] self.assertEqual(1, get_x(no_timeout.pymongo_test)) self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test) def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=100, connect=False) self.assertAlmostEqual(0.1, client.server_selection_timeout) client = MongoClient(serverSelectionTimeoutMS=0, connect=False) self.assertAlmostEqual(0, client.server_selection_timeout) self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS=-1, connect=False) self.assertRaises(ConfigurationError, MongoClient, serverSelectionTimeoutMS=None, connect=False) client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False) self.assertAlmostEqual(0.1, client.server_selection_timeout) client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False) self.assertAlmostEqual(0, client.server_selection_timeout) # Test invalid timeout in URI ignored and set to default. 
client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False) self.assertAlmostEqual(30, client.server_selection_timeout) client = MongoClient( 'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False) self.assertAlmostEqual(30, client.server_selection_timeout) def test_waitQueueTimeoutMS(self): client = rs_or_single_client(waitQueueTimeoutMS=2000) self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2) def test_waitQueueMultiple(self): client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2) pool = get_pool(client) self.assertEqual(pool.opts.wait_queue_multiple, 2) self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6) def test_socketKeepAlive(self): client = rs_or_single_client(socketKeepAlive=True) self.assertTrue(get_pool(client).opts.socket_keepalive) def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware='foo') aware = rs_or_single_client(tz_aware=True) naive = self.client aware.pymongo_test.drop_collection("test") now = datetime.datetime.utcnow() aware.pymongo_test.test.insert_one({"x": now}) self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) self.assertEqual( aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), naive.pymongo_test.test.find_one()["x"]) @client_context.require_ipv6 def test_ipv6(self): if client_context.auth_enabled: auth_str = "%s:%s@" % (db_user, db_pwd) else: auth_str = "" uri = "mongodb://%s[::1]:%d" % (auth_str, port) if client_context.is_rs: uri += '/?replicaSet=' + client_context.replica_set_name client = rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": u("object")}) client.pymongo_test_bernie.test.insert_one({"dummy": u("object")}) dbs = client.database_names() self.assertTrue("pymongo_test" in dbs) self.assertTrue("pymongo_test_bernie" in dbs) @client_context.require_no_mongos def test_fsync_lock_unlock(self): if 
(server_is_master_with_slave(client_context.client) and client_context.version.at_least(2, 3, 0)): raise SkipTest('SERVER-7714') self.assertFalse(self.client.is_locked) # async flushing not supported on windows... if sys.platform not in ('cygwin', 'win32'): self.client.fsync(async=True) self.assertFalse(self.client.is_locked) self.client.fsync(lock=True) self.assertTrue(self.client.is_locked) locked = True self.client.unlock() for _ in range(5): locked = self.client.is_locked if not locked: break time.sleep(1) self.assertFalse(locked) def test_contextlib(self): client = rs_or_single_client() client.pymongo_test.drop_collection("test") client.pymongo_test.test.insert_one({"foo": "bar"}) # The socket used for the previous commands has been returned to the # pool self.assertEqual(1, len(get_pool(client).sockets)) with contextlib.closing(client): self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) self.assertEqual(1, len(get_pool(client).sockets)) self.assertEqual(0, len(get_pool(client).sockets)) with client as client: self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) self.assertEqual(0, len(get_pool(client).sockets)) def test_interrupt_signal(self): if sys.platform.startswith('java'): # We can't figure out how to raise an exception on a thread that's # blocked on a socket, whether that's the main thread or a worker, # without simply killing the whole thread in Jython. This suggests # PYTHON-294 can't actually occur in Jython. raise SkipTest("Can't test interrupts in Jython") # Test fix for PYTHON-294 -- make sure MongoClient closes its # socket if it gets an interrupt while waiting to recv() from it. 
db = self.client.pymongo_test # A $where clause which takes 1.5 sec to execute where = delay(1.5) # Need exactly 1 document so find() will execute its $where clause once db.drop_collection('foo') db.foo.insert_one({'_id': 1}) def interrupter(): # Raises KeyboardInterrupt in the main thread time.sleep(0.25) thread.interrupt_main() thread.start_new_thread(interrupter, ()) raised = False try: # Will be interrupted by a KeyboardInterrupt. next(db.foo.find({'$where': where})) except KeyboardInterrupt: raised = True # Can't use self.assertRaises() because it doesn't catch system # exceptions self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt") # Raises AssertionError due to PYTHON-294 -- Mongo's response to the # previous find() is still waiting to be read on the socket, so the # request id's don't match. self.assertEqual( {'_id': 1}, next(db.foo.find()) ) def test_operation_failure(self): # Ensure MongoClient doesn't close socket after it gets an error # response to getLastError. PYTHON-395. pool = get_pool(self.client) socket_count = len(pool.sockets) self.assertGreaterEqual(socket_count, 1) old_sock_info = next(iter(pool.sockets)) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({'_id': 'foo'}) self.assertRaises( OperationFailure, self.client.pymongo_test.test.insert_one, {'_id': 'foo'}) self.assertEqual(socket_count, len(pool.sockets)) new_sock_info = next(iter(pool.sockets)) self.assertEqual(old_sock_info, new_sock_info) def test_kill_cursors(self): if (client_context.is_mongos and not client_context.version.at_least(2, 4, 7)): # Old mongos sends incorrectly formatted error response when # cursor isn't found, see SERVER-9738. 
raise SkipTest("Can't test kill_cursors against old mongos") self.collection = self.client.pymongo_test.test self.collection.drop() self.collection.insert_many([{'_id': i} for i in range(200)]) cursor = self.collection.find().batch_size(1) next(cursor) self.client.kill_cursors([cursor.cursor_id]) # Prevent killcursors from reaching the server while a getmore is in # progress -- the server logs "Assertion: 16089:Cannot kill active # cursor." time.sleep(2) def raises_cursor_not_found(): try: next(cursor) return False except CursorNotFound: return True wait_until(raises_cursor_not_found, 'close cursor') def test_kill_cursors_with_server_unavailable(self): with client_knobs(kill_cursor_frequency=9999999): client = MongoClient('doesnt exist', connect=False, serverSelectionTimeoutMS=0) # Wait for the first tick of the periodic kill-cursors to pass. time.sleep(1) # Enqueue a kill-cursors message. client.close_cursor(1234, ('doesnt-exist', 27017)) with warnings.catch_warnings(record=True) as user_warnings: client._process_kill_cursors_queue() self.assertIn("couldn't close cursor on ('doesnt-exist', 27017)", str(user_warnings[0].message)) def test_lazy_connect_w0(self): # Ensure that connect-on-demand works when the first operation is # an unacknowledged write. This exercises _writable_max_wire_version(). # Use a separate collection to avoid races where we're still # completing an operation on a collection while the next test begins. client = rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.insert_one({}) client = rs_or_single_client(connect=False) client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}}) client = rs_or_single_client(connect=False) client.test_lazy_connect_w0.test.delete_one({}) @client_context.require_no_mongos def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. 
client = rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. # Ensure a socket. connected(client) # Cause a network error. sock_info = one(pool.sockets) sock_info.sock.close() cursor = collection.find(cursor_type=CursorType.EXHAUST) with self.assertRaises(ConnectionFailure): next(cursor) self.assertTrue(sock_info.closed) # The semaphore was decremented despite the error. self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) @client_context.require_auth def test_auth_network_error(self): # Make sure there's no semaphore leak if we get a network error # when authenticating a new socket with cached credentials. # Get a client with one socket so we detect if it's leaked. c = connected(rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1)) # Simulate an authenticate() call on a different socket. credentials = auth._build_credentials_tuple( 'DEFAULT', 'admin', db_user, db_pwd, {}) c._cache_credentials('test', credentials, connect=False) # Cause a network error on the actual socket. pool = get_pool(c) socket_info = one(pool.sockets) socket_info.sock.close() # SocketInfo.check_auth logs in with the new credential, but gets a # socket.error. Should be reraised as AutoReconnect. self.assertRaises(AutoReconnect, c.test.collection.find_one) # No semaphore leak, the pool is allowed to make a new socket. c.test.collection.find_one() @client_context.require_no_replica_set def test_connect_to_standalone_using_replica_set_name(self): client = MongoClient(pair, replicaSet='anything', serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): client.test.test.find_one() @client_context.require_replica_set def test_stale_getmore(self): # A cursor is created, but its member goes down and is removed from # the topology before the getMore message is sent. Test that # MongoClient._send_message_with_response handles the error. 
with self.assertRaises(AutoReconnect): client = MongoClient(host, port, connect=False, serverSelectionTimeoutMS=100, replicaSet=client_context.replica_set_name) client._send_message_with_response( operation=message._GetMore('collection', 101, 1234), address=('not-a-member', 27017)) class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" def setUp(self): super(TestExhaustCursor, self).setUp() if client_context.is_mongos: raise SkipTest("mongos doesn't support exhaust, SERVER-2627") # mongod < 2.2.0 closes exhaust socket on error, so it behaves like # test_exhaust_query_network_error. Here we test that on query error # the client correctly keeps the socket *open* and checks it in. @client_context.require_version_min(2, 2, 0) def test_exhaust_query_server_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. client = connected(rs_or_single_client(maxPoolSize=1)) collection = client.pymongo_test.test pool = get_pool(client) sock_info = one(pool.sockets) # This will cause OperationFailure in all mongo versions since # the value for $orderby must be a document. cursor = collection.find( SON([('$query', {}), ('$orderby', True)]), cursor_type=CursorType.EXHAUST) self.assertRaises(OperationFailure, cursor.next) self.assertFalse(sock_info.closed) # The socket was checked in and the semaphore was decremented. self.assertIn(sock_info, pool.sockets) self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) def test_exhaust_getmore_server_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. 
client = rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test collection.drop() collection.insert_many([{} for _ in range(200)]) self.addCleanup(client_context.client.pymongo_test.test.drop) pool = get_pool(client) pool._check_interval_seconds = None # Never check. sock_info = one(pool.sockets) cursor = collection.find(cursor_type=CursorType.EXHAUST) # Initial query succeeds. cursor.next() # Cause a server error on getmore. def receive_message(operation, request_id): # Discard the actual server response. SocketInfo.receive_message(sock_info, operation, request_id) # responseFlags bit 1 is QueryFailure. msg = struct.pack('<iiiii', 1 << 1, 0, 0, 0, 0) msg += BSON.encode({'$err': 'mock err', 'code': 0}) return msg saved = sock_info.receive_message sock_info.receive_message = receive_message self.assertRaises(OperationFailure, list, cursor) sock_info.receive_message = saved # The socket is returned the pool and it still works. self.assertEqual(200, collection.count()) self.assertIn(sock_info, pool.sockets) def test_exhaust_query_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. client = connected(rs_or_single_client(maxPoolSize=1)) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. # Cause a network error. sock_info = one(pool.sockets) sock_info.sock.close() cursor = collection.find(cursor_type=CursorType.EXHAUST) self.assertRaises(ConnectionFailure, cursor.next) self.assertTrue(sock_info.closed) # The socket was closed and the semaphore was decremented. self.assertNotIn(sock_info, pool.sockets) self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) def test_exhaust_getmore_network_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. 
client = rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test collection.drop() collection.insert_many([{} for _ in range(200)]) # More than one batch. pool = get_pool(client) pool._check_interval_seconds = None # Never check. cursor = collection.find(cursor_type=CursorType.EXHAUST) # Initial query succeeds. cursor.next() # Cause a network error. sock_info = cursor._Cursor__exhaust_mgr.sock sock_info.sock.close() # A getmore fails. self.assertRaises(ConnectionFailure, list, cursor) self.assertTrue(sock_info.closed) # The socket was closed and the semaphore was decremented. self.assertNotIn(sock_info, pool.sockets) self.assertTrue(pool._socket_semaphore.acquire(blocking=False)) class TestClientLazyConnect(IntegrationTest): """Test concurrent operations on a lazily-connecting MongoClient.""" def _get_client(self): return rs_or_single_client(connect=False) def test_insert_one(self): def reset(collection): collection.drop() def insert_one(collection, _): collection.insert_one({}) def test(collection): self.assertEqual(NTHREADS, collection.count()) lazy_client_trial(reset, insert_one, test, self._get_client) def test_update_one(self): def reset(collection): collection.drop() collection.insert_one({'i': 0}) # Update doc 10 times. 
def update_one(collection, _): collection.update_one({}, {'$inc': {'i': 1}}) def test(collection): self.assertEqual(NTHREADS, collection.find_one()['i']) lazy_client_trial(reset, update_one, test, self._get_client) def test_delete_one(self): def reset(collection): collection.drop() collection.insert_many([{'i': i} for i in range(NTHREADS)]) def delete_one(collection, i): collection.delete_one({'i': i}) def test(collection): self.assertEqual(0, collection.count()) lazy_client_trial(reset, delete_one, test, self._get_client) def test_find_one(self): results = [] def reset(collection): collection.drop() collection.insert_one({}) results[:] = [] def find_one(collection, _): results.append(collection.find_one()) def test(collection): self.assertEqual(NTHREADS, len(results)) lazy_client_trial(reset, find_one, test, self._get_client) def test_max_bson_size(self): # Client should have sane defaults before connecting, and should update # its configuration once connected. c = self._get_client() self.assertEqual(16 * (1024 ** 2), c.max_bson_size) self.assertEqual(2 * c.max_bson_size, c.max_message_size) # Make the client connect, so that it sets its max_bson_size and # max_message_size attributes. ismaster = c.db.command('ismaster') self.assertEqual(ismaster['maxBsonObjectSize'], c.max_bson_size) if 'maxMessageSizeBytes' in ismaster: self.assertEqual( ismaster['maxMessageSizeBytes'], c.max_message_size) class TestMongoClientFailover(MockClientTest): def test_discover_primary(self): # Disable background refresh. with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], members=['a:1', 'b:2', 'c:3'], mongoses=[], host='b:2', # Pass a secondary. replicaSet='rs') wait_until(lambda: len(c.nodes) == 3, 'connect') self.assertEqual(c.address, ('a', 1)) # Fail over. c.kill_host('a:1') c.mock_primary = 'b:2' c.close() self.assertEqual(0, len(c.nodes)) t = c._get_topology() t.select_servers(writable_server_selector) # Reconnect. 
self.assertEqual(c.address, ('b', 2)) # a:1 not longer in nodes. self.assertLess(len(c.nodes), 3) # c:3 is rediscovered. t.select_server_by_address(('c', 3)) def test_reconnect(self): # Verify the node list isn't forgotten during a network failure. c = MockClient( standalones=[], members=['a:1', 'b:2', 'c:3'], mongoses=[], host='b:2', # Pass a secondary. replicaSet='rs') wait_until(lambda: len(c.nodes) == 3, 'connect') # Total failure. c.kill_host('a:1') c.kill_host('b:2') c.kill_host('c:3') # MongoClient discovers it's alone. self.assertRaises(AutoReconnect, c.db.collection.find_one) # But it can reconnect. c.revive_host('a:1') c._get_topology().select_servers(writable_server_selector) self.assertEqual(c.address, ('a', 1)) def _test_network_error(self, operation_callback): # Verify only the disconnected server is reset by a network failure. # Disable background refresh. with client_knobs(heartbeat_frequency=999999): c = MockClient( standalones=[], members=['a:1', 'b:2'], mongoses=[], host='a:1', replicaSet='rs', connect=False) # Set host-specific information so we can test whether it is reset. c.set_wire_version_range('a:1', 0, 1) c.set_wire_version_range('b:2', 0, 2) c._get_topology().select_servers(writable_server_selector) wait_until(lambda: len(c.nodes) == 2, 'connect') c.kill_host('a:1') # MongoClient is disconnected from the primary. self.assertRaises(AutoReconnect, operation_callback, c) # The primary's description is reset. server_a = c._get_topology().get_server_by_address(('a', 1)) sd_a = server_a.description self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type) self.assertEqual(0, sd_a.min_wire_version) self.assertEqual(0, sd_a.max_wire_version) # ...but not the secondary's. 
server_b = c._get_topology().get_server_by_address(('b', 2)) sd_b = server_b.description self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type) self.assertEqual(0, sd_b.min_wire_version) self.assertEqual(2, sd_b.max_wire_version) def test_network_error_on_query(self): callback = lambda client: client.db.collection.find_one() self._test_network_error(callback) def test_network_error_on_insert(self): callback = lambda client: client.db.collection.insert_one({}) self._test_network_error(callback) def test_network_error_on_update(self): callback = lambda client: client.db.collection.update_one( {}, {'$unset': 'x'}) self._test_network_error(callback) def test_network_error_on_replace(self): callback = lambda client: client.db.collection.replace_one({}, {}) self._test_network_error(callback) def test_network_error_on_delete(self): callback = lambda client: client.db.collection.delete_many({}) self._test_network_error(callback) if __name__ == "__main__": unittest.main()
unknown
codeparrot/codeparrot-clean
import unittest from robot.writer.rowsplitter import RowSplitter from robot.utils.asserts import assert_equals class TestRowSplitter(unittest.TestCase): def _test(self, data, expected, cols=3, table_type='settings'): splitter = RowSplitter(cols=cols) actual = list(splitter.split(data, table_type)) assert_equals(actual, expected) def test_escaping_empty_cells_at_eol(self): self._test(['First', 'second', ''], [['First', 'second', '${EMPTY}']]) self._test(['First', 'second', '', 'next line'], [['First', 'second', '${EMPTY}'], ['...', 'next line']]) self._test(['1.1', '1.2', '1.3', '', '2.1', '2.2', '', '3.1', '', ''], [['1.1', '1.2', '1.3', '${EMPTY}'], ['...', '2.1', '2.2', '${EMPTY}'], ['...', '3.1', '', '${EMPTY}']], cols=4) def test_splitting_inside_comment(self): self._test(['Kw', 'Arg', '#Comment in', 'many cells'], [['Kw', 'Arg', '#Comment in'], ['...', '# many cells']]) self._test(['Kw', 'Arg', '# Comment', 'in', 'very', 'many', 'cells', '!'], [['Kw', 'Arg', '# Comment'], ['...', '# in', 'very'], ['...', '# many', 'cells'], ['...', '# !']]) self._test(['Kw', 'Arg', '# Comment in', 'many cells'], [['Kw', 'Arg'], ['...', '# Comment in'], ['...', '# many cells']], cols=2) def test_no_extra_comment_marker(self): self._test(['1', '2', '3', '# Comment'], [['1', '2', '3'], ['...', '# Comment']]) self._test(['1', '2', '# C 1', '# C 2'], [['1', '2', '# C 1'], ['...', '# C 2']]) def test_splitting_whitespace_rows(self): data = ['', '', '', '', 'foo', '# Comment'] for cols, expected in [(4, [['', '', '', '${EMPTY}'], ['...', 'foo', '# Comment']]), (3, [['', '', '${EMPTY}'], ['...', '', 'foo'], ['...', '# Comment']]), (2, [['', '${EMPTY}'], ['...', '${EMPTY}'], ['...', '${EMPTY}'], ['...', 'foo'], ['...', '# Comment']])]: self._test(data, expected, cols) def test_min_indent(self): self._test(['1', '2', '3', '4'], [['1', '2', '3'], ['...', '4']]) self._test(['1', '2', '3', '4'], [['1', '2', '3'], ['', '...', '4']], table_type='keyword') self._test(['1', '2', '3', '4'], 
[['1', '2', '3'], ['', '...', '4']], table_type='test case') def test_split_else(self): self._test(['Run Keyword If', 'expression', 'Kw 1', 'ELSE', 'Kw 2'], [['Run Keyword If', 'expression', 'Kw 1'], ['...', 'ELSE', 'Kw 2']], cols=100) self._test(['Run Keyword If', 'e1', 'Kw 1', 'ELSE IF', 'e2', 'Kw 2'], [['Run Keyword If', 'e1', 'Kw 1'], ['...', 'ELSE IF', 'e2', 'Kw 2']], cols=100) self._test(['1', '2', 'ELSE IF', '3', '4', 'ELSE IF', '5', 'ELSE', '6'], [['1', '2'], ['...', 'ELSE IF', '3', '4'], ['...', 'ELSE IF', '5'], ['...', 'ELSE', '6']], cols=100) def test_split_also_and(self): self._test(['Run Keywords', 'k1', 'AND', 'k2', 'a', 'b', 'AND', 'k3'], [['Run Keywords', 'k1'], ['...', 'AND', 'k2', 'a', 'b'], ['...', 'AND', 'k3']], cols=100) self._test(['', '1', 'AND', '2', 'ELSE', '3', 'ELSE IF', '4', 'AND', '5'], [['', '1'], ['', '...', 'AND', '2'], ['', '...', 'ELSE', '3'], ['', '...', 'ELSE IF', '4'], ['', '...', 'AND', '5']], cols=100) def test_dont_split_else_or_and_in_first_cell(self): for data in (['ELSE', '1', '2'], ['ELSE IF', '1', '2'], ['AND', '1', '2']): for no_split in (data, [''] + data, ['', '', ''] + data, ['...'] + data, ['', '...'] + data, ['', '', '', '...'] + data): self._test(no_split, [no_split], cols=100) def test_split_internal_else_lines(self): data = ['1', '2', '3', '4', '5', '6', '7', '8'] self._test(data + ['ELSE IF'] + data + ['ELSE'] + data, [['1', '2', '3', '4'], ['...', '5', '6', '7'], ['...', '8'], ['...', 'ELSE IF', '1', '2'], ['...', '3', '4', '5'], ['...', '6', '7', '8'], ['...', 'ELSE', '1', '2'], ['...', '3', '4', '5'], ['...', '6', '7', '8']], cols=4) self._test([''] + data + ['ELSE IF'] + data + ['ELSE'] + data, [['', '1', '2', '3', '4', '5', '6', '7'], ['', '...', '8'], ['', '...', 'ELSE IF', '1', '2', '3', '4', '5'], ['', '...', '6', '7', '8'], ['', '...', 'ELSE', '1', '2', '3', '4', '5'], ['', '...', '6', '7', '8']], cols=8) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean