code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
# -*- coding:utf-8 -*- ############################################################################## # # Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, orm from itertools import permutations class hr_hourly_rate_class(orm.Model): _name = 'hr.hourly.rate.class' _description = 'Hourly rate class' _columns = { 'name': fields.char( 'Class Name', required=True, ), 'line_ids': fields.one2many( 'hr.hourly.rate', 'class_id', 'Hourly Rates' ), 'contract_job_ids': fields.one2many( 'hr.contract.job', 'hourly_rate_class_id', 'Contract Jobs' ), } def _check_overlapping_rates(self, cr, uid, ids, context=None): """ Checks if a class has two rates that overlap in time. """ for hourly_rate_class in self.browse(cr, uid, ids, context): for r1, r2 in permutations(hourly_rate_class.line_ids, 2): if r1.date_end and ( r1.date_start <= r2.date_start <= r1.date_end): return False elif not r1.date_end and (r1.date_start <= r2.date_start): return False return True _constraints = [( _check_overlapping_rates, 'Error! You cannot have overlapping rates', ['line_ids'] )]
unknown
codeparrot/codeparrot-clean
"""Support for getting collected information from PVOutput.""" from collections import namedtuple from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.rest.sensor import RestData from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ( ATTR_DATE, ATTR_TEMPERATURE, ATTR_TIME, ATTR_VOLTAGE, CONF_API_KEY, CONF_NAME, ) import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) _ENDPOINT = "http://pvoutput.org/service/r2/getstatus.jsp" ATTR_ENERGY_GENERATION = "energy_generation" ATTR_POWER_GENERATION = "power_generation" ATTR_ENERGY_CONSUMPTION = "energy_consumption" ATTR_POWER_CONSUMPTION = "power_consumption" ATTR_EFFICIENCY = "efficiency" CONF_SYSTEM_ID = "system_id" DEFAULT_NAME = "PVOutput" DEFAULT_VERIFY_SSL = True SCAN_INTERVAL = timedelta(minutes=2) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_SYSTEM_ID): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the PVOutput sensor.""" name = config.get(CONF_NAME) api_key = config.get(CONF_API_KEY) system_id = config.get(CONF_SYSTEM_ID) method = "GET" payload = auth = None verify_ssl = DEFAULT_VERIFY_SSL headers = {"X-Pvoutput-Apikey": api_key, "X-Pvoutput-SystemId": system_id} rest = RestData(method, _ENDPOINT, auth, headers, payload, verify_ssl) rest.update() if rest.data is None: _LOGGER.error("Unable to fetch data from PVOutput") return False add_entities([PvoutputSensor(rest, name)], True) class PvoutputSensor(Entity): """Representation of a PVOutput sensor.""" def __init__(self, rest, name): """Initialize a PVOutput sensor.""" self.rest = rest self._name = name self.pvcoutput = None self.status = namedtuple( "status", [ ATTR_DATE, ATTR_TIME, ATTR_ENERGY_GENERATION, ATTR_POWER_GENERATION, 
ATTR_ENERGY_CONSUMPTION, ATTR_POWER_CONSUMPTION, ATTR_EFFICIENCY, ATTR_TEMPERATURE, ATTR_VOLTAGE, ], ) @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the device.""" if self.pvcoutput is not None: return self.pvcoutput.energy_generation return None @property def device_state_attributes(self): """Return the state attributes of the monitored installation.""" if self.pvcoutput is not None: return { ATTR_ENERGY_GENERATION: self.pvcoutput.energy_generation, ATTR_POWER_GENERATION: self.pvcoutput.power_generation, ATTR_ENERGY_CONSUMPTION: self.pvcoutput.energy_consumption, ATTR_POWER_CONSUMPTION: self.pvcoutput.power_consumption, ATTR_EFFICIENCY: self.pvcoutput.efficiency, ATTR_TEMPERATURE: self.pvcoutput.temperature, ATTR_VOLTAGE: self.pvcoutput.voltage, } def update(self): """Get the latest data from the PVOutput API and updates the state.""" try: self.rest.update() self.pvcoutput = self.status._make(self.rest.data.split(",")) except TypeError: self.pvcoutput = None _LOGGER.error("Unable to fetch data from PVOutput. %s", self.rest.data)
unknown
codeparrot/codeparrot-clean
import pytest from framework.auth.core import Auth from osf_tests.factories import ConferenceFactory, ProjectFactory, AuthUserFactory from api_tests import utils as api_utils @pytest.mark.django_db class TestMeetingDetail: @pytest.fixture() def meeting(self): return ConferenceFactory( name='OSF 2019', endpoint='osf2019', location='Boulder, CO', poster=True, talk=False, ) @pytest.fixture() def url(self, meeting): return '/_/meetings/{}/?related_counts=submissions'.format(meeting.endpoint) @pytest.fixture() def user(self): return AuthUserFactory() @pytest.fixture() def meeting_submission_one(self, meeting, user): submission = ProjectFactory(title='Submission One', is_public=True) meeting.submissions.add(submission) submission.add_tag('poster', Auth(user)) api_utils.create_test_file(submission, submission.creator, create_guid=False) return submission @pytest.fixture() def private_meeting_submission(self, meeting, user): submission = ProjectFactory(title='Submission One', is_public=False) meeting.submissions.add(submission) submission.add_tag('poster', Auth(user)) return submission def test_meeting_detail(self, app, meeting, url, meeting_submission_one, private_meeting_submission): res = app.get(url) assert res.status_code == 200 data = res.json['data'] assert data['id'] == meeting.endpoint assert data['type'] == 'meetings' assert data['attributes']['name'] == meeting.name assert data['attributes']['type_one_submission_email'] == 'osf2019-poster@osf.io' assert data['attributes']['type_two_submission_email'] == 'osf2019-talk@osf.io' assert data['attributes']['submissions_count'] == 1 assert data['attributes']['location'] == 'Boulder, CO' assert 'start_date' in data['attributes'] assert 'end_date' in data['attributes'] assert data['attributes']['active'] is True assert data['attributes']['is_accepting_type_one'] is True assert data['attributes']['is_accepting_type_two'] is False assert data['attributes']['field_names']['submission1'] == 'poster' assert 
data['attributes']['field_names']['submission2'] == 'talk' assert '_/meetings/{}/'.format(meeting.endpoint) in data['links']['self'] assert '_/meetings/{}/submissions'.format(meeting.endpoint) in data['relationships']['submissions']['links']['related']['href'] assert data['relationships']['submissions']['links']['related']['meta']['count'] == 1 # Inactive meetings do not serialize submission emails meeting.active = False meeting.save() res = app.get(url) data = res.json['data'] assert data['attributes']['type_one_submission_email'] == '' assert data['attributes']['type_two_submission_email'] == '' assert data['attributes']['active'] is False
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python3 import sys import re def entry_is_device(entry): first_arg_type = entry[1][1:].split(' ')[0] device_types = ['VkDevice', 'VkCommandBuffer', 'VkQueue'] return (first_arg_type in device_types) and (entry[0] != 'vkGetDeviceProcAddr') def main(): pure_entrypoints = [] entrypoints = [] extensions = [] pure_list = ['vkCreateInstance', 'vkEnumerateInstanceExtensionProperties', 'vkEnumerateInstanceLayerProperties'] with open(sys.argv[1], 'r') as f: header = f.readlines() for line in header: m = re.search('typedef \S+.*PFN_([^\)]+)\)(.*);$', line) if m and m.group(1)[-3:] != 'KHR' and m.group(1)[-3:] != 'EXT' and m.group(2) != '(void)': entry = m.group(1) if entry == 'vkGetInstanceProcAddr': continue if entry in pure_list: pure_entrypoints.append((m.group(1), m.group(2))) else: entrypoints.append((m.group(1), m.group(2))) elif m and (m.group(1)[-3:] == 'KHR' or m.group(1)[-3:] == 'EXT') and m.group(2) != '(void)': entry = m.group(1) if 'Android' in entry: continue if 'Xlib' in entry: continue if 'Xcb' in entry: continue if 'Win32' in entry: continue if 'Wayland' in entry: continue if 'Mir' in entry: continue extensions.append((m.group(1), m.group(2))) with open(sys.argv[2], 'w') as f: print(''' /* This header is autogenerated by vulkan_loader_generator.py */ #ifndef VULKAN_SYMBOL_WRAPPER_H #define VULKAN_SYMBOL_WRAPPER_H #define VK_NO_PROTOTYPES #include <vulkan/vulkan.h> #ifdef __cplusplus extern "C" { #endif ''', file = f) for entry in pure_entrypoints: s = entry[0] print('extern PFN_{} vulkan_symbol_wrapper_{};'.format(s, s), file = f) print('#define {} vulkan_symbol_wrapper_{}'.format(s, s), file = f) for entry in entrypoints: s = entry[0] print('extern PFN_{} vulkan_symbol_wrapper_{};'.format(s, s), file = f) print('#define {} vulkan_symbol_wrapper_{}'.format(s, s), file = f) for entry in extensions: s = entry[0] print('extern PFN_{} vulkan_symbol_wrapper_{};'.format(s, s), file = f) print('#define {} vulkan_symbol_wrapper_{}'.format(s, s), file 
= f) print(''' void vulkan_symbol_wrapper_init(PFN_vkGetInstanceProcAddr get_instance_proc_addr); PFN_vkGetInstanceProcAddr vulkan_symbol_wrapper_instance_proc_addr(void); VkBool32 vulkan_symbol_wrapper_load_global_symbols(void); VkBool32 vulkan_symbol_wrapper_load_core_instance_symbols(VkInstance instance); VkBool32 vulkan_symbol_wrapper_load_core_symbols(VkInstance instance); VkBool32 vulkan_symbol_wrapper_load_core_device_symbols(VkDevice device); VkBool32 vulkan_symbol_wrapper_load_instance_symbol(VkInstance instance, const char *name, PFN_vkVoidFunction *ppSymbol); VkBool32 vulkan_symbol_wrapper_load_device_symbol(VkDevice device, const char *name, PFN_vkVoidFunction *ppSymbol); #define VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_SYMBOL(instance, name, pfn) vulkan_symbol_wrapper_load_instance_symbol(instance, name, (PFN_vkVoidFunction*) &(pfn)) #define VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_EXTENSION_SYMBOL(instance, name) vulkan_symbol_wrapper_load_instance_symbol(instance, #name, (PFN_vkVoidFunction*) & name) #define VULKAN_SYMBOL_WRAPPER_LOAD_DEVICE_SYMBOL(device, name, pfn) vulkan_symbol_wrapper_load_device_symbol(device, name, (PFN_vkVoidFunction*) &(pfn)) #define VULKAN_SYMBOL_WRAPPER_LOAD_DEVICE_EXTENSION_SYMBOL(device, name) vulkan_symbol_wrapper_load_device_symbol(device, #name, (PFN_vkVoidFunction*) & name) ''', file = f) print(''' #ifdef __cplusplus } #endif #endif ''', file = f) with open(sys.argv[3], 'w') as f: print(''' /* This header is autogenerated by vulkan_loader_generator.py */ #include "vulkan_symbol_wrapper.h" ''', file = f) for entry in pure_entrypoints: s = entry[0] print('PFN_{} vulkan_symbol_wrapper_{};'.format(s, s), file = f) for entry in entrypoints: s = entry[0] print('PFN_{} vulkan_symbol_wrapper_{};'.format(s, s), file = f) for entry in extensions: s = entry[0] print('PFN_{} vulkan_symbol_wrapper_{};'.format(s, s), file = f) print(''' static PFN_vkGetInstanceProcAddr GetInstanceProcAddr; void 
vulkan_symbol_wrapper_init(PFN_vkGetInstanceProcAddr get_instance_proc_addr) { GetInstanceProcAddr = get_instance_proc_addr; } PFN_vkGetInstanceProcAddr vulkan_symbol_wrapper_instance_proc_addr(void) { return GetInstanceProcAddr; } ''', file = f) print(''' VkBool32 vulkan_symbol_wrapper_load_instance_symbol(VkInstance instance, const char *name, PFN_vkVoidFunction *ppSymbol) { *ppSymbol = GetInstanceProcAddr(instance, name); return *ppSymbol != NULL; }''', file = f) print(''' VkBool32 vulkan_symbol_wrapper_load_device_symbol(VkDevice device, const char *name, PFN_vkVoidFunction *ppSymbol) { *ppSymbol = vkGetDeviceProcAddr(device, name); return *ppSymbol != NULL; }''', file = f) print(''' VkBool32 vulkan_symbol_wrapper_load_global_symbols(void) {''', file = f) for pure in pure_entrypoints: print(' if (!VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_SYMBOL(NULL, "{}", {})) return VK_FALSE;'.format(pure[0], pure[0]), file = f) print(' return VK_TRUE;', file = f) print('}', file = f) print(''' VkBool32 vulkan_symbol_wrapper_load_core_symbols(VkInstance instance) {''', file = f) for entry in entrypoints: print(' if (!VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_SYMBOL(instance, "{}", {})) return VK_FALSE;'.format(entry[0], entry[0]), file = f) print(' return VK_TRUE;', file = f) print('}', file = f) print(''' VkBool32 vulkan_symbol_wrapper_load_core_instance_symbols(VkInstance instance) {''', file = f) for entry in entrypoints: if not entry_is_device(entry): print(' if (!VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_SYMBOL(instance, "{}", {})) return VK_FALSE;'.format(entry[0], entry[0]), file = f) print(' return VK_TRUE;', file = f) print('}', file = f) print(''' VkBool32 vulkan_symbol_wrapper_load_core_device_symbols(VkDevice device) {''', file = f) for entry in entrypoints: if entry_is_device(entry): print(' if (!VULKAN_SYMBOL_WRAPPER_LOAD_DEVICE_SYMBOL(device, "{}", {})) return VK_FALSE;'.format(entry[0], entry[0]), file = f) print(' return VK_TRUE;', file = f) print('}', file = f) if __name__ 
== '__main__': main()
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ DataFrame-based machine learning APIs to let users quickly assemble and configure practical machine learning pipelines. """ from pyspark.ml.base import Estimator, Model, Predictor, PredictionModel, \ Transformer, UnaryTransformer from pyspark.ml.pipeline import Pipeline, PipelineModel from pyspark.ml import classification, clustering, evaluation, feature, fpm, \ image, recommendation, regression, stat, tuning, util, linalg, param __all__ = [ "Transformer", "UnaryTransformer", "Estimator", "Model", "Predictor", "PredictionModel", "Pipeline", "PipelineModel", "classification", "clustering", "evaluation", "feature", "fpm", "image", "recommendation", "regression", "stat", "tuning", "util", "linalg", "param", ]
unknown
codeparrot/codeparrot-clean
--- title: $bindable --- Ordinarily, props go one way, from parent to child. This makes it easy to understand how data flows around your app. In Svelte, component props can be _bound_, which means that data can also flow _up_ from child to parent. This isn't something you should do often — overuse can make your data flow unpredictable and your components harder to maintain — but it can simplify your code if used sparingly and carefully. It also means that a state proxy can be _mutated_ in the child. > [!NOTE] Mutation is also possible with normal props, but is strongly discouraged — Svelte will warn you if it detects that a component is mutating state it does not 'own'. To mark a prop as bindable, we use the `$bindable` rune: <!-- prettier-ignore --> ```svelte /// file: FancyInput.svelte <script> let { value = $bindable(), ...props } = $props(); </script> <input bind:value={value} {...props} /> <style> input { font-family: 'Comic Sans MS'; color: deeppink; } </style> ``` Now, a component that uses `<FancyInput>` can add the [`bind:`](bind) directive ([demo](/playground/untitled#H4sIAAAAAAAAE3WQwWrDMBBEf2URBSfg2nfFMZRCoYeecqx6UJx1IyqvhLUONcb_XqSkTUOSk1az7DBvJtEai0HI90nw6FHIJIhckO7i78n7IhzQctS2OuAtvXHESByEFFVoeuO5VqTYdN71DC-amvGV_MDQ9q6DrCjP0skkWymKJxYZOgxBfyKs4SGwZlxke7TWZcuVoqo8-1P1z3lraCcP2g64nk4GM5S1osrXf0JV-lrkgvGbheR-wDm_g30V8JL-1vpOCZFogpQsEsWcemtxscyhKArfOx9gjps0Lq4hzRVfemaYfu-PoIqqwKPFY_XpaIqj4tYRP7a6M3aUkD27zjSw0RTgbZN6Z8WNs66XsEP03tBXUueUJFlelvYx_wCuI3leNwIAAA==)): <!-- prettier-ignore --> ```svelte /// file: App.svelte <script> import FancyInput from './FancyInput.svelte'; let message = $state('hello'); </script> <FancyInput bind:value={message} /> <p>{message}</p> ``` The parent component doesn't _have_ to use `bind:` — it can just pass a normal prop. Some parents don't want to listen to what their children have to say. 
In this case, you can specify a fallback value for when no prop is passed at all: ```js /// file: FancyInput.svelte let { value = $bindable('fallback'), ...props } = $props(); ```
unknown
github
https://github.com/sveltejs/svelte
documentation/docs/02-runes/06-$bindable.md
from __future__ import unicode_literals from datetime import datetime, timedelta from urllib import quote from django.conf import settings from django.contrib.auth.models import User from django.db import transaction from django.utils import timezone import requests import waffle from remo.base.templatetags.helpers import urlparams from remo.base.utils import get_object_or_none from remo.celery import app from remo.remozilla.models import Bug from remo.remozilla.utils import get_last_updated_date, set_last_updated_date COMPONENTS = ['Budget Requests', 'Mentorship', 'Swag Requests', 'Planning'] BUGZILLA_FIELDS = ['is_confirmed', 'summary', 'creator', 'creation_time', 'component', 'whiteboard', 'op_sys', 'cc', 'id', 'status', 'assigned_to', 'resolution', 'last_change_time', 'flags'] URL = ('https://bugzilla.mozilla.org/rest/bug?api_key={api_key}' '&product=Mozilla%20Reps&component={component}&' 'include_fields={fields}&last_change_time={timestamp}&' 'offset={offset}&limit={limit}') COMMENT_URL = 'https://bugzilla.mozilla.org/rest/bug/{id}/comment?api_key={api_key}' LIMIT = 100 BUG_WHITEBOARD = 'Review Team approval needed' BUG_REVIEW = 'remo-review' BUG_APPROVAL = 'remo-approval' def parse_bugzilla_time(time): if not time: return None datetimeobj = datetime.strptime(time, '%Y-%m-%dT%H:%M:%SZ') datetimeobj = timezone.make_aware(datetimeobj, timezone.utc) return datetimeobj @app.task @transaction.atomic def fetch_bugs(components=COMPONENTS, days=None): """Fetch all bugs from Bugzilla. Loop over components and fetch bugs updated the last days. Link Bugzilla users with users on this website, when possible. # TODO: This can trigger a does not exist error because the task was picked # by the worker before the transaction was complete. 
Needs fixing after the # upgrade to a Django version > 1.8 """ now = timezone.now() if not days: changed_date = get_last_updated_date() else: changed_date = now - timedelta(int(days)) for component in components: offset = 0 url = URL.format(api_key=settings.REMOZILLA_API_KEY, component=quote(component), fields=','.join(BUGZILLA_FIELDS), timestamp=changed_date, offset=offset, limit=LIMIT) while True: bugs = requests.get(url).json() error = bugs.get('error') # Check the server response for errors if error: raise ValueError('Invalid response from server, {0}.'.format(bugs['message'])) remo_bugs = bugs.get('bugs', []) if not remo_bugs: break for bdata in remo_bugs: # Get comments for current bug comment_url = COMMENT_URL.format(id=bdata['id'], api_key=settings.REMOZILLA_API_KEY) comments = requests.get(comment_url).json() error = comments.get('error') if error: raise ValueError('Invalid response from server, {0}.' .format(comments['message'])) bug, created = Bug.objects.get_or_create(bug_id=bdata['id']) bug.summary = bdata.get('summary', '') creator_email = bdata['creator'] bug.creator = get_object_or_none(User, email=creator_email) bug.bug_creation_time = parse_bugzilla_time(bdata['creation_time']) bug.component = bdata['component'] bug.whiteboard = bdata.get('whiteboard', '') bug.cc.clear() for email in bdata.get('cc', []): cc_user = get_object_or_none(User, email=email) if cc_user: bug.cc.add(cc_user) bug.assigned_to = get_object_or_none( User, email=bdata['assigned_to']) bug.status = bdata['status'] bug.resolution = bdata.get('resolution', '') bug.bug_last_change_time = parse_bugzilla_time(bdata.get('last_change_time')) automated_voting_trigger = 0 bug.budget_needinfo.clear() bug.council_member_assigned = False bug.pending_mentor_validation = False for flag in bdata.get('flags', []): if flag['status'] == '?' 
and flag['name'] == BUG_APPROVAL: automated_voting_trigger += 1 if BUG_WHITEBOARD in bug.whiteboard: bug.council_member_assigned = True if ((flag['status'] == '?' and flag['name'] == 'needinfo' and 'requestee' in flag and flag['requestee'] == (settings.REPS_REVIEW_ALIAS))): automated_voting_trigger += 1 if flag['status'] == '?' and flag['name'] == BUG_REVIEW: bug.pending_mentor_validation = True if (flag['status'] == '?' and flag['name'] == 'needinfo' and 'requestee' in flag): email = flag['requestee'] user = get_object_or_none(User, email=email) if user: bug.budget_needinfo.add(user) if automated_voting_trigger == 2 and waffle.switch_is_active('automated_polls'): bug.council_vote_requested = True unicode_id = str(bdata['id']) bug_comments = comments['bugs'][unicode_id]['comments'] if bug_comments and bug_comments[0].get('text', ''): # Enforce unicode encoding. bug.first_comment = bug_comments[0]['text'] bug.save() offset += LIMIT url = urlparams(url, offset=offset) set_last_updated_date(now)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python -u # Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import time from os import listdir, unlink from os.path import join as path_join from unittest import main from uuid import uuid4 from swiftclient import client from swift.common import direct_client from swift.common.exceptions import ClientException from swift.common.utils import hash_path, readconf from swift.obj.diskfile import write_metadata, read_metadata, get_data_dir from test.probe.common import ReplProbeTest RETRIES = 5 def get_data_file_path(obj_dir): files = [] # We might need to try a few times if a request hasn't yet settled. For # instance, a PUT can return success when just 2 of 3 nodes has completed. 
for attempt in xrange(RETRIES + 1): try: files = sorted(listdir(obj_dir), reverse=True) break except Exception: if attempt < RETRIES: time.sleep(1) else: raise for filename in files: return path_join(obj_dir, filename) class TestObjectFailures(ReplProbeTest): def _setup_data_file(self, container, obj, data): client.put_container(self.url, self.token, container, headers={'X-Storage-Policy': self.policy.name}) client.put_object(self.url, self.token, container, obj, data) odata = client.get_object(self.url, self.token, container, obj)[-1] self.assertEquals(odata, data) opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] node_id = (onode['port'] - 6000) / 10 device = onode['device'] hash_str = hash_path(self.account, container, obj) obj_server_conf = readconf(self.configs['object-server'][node_id]) devices = obj_server_conf['app:object-server']['devices'] obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, device, get_data_dir(self.policy), opart, hash_str[-3:], hash_str) data_file = get_data_file_path(obj_dir) return onode, opart, data_file def run_quarantine(self): container = 'container-%s' % uuid4() obj = 'object-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'VERIFY') metadata = read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] self.assertEquals(odata, 'VERIFY') try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404) def run_quarantine_range_etag(self): container = 'container-range-%s' % uuid4() obj = 'object-range-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'RANGE') metadata = 
read_metadata(data_file) metadata['ETag'] = 'badetag' write_metadata(data_file, metadata) base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx} for header, result in [({'Range': 'bytes=0-2'}, 'RAN'), ({'Range': 'bytes=1-11'}, 'ANGE'), ({'Range': 'bytes=0-11'}, 'RANGE')]: req_headers = base_headers.copy() req_headers.update(header) odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers=req_headers)[-1] self.assertEquals(odata, result) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404) def run_quarantine_zero_byte_get(self): container = 'container-zbyte-%s' % uuid4() obj = 'object-zbyte-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'DATA') metadata = read_metadata(data_file) unlink(data_file) with open(data_file, 'w') as fpointer: write_metadata(fpointer, metadata) try: direct_client.direct_get_object( onode, opart, self.account, container, obj, conn_timeout=1, response_timeout=1, headers={'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404) def run_quarantine_zero_byte_head(self): container = 'container-zbyte-%s' % uuid4() obj = 'object-zbyte-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'DATA') metadata = read_metadata(data_file) unlink(data_file) with open(data_file, 'w') as fpointer: write_metadata(fpointer, metadata) try: direct_client.direct_head_object( onode, opart, self.account, container, obj, conn_timeout=1, response_timeout=1, headers={'X-Backend-Storage-Policy-Index': self.policy.idx}) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404) def 
run_quarantine_zero_byte_post(self): container = 'container-zbyte-%s' % uuid4() obj = 'object-zbyte-%s' % uuid4() onode, opart, data_file = self._setup_data_file(container, obj, 'DATA') metadata = read_metadata(data_file) unlink(data_file) with open(data_file, 'w') as fpointer: write_metadata(fpointer, metadata) try: headers = {'X-Object-Meta-1': 'One', 'X-Object-Meta-Two': 'Two', 'X-Backend-Storage-Policy-Index': self.policy.idx} direct_client.direct_post_object( onode, opart, self.account, container, obj, headers=headers, conn_timeout=1, response_timeout=1) raise Exception("Did not quarantine object") except ClientException as err: self.assertEquals(err.http_status, 404) def test_runner(self): self.run_quarantine() self.run_quarantine_range_etag() self.run_quarantine_zero_byte_get() self.run_quarantine_zero_byte_head() self.run_quarantine_zero_byte_post() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
/* * Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package io.ktor.client.webrtc.rs import io.ktor.client.webrtc.* import io.ktor.client.webrtc.rs.utils.* import io.ktor.utils.io.ExperimentalKtorApi import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Job import kotlinx.coroutines.channels.Channel import kotlinx.coroutines.delay import kotlinx.coroutines.withTimeout import kotlin.test.* import kotlin.time.Duration.Companion.milliseconds @OptIn(ExperimentalKtorApi::class) class WebRtcDataChannelTest { private lateinit var client: WebRtcClient @BeforeTest fun setup() { client = WebRtcClient(RustWebRtc) { mediaTrackFactory = MockMediaDevices() } } @AfterTest fun cleanup() { client.close() } private inline fun testDataChannel( realtime: Boolean = true, crossinline block: suspend CoroutineScope.(WebRtcPeerConnection, WebRtcPeerConnection, MutableList<Job>) -> Unit ) { runTestWithTimeout(realtime) { jobs -> client.createPeerConnection().use { pc1 -> client.createPeerConnection().use { pc2 -> block(pc1, pc2, jobs) } } } } private suspend fun waitForChannel(events: Channel<DataChannelEvent>) = withTimeout(5000) { val event = events.receive() assertTrue(event is DataChannelEvent.Open, "Expected DataChannelEvent.Open, got $event") assertEquals(WebRtc.DataChannel.State.OPEN, event.channel.state) event.channel } private suspend fun WebRtcDataChannel.waitForClose(events: Channel<DataChannelEvent>) { val closeEvent = withTimeout(2000) { while (true) { val event = events.receive() if (event is DataChannelEvent.Closed) { return@withTimeout event } } @Suppress("UNREACHABLE_CODE") error("Expected DataChannelEvent.Closed") } assertEquals(this, closeEvent.channel) assertEquals(WebRtc.DataChannel.State.CLOSED, state) } @Test fun testDataChannelCommunication() = testDataChannel { pc1, pc2, jobs -> val dataChannelEvents = pc2.dataChannelEvents.collectToChannel(this, jobs) // Create data channel on pc1 
val dataChannel1 = pc1.createDataChannel("test-channel") { protocol = "test-protocol" } assertEquals(WebRtc.DataChannel.State.CONNECTING, dataChannel1.state) negotiate(pc1, pc2) val dataChannel2 = waitForChannel(dataChannelEvents) // Verify data channel properties assertTrue(dataChannel1.ordered) assertTrue(dataChannel2.ordered) assertEquals("test-channel", dataChannel1.label) assertEquals("test-channel", dataChannel2.label) assertEquals("test-protocol", dataChannel1.protocol) assertEquals("test-protocol", dataChannel2.protocol) assertEquals(WebRtc.DataChannel.State.OPEN, dataChannel1.state) assertEquals(WebRtc.DataChannel.State.OPEN, dataChannel2.state) assertEquals(null, dataChannel2.tryReceive()) assertEquals(null, dataChannel1.tryReceiveText()) assertEquals(null, dataChannel2.tryReceiveBinary()) // Test text message communication val testMessage = "Hello from pc1!" dataChannel1.send(testMessage) val receivedMessage = withTimeout(5000) { val message = dataChannel2.receive() assertTrue(message is WebRtc.DataChannel.Message.Text, "Expected string message") message.data } assertEquals(testMessage, receivedMessage) // Test binary message communication val testBinaryData = byteArrayOf(1, 2, 3, 4, 5) dataChannel2.send(testBinaryData) val receivedBinaryMessage = withTimeout(5000) { val message = dataChannel1.receive() assertTrue(message is WebRtc.DataChannel.Message.Binary) message.data } assertContentEquals(testBinaryData, receivedBinaryMessage) // Test bidirectional communication dataChannel1.send("Message from pc1") dataChannel2.send("Message from pc2") val msg1 = withTimeout(2000) { dataChannel2.receiveText() } val msg2 = withTimeout(2000) { dataChannel1.receiveText() } assertEquals("Message from pc1", msg1) assertEquals("Message from pc2", msg2) // Test channel closing dataChannel1.close() dataChannel2.waitForClose(dataChannelEvents) } @Test fun testDataChannelOptions() = testDataChannel { pc1, pc2, _ -> val dataChannel = pc1.createDataChannel(label = 
"options-test") { id = 42 ordered = false maxRetransmits = 3 negotiated = false protocol = "custom-protocol" } assertEquals(null, dataChannel.id, "Expected id to be null before negotiation") assertEquals("options-test", dataChannel.label) assertEquals("custom-protocol", dataChannel.protocol) assertFalse(dataChannel.ordered) assertEquals(3, dataChannel.maxRetransmits) assertFalse(dataChannel.negotiated) negotiate(pc1, pc2) withTimeout(5000) { // id is set asynchronously after negotiation while (dataChannel.id == null) { delay(duration = 10.milliseconds) } assertTrue(dataChannel.id!! >= 0, "Expected id to be non-negative after negotiation") } val dataChannel2 = pc1.createDataChannel(label = "options-test2") { id = 42 negotiated = true } // negotiated id is set immediately assertEquals(42, dataChannel2.id) assertFails { // maxRetransmits and maxPacketLifeTime can't be specified at the same time pc1.createDataChannel(label = "options-test") { maxRetransmits = 2 maxPacketLifeTime = 1000.milliseconds } } } @Test fun testDataChannelSendManyMessages() = testDataChannel { pc1, pc2, jobs -> val dataChannelEvents = pc2.dataChannelEvents.collectToChannel(this, jobs) val dataChannel1 = pc1.createDataChannel("multi-message-test") negotiate(pc1, pc2) val dataChannel2 = waitForChannel(dataChannelEvents) // Send multiple messages rapidly val messageCount = 1000 repeat(messageCount) { i -> dataChannel1.send("Message $i") } // Receive all messages withTimeout(10_000) { repeat(messageCount) { i -> assertEquals("Message $i", dataChannel2.receiveText()) } } } @Test fun testDataChannelCloseHandling() = testDataChannel { pc1, pc2, jobs -> val dataChannel1 = pc1.createDataChannel("close-test") val dataChannelEvents1 = pc1.dataChannelEvents.collectToChannel(this, jobs) val dataChannelEvents2 = pc2.dataChannelEvents.collectToChannel(this, jobs) negotiate(pc1, pc2) val dataChannel2 = waitForChannel(dataChannelEvents2) // Test sending on a closed channel dataChannel1.closeTransport() 
dataChannel1.waitForClose(dataChannelEvents1) dataChannel2.waitForClose(dataChannelEvents2) assertFails { dataChannel1.send("Hello") } assertFails { dataChannel2.send("Hello") } assertFails { dataChannel1.receive() } assertEquals(null, dataChannel2.tryReceive()) } @Test fun testDataChannelBufferedAmountLowEvent() = testDataChannel { pc1, pc2, jobs -> val dataChannelEvents1 = pc1.dataChannelEvents.collectToChannel(this, jobs) val dataChannelEvents2 = pc2.dataChannelEvents.collectToChannel(this, jobs) // Create data channel on pc1 val dataChannel1 = pc1.createDataChannel("buffered-amount-test") negotiate(pc1, pc2) val dataChannel2 = waitForChannel(dataChannelEvents2) val threshold = 1000L dataChannel1.setBufferedAmountLowThreshold(threshold) val largeData = List(1111) { it.toByte() }.toByteArray() dataChannel1.send(largeData) // Now wait for the BufferedAmountLow event val bufferedAmountLowEvent = withTimeout(5000) { while (true) { val event = dataChannelEvents1.receive() if (event is DataChannelEvent.BufferedAmountLow) { // assert there was only one event fired assertTrue(dataChannelEvents1.tryReceive().isFailure) return@withTimeout event } } @Suppress("UNREACHABLE_CODE") error("Expected DataChannelEvent.BufferedAmountLow") } // Verify the event assertEquals(dataChannel1, bufferedAmountLowEvent.channel) assertTrue( dataChannel1.bufferedAmount <= threshold, "Buffered amount should be below threshold when event is fired" ) // Clean up dataChannel1.close() dataChannel2.waitForClose(dataChannelEvents2) } }
kotlin
github
https://github.com/ktorio/ktor
ktor-client/ktor-client-webrtc/ktor-client-webrtc-rs/common/test/io/ktor/client/webrtc/rs/WebRtcDataChannelTest.kt
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_fastspeech2_conformer import * from .modeling_fastspeech2_conformer import * from .tokenization_fastspeech2_conformer import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
python
github
https://github.com/huggingface/transformers
src/transformers/models/fastspeech2_conformer/__init__.py
// Copyright 2018 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package mockserver provides mock implementations for etcdserver's server interface. package mockserver
go
github
https://github.com/etcd-io/etcd
client/v3/mock/mockserver/doc.go
use super::prelude::*; pub(crate) struct MustUseParser; impl<S: Stage> SingleAttributeParser<S> for MustUseParser { const PATH: &[Symbol] = &[sym::must_use]; const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; const ON_DUPLICATE: OnDuplicate<S> = OnDuplicate::WarnButFutureError; const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowListWarnRest(&[ Allow(Target::Fn), Allow(Target::Enum), Allow(Target::Struct), Allow(Target::Union), Allow(Target::Method(MethodKind::Trait { body: false })), Allow(Target::Method(MethodKind::Trait { body: true })), Allow(Target::Method(MethodKind::Inherent)), Allow(Target::ForeignFn), // `impl Trait` in return position can trip // `unused_must_use` if `Trait` is marked as // `#[must_use]` Allow(Target::Trait), Error(Target::WherePredicate), ]); const TEMPLATE: AttributeTemplate = template!( Word, NameValueStr: "reason", "https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute" ); fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option<AttributeKind> { Some(AttributeKind::MustUse { span: cx.attr_span, reason: match args { ArgParser::NoArgs => None, ArgParser::NameValue(name_value) => { let Some(value_str) = name_value.value_as_str() else { cx.expected_string_literal( name_value.value_span, Some(&name_value.value_as_lit()), ); return None; }; Some(value_str) } ArgParser::List(list) => { cx.expected_nv_or_no_args(list.span); return None; } }, }) } }
rust
github
https://github.com/rust-lang/rust
compiler/rustc_attr_parsing/src/attributes/must_use.rs
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: anze@reciprocitylabs.com # Maintained By: anze@reciprocitylabs.com """ObjectOwner permissions changes. Revision ID: 169eef85896d Revises: 33a9ca4c32ac Create Date: 2013-10-24 02:58:42.263799 """ # revision identifiers, used by Alembic. revision = '169eef85896d' down_revision = '33a9ca4c32ac' import json import sqlalchemy as sa from alembic import op from datetime import datetime from sqlalchemy.sql import table, column roles_table = table('roles', column('id', sa.Integer), column('name', sa.String), column('permissions_json', sa.Text), column('description', sa.Text), column('modified_by_id', sa.Integer), column('created_at', sa.DateTime), column('updated_at', sa.DateTime), column('context_id', sa.Integer), ) def upgrade(): basic_objects_editable = [ 'Categorization', 'Category', 'Control', 'ControlControl', 'ControlSection', 'Cycle', 'DataAsset', 'Directive', 'Contract', 'Policy', 'Regulation', 'DirectiveControl', 'Document', 'Facility', 'Help', 'Market', 'Objective', 'ObjectiveControl', 'ObjectControl', 'ObjectDocument', 'ObjectObjective', 'ObjectPerson', 'ObjectSection', 'Option', 'OrgGroup', 'PopulationSample', 'Product', 'ProgramControl', 'ProgramDirective', 'Project', 'Relationship', 'RelationshipType', 'Section', 'SystemOrProcess', 'System', 'Process', 'SystemControl', 'SystemSystem', ] basic_objects_readable = list(basic_objects_editable) basic_objects_readable.extend([ 'Person', 'Program', 'Role', 'ObjectOwner', #'UserRole', ?? why? 
]) basic_objects_creatable = list(basic_objects_editable) basic_objects_creatable.extend([ 'Person', ]) basic_objects_updateable = list(basic_objects_editable) basic_objects_updateable.extend([ 'Person', ]) basic_objects_deletable = list(basic_objects_editable) op.execute(roles_table.update()\ .where(roles_table.c.name == 'Reader')\ .values(permissions_json=json.dumps({ 'read': basic_objects_readable, }))) op.execute(roles_table.update()\ .where(roles_table.c.name == 'ObjectEditor')\ .values(permissions_json=json.dumps({ 'create': basic_objects_creatable, 'read': basic_objects_readable, 'update': basic_objects_updateable, 'delete': basic_objects_deletable, }))) def downgrade(): # No reason to downgrade this one pass
unknown
codeparrot/codeparrot-clean
/** * The default argument placeholder value for methods. * * @type {Object} */ module.exports = {};
javascript
github
https://github.com/lodash/lodash
fp/placeholder.js
# -*- coding:utf-8 -*- """ tests.backend ~~~~~~~~~~~~~ :copyright: (c) 2015 by Jason Lai. :license: BSD, see LICENSE for more details. """ from faker import Factory from functools import partial fake = Factory.create() def test_redis_backend_basic(rb, fake_manager, fake_coll): fake_manager.collmap = {'t1': fake_coll, 't2': fake_coll} for name, coll in fake_manager.collmap.items(): rb.set_collection_index(name, coll) for name, coll in fake_manager.collmap.items(): pair = rb.get_collection_index(name) assert pair == [name, fake_coll.__class__.__name__] # ---------------------- check get all indexes ---------------------- rv = rb.get_collection_indexes() matching = {'t1': '_t', 't2': '_t'} assert rv == matching # if name not exist get_collection_index should return None pair = rb.get_collection_index('not-exists') assert pair is None def test_redis_backend_metadata(rb, fake_coll): taggings = [fake.domain_name() for i in range(10)] ts_pairs = [(exp, exp-100) for exp in range(200, 300, 10)] first_ts, mid_ts, last_ts = ts_pairs[0][1], ts_pairs[4][1], ts_pairs[-1][1] args = ['hello', 'world', 42] # ---------------- check metadata set and query operation ---------------- for i, pair in enumerate(ts_pairs, 1): exp, ts = pair for t in taggings: rb.set_collection_metadata(fake_coll, t, exp, ts, *args) assert rb.get_collection_length(fake_coll) == [i] rv = rb.query_collection_metadata(fake_coll, t, 0, 1000) assert len(rv) == i assert rv[i-1] == ([exp] + args, ts) rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000) assert len(rv) == i assert len(rv[ts]) == len(taggings) rv = rb.query_collection_metadata_all(fake_coll, 0, 1000) assert len(rv) == i assert len(rv[ts]) == len(taggings) for info in rv[ts].values(): assert info == [exp] + args # ------------------- check metadata delete operations ------------------- # delete one tagging info in first ts rb.del_collection_metadata_by_range(fake_coll, taggings[0], first_ts, first_ts) rv = 
rb.query_collection_metadata(fake_coll, t, 0, 1000) assert len(rv) == len(ts_pairs) rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000) assert len(rv) == len(ts_pairs) assert len(rv[first_ts]) == len(taggings) - 1 assert len(rv[last_ts]) == len(rv[mid_ts]) == len(taggings) assert rb.get_collection_length(fake_coll) == [len(taggings)] # delete all the taggings in first ts for t in taggings[1:]: rb.del_collection_metadata_by_range(fake_coll, t, first_ts, first_ts) rv = rb.query_collection_metadata(fake_coll, t, 0, 1000) assert len(rv) == len(ts_pairs) - 1 rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000) assert len(rv) == len(ts_pairs) - 1 assert first_ts not in rv assert len(rv[last_ts]) == len(rv[mid_ts]) == len(taggings) assert rb.get_collection_length(fake_coll) == [len(taggings) - 1] # delete all taggings info in last five ts for exp, ts in ts_pairs[-5:]: for t in taggings: rb.del_collection_metadata_by_range(fake_coll, t, ts, ts) rv = rb.query_collection_metadata(fake_coll, t, 0, 1000) assert len(rv) == len(ts_pairs) - 6 rv = rb.query_collection_metadata_tagging(fake_coll, 0, 1000) assert len(rv) == len(ts_pairs) - 6 assert first_ts not in rv and last_ts not in rv assert len(rv[mid_ts]) == len(taggings) assert rb.get_collection_length(fake_coll) == [len(taggings) - 6] # ------------------ check no metadata exists situations ------------------ # delete a not exists ts rb.del_collection_metadata_by_range(fake_coll, taggings[4], 9999, 9999) # delete a not exists tagging in mid_ts rb.del_collection_metadata_by_range(fake_coll, taggings[4], mid_ts, mid_ts) rb.del_collection_metadata_by_range(fake_coll, taggings[4], mid_ts, mid_ts) # query a unexists ts assert rb.query_collection_metadata(fake_coll, mid_ts, 9999, 9999) is None assert rb.query_collection_metadata_tagging(fake_coll, 9999, 9999) is None assert rb.query_collection_metadata_all(fake_coll, 9999, 9999) is None def _add_inc_coll_item(rb, coll, tagging, ts, value): 
rb.set_collection_metadata(coll, tagging, ts+100, ts) rb.inc_coll_cache_set(coll, _mk_inc_coll_field(tagging, ts), value) def _mk_inc_coll_field(tagging, ts): field_key = '{}:{}'.format(ts, tagging) return field_key def _assert_inc_coll_cache_size(rb, coll, cache_len, md_len): _md_len, _cache_len = rb.get_collection_length(coll, klass="IncreaseCollection") assert _md_len == md_len assert _cache_len == cache_len def test_redis_backend_inc_coll(rb, fake_coll): tagging, other_tagging = 'day', 'for_diff' v = {i: i for i in range(20)} timestamps = [100, 110, 120, 130, 140] assert_cache_size = partial(_assert_inc_coll_cache_size, rb, fake_coll) # ---------------- check the operation of item adding ---------------- for ts in timestamps: _add_inc_coll_item(rb, fake_coll, tagging, ts, v) # double adding for checking the logic of duplacate handle for ts in timestamps: _add_inc_coll_item(rb, fake_coll, tagging, ts, v) # adding the other_tagging for the cache size check below for ts in timestamps: _add_inc_coll_item(rb, fake_coll, other_tagging, ts, v) print('Success Adding datas...\n\n\n') assert_cache_size(10, 5) # ------------------ check the cache data get operations ------------------ fields = [_mk_inc_coll_field(tagging, ts) for ts in timestamps] rv = rb.inc_coll_caches_get(fake_coll, *fields) for r in rv: assert r == v rb.inc_coll_caches_del(fake_coll, *fields) rv = rb.inc_coll_caches_get(fake_coll, *fields) for r in rv: assert r is None assert_cache_size(5, 5) # if no fields specified assert rb.inc_coll_caches_get(fake_coll) == [] # ---------------- check for the inc_coll_keys_delete ---------------- assert_cache_size(5, 5) rb.delete_collection_keys(fake_coll, klass="IncreaseCollection") assert_cache_size(0, 0) def test_redis_backend_unique_count_coll(rb, fake_coll): items_num = 200 tagging = 'day' v = {fake.uuid4() for i in range(items_num)} timestamps = [100, 200, 300] # ----------- check the operation of item adding and getting ---------- for ts in timestamps: rv = 
rb.uniq_count_coll_cache_set(fake_coll, ts, tagging, v) assert rv == items_num rv = rb.uniq_count_coll_cache_set(fake_coll, ts, tagging, v) assert rv == 0 rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps) for item in rv: assert item == v assert len(item) == items_num rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps, count_only=True) for count in rv: assert count == items_num # ---------------- check for the operation of deleting ---------------- rv = rb.uniq_count_coll_cache_del(fake_coll, tagging, timestamps[0:1]) assert rv == 1 rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps[0:1]) assert rv == [set()] rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps[1:]) for item in rv: assert item == v assert len(item) == items_num # uniq_count_coll_cache_pop 50 items rv = rb.uniq_count_coll_cache_pop(fake_coll, tagging, timestamps[1:], 50) for item in rv: assert len(item) == 50 rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps[1:]) for item in rv: assert len(item) == items_num - 50 # delete remain items rv = rb.uniq_count_coll_cache_del(fake_coll, tagging, timestamps[1:]) assert rv == 2 rv = rb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps) assert rv == [set(), set(), set()] def test_redis_backend_sorted_count_coll(rb, fake_coll): tagging = 'day' v = {fake.uuid4(): i for i in range(200)} v2 = [(member, score) for member, score in v.items()] v2 = sorted(v2, key=lambda x: x[1]) timestamps = [100, 200, 300] # ----------- check the operation of item adding and getting ---------- for ts in timestamps: rv = rb.sorted_count_coll_cache_set(fake_coll, ts, tagging, v) assert rv == 200 rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps) for item in rv: assert item == v2 rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps, topN=100) for item in rv: assert item == v2[100:] # ---------------- check for the operation of deleting ---------------- rv = 
rb.sorted_count_coll_cache_del(fake_coll, tagging, timestamps[0:1]) assert rv == 1 rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps[0:1]) assert rv == [[]] rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps[1:]) for item in rv: assert item == v2 rv = rb.sorted_count_coll_cache_del(fake_coll, tagging, timestamps[1:]) assert rv == 2 rv = rb.sorted_count_coll_cache_get(fake_coll, tagging, timestamps) assert rv == [[], [], []]
unknown
codeparrot/codeparrot-clean
# -*- coding: utf8 -*- from __future__ import print_function import os import json import logging import time import slacker from websocket import ( create_connection, WebSocketException, WebSocketConnectionClosedException ) from slackbot.utils import to_utf8 logger = logging.getLogger(__name__) class SlackClient(object): def __init__(self, token, bot_icon=None, bot_emoji=None, connect=True): self.token = token self.bot_icon = bot_icon self.bot_emoji = bot_emoji self.username = None self.domain = None self.login_data = None self.websocket = None self.users = {} self.channels = {} self.connected = False self.webapi = slacker.Slacker(self.token) if connect: self.rtm_connect() def rtm_connect(self): reply = self.webapi.rtm.start().body self.parse_slack_login_data(reply) def reconnect(self): while True: try: self.rtm_connect() logger.warning('reconnected to slack rtm websocket') return except: logger.exception('failed to reconnect') time.sleep(1) def parse_slack_login_data(self, login_data): self.login_data = login_data self.domain = self.login_data['team']['domain'] self.username = self.login_data['self']['name'] self.users = dict((u['id'], u) for u in login_data['users']) self.parse_channel_data(login_data['channels']) self.parse_channel_data(login_data['groups']) self.parse_channel_data(login_data['ims']) try: self.websocket = create_connection(self.login_data['url']) self.websocket.sock.setblocking(0) except: raise SlackConnectionError def parse_channel_data(self, channel_data): self.channels.update({c['id']: c for c in channel_data}) def send_to_websocket(self, data): """Send (data) directly to the websocket.""" data = json.dumps(data) self.websocket.send(data) def ping(self): return self.send_to_websocket({'type': 'ping'}) def websocket_safe_read(self): """Returns data if available, otherwise ''. 
Newlines indicate multiple messages """ data = '' while True: try: data += '{0}\n'.format(self.websocket.recv()) except WebSocketException, e: if isinstance(e, WebSocketConnectionClosedException): logger.warning('lost websocket connection, try to reconnect now') else: logger.warning('websocket exception: %s', e) self.reconnect() except: return data.rstrip() def rtm_read(self): json_data = self.websocket_safe_read() data = [] if json_data != '': for d in json_data.split('\n'): data.append(json.loads(d)) return data def rtm_send_message(self, channel, message, attachments=None): message_json = { 'type': 'message', 'channel': channel, 'text': message, 'attachments': attachments } self.send_to_websocket(message_json) def upload_file(self, channel, fname, fpath, comment): fname = fname or to_utf8(os.path.basename(fpath)) self.webapi.files.upload(fpath, channels=channel, filename=fname, initial_comment=comment) def send_message(self, channel, message, attachments=None): self.webapi.chat.post_message( channel, message, username=self.login_data['self']['name'], icon_url=self.bot_icon, icon_emoji=self.bot_emoji, attachments=attachments) def get_channel(self, channel_id): return Channel(self, self.channels[channel_id]) def find_user_by_name(self, username): for userid, user in self.users.iteritems(): if user['name'] == username: return userid class SlackConnectionError(Exception): pass class Channel(object): def __init__(self, slackclient, body): self._body = body self._client = slackclient def upload_file(self, fname, fpath, initial_comment=''): self._client.upload_file( self._body['id'], to_utf8(fname), to_utf8(fpath), to_utf8(initial_comment) )
unknown
codeparrot/codeparrot-clean
''' ''' # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os Test.Summary = ''' Test experimental/multiplexer. ''' # need Curl Test.SkipUnless( Condition.PluginExists('multiplexer.so') ) Test.ContinueOnFail = False # Define default ATS ts = Test.MakeATSProcess("ts") server = Test.MakeOriginServer("server") request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) ts.Disk.records_config.update({ 'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'multiplexer', }) ts.Disk.remap_config.AddLine( 'map http://www.example.com http://127.0.0.1:{0} @plugin=multiplexer.so'.format(server.Variables.Port) ) # For now, just make sure the plugin loads without error. 
tr = Test.AddTestRun() tr.Processes.Default.Command = 'curl --silent --proxy 127.0.0.1:{0} "http://www.example.com" -H "Proxy-Connection: close"'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) ts.Streams.stderr = "gold/multiplexer.gold" tr.StillRunningAfter = ts
unknown
codeparrot/codeparrot-clean
# Copyright David Abrahams 2004. Distributed under the Boost # Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) r'''>>> import pickle4_ext >>> import pickle >>> def world_getinitargs(self): ... return (self.get_country(),) >>> pickle4_ext.world.__getinitargs__ = world_getinitargs >>> pickle4_ext.world.__module__ 'pickle4_ext' >>> pickle4_ext.world.__safe_for_unpickling__ 1 >>> pickle4_ext.world.__name__ 'world' >>> pickle4_ext.world('Hello').__reduce__() (<class 'pickle4_ext.world'>, ('Hello',)) >>> wd = pickle4_ext.world('California') >>> pstr = pickle.dumps(wd) >>> wl = pickle.loads(pstr) >>> print wd.greet() Hello from California! >>> print wl.greet() Hello from California! ''' def run(args = None): import sys import doctest if args is not None: sys.argv = args return doctest.testmod(sys.modules.get(__name__)) if __name__ == '__main__': print "running..." import sys status = run()[0] if (status == 0): print "Done." sys.exit(status)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com> # This file is released under the GNU GPL, version 3 or a later revision. # For further details see the COPYING file from datetime import timedelta from datetime import datetime from collections import deque import subprocess import shlex import email import os import re from email.mime.audio import MIMEAudio from email.mime.base import MIMEBase from email.mime.image import MIMEImage from email.mime.text import MIMEText import urwid import magic from twisted.internet import reactor from twisted.internet.protocol import ProcessProtocol from twisted.internet.defer import Deferred import StringIO import logging def split_commandline(s, comments=False, posix=True): """ splits semi-colon separated commandlines """ # shlex seems to remove unescaped quotes s = s.replace('\'', '\\\'') # encode s to utf-8 for shlex if isinstance(s, unicode): s = s.encode('utf-8') lex = shlex.shlex(s, posix=posix) lex.whitespace_split = True lex.whitespace = ';' if not comments: lex.commenters = '' return list(lex) def split_commandstring(cmdstring): """ split command string into a list of strings to pass on to subprocess.Popen and the like. This simply calls shlex.split but works also with unicode bytestrings. """ if isinstance(cmdstring, unicode): cmdstring = cmdstring.encode('utf-8', errors='ignore') return shlex.split(cmdstring) def safely_get(clb, E, on_error=''): """ returns result of :func:`clb` and falls back to `on_error` in case exception `E` is raised. :param clb: function to evaluate :type clb: callable :param E: exception to catch :type E: Exception :param on_error: default string returned when exception is caught :type on_error: str """ try: return clb() except E: return on_error def string_sanitize(string, tab_width=8): r""" strips, and replaces non-printable characters :param tab_width: number of spaces to replace tabs with. 
Read from `globals.tabwidth` setting if `None` :type tab_width: int or `None` >>> string_sanitize(' foo\rbar ', 8) 'foobar' >>> string_sanitize('foo\tbar', 8) 'foo bar' >>> string_sanitize('foo\t\tbar', 8) 'foo bar' """ string = string.strip() string = string.replace('\r', '') lines = list() for line in string.split('\n'): tab_count = line.count('\t') if tab_count > 0: line_length = 0 new_line = list() for i, chunk in enumerate(line.split('\t')): line_length += len(chunk) new_line.append(chunk) if i < tab_count: next_tab_stop_in = tab_width - (line_length % tab_width) new_line.append(' ' * next_tab_stop_in) line_length += next_tab_stop_in lines.append(''.join(new_line)) else: lines.append(line) return '\n'.join(lines) def string_decode(string, enc='ascii'): """ safely decodes string to unicode bytestring, respecting `enc` as a hint. """ if enc is None: enc = 'ascii' try: string = unicode(string, enc, errors='replace') except LookupError: # malformed enc string string = string.decode('ascii', errors='replace') except TypeError: # already unicode pass return string def shorten(string, maxlen): """shortens string if longer than maxlen, appending ellipsis""" if maxlen > 1 and len(string) > maxlen: string = string[:maxlen - 1] + u'\u2026' return string[:maxlen] def shorten_author_string(authors_string, maxlength): """ Parse a list of authors concatenated as a text string (comma separated) and smartly adjust them to maxlength. 1) If the complete list of sender names does not fit in maxlength, it tries to shorten names by using only the first part of each. 2) If the list is still too long, hide authors according to the following priority: - First author is always shown (if too long is shorten with ellipsis) - If possible, last author is also shown (if too long, uses ellipsis) - If there are more than 2 authors in the thread, show the maximum of them. More recent senders have higher priority. 
- If it is finally necessary to hide any author, an ellipsis between first and next authors is added. >>> authors = u'King Kong, Mucho Muchacho, Jaime Huerta, Flash Gordon' >>> print shorten_author_string(authors, 60) King Kong, Mucho Muchacho, Jaime Huerta, Flash Gordon >>> print shorten_author_string(authors, 40) King, Mucho, Jaime, Flash >>> print shorten_author_string(authors, 20) King, …, Jai…, Flash >>> print shorten_author_string(authors, 10) King, … >>> print shorten_author_string(authors, 2) K… >>> print shorten_author_string(authors, 1) K """ # I will create a list of authors by parsing author_string. I use # deque to do popleft without performance penalties authors = deque() # If author list is too long, it uses only the first part of each # name (gmail style) short_names = len(authors_string) > maxlength for au in authors_string.split(", "): if short_names: author_as_list = au.split() if len(author_as_list) > 0: authors.append(author_as_list[0]) else: authors.append(au) # Author chain will contain the list of author strings to be # concatenated using commas for the final formatted author_string. authors_chain = deque() if len(authors) == 0: return u'' # reserve space for first author first_au = shorten(authors.popleft(), maxlength) remaining_length = maxlength - len(first_au) # Tries to add an ellipsis if no space to show more than 1 author if authors and maxlength > 3 and remaining_length < 3: first_au = shorten(first_au, maxlength - 3) remaining_length += 3 # Tries to add as more authors as possible. 
It takes into account # that if any author will be hidden, and ellipsis should be added while authors and remaining_length >= 3: au = authors.pop() if len(au) > 1 and (remaining_length == 3 or (authors and remaining_length < 7)): authors_chain.appendleft(u'\u2026') break else: if authors: # 5= ellipsis + 2 x comma and space used as separators au_string = shorten(au, remaining_length - 5) else: # 2 = comma and space used as separator au_string = shorten(au, remaining_length - 2) remaining_length -= len(au_string) + 2 authors_chain.appendleft(au_string) # Add the first author to the list and concatenate list authors_chain.appendleft(first_au) authorsstring = ', '.join(authors_chain) return authorsstring def pretty_datetime(d): """ translates :class:`datetime` `d` to a "sup-style" human readable string. >>> now = datetime.now() >>> now.strftime('%c') 'Sat 31 Mar 2012 14:47:26 ' >>> pretty_datetime(now) u'just now' >>> pretty_datetime(now - timedelta(minutes=1)) u'1min ago' >>> pretty_datetime(now - timedelta(hours=5)) u'5h ago' >>> pretty_datetime(now - timedelta(hours=12)) u'02:54am' >>> pretty_datetime(now - timedelta(days=1)) u'yest 02pm' >>> pretty_datetime(now - timedelta(days=2)) u'Thu 02pm' >>> pretty_datetime(now - timedelta(days=7)) u'Mar 24' >>> pretty_datetime(now - timedelta(days=356)) u'Apr 2011' """ ampm = d.strftime('%P') if len(ampm): hourfmt = '%I' + ampm hourminfmt = '%I:%M' + ampm else: hourfmt = '%Hh' hourminfmt = '%H:%M' now = datetime.now() today = now.date() if d.date() == today or d > now - timedelta(hours=6): delta = datetime.now() - d if delta.seconds < 60: string = 'just now' elif delta.seconds < 3600: string = '%dmin ago' % (delta.seconds / 60) elif delta.seconds < 6 * 3600: string = '%dh ago' % (delta.seconds / 3600) else: string = d.strftime(hourminfmt) elif d.date() == today - timedelta(1): string = d.strftime('yest ' + hourfmt) elif d.date() > today - timedelta(7): string = d.strftime('%a ' + hourfmt) elif d.year != today.year: string = 
d.strftime('%b %Y') else: string = d.strftime('%b %d') return string_decode(string, 'UTF-8') def call_cmd(cmdlist, stdin=None): """ get a shell commands output, error message and return value and immediately return. .. warning:: This returns with the first screen content for interctive commands. :param cmdlist: shellcommand to call, already splitted into a list accepted by :meth:`subprocess.Popen` :type cmdlist: list of str :param stdin: string to pipe to the process :type stdin: str :return: triple of stdout, error msg, return value of the shell command :rtype: str, str, int """ out, err, ret = '', '', 0 try: if stdin: proc = subprocess.Popen(cmdlist, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate(stdin) ret = proc.poll() else: out = subprocess.check_output(cmdlist) # todo: get error msg. rval except (subprocess.CalledProcessError, OSError), e: err = str(e) ret = -1 out = string_decode(out, urwid.util.detected_encoding) err = string_decode(err, urwid.util.detected_encoding) return out, err, ret def call_cmd_async(cmdlist, stdin=None, env=None): """ get a shell commands output, error message and return value as a deferred. 
:type cmdlist: list of str :param stdin: string to pipe to the process :type stdin: str :return: deferred that calls back with triple of stdout, stderr and return value of the shell command :rtype: `twisted.internet.defer.Deferred` """ class _EverythingGetter(ProcessProtocol): def __init__(self, deferred): self.deferred = deferred self.outBuf = StringIO.StringIO() self.errBuf = StringIO.StringIO() self.outReceived = self.outBuf.write self.errReceived = self.errBuf.write def processEnded(self, status): termenc = urwid.util.detected_encoding out = string_decode(self.outBuf.getvalue(), termenc) err = string_decode(self.errBuf.getvalue(), termenc) if status.value.exitCode == 0: self.deferred.callback(out) else: terminated_obj = status.value terminated_obj.stderr = err self.deferred.errback(terminated_obj) d = Deferred() environment = os.environ if env is not None: environment.update(env) logging.debug('ENV = %s' % environment) logging.debug('CMD = %s' % cmdlist) proc = reactor.spawnProcess(_EverythingGetter(d), executable=cmdlist[0], env=environment, args=cmdlist) if stdin: logging.debug('writing to stdin') proc.write(stdin) proc.closeStdin() return d def guess_mimetype(blob): """ uses file magic to determine the mime-type of the given data blob. :param blob: file content as read by file.read() :type blob: data :returns: mime-type, falls back to 'application/octet-stream' :rtype: str """ mimetype = 'application/octet-stream' m = magic.open(magic.MAGIC_MIME_TYPE) m.load() magictype = m.buffer(blob) # libmagic does not always return proper mimetype strings, cf. issue #459 if re.match(r'\w+\/\w+', magictype): mimetype = magictype return mimetype def guess_encoding(blob): """ uses file magic to determine the encoding of the given data blob. 
:param blob: file content as read by file.read() :type blob: data :returns: encoding :rtype: str """ m = magic.open(magic.MAGIC_MIME_ENCODING) m.load() return m.buffer(blob) # TODO: make this work on blobs, not paths def mimewrap(path, filename=None, ctype=None): content = open(path, 'rb').read() ctype = ctype or guess_mimetype(content) maintype, subtype = ctype.split('/', 1) if maintype == 'text': part = MIMEText(content.decode(guess_encoding(content), 'replace'), _subtype=subtype, _charset='utf-8') elif maintype == 'image': part = MIMEImage(content, _subtype=subtype) elif maintype == 'audio': part = MIMEAudio(content, _subtype=subtype) else: part = MIMEBase(maintype, subtype) part.set_payload(content) # Encode the payload using Base64 email.encoders.encode_base64(part) # Set the filename parameter if not filename: filename = os.path.basename(path) part.add_header('Content-Disposition', 'attachment', filename=filename) return part def shell_quote(text): r''' >>> print(shell_quote("hello")) 'hello' >>> print(shell_quote("hello'there")) 'hello'"'"'there' ''' return "'%s'" % text.replace("'", """'"'"'""") def tag_cmp(a, b): r''' Sorting tags using this function puts all tags of length 1 at the beginning. This groups all tags mapped to unicode characters. 
    '''
    # Length-1 tags (the ones mapped to single unicode "icon" characters)
    # compare by length first, so they group at the front; everything else
    # sorts case-insensitively.
    if min(len(a), len(b)) == 1 and max(len(a), len(b)) > 1:
        return cmp(len(a), len(b))
    else:
        return cmp(a.lower(), b.lower())


def humanize_size(size):
    r'''
    >>> humanize_size(1)
    '1'
    >>> humanize_size(123)
    '123'
    >>> humanize_size(1234)
    '1K'
    >>> humanize_size(1234 * 1024)
    '1.2M'
    >>> humanize_size(1234 * 1024 * 1024)
    '1234.0M'
    '''
    # Try units from bytes upward; if the value is >= 1024 of the largest
    # unit, fall through and render it with the last (MiB) format string --
    # hence the '1234.0M' doctest above.
    for factor, format_string in ((1, '%i'),
                                  (1024, '%iK'),
                                  (1024 * 1024, '%.1fM')):
        if size / factor < 1024:
            return format_string % (float(size) / factor)
    return format_string % (size / factor)


def parse_mailcap_nametemplate(tmplate='%s'):
    """this returns a prefix and suffix to be used
    in the tempfile module for a given mailcap nametemplate string"""
    nt_list = tmplate.split('%s')
    template_prefix = ''
    template_suffix = ''
    if len(nt_list) == 2:
        # Exactly one '%s' placeholder: text before it is the tempfile
        # prefix, text after it the suffix.
        template_suffix = nt_list[1]
        template_prefix = nt_list[0]
    else:
        # No placeholder (or several): treat the whole template as a suffix.
        template_suffix = tmplate
    return (template_prefix, template_suffix)
unknown
codeparrot/codeparrot-clean
from .state import EOF
from .tokens import TokenEof
from .tokens_base import TOKEN_COMMAND_WRITE_AND_QUIT_COMMAND
from .tokens_base import TokenOfCommand

from Vintageous import ex


# Long-form spellings accepted after '++' and their canonical option names.
plus_plus_translations = {
    'ff': 'fileformat',
    'bin': 'binary',
    'enc': 'fileencoding',
    'nobin': 'nobinary',
}


@ex.command('wq', 'wq')
class TokenWriteAndQuitCommand(TokenOfCommand):
    # Token for the ex ':wq' command; dispatches to 'ex_write_and_quit'.
    def __init__(self, params, *args, **kwargs):
        super().__init__(params,
                         TOKEN_COMMAND_WRITE_AND_QUIT_COMMAND,
                         'wq', *args, **kwargs)
        self.target_command = 'ex_write_and_quit'


def scan_command_write_and_quit_command(state):
    """Scan the argument portion of a ':wq' command.

    Returns a (None, [token, TokenEof()]) pair; ``params`` carries the
    parsed '++opt' (not implemented yet) and the optional file name.
    """
    params = {
        '++': None,
        'file': None,
    }

    c = state.consume()

    # Bare ':wq' with no arguments.
    if c == EOF:
        return None, [TokenWriteAndQuitCommand(params), TokenEof()]

    # BUG FIX: the original read `bang == c == '!'` -- a chained comparison
    # whose result was discarded, leaving `bang` undefined and raising
    # NameError on the next line. It must be an assignment.
    bang = c == '!'
    if not bang:
        state.backup()

    c = state.consume()
    if c == '+':
        state.expect('+')
        state.ignore()
        # TODO: expect_match should work with emit()
        # http://vimdoc.sourceforge.net/htmldoc/editing.html#[++opt]
        # NOTE(review): VimError and ERR_INVALID_ARGUMENT are not imported in
        # this module -- the lambda would itself raise NameError if the match
        # failed. Confirm and import from the errors module.
        m = state.expect_match(
            r'(?:f(?:ile)?f(?:ormat)?|(?:file)?enc(?:oding)?|(?:no)?bin(?:ary)?|bad|edit)(?=\s|$)',
            lambda: VimError(ERR_INVALID_ARGUMENT))
        name = m.group(0)
        params['++'] = plus_plus_translations.get(name, name)
        state.ignore()
        raise NotImplementedError('param not implemented')

    if c == EOF:
        return None, [TokenWriteAndQuitCommand(params), TokenEof()]

    # Everything remaining is the target file name.
    m = state.expect_match(r'.+$')
    params['file'] = m.group(0).strip()

    return None, [TokenWriteAndQuitCommand(params), TokenEof()]
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for checkpoints tools.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tensorflow as tf def _create_checkpoints(sess, checkpoint_dir): checkpoint_prefix = os.path.join(checkpoint_dir, "model") checkpoint_state_name = "checkpoint" v1 = tf.get_variable("var1", [1, 10]) v2 = tf.get_variable("var2", [10, 10]) v3 = tf.get_variable("var3", [100, 100]) with tf.variable_scope("useful_scope"): v4 = tf.get_variable("var4", [9, 9]) sess.run(tf.global_variables_initializer()) v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4]) saver = tf.train.Saver() saver.save(sess, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name) return v1_value, v2_value, v3_value, v4_value def _create_partition_checkpoints(sess, checkpoint_dir): checkpoint_prefix = os.path.join(checkpoint_dir, "model") checkpoint_state_name = "checkpoint" v1 = tf.get_variable( name="var1", shape=[100, 100], initializer=tf.truncated_normal_initializer(0.5), partitioner=tf.min_max_variable_partitioner(max_partitions=5, axis=0, min_slice_size=8 << 10)) sess.run(tf.global_variables_initializer()) v1_value = sess.run(v1._get_variable_list()) saver = tf.train.Saver() saver.save(sess, checkpoint_prefix, global_step=0, 
latest_filename=checkpoint_state_name) return v1_value class CheckpointsTest(tf.test.TestCase): def testNoCheckpoints(self): checkpoint_dir = self.get_temp_dir() + "/no_checkpoints" with self.assertRaises(tf.errors.OpError): self.assertAllEqual(tf.contrib.framework.load_variable( checkpoint_dir, "var1"), []) def testNoTensor(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: _, _, _, _ = _create_checkpoints(session, checkpoint_dir) with self.assertRaises(tf.errors.OpError): self.assertAllEqual(tf.contrib.framework.load_variable( checkpoint_dir, "var5"), []) def testGetTensor(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir) self.assertAllEqual(tf.contrib.framework.load_variable( checkpoint_dir, "var1"), v1) self.assertAllEqual(tf.contrib.framework.load_variable( checkpoint_dir, "var2"), v2) self.assertAllEqual(tf.contrib.framework.load_variable( checkpoint_dir, "var3"), v3) self.assertAllEqual( tf.contrib.framework.load_variable( checkpoint_dir, "useful_scope/var4"), v4) def testGetAllVariables(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: _create_checkpoints(session, checkpoint_dir) self.assertEqual(tf.contrib.framework.list_variables(checkpoint_dir), [("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]), ("var3", [100, 100])]) def testInitFromCheckpoint(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir) # New graph and session. 
with tf.Graph().as_default() as g: with self.test_session(graph=g) as session: with tf.variable_scope("some_scope"): my1 = tf.get_variable("my1", [1, 10]) with tf.variable_scope("some_other_scope"): my2 = tf.get_variable("my2", [10, 10]) with tf.variable_scope("other_useful_scope"): my4 = tf.get_variable("var4", [9, 9]) my3 = tf.get_variable("my3", [100, 100]) tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "var1": "some_scope/my1", "useful_scope/": "some_scope/some_other_scope/other_useful_scope/", }) tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "var2": "some_scope/some_other_scope/my2", "var3": my3, }) session.run(tf.global_variables_initializer()) self.assertAllEqual(my1.eval(session), v1) self.assertAllEqual(my2.eval(session), v2) self.assertAllEqual(my3.eval(session), v3) self.assertAllEqual(my4.eval(session), v4) # Check that tensors are not explicitly in the graph. self.assertLess(len(str(session.graph.as_graph_def())), 27000) def testInitFromRootCheckpoint(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir) # New graph and session. with tf.Graph().as_default() as g: with self.test_session(graph=g) as session: with tf.variable_scope("some_scope"): my1 = tf.get_variable("var1", [1, 10]) my2 = tf.get_variable("var2", [10, 10]) my3 = tf.get_variable("var3", [100, 100]) with tf.variable_scope("useful_scope"): my4 = tf.get_variable("var4", [9, 9]) tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "/": "some_scope/", }) session.run(tf.global_variables_initializer()) self.assertAllEqual(my1.eval(session), v1) self.assertAllEqual(my2.eval(session), v2) self.assertAllEqual(my3.eval(session), v3) self.assertAllEqual(my4.eval(session), v4) def testInitFromPartitionVar(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: v1 = _create_partition_checkpoints(session, checkpoint_dir) # New graph and session. 
with tf.Graph().as_default() as g: with self.test_session(graph=g) as session: with tf.variable_scope("some_scope"): my1 = tf.get_variable( name="my1", shape=[100, 100], initializer=tf.truncated_normal_initializer(0.5), partitioner=tf.min_max_variable_partitioner( max_partitions=5, axis=0, min_slice_size=8 << 10)) my1_var_list = my1._get_variable_list() tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "var1": "some_scope/my1", }) session.run(tf.global_variables_initializer()) my1_values = session.run(my1_var_list) self.assertAllEqual(my1_values, v1) # New graph and session. with tf.Graph().as_default() as g: with self.test_session(graph=g) as session: with tf.variable_scope("some_scope"): my1 = tf.get_variable( name="my1", shape=[100, 100], initializer=tf.truncated_normal_initializer(0.5), partitioner=tf.min_max_variable_partitioner( max_partitions=5, axis=0, min_slice_size=8 << 10)) my1_var_list = my1._get_variable_list() tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "var1": my1_var_list, }) session.run(tf.global_variables_initializer()) my1_values = session.run(my1_var_list) self.assertAllEqual(my1_values, v1) def testInitFromCheckpointMissing(self): checkpoint_dir = self.get_temp_dir() with self.test_session() as session: _, _, _, _ = _create_checkpoints(session, checkpoint_dir) # New graph and session. with tf.Graph().as_default() as g: with self.test_session(graph=g) as session: with tf.variable_scope("some_scope"): _ = tf.get_variable("my1", [10, 10]) _ = tf.get_variable("my2", [1, 10], dtype=tf.int64, initializer=tf.zeros_initializer) # No directory. with self.assertRaises(tf.errors.OpError): tf.contrib.framework.init_from_checkpoint("no_dir", { "var1": "some_scope/my1"}) # No variable in checkpoint. with self.assertRaises(ValueError): tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "no_var": "some_scope/my1"}) # No variable in the graph. 
with self.assertRaises(ValueError): tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "var3": "some_scope/no_var"}) # Shape mismatch. with self.assertRaises(ValueError): tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "var1": "some_scope/my1"}) # Variable 'my1' and 'my2' are missing in given checkpoint scope. with self.assertRaises(ValueError): tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "useful_scope/": "some_scope/"}) # Mapping is not to scope name. with self.assertRaises(ValueError): tf.contrib.framework.init_from_checkpoint(checkpoint_dir, { "useful_scope": "some_scope/"}) if __name__ == "__main__": tf.test.main()
unknown
codeparrot/codeparrot-clean
/* Copyright 2025 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This must be package main package main import ( "bytes" "encoding/json" "fmt" "golang.org/x/tools/go/analysis" "k8s.io/kubernetes/hack/tools/golangci-lint/sorted/pkg" ) type analyzerPlugin struct{} func (*analyzerPlugin) GetAnalyzers() []*analysis.Analyzer { return []*analysis.Analyzer{pkg.NewAnalyzer()} } // AnalyzerPlugin is the entry point for golangci-lint. var AnalyzerPlugin analyzerPlugin // settings defines the configuration options for the sorted linter type settings struct { // Debug enables debug logging Debug bool `json:"debug"` // Files specifies which files to check Files []string `json:"files"` } // List of default files to check for feature gate sorting var defaultTargetFiles = []string{ "pkg/features/kube_features.go", "staging/src/k8s.io/apiserver/pkg/features/kube_features.go", "staging/src/k8s.io/client-go/features/known_features.go", "staging/src/k8s.io/controller-manager/pkg/features/kube_features.go", "staging/src/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go", "test/e2e/feature/feature.go", "test/e2e/environment/environment.go", } // New is the entry point for golangci-lint plugin system func New(pluginSettings interface{}) ([]*analysis.Analyzer, error) { // Create default config config := pkg.Config{} // Parse settings if provided if pluginSettings != nil { var s settings // Convert settings to JSON and back to our struct for easier handling var buffer bytes.Buffer if err 
:= json.NewEncoder(&buffer).Encode(pluginSettings); err != nil { return nil, fmt.Errorf("encoding settings as internal JSON buffer: %v", err) } decoder := json.NewDecoder(&buffer) decoder.DisallowUnknownFields() if err := decoder.Decode(&s); err != nil { return nil, fmt.Errorf("decoding settings from internal JSON buffer: %v", err) } // Apply settings to config config.Debug = s.Debug config.Files = append(config.Files, s.Files...) if len(config.Files) == 0 { // If no files are specified, use the default target files config.Files = defaultTargetFiles } if config.Debug { fmt.Printf("sorted settings: %+v\n", s) fmt.Printf("final config: %+v\n", config) } } // Get the analyzer with config analyzer := pkg.NewAnalyzerWithConfig(config) // Return the analyzer return []*analysis.Analyzer{analyzer}, nil }
go
github
https://github.com/kubernetes/kubernetes
hack/tools/golangci-lint/sorted/plugin/plugin.go
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Stack analyzer module.""" from config import local_config from lib.clusterfuzz import stacktraces from platforms.android import kernel_utils as android_kernel from platforms.linux.lkl import kernel_utils as lkl_kernel from system import environment MAX_REDZONE_SIZE_FOR_OOMS_AND_HANGS = 64 def linkify_kernel_or_lkl_stacktrace_if_needed(crash_info): """Linkify Android Kernel or lkl stacktrace.""" kernel_prefix = '' kernel_hash = '' if (environment.is_android_kernel() and not environment.is_android_cuttlefish() and (crash_info.found_android_kernel_crash or crash_info.is_kasan)): kernel_prefix, kernel_hash = \ android_kernel.get_kernel_prefix_and_full_hash() elif (environment.is_lkl_job() and crash_info.is_lkl and crash_info.lkl_kernel_build_id): kernel_prefix, kernel_hash = \ lkl_kernel.get_kernel_prefix_and_full_hash(crash_info.lkl_kernel_build_id) if kernel_prefix and kernel_hash: _linkify_android_kernel_stacktrace(crash_info, kernel_prefix, kernel_hash) def _linkify_android_kernel_stacktrace(crash_info, kernel_prefix, kernel_hash): """Linkify Android Kernel or lkl stacktrace.""" temp_crash_stacktrace = '' for line in crash_info.crash_stacktrace.splitlines(): temp_crash_stacktrace += android_kernel.get_kernel_stack_frame_link( line, kernel_prefix, kernel_hash) + '\n' crash_info.crash_stacktrace = temp_crash_stacktrace def get_crash_data(crash_data, symbolize_flag=True, fuzz_target=None, 
already_symbolized=False, detect_ooms_and_hangs=None): """Get crash parameters from crash data. Crash parameters include crash type, address, state and stacktrace. If the stacktrace is not already symbolized, we will try to symbolize it unless |symbolize| flag is set to False. Symbolized stacktrace will contain inline frames, but we do exclude them for purposes of crash state generation (helps in testcase deduplication).""" # Decide whether to symbolize or not symbolize the input stacktrace. # Note that Fuchsia logs are always symbolized. if symbolize_flag: # Defer imports since stack_symbolizer pulls in a lot of things. from crash_analysis.stack_parsing import stack_symbolizer crash_stacktrace_with_inlines = stack_symbolizer.symbolize_stacktrace( crash_data, enable_inline_frames=True) crash_stacktrace_without_inlines = stack_symbolizer.symbolize_stacktrace( crash_data, enable_inline_frames=False) else: # We are explicitly indicated to not symbolize using |symbolize_flag|. There # is no distinction between inline and non-inline frames for an unsymbolized # stacktrace. crash_stacktrace_with_inlines = crash_data crash_stacktrace_without_inlines = crash_data # Additional stack frame ignore regexes. 
custom_stack_frame_ignore_regexes = ( local_config.ProjectConfig().get('stacktrace.stack_frame_ignore_regexes', [])) if environment.get_value('TASK_NAME') == 'analyze': detect_v8_runtime_errors = True else: detect_v8_runtime_errors = environment.get_value('DETECT_V8_RUNTIME_ERRORS', False) fuzz_target = fuzz_target or environment.get_value('FUZZ_TARGET') redzone_size = environment.get_value('REDZONE') if detect_ooms_and_hangs is None: detect_ooms_and_hangs = ( environment.get_value('REPORT_OOMS_AND_HANGS') and (not redzone_size or redzone_size <= MAX_REDZONE_SIZE_FOR_OOMS_AND_HANGS)) include_ubsan = 'halt_on_error=0' not in environment.get_value( 'UBSAN_OPTIONS', '') stack_parser = stacktraces.StackParser( symbolized=symbolize_flag or already_symbolized, detect_ooms_and_hangs=detect_ooms_and_hangs, detect_v8_runtime_errors=detect_v8_runtime_errors, custom_stack_frame_ignore_regexes=custom_stack_frame_ignore_regexes, fuzz_target=fuzz_target, include_ubsan=include_ubsan) result = stack_parser.parse(crash_stacktrace_without_inlines) # Use stacktrace with inlines for the result. if result.crash_stacktrace: result.crash_stacktrace = crash_stacktrace_with_inlines # Linkify Android Kernel or lkl stacktrace. linkify_kernel_or_lkl_stacktrace_if_needed(result) return result
unknown
codeparrot/codeparrot-clean
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A simple cross-platform helper to create an RPM package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import contextlib import fileinput import os import pprint import re import shutil import subprocess import sys import tempfile from string import Template from private import helpers # Setup to safely create a temporary directory and clean it up when done. @contextlib.contextmanager def Cd(newdir, cleanup=lambda: True): """Change the current working directory. This will run the provided cleanup function when the context exits and the previous working directory is restored. Args: newdir: The directory to change to. This must already exist. cleanup: An optional cleanup function to be executed when the context exits. Yields: Nothing. """ prevdir = os.getcwd() os.chdir(os.path.expanduser(newdir)) try: yield finally: os.chdir(prevdir) cleanup() @contextlib.contextmanager def Tempdir(): """Create a new temporary directory and change to it. The temporary directory will be removed when the context exits. Yields: The full path of the temporary directory. 
""" dirpath = tempfile.mkdtemp() def Cleanup(): shutil.rmtree(dirpath) with Cd(dirpath, Cleanup): yield dirpath WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE) def FindOutputFile(log): """Find the written file from the log information.""" m = WROTE_FILE_RE.search(log) if m: return m.group('rpm_path') return None def SlurpFile(input_path): with open(input_path, 'r') as input: return input.read() def CopyAndRewrite(input_file, output_file, replacements=None, template_replacements=None): """Copies the given file and optionally rewrites with replacements. Args: input_file: The file to copy. output_file: The file to write to. replacements: A dictionary of replacements. Keys are prefixes scan for, values are the replacements to write after the prefix. template_replacements: A dictionary of in-place replacements. Keys are variable names, values are replacements. Used with string.Template. """ with open(output_file, 'w') as output: for line in fileinput.input(input_file): if replacements: for prefix, text in replacements.items(): if line.startswith(prefix): line = prefix + ' ' + text + '\n' break if template_replacements: template = Template(line) line = template.safe_substitute(template_replacements) output.write(line) def IsExe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) def Which(program): """Search for the given program in the PATH. Args: program: The program to search for. Returns: The full path to the program. """ for path in os.environ['PATH'].split(os.pathsep): filename = os.path.join(path, program) if IsExe(filename): return filename return None class NoRpmbuildFoundError(Exception): pass class InvalidRpmbuildError(Exception): pass def FindRpmbuild(rpmbuild_path): """Finds absolute path to rpmbuild. Args: rpmbuild_path: path to the rpmbuild_binary. If None, find 'rpmbuild' by walking $PATH. 
""" if rpmbuild_path: if not rpmbuild_path.startswith(os.path.sep): return os.path.join(os.getcwd(), rpmbuild_path) return rpmbuild_path path = Which('rpmbuild') if path: return path raise NoRpmbuildFoundError() class RpmBuilder(object): """A helper class to manage building the RPM file.""" SOURCE_DIR = 'SOURCES' BUILD_DIR = 'BUILD' BUILDROOT_DIR = 'BUILDROOT' TEMP_DIR = 'TMP' RPMS_DIR = 'RPMS' DIRS = [SOURCE_DIR, BUILD_DIR, RPMS_DIR, TEMP_DIR] def __init__(self, name, version, release, arch, rpmbuild_path, source_date_epoch=None, debug=False): self.name = name self.version = helpers.GetFlagValue(version) self.release = helpers.GetFlagValue(release) self.arch = arch self.files = [] self.rpmbuild_path = FindRpmbuild(rpmbuild_path) self.rpm_path = None self.source_date_epoch = helpers.GetFlagValue(source_date_epoch) self.debug = debug # The below are initialized in SetupWorkdir() self.spec_file = None self.preamble_file = None self.description_file = None self.install_script_file = None self.file_list_path = None self.pre_scriptlet = None self.post_scriptlet = None self.preun_scriptlet = None self.postun_scriptlet = None def AddFiles(self, paths, root=''): """Add a set of files to the current RPM. If an item in paths is a directory, its files are recursively added. Args: paths: The files to add. root: The root of the filesystem to search for files. Defaults to ''. """ for path in paths: full_path = os.path.join(root, path) if os.path.isdir(full_path): self.AddFiles(os.listdir(full_path), full_path) else: self.files.append(full_path) def SetupWorkdir(self, spec_file, original_dir, preamble_file=None, description_file=None, install_script_file=None, pre_scriptlet_path=None, post_scriptlet_path=None, preun_scriptlet_path=None, postun_scriptlet_path=None, file_list_path=None): """Create the needed structure in the workdir.""" # Create the rpmbuild-expected directory structure. 
for name in RpmBuilder.DIRS: if not os.path.exists(name): os.makedirs(name, 0o777) # Copy the to-be-packaged files into the BUILD directory for f in self.files: dst_dir = os.path.join(RpmBuilder.BUILD_DIR, os.path.dirname(f)) if not os.path.exists(dst_dir): os.makedirs(dst_dir, 0o777) shutil.copy(os.path.join(original_dir, f), dst_dir) # The code below is related to assembling the RPM spec template and # everything else it needs to produce a valid RPM package. # # There two different types of substitution going on here: textual, directly # into the spec file, and macro; done when we call rpmbuild(8). # # Plans to clean this up are tracked in #209. # Slurp in the scriptlets... self.pre_scriptlet = \ SlurpFile(os.path.join(original_dir, pre_scriptlet_path)) if pre_scriptlet_path is not None else '' self.post_scriptlet = \ SlurpFile(os.path.join(original_dir, post_scriptlet_path)) if post_scriptlet_path is not None else '' self.preun_scriptlet = \ SlurpFile(os.path.join(original_dir, preun_scriptlet_path)) if preun_scriptlet_path is not None else '' self.postun_scriptlet = \ SlurpFile(os.path.join(original_dir, postun_scriptlet_path)) if postun_scriptlet_path is not None else '' # Then prepare for textual substitution. This is typically only the case for the # experimental `pkg_rpm`. tpl_replacements = { 'PRE_SCRIPTLET': "%pre\n" + self.pre_scriptlet, 'POST_SCRIPTLET': "%post\n" + self.post_scriptlet, 'PREUN_SCRIPTLET': "%preun\n" + self.preun_scriptlet, 'POSTUN_SCRIPTLET': "%postun\n" + self.postun_scriptlet, } # If the spec file has "Version" and "Release" tags specified in the spec # file's preamble, the values are filled in immediately afterward. These go # into "replacements". This is typically only the case for the "original" # `pkg_rpm`. # # The "tpl_replacements" are used for direct text substitution of scriptlets # into the spec file, typically only for the "experimental" `pkg_rpm`. 
spec_origin = os.path.join(original_dir, spec_file) self.spec_file = os.path.basename(spec_file) replacements = {} if self.version: replacements['Version:'] = self.version if self.release: replacements['Release:'] = self.release CopyAndRewrite(spec_origin, self.spec_file, replacements=replacements, template_replacements=tpl_replacements) # "Preamble" template substitutions. Currently only support values for the # "Version" and "Release" tags. # # This is only the case for `pkg_rpm` in experimental/rpm.bzl. # # This is substituted by rpmbuild(8) via macro expansion. if preamble_file: # Copy in the various other files needed to build the RPM self.preamble_file = os.path.basename(preamble_file) tpl_replacements = {} if self.version: tpl_replacements['VERSION_FROM_FILE'] = self.version if self.release: tpl_replacements['RELEASE_FROM_FILE'] = self.release CopyAndRewrite(os.path.join(original_dir, preamble_file), self.preamble_file, template_replacements=tpl_replacements) # The below are all copied into place within the RPM spec root. It may be # possible to directly some, if not all, of these out of the Bazel build # root instead. "file_list_path" may be the problematic one here, # as it must be there. # # These are substituted by rpmbuild(8) via macro expansion. 
# Used in %description if description_file: shutil.copy(os.path.join(original_dir, description_file), os.getcwd()) self.description_file = os.path.basename(description_file) # Used in %install if install_script_file: shutil.copy(os.path.join(original_dir, install_script_file), os.getcwd()) self.install_script_file = os.path.basename(install_script_file) # Used in %files -f if file_list_path: shutil.copy(os.path.join(original_dir, file_list_path), RpmBuilder.BUILD_DIR) self.file_list_path = os.path.join(RpmBuilder.BUILD_DIR, os.path.basename(file_list_path)) def CallRpmBuild(self, dirname, rpmbuild_args): """Call rpmbuild with the correct arguments.""" buildroot = os.path.join(dirname, RpmBuilder.BUILDROOT_DIR) # For reference, E121 is a hanging indent flake8 issue. It really wants # four space indents, but properly fixing that will require re-indenting the # entire file. # Further, the use of disabling yapf and friends is to allow argument names # to be associated with their values neatly. args = [ self.rpmbuild_path, # noqa: E121 ] if self.debug: args.append('-vv') # Common options args += [ '--define', '_topdir %s' % dirname, '--define', '_tmppath %s/TMP' % dirname, '--bb', '--buildroot=%s' % buildroot, ] # yapf: disable # Macro-based RPM parameter substitution, if necessary inputs provided. 
if self.preamble_file: args += ['--define', 'build_rpm_options %s' % self.preamble_file] if self.description_file: args += ['--define', 'build_rpm_description %s' % self.description_file] if self.install_script_file: args += ['--define', 'build_rpm_install %s' % self.install_script_file] if self.file_list_path: # %files -f is taken relative to the package root args += ['--define', 'build_rpm_files %s' % os.path.basename(self.file_list_path)] args.extend(rpmbuild_args) args.append(self.spec_file) env = { 'LANG': 'C', 'RPM_BUILD_ROOT': buildroot, } if self.source_date_epoch: env['SOURCE_DATE_EPOCH'] = self.source_date_epoch args += ["--define", "clamp_mtime_to_source_date_epoch Y"] if self.debug: print('Running rpmbuild as:', ' '.join(["'" + a + "'" for a in args])) print('With environment:') pprint.pprint(env) p = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) output = p.communicate()[0].decode() if p.returncode == 0: # Find the created file. self.rpm_path = FindOutputFile(output) if p.returncode != 0 or not self.rpm_path: print('Error calling rpmbuild:') print(output) elif self.debug: print(output) # Return the status. 
return p.returncode def SaveResult(self, out_file): """Save the result RPM out of the temporary working directory.""" if self.rpm_path: shutil.copy(self.rpm_path, out_file) if self.debug: print('Saved RPM file to %s' % out_file) else: print('No RPM file created.') def Build(self, spec_file, out_file, preamble_file=None, description_file=None, install_script_file=None, pre_scriptlet_path=None, post_scriptlet_path=None, preun_scriptlet_path=None, postun_scriptlet_path=None, file_list_path=None, rpmbuild_args=None): """Build the RPM described by the spec_file, with other metadata in keyword arguments""" if self.debug: print('Building RPM for %s at %s' % (self.name, out_file)) original_dir = os.getcwd() spec_file = os.path.join(original_dir, spec_file) out_file = os.path.join(original_dir, out_file) with Tempdir() as dirname: self.SetupWorkdir(spec_file, original_dir, preamble_file=preamble_file, description_file=description_file, install_script_file=install_script_file, file_list_path=file_list_path, pre_scriptlet_path=pre_scriptlet_path, post_scriptlet_path=post_scriptlet_path, preun_scriptlet_path=preun_scriptlet_path, postun_scriptlet_path=postun_scriptlet_path) status = self.CallRpmBuild(dirname, rpmbuild_args or []) self.SaveResult(out_file) return status def main(argv): parser = argparse.ArgumentParser( description='Helper for building rpm packages', fromfile_prefix_chars='@') parser.add_argument('--name', help='The name of the software being packaged.') parser.add_argument('--version', help='The version of the software being packaged.') parser.add_argument('--release', help='The release of the software being packaged.') parser.add_argument( '--arch', help='The CPU architecture of the software being packaged.') parser.add_argument('--spec_file', required=True, help='The file containing the RPM specification.') parser.add_argument('--out_file', required=True, help='The destination to save the resulting RPM file to.') parser.add_argument('--rpmbuild', help='Path 
to rpmbuild executable.') parser.add_argument('--source_date_epoch', help='Value for the SOURCE_DATE_EPOCH rpmbuild ' 'environment variable') parser.add_argument('--debug', action='store_true', default=False, help='Print debug messages.') # Options currently used experimental/rpm.bzl: parser.add_argument('--install_script', help='Installer script') parser.add_argument('--file_list', help='File containing a list of files to include with rpm spec %files -f') parser.add_argument('--preamble', help='File containing the RPM Preamble') parser.add_argument('--description', help='File containing the RPM %description text') parser.add_argument('--pre_scriptlet', help='File containing the RPM %pre scriptlet, if to be substituted') parser.add_argument('--post_scriptlet', help='File containing the RPM %post scriptlet, if to be substituted') parser.add_argument('--preun_scriptlet', help='File containing the RPM %preun scriptlet, if to be substituted') parser.add_argument('--postun_scriptlet', help='File containing the RPM %postun scriptlet, if to be substituted') parser.add_argument('--rpmbuild_arg', dest='rpmbuild_args', action='append', help='Any additional arguments to pass to rpmbuild') parser.add_argument('files', nargs='*') options = parser.parse_args(argv or ()) try: builder = RpmBuilder(options.name, options.version, options.release, options.arch, options.rpmbuild, source_date_epoch=options.source_date_epoch, debug=options.debug) builder.AddFiles(options.files) return builder.Build(options.spec_file, options.out_file, preamble_file=options.preamble, description_file=options.description, install_script_file=options.install_script, file_list_path=options.file_list, pre_scriptlet_path=options.pre_scriptlet, post_scriptlet_path=options.post_scriptlet, preun_scriptlet_path=options.preun_scriptlet, postun_scriptlet_path=options.postun_scriptlet, rpmbuild_args=options.rpmbuild_args) except NoRpmbuildFoundError: print('ERROR: rpmbuild is required but is not present in PATH') 
return 1 if __name__ == '__main__': main(sys.argv[1:]) # vim: ts=2:sw=2:
unknown
codeparrot/codeparrot-clean
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Flac(AutotoolsPackage): """Encoder/decoder for the Free Lossless Audio Codec""" homepage = "https://xiph.org/flac/index.html" url = "http://downloads.xiph.org/releases/flac/flac-1.3.2.tar.xz" version('1.3.2', '454f1bfa3f93cc708098d7890d0499bd') version('1.3.1', 'b9922c9a0378c88d3e901b234f852698') version('1.3.0', '13b5c214cee8373464d3d65dee362cdd') depends_on('libvorbis') depends_on('id3lib')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com> # Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com> # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ini_file short_description: Tweak settings in INI files extends_documentation_fragment: files description: - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, say, M(template) or M(assemble). Adds missing sections if they don't exist. - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. - Since version 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when no other modifications need to be applied. version_added: "0.9" options: path: description: - Path to the INI-style file; this file is created if required. - Before 2.3 this option was only usable as I(dest). aliases: [ dest ] required: true section: description: - Section name in INI file. This is added if C(state=present) automatically when a single value is being set. - If left empty or set to `null`, the I(option) will be placed before the first I(section). Using `null` is also required if the config format does not support sections. required: true option: description: - If set (required for changing a I(value)), this is the name of the option. - May be omitted if adding/removing a whole I(section). value: description: - The string value to be associated with an I(option). May be omitted when removing an I(option). 
backup: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. type: bool default: 'no' others: description: - All arguments accepted by the M(file) module also work here state: description: - If set to C(absent) the option or section will be removed if present instead of created. choices: [ absent, present ] default: present no_extra_spaces: description: - Do not insert spaces before and after '=' symbol type: bool default: 'no' version_added: "2.1" create: description: - If set to 'no', the module will fail if the file does not already exist. By default it will create the file if it is missing. type: bool default: 'yes' version_added: "2.2" notes: - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. author: - Jan-Piet Mens (@jpmens) - Ales Nosek (@noseka1) ''' EXAMPLES = ''' # Before 2.3, option 'dest' was used instead of 'path' - name: Ensure "fav=lemonade is in section "[drinks]" in specified file ini_file: path: /etc/conf section: drinks option: fav value: lemonade mode: 0600 backup: yes - ini_file: path: /etc/anotherconf section: drinks option: temperature value: cold backup: yes ''' import os import re from ansible.module_utils.basic import AnsibleModule def match_opt(option, line): option = re.escape(option) return re.match('( |\t)*%s( |\t)*=' % option, line) \ or re.match('#( |\t)*%s( |\t)*=' % option, line) \ or re.match(';( |\t)*%s( |\t)*=' % option, line) def match_active_opt(option, line): option = re.escape(option) return re.match('( |\t)*%s( |\t)*=' % option, line) def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False, create=True): diff = dict( before='', after='', before_header='%s (content)' % filename, after_header='%s (content)' % 
filename, ) if not os.path.exists(filename): if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' % filename) destpath = os.path.dirname(filename) if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) ini_lines = [] else: ini_file = open(filename, 'r') try: ini_lines = ini_file.readlines() finally: ini_file.close() if module._diff: diff['before'] = ''.join(ini_lines) changed = False # ini file could be empty if not ini_lines: ini_lines.append('\n') # last line of file may not contain a trailing newline if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': ini_lines[-1] += '\n' changed = True # append a fake section line to simplify the logic ini_lines.append('[') within_section = not section section_start = 0 msg = 'OK' if no_extra_spaces: assignment_format = '%s=%s\n' else: assignment_format = '%s = %s\n' for index, line in enumerate(ini_lines): if line.startswith('[%s]' % section): within_section = True section_start = index elif line.startswith('['): if within_section: if state == 'present': # insert missing option line at the end of the section for i in range(index, 0, -1): # search backwards for previous non-blank or non-comment line if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]): ini_lines.insert(i, assignment_format % (option, value)) msg = 'option added' changed = True break elif state == 'absent' and not option: # remove the entire section del ini_lines[section_start:index] msg = 'section removed' changed = True break else: if within_section and option: if state == 'present': # change the existing option line if match_opt(option, line): newline = assignment_format % (option, value) option_changed = ini_lines[index] != newline changed = changed or option_changed if option_changed: msg = 'option changed' ini_lines[index] = newline if option_changed: # remove all possible option occurrences from the rest of the section index = index + 1 while index < len(ini_lines): line = ini_lines[index] if 
line.startswith('['): break if match_active_opt(option, line): del ini_lines[index] else: index = index + 1 break elif state == 'absent': # delete the existing line if match_active_opt(option, line): del ini_lines[index] changed = True msg = 'option changed' break # remove the fake section line del ini_lines[-1:] if not within_section and option and state == 'present': ini_lines.append('[%s]\n' % section) ini_lines.append(assignment_format % (option, value)) changed = True msg = 'section and option added' if module._diff: diff['after'] = ''.join(ini_lines) backup_file = None if changed and not module.check_mode: if backup: backup_file = module.backup_local(filename) ini_file = open(filename, 'w') try: ini_file.writelines(ini_lines) finally: ini_file.close() return (changed, backup_file, diff, msg) def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='path', required=True, aliases=['dest']), section=dict(type='str', required=True), option=dict(type='str'), value=dict(type='str'), backup=dict(type='bool', default=False), state=dict(type='str', default='present', choices=['absent', 'present']), no_extra_spaces=dict(type='bool', default=False), create=dict(type='bool', default=True) ), add_file_common_args=True, supports_check_mode=True, ) path = module.params['path'] section = module.params['section'] option = module.params['option'] value = module.params['value'] state = module.params['state'] backup = module.params['backup'] no_extra_spaces = module.params['no_extra_spaces'] create = module.params['create'] (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create) if not module.check_mode and os.path.exists(path): file_args = module.load_file_common_arguments(module.params) changed = module.set_fs_attributes_if_different(file_args, changed) results = dict( changed=changed, diff=diff, msg=msg, path=path, ) if backup_file is not None: results['backup_file'] = backup_file # Mission 
complete module.exit_json(**results) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# BSD 3-Clause License # # Copyright (c) 2019, Elasticsearch BV # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
def init_execution_context():
    """Pick and return the ``execution_context`` backend for this runtime.

    When gevent or eventlet have monkey-patched ``threading.local``, the
    thread-local backend is assumed to be the most "green-thread safe"
    choice. Otherwise the contextvars backend is preferred, falling back to
    thread-locals on interpreters without ``contextvars``.
    """
    if threading_local_monkey_patched():
        from elasticapm.context.threadlocal import execution_context
    else:
        try:
            from elasticapm.context.contextvars import execution_context
        except ImportError:
            from elasticapm.context.threadlocal import execution_context
    return execution_context


def threading_local_monkey_patched():
    """Return True if thread locals were patched by gevent or eventlet."""
    # gevent: ask its monkey module whether threading.local was swapped out.
    try:
        from gevent.monkey import is_object_patched
    except ImportError:
        is_object_patched = None
    if is_object_patched is not None and is_object_patched("threading", "local"):
        return True

    # eventlet: same question via its patcher module.
    try:
        from eventlet.patcher import is_monkey_patched
    except ImportError:
        is_monkey_patched = None
    if is_monkey_patched is not None and is_monkey_patched("thread"):
        return True

    return False
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-

"""
***************************************************************************
    RasterOptionsWidget.py
    ---------------------
    Date                 : December 2016
    Copyright            : (C) 2016 by Alexander Bruy
    Email                : alexander dot bruy at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Alexander Bruy'
__date__ = 'December 2016'
__copyright__ = '(C) 2016, Alexander Bruy'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

from qgis.PyQt.QtWidgets import QLineEdit, QComboBox

from qgis.gui import QgsRasterFormatSaveOptionsWidget

from processing.core.parameters import ParameterString
from processing.core.outputs import OutputString
from processing.gui.wrappers import WidgetWrapper, DIALOG_MODELER, DIALOG_BATCH


class RasterOptionsWidgetWrapper(WidgetWrapper):
    """Parameter wrapper exposing raster creation options in three dialogs.

    The widget created depends on ``self.dialogType``:

    * modeler dialog  -- an editable QComboBox pre-filled with the string
      values/outputs available in the model;
    * batch dialog    -- a plain QLineEdit;
    * standard dialog -- QgsRasterFormatSaveOptionsWidget.
    """

    def createWidget(self):
        """Create and return the widget for the current dialog type."""
        if self.dialogType == DIALOG_MODELER:
            widget = QComboBox()
            widget.setEditable(True)
            strings = self.dialog.getAvailableValuesOfType(ParameterString, OutputString)
            options = [(self.dialog.resolveValueDescription(s), s) for s in strings]
            for desc, val in options:
                widget.addItem(desc, val)
            widget.setEditText(self.param.default or '')
            return widget
        elif self.dialogType == DIALOG_BATCH:
            widget = QLineEdit()
            if self.param.default:
                widget.setText(self.param.default)
            # FIX: previously the configured QLineEdit was never returned,
            # so the batch dialog received None instead of a widget.
            return widget
        else:
            return QgsRasterFormatSaveOptionsWidget()

    def setValue(self, value):
        """Push *value* into the widget; None is normalised to ''."""
        if value is None:
            value = ''
        if self.dialogType == DIALOG_MODELER:
            self.setComboValue(value)
        elif self.dialogType == DIALOG_BATCH:
            self.widget.setText(value)
        else:
            self.widget.setValue(value)

    def value(self):
        """Return the widget's current value.

        For the standard dialog this is the space-joined list of creation
        options reported by QgsRasterFormatSaveOptionsWidget.options().
        """
        if self.dialogType == DIALOG_MODELER:
            return self.comboValue()
        elif self.dialogType == DIALOG_BATCH:
            return self.widget.text()
        else:
            return ' '.join(self.widget.options())
unknown
codeparrot/codeparrot-clean
// Package local: stdio plumbing for processes managed through the Windows
// Host Compute Service (hcsshim).
package local

import (
	"io"
	"sync"

	"github.com/Microsoft/hcsshim"
	"github.com/moby/moby/v2/pkg/ioutils"
)

// autoClosingReader wraps a ReadCloser and closes it exactly once, the
// first time a Read returns any error (including io.EOF). The embedded
// sync.Once guarantees Close is not called twice even under concurrent
// Reads.
type autoClosingReader struct {
	io.ReadCloser
	sync.Once
}

// Read forwards to the wrapped ReadCloser; on the first error it closes the
// underlying reader. The error (often io.EOF) is still returned to the
// caller unchanged.
func (r *autoClosingReader) Read(b []byte) (int, error) {
	n, err := r.ReadCloser.Read(b)
	if err != nil {
		r.Once.Do(func() { r.ReadCloser.Close() })
	}
	return n, err
}

// createStdInCloser wraps the process's stdin pipe so that closing the
// returned WriteCloser also calls process.CloseStdin(). "Not exist" and
// "already closed" errors from hcsshim are swallowed, as is the
// invalid-state error seen while the compute system is shutting down; any
// other CloseStdin failure is reported to the caller.
func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser {
	return ioutils.NewWriteCloserWrapper(pipe, func() error {
		if err := pipe.Close(); err != nil {
			return err
		}

		err := process.CloseStdin()
		if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) {
			// This error will occur if the compute system is currently shutting down
			if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState {
				return err
			}
		}

		return nil
	})
}
go
github
https://github.com/moby/moby
daemon/internal/libcontainerd/local/process_windows.go
# -*- coding: utf-8 -*- # Copyright (c) 2015, ESS and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.model.document import Document from frappe.utils import getdate, cstr, get_link_to_form class LabTest(Document): def validate(self): if not self.is_new(): self.set_secondary_uom_result() def on_submit(self): self.validate_result_values() self.db_set('submitted_date', getdate()) self.db_set('status', 'Completed') def on_cancel(self): self.db_set('status', 'Cancelled') self.reload() def on_update(self): if self.sensitivity_test_items: sensitivity = sorted(self.sensitivity_test_items, key=lambda x: x.antibiotic_sensitivity) for i, item in enumerate(sensitivity): item.idx = i + 1 self.sensitivity_test_items = sensitivity def after_insert(self): if self.prescription: frappe.db.set_value('Lab Prescription', self.prescription, 'lab_test_created', 1) if frappe.db.get_value('Lab Prescription', self.prescription, 'invoiced'): self.invoiced = True if not self.lab_test_name and self.template: self.load_test_from_template() self.reload() def load_test_from_template(self): lab_test = self create_test_from_template(lab_test) self.reload() def set_secondary_uom_result(self): for item in self.normal_test_items: if item.result_value and item.secondary_uom and item.conversion_factor: try: item.secondary_uom_result = float(item.result_value) * float(item.conversion_factor) except: item.secondary_uom_result = '' frappe.msgprint(_('Row #{0}: Result for Secondary UOM not calculated'.format(item.idx)), title = _('Warning')) def validate_result_values(self): if self.normal_test_items: for item in self.normal_test_items: if not item.result_value and not item.allow_blank and item.require_result_value: frappe.throw(_('Row #{0}: Please enter the result value for {1}').format( item.idx, frappe.bold(item.lab_test_name)), title=_('Mandatory Results')) if self.descriptive_test_items: for item in 
self.descriptive_test_items: if not item.result_value and not item.allow_blank and item.require_result_value: frappe.throw(_('Row #{0}: Please enter the result value for {1}').format( item.idx, frappe.bold(item.lab_test_particulars)), title=_('Mandatory Results')) def create_test_from_template(lab_test): template = frappe.get_doc('Lab Test Template', lab_test.template) patient = frappe.get_doc('Patient', lab_test.patient) lab_test.lab_test_name = template.lab_test_name lab_test.result_date = getdate() lab_test.department = template.department lab_test.lab_test_group = template.lab_test_group lab_test.legend_print_position = template.legend_print_position lab_test.result_legend = template.result_legend lab_test.worksheet_instructions = template.worksheet_instructions lab_test = create_sample_collection(lab_test, template, patient, None) lab_test = load_result_format(lab_test, template, None, None) @frappe.whitelist() def update_status(status, name): if name and status: frappe.db.set_value('Lab Test', name, { 'status': status, 'approved_date': getdate() }) @frappe.whitelist() def create_multiple(doctype, docname): if not doctype or not docname: frappe.throw(_('Sales Invoice or Patient Encounter is required to create Lab Tests'), title=_('Insufficient Data')) lab_test_created = False if doctype == 'Sales Invoice': lab_test_created = create_lab_test_from_invoice(docname) elif doctype == 'Patient Encounter': lab_test_created = create_lab_test_from_encounter(docname) if lab_test_created: frappe.msgprint(_('Lab Test(s) {0} created successfully').format(lab_test_created), indicator='green') else: frappe.msgprint(_('No Lab Tests created')) def create_lab_test_from_encounter(encounter): lab_test_created = False encounter = frappe.get_doc('Patient Encounter', encounter) if encounter and encounter.lab_test_prescription: patient = frappe.get_doc('Patient', encounter.patient) for item in encounter.lab_test_prescription: if not item.lab_test_created: template = 
get_lab_test_template(item.lab_test_code) if template: lab_test = create_lab_test_doc(item.invoiced, encounter.practitioner, patient, template, encounter.company) lab_test.save(ignore_permissions = True) frappe.db.set_value('Lab Prescription', item.name, 'lab_test_created', 1) if not lab_test_created: lab_test_created = lab_test.name else: lab_test_created += ', ' + lab_test.name return lab_test_created def create_lab_test_from_invoice(sales_invoice): lab_tests_created = False invoice = frappe.get_doc('Sales Invoice', sales_invoice) if invoice and invoice.patient: patient = frappe.get_doc('Patient', invoice.patient) for item in invoice.items: lab_test_created = 0 if item.reference_dt == 'Lab Prescription': lab_test_created = frappe.db.get_value('Lab Prescription', item.reference_dn, 'lab_test_created') elif item.reference_dt == 'Lab Test': lab_test_created = 1 if lab_test_created != 1: template = get_lab_test_template(item.item_code) if template: lab_test = create_lab_test_doc(True, invoice.ref_practitioner, patient, template, invoice.company) if item.reference_dt == 'Lab Prescription': lab_test.prescription = item.reference_dn lab_test.save(ignore_permissions = True) if item.reference_dt != 'Lab Prescription': frappe.db.set_value('Sales Invoice Item', item.name, 'reference_dt', 'Lab Test') frappe.db.set_value('Sales Invoice Item', item.name, 'reference_dn', lab_test.name) if not lab_tests_created: lab_tests_created = lab_test.name else: lab_tests_created += ', ' + lab_test.name return lab_tests_created def get_lab_test_template(item): template_id = frappe.db.exists('Lab Test Template', {'item': item}) if template_id: return frappe.get_doc('Lab Test Template', template_id) return False def create_lab_test_doc(invoiced, practitioner, patient, template, company): lab_test = frappe.new_doc('Lab Test') lab_test.invoiced = invoiced lab_test.practitioner = practitioner lab_test.patient = patient.name lab_test.patient_age = patient.get_age() lab_test.patient_sex = 
patient.sex lab_test.email = patient.email lab_test.mobile = patient.mobile lab_test.report_preference = patient.report_preference lab_test.department = template.department lab_test.template = template.name lab_test.lab_test_group = template.lab_test_group lab_test.result_date = getdate() lab_test.company = company return lab_test def create_normals(template, lab_test): lab_test.normal_toggle = 1 normal = lab_test.append('normal_test_items') normal.lab_test_name = template.lab_test_name normal.lab_test_uom = template.lab_test_uom normal.secondary_uom = template.secondary_uom normal.conversion_factor = template.conversion_factor normal.normal_range = template.lab_test_normal_range normal.require_result_value = 1 normal.allow_blank = 0 normal.template = template.name def create_compounds(template, lab_test, is_group): lab_test.normal_toggle = 1 for normal_test_template in template.normal_test_templates: normal = lab_test.append('normal_test_items') if is_group: normal.lab_test_event = normal_test_template.lab_test_event else: normal.lab_test_name = normal_test_template.lab_test_event normal.lab_test_uom = normal_test_template.lab_test_uom normal.secondary_uom = normal_test_template.secondary_uom normal.conversion_factor = normal_test_template.conversion_factor normal.normal_range = normal_test_template.normal_range normal.require_result_value = 1 normal.allow_blank = normal_test_template.allow_blank normal.template = template.name def create_descriptives(template, lab_test): lab_test.descriptive_toggle = 1 if template.sensitivity: lab_test.sensitivity_toggle = 1 for descriptive_test_template in template.descriptive_test_templates: descriptive = lab_test.append('descriptive_test_items') descriptive.lab_test_particulars = descriptive_test_template.particulars descriptive.require_result_value = 1 descriptive.allow_blank = descriptive_test_template.allow_blank descriptive.template = template.name def create_sample_doc(template, patient, invoice, company = None): if 
template.sample: sample_exists = frappe.db.exists({ 'doctype': 'Sample Collection', 'patient': patient.name, 'docstatus': 0, 'sample': template.sample }) if sample_exists: # update sample collection by adding quantity sample_collection = frappe.get_doc('Sample Collection', sample_exists[0][0]) quantity = int(sample_collection.sample_qty) + int(template.sample_qty) if template.sample_details: sample_details = sample_collection.sample_details + '\n-\n' + _('Test: ') sample_details += (template.get('lab_test_name') or template.get('template')) + '\n' sample_details += _('Collection Details: ') + '\n\t' + template.sample_details frappe.db.set_value('Sample Collection', sample_collection.name, 'sample_details', sample_details) frappe.db.set_value('Sample Collection', sample_collection.name, 'sample_qty', quantity) else: # Create Sample Collection for template, copy vals from Invoice sample_collection = frappe.new_doc('Sample Collection') if invoice: sample_collection.invoiced = True sample_collection.patient = patient.name sample_collection.patient_age = patient.get_age() sample_collection.patient_sex = patient.sex sample_collection.sample = template.sample sample_collection.sample_uom = template.sample_uom sample_collection.sample_qty = template.sample_qty sample_collection.company = company if template.sample_details: sample_collection.sample_details = _('Test :') + (template.get('lab_test_name') or template.get('template')) + '\n' + 'Collection Detials:\n\t' + template.sample_details sample_collection.save(ignore_permissions=True) return sample_collection def create_sample_collection(lab_test, template, patient, invoice): if frappe.get_cached_value('Healthcare Settings', None, 'create_sample_collection_for_lab_test'): sample_collection = create_sample_doc(template, patient, invoice, lab_test.company) if sample_collection: lab_test.sample = sample_collection.name sample_collection_doc = get_link_to_form('Sample Collection', sample_collection.name) 
frappe.msgprint(_('Sample Collection {0} has been created').format(sample_collection_doc), title=_('Sample Collection'), indicator='green') return lab_test def load_result_format(lab_test, template, prescription, invoice): if template.lab_test_template_type == 'Single': create_normals(template, lab_test) elif template.lab_test_template_type == 'Compound': create_compounds(template, lab_test, False) elif template.lab_test_template_type == 'Descriptive': create_descriptives(template, lab_test) elif template.lab_test_template_type == 'Grouped': # Iterate for each template in the group and create one result for all. for lab_test_group in template.lab_test_groups: # Template_in_group = None if lab_test_group.lab_test_template: template_in_group = frappe.get_doc('Lab Test Template', lab_test_group.lab_test_template) if template_in_group: if template_in_group.lab_test_template_type == 'Single': create_normals(template_in_group, lab_test) elif template_in_group.lab_test_template_type == 'Compound': normal_heading = lab_test.append('normal_test_items') normal_heading.lab_test_name = template_in_group.lab_test_name normal_heading.require_result_value = 0 normal_heading.allow_blank = 1 normal_heading.template = template_in_group.name create_compounds(template_in_group, lab_test, True) elif template_in_group.lab_test_template_type == 'Descriptive': descriptive_heading = lab_test.append('descriptive_test_items') descriptive_heading.lab_test_name = template_in_group.lab_test_name descriptive_heading.require_result_value = 0 descriptive_heading.allow_blank = 1 descriptive_heading.template = template_in_group.name create_descriptives(template_in_group, lab_test) else: # Lab Test Group - Add New Line normal = lab_test.append('normal_test_items') normal.lab_test_name = lab_test_group.group_event normal.lab_test_uom = lab_test_group.group_test_uom normal.secondary_uom = lab_test_group.secondary_uom normal.conversion_factor = lab_test_group.conversion_factor normal.normal_range = 
lab_test_group.group_test_normal_range normal.allow_blank = lab_test_group.allow_blank normal.require_result_value = 1 normal.template = template.name if template.lab_test_template_type != 'No Result': if prescription: lab_test.prescription = prescription if invoice: frappe.db.set_value('Lab Prescription', prescription, 'invoiced', True) lab_test.save(ignore_permissions=True) # Insert the result return lab_test @frappe.whitelist() def get_employee_by_user_id(user_id): emp_id = frappe.db.exists('Employee', { 'user_id': user_id }) if emp_id: return frappe.get_doc('Employee', emp_id) return None @frappe.whitelist() def get_lab_test_prescribed(patient): return frappe.db.sql( ''' select lp.name, lp.lab_test_code, lp.parent, lp.invoiced, pe.practitioner, pe.practitioner_name, pe.encounter_date from `tabPatient Encounter` pe, `tabLab Prescription` lp where pe.patient=%s and lp.parent=pe.name and lp.lab_test_created=0 ''', (patient))
unknown
codeparrot/codeparrot-clean
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright
#      notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice,
#      this list of conditions and the following disclaimer in the documentation
#      and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils.connection import Connection


def _append_query(path, name, value):
    """Append ``name=value`` to *path* as a URI query parameter.

    Uses ``?`` for the first parameter and ``&`` for every subsequent one,
    as required by RFC 3986.
    """
    separator = '&' if '?' in path else '?'
    return path + separator + '%s=%s' % (name, value)


def get(module, path=None, content=None, fields=None, output='json'):
    """Issue a RESTCONF GET over the module's persistent connection.

    :param module: AnsibleModule with an open connection (``_socket_path``).
    :param path: RESTCONF data resource path; required.
    :param content: optional RFC 8040 ``content`` query parameter.
    :param fields: optional RFC 8040 fields expression (sent as ``field``).
    :param output: ``'json'`` (default) or ``'xml'``; selects the Accept header.
    :raises ValueError: if *path* is not supplied.
    """
    if path is None:
        raise ValueError('path value must be provided')
    if content:
        path = _append_query(path, 'content', content)
    if fields:
        # BUG FIX: a second '?' used to be appended here, producing an
        # invalid URI whenever both 'content' and 'fields' were supplied.
        path = _append_query(path, 'field', fields)
    accept = None
    if output == 'xml':
        accept = 'application/yang-data+xml'
    connection = Connection(module._socket_path)
    return connection.send_request(None, path=path, method='GET', accept=accept)


def edit_config(module, path=None, content=None, method='GET', format='json'):
    """Send a RESTCONF edit request carrying *content* as the body.

    :param module: AnsibleModule with an open connection (``_socket_path``).
    :param path: RESTCONF data resource path; required.
    :param content: request payload forwarded to ``send_request``.
    :param method: HTTP verb to use (caller supplies e.g. ``'PATCH'``).
    :param format: ``'json'`` (default) or ``'xml'``; selects Content-Type.
    :raises ValueError: if *path* is not supplied.
    """
    if path is None:
        raise ValueError('path value must be provided')
    content_type = None
    if format == 'xml':
        content_type = 'application/yang-data+xml'
    connection = Connection(module._socket_path)
    return connection.send_request(content, path=path, method=method,
                                   content_type=content_type)
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_ops.matrix_band_part against a NumPy reference."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test


class MatrixBandPartTest(test.TestCase):
  pass  # Filled in below


def _GetMatrixBandPartTest(dtype_, batch_shape_, shape_):
  """Return a test method comparing matrix_band_part with np.triu/np.tril.

  Args:
    dtype_: NumPy dtype of the test matrix.
    batch_shape_: leading batch dimensions, possibly ().
    shape_: trailing (rows, cols) matrix shape.
  """

  def Test(self):
    mat = np.ones(shape_).astype(dtype_)
    # BUG FIX: the body previously read the module-level loop variable
    # 'batch_shape' instead of the bound parameter 'batch_shape_'; due to
    # late binding every generated test silently used the loop's final value.
    batch_mat = np.tile(mat, batch_shape_ + (1, 1))
    with self.test_session(use_gpu=True):
      for lower in -1, 0, 1, shape_[-2] - 1:
        for upper in -1, 0, 1, shape_[-1] - 1:
          # NumPy reference: negative lower/upper mean "keep everything"
          # on that side, matching matrix_band_part's contract.
          band_np = mat
          if lower >= 0:
            band_np = np.triu(band_np, -lower)
          if upper >= 0:
            band_np = np.tril(band_np, upper)
          # Truthiness instead of the unreliable 'batch_shape is not ()'
          # identity test (tuple interning is an implementation detail).
          if batch_shape_:
            band_np = np.tile(band_np, batch_shape_ + (1, 1))
          band = array_ops.matrix_band_part(batch_mat, lower, upper)
          self.assertAllEqual(band_np, band.eval())

  return Test


class MatrixBandPartGradTest(test.TestCase):
  pass  # Filled in below


def _GetMatrixBandPartGradTest(dtype_, batch_shape_, shape_):
  """Return a test method checking matrix_band_part gradients numerically."""

  def Test(self):
    shape = batch_shape_ + shape_
    x = constant_op.constant(np.random.rand(*shape), dtype=dtype_)
    with self.test_session(use_gpu=True):
      for lower in -1, 0, 1, shape_[-2] - 1:
        for upper in -1, 0, 1, shape_[-1] - 1:
          y = array_ops.matrix_band_part(x, lower, upper)
          error = gradient_checker.compute_gradient_error(
              x, x.get_shape().as_list(), y, y.get_shape().as_list())
          self.assertLess(error, 1e-4)

  return Test


if __name__ == '__main__':
  for dtype in np.int32, np.int64, np.float32, np.float64:
    for batch_shape in ((), (2,), (1, 3, 2)):
      for rows in 1, 2, 7:
        for cols in 1, 2, 7:
          shape = (rows, cols)
          # BUG FIX: include the batch shape in the test name. Previously the
          # name only encoded (rows, cols), so tests generated for different
          # batch shapes shared a name and later setattr calls silently
          # overwrote earlier ones.
          name = '%s_%s' % (dtype.__name__,
                            '_'.join(map(str, batch_shape + shape)))
          setattr(MatrixBandPartTest, 'testMatrixBandPart_' + name,
                  _GetMatrixBandPartTest(dtype, batch_shape, shape))
          if dtype == np.float32 or dtype == np.float64:
            setattr(MatrixBandPartGradTest, 'testMatrixBandPartGrad_' + name,
                    _GetMatrixBandPartGradTest(dtype, batch_shape, shape))
  test.main()
unknown
codeparrot/codeparrot-clean
from urllib.request import urlopen from urllib.parse import urlencode, quote from json import dumps from codecs import encode from os import getenv from subprocess import check_output def post_n_pin(token, oid, message, link): ## Fallback method post_json = dumps({ "owner_id": oid, "from_group": 1, "message": message, "signed": 0, "attachments": link, "access_token": token }, ensure_ascii=False) pin_json = dumps({ "owner_id": oid, "access_token": token }) vkscript = """ var post = API.wall.post(%s); if (post != null && post.post_id != null) { var post_id = post.post_id; var pin_json = %s; pin_json.post_id = post_id; var result = API.wall.pin(pin_json); return [result, post]; } """ % (post_json, pin_json) data = urlencode({"code": vkscript, "V": "5.65", "access_token": token}, encoding='utf-8', quote_via=quote) return urlopen("https://api.vk.com/method/execute", data.encode('utf-8')) def post_n_pin_app(token, oid, message, link): ## Default publication method data = urlencode({"owner_id": oid, "message": message, "link": link, "access_token": token}).encode('utf-8') return urlopen("https://api.vk.com/method/execute.announce", data) atoken = getenv("VK_TOKEN") group_id = getenv("GROUP_ID") message = check_output(["git", "log", "-1", "--pretty=%B"]) result = post_n_pin_app(atoken, group_id, message, "https://github.com/sbt-community/Starbound_RU/releases/latest/") print(result.read())
unknown
codeparrot/codeparrot-clean
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest

from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder

from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc7229


class CertificatePolicyTestCase(unittest.TestCase):
    # PEM-encoded X.509 certificate fixture. Its certificatePolicies and
    # policyMappings extensions carry the RFC 7229 test OIDs counted below.
    pem_text = """\
MIIDJDCCAqqgAwIBAgIJAKWzVCgbsG5AMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMDEzMTkwNTUzWhcNMjAxMDEyMTkwNTUzWjBTMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xJTAjBgNVBAoTHFRF
U1QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
AATwUXZUseiOaqWdrClDCMbp9YFAM87LTmFirygpzKDU9cfqSCg7zBDIphXCwMcS
9zVWDoStCbcvN0jw5CljHcffzpHYX91P88SZRJ1w4hawHjOsWxvM3AkYgZ5nfdlL
7EajggFcMIIBWDAdBgNVHQ4EFgQU8jXbNATapVXyvWkDmbBi7OIVCMEwbwYDVR0j
BGgwZoAU8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQsw
CQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GC
CQDokdYGkU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgB
hvhCAQ0ENRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3Ig
YW55IHB1cnBvc2UuMCEGA1UdIAQaMBgwCgYIKwYBBQUHDQEwCgYIKwYBBQUHDQIw
CgYDVR02BAMCAQIwNQYDVR0hBC4wLDAUBggrBgEFBQcNAQYIKwYBBQUHDQcwFAYI
KwYBBQUHDQIGCCsGAQUFBw0IMAoGCCqGSM49BAMDA2gAMGUCMHaWskjS7MKQCMcn
zEKFOV3LWK8pL57vrECJd8ywKdwBJUNw9HhvSKkfUwL6rjlLpQIxAL2QO3CNoZRP
PZs8K3IjUA5+U73pA8lpaTOPscLY22WL9pAGmyVUyEJ8lM7E+r4iDg==
"""

    def setUp(self):
        # Decode target: a complete RFC 5280 Certificate structure.
        self.asn1Spec = rfc5280.Certificate()

    def testDerCodec(self):
        """Round-trip the certificate through the DER codec and count the
        RFC 7229 test policy OIDs found in its extensions."""
        # The eight OIDs reserved by RFC 7229 for PKIX testing.
        test_oids = [
            rfc7229.id_TEST_certPolicyOne,
            rfc7229.id_TEST_certPolicyTwo,
            rfc7229.id_TEST_certPolicyThree,
            rfc7229.id_TEST_certPolicyFour,
            rfc7229.id_TEST_certPolicyFive,
            rfc7229.id_TEST_certPolicySix,
            rfc7229.id_TEST_certPolicySeven,
            rfc7229.id_TEST_certPolicyEight,
        ]

        substrate = pem.readBase64fromText(self.pem_text)

        asn1Object, rest = der_decoder(
            substrate, asn1Spec=self.asn1Spec)

        # The whole substrate must decode, and re-encoding must reproduce
        # the original bytes exactly (DER is canonical).
        self.assertFalse(rest)
        self.assertTrue(asn1Object.prettyPrint())
        self.assertEqual(substrate, der_encoder(asn1Object))

        count = 0
        for extn in asn1Object['tbsCertificate']['extensions']:
            if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
                # Decode each known extension value and round-trip it too.
                s = extn['extnValue']
                ev, rest = der_decoder(
                    s, rfc5280.certificateExtensionsMap[extn['extnID']])
                self.assertFalse(rest)
                self.assertTrue(ev.prettyPrint())
                self.assertEqual(s, der_encoder(ev))

                # Count RFC 7229 test OIDs in certificatePolicies...
                if extn['extnID'] == rfc5280.id_ce_certificatePolicies:
                    for pol in ev:
                        if pol['policyIdentifier'] in test_oids:
                            count += 1

                # ...and in both sides of every policyMappings pair.
                if extn['extnID'] == rfc5280.id_ce_policyMappings:
                    for pmap in ev:
                        if pmap['issuerDomainPolicy'] in test_oids:
                            count += 1
                        if pmap['subjectDomainPolicy'] in test_oids:
                            count += 1

        # 2 policy identifiers + 2 mapping pairs (2 OIDs each) = 6 hits.
        self.assertEqual(6, count)


suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
unknown
codeparrot/codeparrot-clean
import base64 class Base64Converter: regex = r"[a-zA-Z0-9+/]*={0,2}" def to_python(self, value): return base64.b64decode(value) def to_url(self, value): return base64.b64encode(value).decode("ascii") class DynamicConverter: _dynamic_to_python = None _dynamic_to_url = None @property def regex(self): return r"[0-9a-zA-Z]+" @regex.setter def regex(self): raise Exception("You can't modify the regular expression.") def to_python(self, value): return type(self)._dynamic_to_python(value) def to_url(self, value): return type(self)._dynamic_to_url(value) @classmethod def register_to_python(cls, value): cls._dynamic_to_python = value @classmethod def register_to_url(cls, value): cls._dynamic_to_url = value
python
github
https://github.com/django/django
tests/urlpatterns/converters.py
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- from __future__ import absolute_import class AbstractBackboneStorage(object): """ Abstract base class for Backbone model stores. Modeled after Redis API. """ def mget(self, doc_keys): raise NotImplementedError("abstract method") def mset(self, data): raise NotImplementedError("abstract method") def sadd(self, doc_key, *keys): raise NotImplementedError("abstract method") def srem(self, doc_key, member_key): raise NotImplementedError("abstract method") def smembers(self, doc_key): raise NotImplementedError("abstract method") def set(self, key, data): raise NotImplementedError("abstract method") def delete(self, key): raise NotImplementedError("abstract method")
unknown
codeparrot/codeparrot-clean
""" A HTML 4.0 target. """ from targets import _ import targets from config import HTML_LOWER NAME = _('HTML page') TYPE = 'html' HEADER = """\ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <HTML> <HEAD> <META NAME="generator" CONTENT="http://txt2tags.org"> <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s"> <LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s"> <TITLE>%(HEADER1)s</TITLE> </HEAD><BODY BGCOLOR="white" TEXT="black"> <CENTER> <H1>%(HEADER1)s</H1> <FONT SIZE="4"><I>%(HEADER2)s</I></FONT><BR> <FONT SIZE="4">%(HEADER3)s</FONT> </CENTER> """ HEADERCSS = """\ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <HTML> <HEAD> <META NAME="generator" CONTENT="http://txt2tags.org"> <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=%(ENCODING)s"> <LINK REL="stylesheet" TYPE="text/css" HREF="%(STYLE)s"> <TITLE>%(HEADER1)s</TITLE> </HEAD> <BODY> <DIV CLASS="header" ID="header"> <H1>%(HEADER1)s</H1> <H2>%(HEADER2)s</H2> <H3>%(HEADER3)s</H3> </DIV> """ TAGS = { 'paragraphOpen' : '<P>' , 'paragraphClose' : '</P>' , 'title1' : '<H1~A~>\a</H1>' , 'title2' : '<H2~A~>\a</H2>' , 'title3' : '<H3~A~>\a</H3>' , 'title4' : '<H4~A~>\a</H4>' , 'title5' : '<H5~A~>\a</H5>' , 'anchor' : ' ID="\a"', 'blockVerbOpen' : '<PRE>' , 'blockVerbClose' : '</PRE>' , 'blockQuoteOpen' : '<BLOCKQUOTE>' , 'blockQuoteClose' : '</BLOCKQUOTE>' , 'fontMonoOpen' : '<CODE>' , 'fontMonoClose' : '</CODE>' , 'fontBoldOpen' : '<B>' , 'fontBoldClose' : '</B>' , 'fontItalicOpen' : '<I>' , 'fontItalicClose' : '</I>' , 'fontUnderlineOpen' : '<U>' , 'fontUnderlineClose' : '</U>' , 'fontStrikeOpen' : '<S>' , 'fontStrikeClose' : '</S>' , 'listOpen' : '<UL>' , 'listClose' : '</UL>' , 'listItemOpen' : '<LI>' , 'numlistOpen' : '<OL>' , 'numlistClose' : '</OL>' , 'numlistItemOpen' : '<LI>' , 'deflistOpen' : '<DL>' , 'deflistClose' : '</DL>' , 'deflistItem1Open' : '<DT>' , 'deflistItem1Close' : '</DT>' , 'deflistItem2Open' : '<DD>' , 'bar1' : '<HR NOSHADE 
SIZE=1>' , 'bar2' : '<HR NOSHADE SIZE=5>' , 'url' : '<A HREF="\a">\a</A>' , 'urlMark' : '<A HREF="\a">\a</A>' , 'email' : '<A HREF="mailto:\a">\a</A>' , 'emailMark' : '<A HREF="mailto:\a">\a</A>' , 'img' : '<IMG~A~ SRC="\a" BORDER="0" ALT="">', 'imgEmbed' : '<IMG~A~ SRC="\a" BORDER="0" ALT="">', '_imgAlignLeft' : ' ALIGN="left"' , '_imgAlignCenter' : ' ALIGN="middle"', '_imgAlignRight' : ' ALIGN="right"' , 'tableOpen' : '<TABLE~A~~B~ CELLPADDING="4">', 'tableClose' : '</TABLE>' , 'tableRowOpen' : '<TR>' , 'tableRowClose' : '</TR>' , 'tableCellOpen' : '<TD~A~~S~>' , 'tableCellClose' : '</TD>' , 'tableTitleCellOpen' : '<TH~S~>' , 'tableTitleCellClose' : '</TH>' , '_tableBorder' : ' BORDER="1"' , '_tableAlignCenter' : ' ALIGN="center"', '_tableCellAlignRight' : ' ALIGN="right"' , '_tableCellAlignCenter': ' ALIGN="center"', '_tableCellColSpan' : ' COLSPAN="\a"' , 'cssOpen' : '<STYLE TYPE="text/css">', 'cssClose' : '</STYLE>' , 'comment' : '<!-- \a -->' , 'EOD' : '</BODY></HTML>' } if targets.CONF['css-sugar']: # Table with no cellpadding TAGS['tableOpen'] = TAGS['tableOpen'].replace(' CELLPADDING="4"', '') # DIVs TAGS['tocOpen'] = '<DIV CLASS="toc">' TAGS['tocClose'] = '</DIV>' TAGS['bodyOpen'] = '<DIV CLASS="body" ID="body">' TAGS['bodyClose'] = '</DIV>' # Some like HTML tags as lowercase, some don't... 
(headers out) if HTML_LOWER: for tag in TAGS: TAGS[tag] = TAGS[tag].lower() RULES = { 'escapexmlchars': 1, 'indentverbblock': 1, 'linkable': 1, 'stylable': 1, 'escapeurl': 1, 'imglinkable': 1, 'imgalignable': 1, 'imgasdefterm': 1, 'autonumberlist': 1, 'spacedlistitem': 1, 'parainsidelist': 1, 'tableable': 1, 'tablecellstrip': 1, 'breaktablecell': 1, 'breaktablelineopen': 1, 'keeplistindent': 1, 'keepquoteindent': 1, 'barinsidequote': 1, 'autotocwithbars': 1, 'tablecellspannable': 1, 'tablecellaligntype': 'cell', # 'blanksaroundpara': 1, 'blanksaroundverb': 1, # 'blanksaroundquote': 1, 'blanksaroundlist': 1, 'blanksaroundnumlist': 1, 'blanksarounddeflist': 1, 'blanksaroundtable': 1, 'blanksaroundbar': 1, 'blanksaroundtitle': 1, 'blanksaroundnumtitle': 1, 'confdependenttags':1, 'confdependentrules':1, } if targets.CONF['css-sugar']: RULES['indentverbblock'] = 0 RULES['autotocwithbars'] = 0
unknown
codeparrot/codeparrot-clean
# Test: random name: pandas-dev channels: - conda-forge dependencies: - python=3.8 # build dependencies - versioneer[toml] - cython>=0.29.32 # test dependencies - pytest>=7.3.2 - pytest-cov - pytest-xdist>=3.4.0 - psutil - boto3 # required dependencies - python-dateutil - numpy - pytz # optional dependencies - beautifulsoup4 - bottleneck>=1.3.2 - fastparquet>=0.6.3 - fsspec>=2021.07.0 - html5lib>=1.1 - hypothesis - gcsfs>=2021.07.0 - jinja2 - lxml>=4.6.3 - matplotlib>=3.6.1 - numba - numexpr>=2.7.3 - openpyxl>=3.0.7 - odfpy>=1.4.1 - psycopg2 - pyarrow<11, >=7.0.0 - pymysql>=1.1.0 - pyreadstat>=1.1.2 - pytables>=3.6.1 - python-calamine>=0.1.7 - pyxlsb>=1.0.8 - s3fs>=2021.08.0 - scipy>=1.7.1 - sqlalchemy>=1.4.16 - tabulate>=0.8.9 - xarray>=0.21.0 - xlrd>=2.0.1 - xlsxwriter>=1.4.3 - zstandard>=0.15.2
unknown
github
https://github.com/pandas-dev/pandas
scripts/tests/data/deps_unmodified_random.yaml
/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cronjob import ( "context" "fmt" "reflect" "sort" "strings" "testing" "time" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2/ktesting" "k8s.io/utils/ptr" _ "k8s.io/kubernetes/pkg/apis/batch/install" _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/controller" ) var ( shortDead int64 = 10 mediumDead int64 = 2 * 60 * 60 longDead int64 = 1000000 noDead int64 = -12345 errorSchedule = "obvious error schedule" // schedule is hourly on the hour onTheHour = "0 * * * ?" everyHour = "@every 1h" errorTimeZone = "bad timezone" newYork = "America/New_York" ) // returns a cronJob with some fields filled in. 
// cronJob returns a CronJob fixture with some fields filled in: a fixed
// name/namespace/UID, an "every minute" schedule, Allow concurrency, and a
// job template whose spec comes from jobSpec(). Its creation timestamp is
// justBeforeTheHour().
func cronJob() batchv1.CronJob {
	return batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "mycronjob",
			Namespace:         "snazzycats",
			UID:               types.UID("1a2b3c"),
			CreationTimestamp: metav1.Time{Time: justBeforeTheHour()},
		},
		Spec: batchv1.CronJobSpec{
			Schedule:          "* * * * ?",
			ConcurrencyPolicy: "Allow",
			JobTemplate: batchv1.JobTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels:      map[string]string{"a": "b"},
					Annotations: map[string]string{"x": "y"},
				},
				Spec: jobSpec(),
			},
		},
	}
}

// jobSpec returns a minimal single-pod JobSpec (parallelism and completions
// both 1) used as the template for the CronJob fixtures.
func jobSpec() batchv1.JobSpec {
	one := int32(1)
	return batchv1.JobSpec{
		Parallelism: &one,
		Completions: &one,
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					"foo": "bar",
				},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Image: "foo/bar"},
				},
			},
		},
	}
}

// The helpers below return fixed instants around 2016-05-19 10:00 UTC.
// They panic on parse failure because a failure here is a test-setup bug,
// not a condition the tests should handle.

// justASecondBeforeTheHour is 09:59:59 UTC — one second before the hour.
func justASecondBeforeTheHour() time.Time {
	T1, err := time.Parse(time.RFC3339, "2016-05-19T09:59:59Z")
	if err != nil {
		panic("test setup error")
	}
	return T1
}

// justAfterThePriorHour is 09:01:00 UTC — one minute after the prior hour.
func justAfterThePriorHour() time.Time {
	T1, err := time.Parse(time.RFC3339, "2016-05-19T09:01:00Z")
	if err != nil {
		panic("test setup error")
	}
	return T1
}

// justBeforeThePriorHour is 08:59:00 UTC — one minute before the prior hour.
func justBeforeThePriorHour() time.Time {
	T1, err := time.Parse(time.RFC3339, "2016-05-19T08:59:00Z")
	if err != nil {
		panic("test setup error")
	}
	return T1
}

// justAfterTheHour is 10:01:00 UTC — one minute after the hour. Returned as
// a pointer because the test table dereferences it in place.
func justAfterTheHour() *time.Time {
	T1, err := time.Parse(time.RFC3339, "2016-05-19T10:01:00Z")
	if err != nil {
		panic("test setup error")
	}
	return &T1
}

// justAfterTheHourInZone parses 10:01:00 in the given IANA zone name,
// panicking if the zone or timestamp is invalid.
//
// NOTE(review): the timestamp carries an explicit "Z" offset, and
// time.ParseInLocation only falls back to the supplied location when the
// string has no zone indicator — so the parsed instant is pinned to UTC
// regardless of tz. Confirm this is the intended behavior for the
// "is time in zone" cases.
func justAfterTheHourInZone(tz string) time.Time {
	location, err := time.LoadLocation(tz)
	if err != nil {
		panic("tz error: " + err.Error())
	}
	T1, err := time.ParseInLocation(time.RFC3339, "2016-05-19T10:01:00Z", location)
	if err != nil {
		panic("test setup error: " + err.Error())
	}
	return T1
}

// justBeforeTheHour is 09:59:00 UTC — one minute before the hour.
func justBeforeTheHour() time.Time {
	T1, err := time.Parse(time.RFC3339, "2016-05-19T09:59:00Z")
	if err != nil {
		panic("test setup error")
	}
	return T1
}

// justBeforeTheNextHour is 10:59:00 UTC — one minute before the next hour.
func justBeforeTheNextHour() time.Time {
	T1, err := time.Parse(time.RFC3339, "2016-05-19T10:59:00Z")
	if err != nil {
		panic("test setup error")
	}
return T1 } func weekAfterTheHour() time.Time { T1, err := time.Parse(time.RFC3339, "2016-05-26T10:00:00Z") if err != nil { panic("test setup error") } return T1 } func TestControllerV2SyncCronJob(t *testing.T) { // Check expectations on deadline parameters if shortDead/60/60 >= 1 { t.Errorf("shortDead should be less than one hour") } if mediumDead/60/60 < 1 || mediumDead/60/60 >= 24 { t.Errorf("mediumDead should be between one hour and one day") } if longDead/60/60/24 < 10 { t.Errorf("longDead should be at least ten days") } testCases := map[string]struct { // cj spec concurrencyPolicy batchv1.ConcurrencyPolicy suspend bool schedule string timeZone *string deadline int64 successfulJobsHistoryLimit *int32 // cj status ranPreviously bool stillActive bool // environment cronjobCreationTime time.Time jobCreationTime time.Time lastScheduleTime time.Time now time.Time jobCreateError error jobGetErr error // expectations expectCreate bool expectDelete bool expectCompleted bool expectActive int expectedWarnings int expectErr bool expectRequeueAfter bool expectedRequeueDuration time.Duration expectUpdateStatus bool jobStillNotFoundInLister bool jobPresentInCJActiveStatus bool }{ "never ran, not valid schedule, A": { concurrencyPolicy: "Allow", schedule: errorSchedule, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectedWarnings: 1, jobPresentInCJActiveStatus: true, }, "never ran, not valid schedule, F": { concurrencyPolicy: "Forbid", schedule: errorSchedule, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectedWarnings: 1, jobPresentInCJActiveStatus: true, }, "never ran, not valid schedule, R": { concurrencyPolicy: "Forbid", schedule: errorSchedule, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectedWarnings: 1, jobPresentInCJActiveStatus: true, }, "never ran, not valid time zone": { concurrencyPolicy: "Allow", schedule: onTheHour, timeZone: 
&errorTimeZone, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectedWarnings: 1, jobPresentInCJActiveStatus: true, }, "never ran, not time, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true}, "never ran, not time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "never ran, not time, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "never ran, not time in zone": { concurrencyPolicy: "Allow", schedule: onTheHour, timeZone: &newYork, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "never ran, is time, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "never ran, is time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, 
expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "never ran, is time, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "never ran, is time in zone, but time zone disabled": { concurrencyPolicy: "Allow", schedule: onTheHour, timeZone: &newYork, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justAfterTheHourInZone(newYork), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "never ran, is time in zone": { concurrencyPolicy: "Allow", schedule: onTheHour, timeZone: &newYork, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justAfterTheHourInZone(newYork), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "never ran, is time in zone, but TZ is also set in schedule": { concurrencyPolicy: "Allow", schedule: "TZ=UTC " + onTheHour, timeZone: &newYork, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: justAfterTheHourInZone(newYork), expectCreate: true, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "never ran, is time, suspended": { concurrencyPolicy: "Allow", suspend: true, schedule: onTheHour, deadline: noDead, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), jobPresentInCJActiveStatus: true, }, "never ran, is time, past deadline": { concurrencyPolicy: "Allow", schedule: 
onTheHour, deadline: shortDead, jobCreationTime: justAfterThePriorHour(), now: justAfterTheHour().Add(time.Minute * time.Duration(shortDead+1)), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Minute*time.Duration(shortDead+1) + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "never ran, is time, not past deadline": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: longDead, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "prev ran but done, not time, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, not time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, not time, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), 
expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, create job failed, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), jobCreateError: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, ""), expectErr: false, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, job not present in CJ active status, create job failed, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), jobCreateError: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, ""), expectErr: false, expectUpdateStatus: true, jobPresentInCJActiveStatus: false, }, "prev ran but done, is time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, suspended": { concurrencyPolicy: "Allow", suspend: true, schedule: 
onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, past deadline": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: shortDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, is time, not past deadline": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: longDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "still active, not time, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "still active, not time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "still active, not time, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: justBeforeTheHour(), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 
1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "still active, is time, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 2, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "still active, is time, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "still active, is time, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectDelete: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "still active, is time, get job failed, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), jobGetErr: errors.NewBadRequest("request is invalid"), expectActive: 1, expectedWarnings: 1, jobPresentInCJActiveStatus: true, }, "still active, is time, suspended": { concurrencyPolicy: "Allow", suspend: true, schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectActive: 1, jobPresentInCJActiveStatus: true, }, "still active, is time, past deadline": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: 
shortDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "still active, is time, not past deadline": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 2, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, // Controller should fail to schedule these, as there are too many missed starting times // and either no deadline or a too long deadline. "prev ran but done, long overdue, not past deadline, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: longDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, not past deadline, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: longDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, not past deadline, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: longDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, 
expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, no deadline, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, no deadline, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, no deadline, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, past medium deadline, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: mediumDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, past short deadline, A": { concurrencyPolicy: "Allow", 
schedule: onTheHour, deadline: shortDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, past medium deadline, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: mediumDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, past short deadline, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: shortDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, past medium deadline, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: mediumDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "prev ran but done, long overdue, past short deadline, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: shortDead, ranPreviously: true, jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, expectUpdateStatus: true, 
jobPresentInCJActiveStatus: true, expectCompleted: true, }, // Tests for time skews // the controller sees job is created, takes no actions "this ran but done, time drifted back, F": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: noDead, ranPreviously: true, jobCreationTime: *justAfterTheHour(), now: justBeforeTheHour(), jobCreateError: errors.NewAlreadyExists(schema.GroupResource{Resource: "jobs", Group: "batch"}, ""), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, }, // Tests for slow job lister "this started but went missing, not past deadline, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: topOfTheHour().Add(time.Millisecond * 100), now: justAfterTheHour().Add(time.Millisecond * 100), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Millisecond*100 + nextScheduleDelta, jobStillNotFoundInLister: true, jobPresentInCJActiveStatus: true, }, "this started but went missing, not past deadline, f": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: topOfTheHour().Add(time.Millisecond * 100), now: justAfterTheHour().Add(time.Millisecond * 100), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Millisecond*100 + nextScheduleDelta, jobStillNotFoundInLister: true, jobPresentInCJActiveStatus: true, }, "this started but went missing, not past deadline, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: topOfTheHour().Add(time.Millisecond * 100), now: justAfterTheHour().Add(time.Millisecond * 100), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Millisecond*100 + nextScheduleDelta, 
jobStillNotFoundInLister: true, jobPresentInCJActiveStatus: true, }, // Tests for slow cronjob list "this started but is not present in cronjob active list, not past deadline, A": { concurrencyPolicy: "Allow", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: topOfTheHour().Add(time.Millisecond * 100), now: justAfterTheHour().Add(time.Millisecond * 100), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Millisecond*100 + nextScheduleDelta, }, "this started but is not present in cronjob active list, not past deadline, f": { concurrencyPolicy: "Forbid", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: topOfTheHour().Add(time.Millisecond * 100), now: justAfterTheHour().Add(time.Millisecond * 100), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Millisecond*100 + nextScheduleDelta, }, "this started but is not present in cronjob active list, not past deadline, R": { concurrencyPolicy: "Replace", schedule: onTheHour, deadline: longDead, ranPreviously: true, stillActive: true, jobCreationTime: topOfTheHour().Add(time.Millisecond * 100), now: justAfterTheHour().Add(time.Millisecond * 100), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute - time.Millisecond*100 + nextScheduleDelta, }, // Tests for @every-style schedule "with @every schedule, never ran, not time": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, cronjobCreationTime: justBeforeTheHour(), jobCreationTime: justBeforeTheHour(), now: *topOfTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "with @every schedule, never ran, is time": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, cronjobCreationTime: justBeforeThePriorHour(), 
jobCreationTime: justBeforeThePriorHour(), now: justBeforeTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour + nextScheduleDelta, jobPresentInCJActiveStatus: true, expectCreate: true, expectActive: 1, expectUpdateStatus: true, }, "with @every schedule, never ran, is time, past deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: shortDead, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeThePriorHour(), now: justBeforeTheHour().Add(time.Second * time.Duration(shortDead+1)), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead+1) + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "with @every schedule, never ran, is time, not past deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: longDead, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeThePriorHour(), now: justBeforeTheHour().Add(time.Second * time.Duration(shortDead-1)), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead-1) + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "with @every schedule, prev ran but done, not time": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, ranPreviously: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeThePriorHour(), lastScheduleTime: justBeforeTheHour(), now: *topOfTheHour(), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "with @every schedule, prev ran but done, is time": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, ranPreviously: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeThePriorHour(), lastScheduleTime: justBeforeTheHour(), now: 
topOfTheHour().Add(1 * time.Hour), expectCreate: true, expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "with @every schedule, prev ran but done, is time, past deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: shortDead, ranPreviously: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeThePriorHour(), lastScheduleTime: justBeforeTheHour(), now: justBeforeTheNextHour().Add(time.Second * time.Duration(shortDead+1)), expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead+1) + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, // This test will fail: the logic around StartingDeadlineSecond in getNextScheduleTime messes up // the time that calculating schedule.Next(earliestTime) is based on. While this works perfectly // well for classic cron scheduled, with @every X, schedule.Next(earliestTime) just returns the time // offset by X relative to the earliestTime. 
// "with @every schedule, prev ran but done, is time, not past deadline": { // concurrencyPolicy: "Allow", // schedule: everyHour, // deadline: shortDead, // ranPreviously: true, // cronjobCreationTime: justBeforeThePriorHour(), // jobCreationTime: justBeforeThePriorHour(), // lastScheduleTime: justBeforeTheHour(), // now: justBeforeTheNextHour().Add(time.Second * time.Duration(shortDead-1)), // expectCreate: true, // expectActive: 1, // expectRequeueAfter: true, // expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead-1) + nextScheduleDelta, // expectUpdateStatus: true, // jobPresentInCJActiveStatus: true, // }, "with @every schedule, still active, not time": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, ranPreviously: true, stillActive: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeTheHour(), lastScheduleTime: justBeforeTheHour(), now: *topOfTheHour(), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 1*time.Minute + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "with @every schedule, still active, is time": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, ranPreviously: true, stillActive: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeThePriorHour(), lastScheduleTime: justBeforeThePriorHour(), now: *justAfterTheHour(), expectCreate: true, expectActive: 2, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - 2*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "with @every schedule, still active, is time, past deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: shortDead, ranPreviously: true, stillActive: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeTheHour(), lastScheduleTime: justBeforeTheHour(), now: justBeforeTheNextHour().Add(time.Second * time.Duration(shortDead+1)), 
expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead+1) + nextScheduleDelta, jobPresentInCJActiveStatus: true, }, "with @every schedule, still active, is time, not past deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: longDead, ranPreviously: true, stillActive: true, cronjobCreationTime: justBeforeThePriorHour(), jobCreationTime: justBeforeTheHour(), lastScheduleTime: justBeforeTheHour(), now: justBeforeTheNextHour().Add(time.Second * time.Duration(shortDead-1)), expectCreate: true, expectActive: 2, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead-1) + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, }, "with @every schedule, prev ran but done, long overdue, no deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: noDead, ranPreviously: true, cronjobCreationTime: justAfterThePriorHour(), lastScheduleTime: *justAfterTheHour(), jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour(), expectCreate: true, expectActive: 1, expectedWarnings: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Minute + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "with @every schedule, prev ran but done, long overdue, past deadline": { concurrencyPolicy: "Allow", schedule: everyHour, deadline: shortDead, ranPreviously: true, cronjobCreationTime: justAfterThePriorHour(), lastScheduleTime: *justAfterTheHour(), jobCreationTime: justAfterThePriorHour(), now: weekAfterTheHour().Add(1 * time.Minute).Add(time.Second * time.Duration(shortDead+1)), expectActive: 1, expectRequeueAfter: true, expectedRequeueDuration: 1*time.Hour - time.Second*time.Duration(shortDead+1) + nextScheduleDelta, expectUpdateStatus: true, jobPresentInCJActiveStatus: true, expectCompleted: true, }, "do nothing if the namespace is terminating": { jobCreateError: 
&errors.StatusError{ErrStatus: metav1.Status{Details: &metav1.StatusDetails{Causes: []metav1.StatusCause{ { Type: v1.NamespaceTerminatingCause, Message: fmt.Sprintf("namespace %s is being terminated", metav1.NamespaceDefault), Field: "metadata.namespace", }}}}}, concurrencyPolicy: "Allow", schedule: onTheHour, deadline: noDead, ranPreviously: true, stillActive: true, jobCreationTime: justAfterThePriorHour(), now: *justAfterTheHour(), expectActive: 0, expectRequeueAfter: false, expectUpdateStatus: false, expectErr: true, jobPresentInCJActiveStatus: false, }, "set lastsuccessfultime if successfulJobHistoryLimit is zero": { successfulJobsHistoryLimit: ptr.To[int32](0), ranPreviously: true, schedule: onTheHour, expectUpdateStatus: true, expectCompleted: true, jobPresentInCJActiveStatus: true, }, "set lastsuccessfultime if successfulJobHistoryLimit is ten": { successfulJobsHistoryLimit: ptr.To[int32](10), ranPreviously: true, schedule: onTheHour, expectUpdateStatus: true, expectCompleted: true, jobPresentInCJActiveStatus: true, }, "set lastsuccessfultime if successfulJobHistoryLimit is nil": { ranPreviously: true, schedule: onTheHour, expectUpdateStatus: true, expectCompleted: true, jobPresentInCJActiveStatus: true, }, } for name, tc := range testCases { name := name tc := tc t.Run(name, func(t *testing.T) { cj := cronJob() cj.Spec.ConcurrencyPolicy = tc.concurrencyPolicy cj.Spec.Suspend = &tc.suspend cj.Spec.Schedule = tc.schedule cj.Spec.TimeZone = tc.timeZone cj.Spec.SuccessfulJobsHistoryLimit = tc.successfulJobsHistoryLimit if tc.deadline != noDead { cj.Spec.StartingDeadlineSeconds = &tc.deadline } var ( job *batchv1.Job err error ) js := []*batchv1.Job{} realCJ := cj.DeepCopy() if tc.ranPreviously { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeThePriorHour()} if !tc.cronjobCreationTime.IsZero() { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: tc.cronjobCreationTime} } cj.Status.LastScheduleTime = &metav1.Time{Time: justAfterThePriorHour()} if 
!tc.lastScheduleTime.IsZero() { cj.Status.LastScheduleTime = &metav1.Time{Time: tc.lastScheduleTime} } job, err = getJobFromTemplate2(&cj, tc.jobCreationTime) if err != nil { t.Fatalf("%s: unexpected error creating a job from template: %v", name, err) } job.UID = "1234" job.Namespace = cj.Namespace ref, err := getRef(job) if err != nil { t.Fatalf("%s: unexpected error getting the job object reference: %v", name, err) } if tc.jobPresentInCJActiveStatus { cj.Status.Active = []v1.ObjectReference{*ref} } if tc.stillActive { realCJ.Status.Active = []v1.ObjectReference{*ref} if !tc.jobStillNotFoundInLister { js = append(js, job) } } else { job.Status.CompletionTime = &metav1.Time{Time: job.ObjectMeta.CreationTimestamp.Add(time.Second * 10)} job.Status.Conditions = append(job.Status.Conditions, batchv1.JobCondition{ Type: batchv1.JobComplete, Status: v1.ConditionTrue, }) if !tc.jobStillNotFoundInLister { js = append(js, job) } } } else { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: justBeforeTheHour()} if !tc.cronjobCreationTime.IsZero() { cj.ObjectMeta.CreationTimestamp = metav1.Time{Time: tc.cronjobCreationTime} } if tc.stillActive { t.Errorf("%s: test setup error: this case makes no sense", name) } } jc := &fakeJobControl{Job: job, CreateErr: tc.jobCreateError, Err: tc.jobGetErr} cjc := &fakeCJControl{CronJob: realCJ} recorder := record.NewFakeRecorder(10) jm := ControllerV2{ jobControl: jc, cronJobControl: cjc, recorder: recorder, now: func() time.Time { return tc.now }, } cjCopy := cj.DeepCopy() requeueAfter, updateStatus, err := jm.syncCronJob(context.TODO(), cjCopy, js) if tc.expectErr && err == nil { t.Errorf("%s: expected error got none with requeueAfter time: %#v", name, requeueAfter) } if tc.expectRequeueAfter { if !reflect.DeepEqual(requeueAfter, &tc.expectedRequeueDuration) { t.Errorf("%s: expected requeueAfter: %+v, got requeueAfter time: %+v", name, tc.expectedRequeueDuration, requeueAfter) } } if updateStatus != tc.expectUpdateStatus { t.Errorf("%s: 
expected updateStatus: %t, actually: %t", name, tc.expectUpdateStatus, updateStatus) } expectedCreates := 0 if tc.expectCreate { expectedCreates = 1 } if tc.ranPreviously && !tc.stillActive { completionTime := tc.jobCreationTime.Add(10 * time.Second) if cjCopy.Status.LastSuccessfulTime == nil || !cjCopy.Status.LastSuccessfulTime.Time.Equal(completionTime) { t.Errorf("cj.status.lastSuccessfulTime: %s expected, got %#v", completionTime, cj.Status.LastSuccessfulTime) } } if len(jc.Jobs) != expectedCreates { t.Errorf("%s: expected %d job started, actually %v", name, expectedCreates, len(jc.Jobs)) } for i := range jc.Jobs { job := &jc.Jobs[i] controllerRef := metav1.GetControllerOf(job) if controllerRef == nil { t.Errorf("%s: expected job to have ControllerRef: %#v", name, job) } else { if got, want := controllerRef.APIVersion, "batch/v1"; got != want { t.Errorf("%s: controllerRef.APIVersion = %q, want %q", name, got, want) } if got, want := controllerRef.Kind, "CronJob"; got != want { t.Errorf("%s: controllerRef.Kind = %q, want %q", name, got, want) } if got, want := controllerRef.Name, cj.Name; got != want { t.Errorf("%s: controllerRef.Name = %q, want %q", name, got, want) } if got, want := controllerRef.UID, cj.UID; got != want { t.Errorf("%s: controllerRef.UID = %q, want %q", name, got, want) } if controllerRef.Controller == nil || *controllerRef.Controller != true { t.Errorf("%s: controllerRef.Controller is not set to true", name) } } } expectedDeletes := 0 if tc.expectDelete { expectedDeletes = 1 } if len(jc.DeleteJobName) != expectedDeletes { t.Errorf("%s: expected %d job deleted, actually %v", name, expectedDeletes, len(jc.DeleteJobName)) } // Status update happens once when ranging through job list, and another one if create jobs. 
expectUpdates := 1 expectedEvents := 0 if tc.expectCreate { expectedEvents++ expectUpdates++ } if tc.expectDelete { expectedEvents++ } if tc.expectCompleted { expectedEvents++ } if name == "still active, is time, F" { // this is the only test case where we would raise an event for not scheduling expectedEvents++ } expectedEvents += tc.expectedWarnings if len(recorder.Events) != expectedEvents { t.Errorf("%s: expected %d event, actually %v", name, expectedEvents, len(recorder.Events)) } numWarnings := 0 for i := 1; i <= len(recorder.Events); i++ { e := <-recorder.Events if strings.HasPrefix(e, v1.EventTypeWarning) { numWarnings++ } } if numWarnings != tc.expectedWarnings { t.Errorf("%s: expected %d warnings, actually %v", name, tc.expectedWarnings, numWarnings) } if len(cjc.Updates) == expectUpdates && tc.expectActive != len(cjc.Updates[expectUpdates-1].Status.Active) { t.Errorf("%s: expected Active size %d, got %d", name, tc.expectActive, len(cjc.Updates[expectUpdates-1].Status.Active)) } if &cj == cjCopy { t.Errorf("syncCronJob is not creating a copy of the original cronjob") } }) } } type fakeQueue struct { workqueue.TypedRateLimitingInterface[string] delay time.Duration key interface{} } func (f *fakeQueue) AddAfter(key string, delay time.Duration) { f.delay = delay f.key = key } // this test will take around 61 seconds to complete func TestControllerV2UpdateCronJob(t *testing.T) { tests := []struct { name string oldCronJob *batchv1.CronJob newCronJob *batchv1.CronJob expectedDelay time.Duration }{ { name: "spec.template changed", oldCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, }, newCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "foo"}, Annotations: map[string]string{"x": 
"y"}, }, Spec: jobSpec(), }, }, }, expectedDelay: 0 * time.Second, }, { name: "spec.schedule changed", oldCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ Schedule: "30 * * * *", JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, Status: batchv1.CronJobStatus{ LastScheduleTime: &metav1.Time{Time: justBeforeTheHour()}, }, }, newCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ Schedule: "*/1 * * * *", JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "foo"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, Status: batchv1.CronJobStatus{ LastScheduleTime: &metav1.Time{Time: justBeforeTheHour()}, }, }, expectedDelay: 1*time.Second + nextScheduleDelta, }, { name: "spec.schedule with @every changed - cadence decrease", oldCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ Schedule: "@every 1m", JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, Status: batchv1.CronJobStatus{ LastScheduleTime: &metav1.Time{Time: justBeforeTheHour()}, }, }, newCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ Schedule: "@every 3m", JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "foo"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, Status: batchv1.CronJobStatus{ LastScheduleTime: &metav1.Time{Time: justBeforeTheHour()}, }, }, expectedDelay: 2*time.Minute + 1*time.Second + nextScheduleDelta, }, { name: "spec.schedule with @every changed - cadence increase", oldCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ Schedule: "@every 3m", JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, Spec: 
jobSpec(), }, }, Status: batchv1.CronJobStatus{ LastScheduleTime: &metav1.Time{Time: justBeforeTheHour()}, }, }, newCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ Schedule: "@every 1m", JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "foo"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, Status: batchv1.CronJobStatus{ LastScheduleTime: &metav1.Time{Time: justBeforeTheHour()}, }, }, expectedDelay: 1*time.Second + nextScheduleDelta, }, { name: "spec.timeZone not changed", oldCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ TimeZone: &newYork, JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, }, newCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ TimeZone: &newYork, JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "foo"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, }, expectedDelay: 0 * time.Second, }, { name: "spec.timeZone changed", oldCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ TimeZone: &newYork, JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "b"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, }, newCronJob: &batchv1.CronJob{ Spec: batchv1.CronJobSpec{ TimeZone: nil, JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a": "foo"}, Annotations: map[string]string{"x": "y"}, }, Spec: jobSpec(), }, }, }, expectedDelay: 0 * time.Second, }, // TODO: Add more test cases for updating scheduling. 
} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() kubeClient := fake.NewSimpleClientset() sharedInformers := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) jm, err := NewControllerV2(ctx, sharedInformers.Batch().V1().Jobs(), sharedInformers.Batch().V1().CronJobs(), kubeClient) if err != nil { t.Errorf("unexpected error %v", err) return } jm.now = justASecondBeforeTheHour queue := &fakeQueue{TypedRateLimitingInterface: workqueue.NewTypedRateLimitingQueueWithConfig( workqueue.DefaultTypedControllerRateLimiter[string](), workqueue.TypedRateLimitingQueueConfig[string]{ Name: "test-update-cronjob", }, )} jm.queue = queue jm.jobControl = &fakeJobControl{} jm.cronJobControl = &fakeCJControl{} jm.recorder = record.NewFakeRecorder(10) jm.updateCronJob(logger, tt.oldCronJob, tt.newCronJob) if queue.delay.Seconds() != tt.expectedDelay.Seconds() { t.Errorf("Expected delay %#v got %#v", tt.expectedDelay.Seconds(), queue.delay.Seconds()) } }) } } func TestControllerV2GetJobsToBeReconciled(t *testing.T) { trueRef := true tests := []struct { name string cronJob *batchv1.CronJob jobs []runtime.Object expected []*batchv1.Job }{ { name: "test getting jobs in namespace without controller reference", cronJob: &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Namespace: "foo-ns", Name: "fooer"}}, jobs: []runtime.Object{ &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns"}}, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "foo-ns"}}, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo2", Namespace: "foo-ns"}}, }, expected: []*batchv1.Job{}, }, { name: "test getting jobs in namespace with a controller reference", cronJob: &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Namespace: "foo-ns", Name: "fooer"}}, jobs: []runtime.Object{ &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: 
"foo-ns"}}, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "foo-ns", OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: &trueRef}}}}, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo2", Namespace: "foo-ns"}}, }, expected: []*batchv1.Job{ {ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "foo-ns", OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: &trueRef}}}}, }, }, { name: "test getting jobs in other namespaces", cronJob: &batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{Namespace: "foo-ns", Name: "fooer"}}, jobs: []runtime.Object{ &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar-ns"}}, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: "bar-ns"}}, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "foo2", Namespace: "bar-ns"}}, }, expected: []*batchv1.Job{}, }, { name: "test getting jobs whose labels do not match job template", cronJob: &batchv1.CronJob{ ObjectMeta: metav1.ObjectMeta{Namespace: "foo-ns", Name: "fooer"}, Spec: batchv1.CronJobSpec{JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"key": "value"}}, }}, }, jobs: []runtime.Object{ &batchv1.Job{ObjectMeta: metav1.ObjectMeta{ Namespace: "foo-ns", Name: "foo-fooer-owner-ref", Labels: map[string]string{"key": "different-value"}, OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: &trueRef}}}, }, &batchv1.Job{ObjectMeta: metav1.ObjectMeta{ Namespace: "foo-ns", Name: "foo-other-owner-ref", Labels: map[string]string{"key": "different-value"}, OwnerReferences: []metav1.OwnerReference{{Name: "another-cronjob", Controller: &trueRef}}}, }, }, expected: []*batchv1.Job{{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo-ns", Name: "foo-fooer-owner-ref", Labels: map[string]string{"key": "different-value"}, OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: &trueRef}}}, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { 
_, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() kubeClient := fake.NewSimpleClientset() sharedInformers := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) for _, job := range tt.jobs { sharedInformers.Batch().V1().Jobs().Informer().GetIndexer().Add(job) } jm, err := NewControllerV2(ctx, sharedInformers.Batch().V1().Jobs(), sharedInformers.Batch().V1().CronJobs(), kubeClient) if err != nil { t.Errorf("unexpected error %v", err) return } actual, err := jm.getJobsToBeReconciled(tt.cronJob) if err != nil { t.Errorf("unexpected error %v", err) return } if !reflect.DeepEqual(actual, tt.expected) { t.Errorf("\nExpected %#v,\nbut got %#v", tt.expected, actual) } }) } } func TestControllerV2CleanupFinishedJobs(t *testing.T) { tests := []struct { name string now time.Time cronJob *batchv1.CronJob finishedJobs []*batchv1.Job jobCreateError error expectedDeletedJobs []string }{ { name: "jobs are still deleted when a cronjob can't create jobs due to jobs quota being reached (avoiding a deadlock)", now: *justAfterTheHour(), cronJob: &batchv1.CronJob{ ObjectMeta: metav1.ObjectMeta{Namespace: "foo-ns", Name: "fooer"}, Spec: batchv1.CronJobSpec{ Schedule: onTheHour, SuccessfulJobsHistoryLimit: ptr.To[int32](1), JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"key": "value"}}, }, }, Status: batchv1.CronJobStatus{LastScheduleTime: &metav1.Time{Time: justAfterThePriorHour()}}, }, finishedJobs: []*batchv1.Job{ { ObjectMeta: metav1.ObjectMeta{ Namespace: "foo-ns", Name: "finished-job-started-hour-ago", OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: ptr.To(true)}}, }, Status: batchv1.JobStatus{StartTime: &metav1.Time{Time: justBeforeThePriorHour()}}, }, { ObjectMeta: metav1.ObjectMeta{ Namespace: "foo-ns", Name: "finished-job-started-minute-ago", OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: ptr.To(true)}}, }, Status: 
batchv1.JobStatus{StartTime: &metav1.Time{Time: justBeforeTheHour()}}, }, }, jobCreateError: errors.NewInternalError(fmt.Errorf("quota for # of jobs reached")), expectedDeletedJobs: []string{"finished-job-started-hour-ago"}, }, { name: "jobs are not deleted if history limit not reached", now: justBeforeTheHour(), cronJob: &batchv1.CronJob{ ObjectMeta: metav1.ObjectMeta{Namespace: "foo-ns", Name: "fooer"}, Spec: batchv1.CronJobSpec{ Schedule: onTheHour, SuccessfulJobsHistoryLimit: ptr.To[int32](2), JobTemplate: batchv1.JobTemplateSpec{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"key": "value"}}, }, }, Status: batchv1.CronJobStatus{LastScheduleTime: &metav1.Time{Time: justAfterThePriorHour()}}, }, finishedJobs: []*batchv1.Job{ { ObjectMeta: metav1.ObjectMeta{ Namespace: "foo-ns", Name: "finished-job-started-hour-ago", OwnerReferences: []metav1.OwnerReference{{Name: "fooer", Controller: ptr.To(true)}}, }, Status: batchv1.JobStatus{StartTime: &metav1.Time{Time: justBeforeThePriorHour()}}, }, }, jobCreateError: nil, expectedDeletedJobs: []string{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, ctx := ktesting.NewTestContext(t) for _, job := range tt.finishedJobs { job.Status.Conditions = []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: v1.ConditionTrue}} } client := fake.NewSimpleClientset() informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) _ = informerFactory.Batch().V1().CronJobs().Informer().GetIndexer().Add(tt.cronJob) for _, job := range tt.finishedJobs { _ = informerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job) } jm, err := NewControllerV2(ctx, informerFactory.Batch().V1().Jobs(), informerFactory.Batch().V1().CronJobs(), client) if err != nil { t.Errorf("unexpected error %v", err) return } jobControl := &fakeJobControl{CreateErr: tt.jobCreateError} jm.jobControl = jobControl jm.now = func() time.Time { return tt.now } jm.enqueueController(tt.cronJob) 
jm.processNextWorkItem(ctx) if len(tt.expectedDeletedJobs) != len(jobControl.DeleteJobName) { t.Fatalf("expected '%v' jobs to be deleted, instead deleted '%s'", tt.expectedDeletedJobs, jobControl.DeleteJobName) } sort.Strings(jobControl.DeleteJobName) sort.Strings(tt.expectedDeletedJobs) for i, deletedJob := range jobControl.DeleteJobName { if deletedJob != tt.expectedDeletedJobs[i] { t.Fatalf("expected '%v' jobs to be deleted, instead deleted '%s'", tt.expectedDeletedJobs, jobControl.DeleteJobName) } } }) } } // TestControllerV2JobAlreadyExistsButNotInActiveStatus validates that an already created job that was not added to the status // of a CronJob initially will be added back on the next sync. Previously, if we failed to update the status after creating a job, // cronjob controller would retry continuously because it would attempt to create a job that already exists. func TestControllerV2JobAlreadyExistsButNotInActiveStatus(t *testing.T) { _, ctx := ktesting.NewTestContext(t) cj := cronJob() cj.Spec.ConcurrencyPolicy = "Forbid" cj.Spec.Schedule = everyHour cj.Status.LastScheduleTime = &metav1.Time{Time: justBeforeThePriorHour()} cj.Status.Active = []v1.ObjectReference{} cjCopy := cj.DeepCopy() job, err := getJobFromTemplate2(&cj, justAfterThePriorHour()) if err != nil { t.Fatalf("Unexpected error creating a job from template: %v", err) } job.UID = "1234" job.Namespace = cj.Namespace client := fake.NewSimpleClientset(cjCopy, job) informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) _ = informerFactory.Batch().V1().CronJobs().Informer().GetIndexer().Add(cjCopy) jm, err := NewControllerV2(ctx, informerFactory.Batch().V1().Jobs(), informerFactory.Batch().V1().CronJobs(), client) if err != nil { t.Fatalf("unexpected error %v", err) } jobControl := &fakeJobControl{Job: job, CreateErr: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, "")} jm.jobControl = jobControl cronJobControl := 
&fakeCJControl{} jm.cronJobControl = cronJobControl jm.now = justBeforeTheHour jm.enqueueController(cjCopy) jm.processNextWorkItem(ctx) if len(cronJobControl.Updates) != 1 { t.Fatalf("Unexpected updates to cronjob, got: %d, expected 1", len(cronJobControl.Updates)) } if len(cronJobControl.Updates[0].Status.Active) != 1 { t.Errorf("Unexpected active jobs count, got: %d, expected 1", len(cronJobControl.Updates[0].Status.Active)) } expectedActiveRef, err := getRef(job) if err != nil { t.Fatalf("Error getting expected job ref: %v", err) } if !reflect.DeepEqual(cronJobControl.Updates[0].Status.Active[0], *expectedActiveRef) { t.Errorf("Unexpected job reference in cronjob active list, got: %v, expected: %v", cronJobControl.Updates[0].Status.Active[0], expectedActiveRef) } } // TestControllerV2JobAlreadyExistsButDifferentOwnner validates that an already created job // not owned by the cronjob controller is ignored. func TestControllerV2JobAlreadyExistsButDifferentOwner(t *testing.T) { _, ctx := ktesting.NewTestContext(t) cj := cronJob() cj.Spec.ConcurrencyPolicy = "Forbid" cj.Spec.Schedule = everyHour cj.Status.LastScheduleTime = &metav1.Time{Time: justBeforeThePriorHour()} cj.Status.Active = []v1.ObjectReference{} cjCopy := cj.DeepCopy() job, err := getJobFromTemplate2(&cj, justAfterThePriorHour()) if err != nil { t.Fatalf("Unexpected error creating a job from template: %v", err) } job.UID = "1234" job.Namespace = cj.Namespace // remove owners for this test since we are testing that jobs not belonging to cronjob // controller are safely ignored job.OwnerReferences = []metav1.OwnerReference{} client := fake.NewSimpleClientset(cjCopy, job) informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) _ = informerFactory.Batch().V1().CronJobs().Informer().GetIndexer().Add(cjCopy) jm, err := NewControllerV2(ctx, informerFactory.Batch().V1().Jobs(), informerFactory.Batch().V1().CronJobs(), client) if err != nil { t.Fatalf("unexpected error 
%v", err) } jobControl := &fakeJobControl{Job: job, CreateErr: errors.NewAlreadyExists(schema.GroupResource{Resource: "job", Group: "batch"}, "")} jm.jobControl = jobControl cronJobControl := &fakeCJControl{} jm.cronJobControl = cronJobControl jm.now = justBeforeTheHour jm.enqueueController(cjCopy) jm.processNextWorkItem(ctx) if len(cronJobControl.Updates) != 0 { t.Fatalf("Unexpected updates to cronjob, got: %d, expected 0", len(cronJobControl.Updates)) } }
go
github
https://github.com/kubernetes/kubernetes
pkg/controller/cronjob/cronjob_controllerv2_test.go
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as log_config from alembic import context from magnum.db.sqlalchemy import api as sqla_api from magnum.db.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. log_config.fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = sqla_api.get_engine() with engine.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() run_migrations_online()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # # Copyright 2004-2006 Zuza Software Foundation # # This file is part of translate. # # translate is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # translate is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. """Convert Mozilla .dtd and .properties files to Gettext PO localization files. See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/moz2po.html for examples and usage instructions. """ from translate.convert import (convert, dtd2po, mozfunny2prop, mozlang2po, prop2po) def main(argv=None): formats = { (None, "*"): ("*", convert.copytemplate), ("*", "*"): ("*", convert.copyinput), "*": ("*", convert.copyinput), } # handle formats that convert to .po files converters = [ ("dtd", dtd2po.convertdtd), ("properties", prop2po.convertmozillaprop), ("it", mozfunny2prop.it2po), ("ini", mozfunny2prop.ini2po), ("inc", mozfunny2prop.inc2po), ("lang", mozlang2po.run_converter), ] for format, converter in converters: formats[(format, format)] = (format + ".po", converter) formats[format] = (format + ".po", converter) # handle search and replace replacer = convert.Replacer("en-US", "${locale}") for replaceformat in ("js", "rdf", "manifest"): formats[(None, replaceformat)] = (replaceformat, replacer.searchreplacetemplate) formats[(replaceformat, replaceformat)] = (replaceformat, replacer.searchreplaceinput) formats[replaceformat] = (replaceformat, replacer.searchreplaceinput) parser = convert.ConvertOptionParser(formats, 
usetemplates=True, usepots=True, description=__doc__) parser.add_duplicates_option() parser.passthrough.append("pot") parser.run(argv) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from __future__ import print_function import os import locale import textwrap import collections from subprocess import Popen, PIPE from kids.txt import indent ShellOutput = collections.namedtuple('ShellOutput', ["out", "err", "errlvl"]) try: basestring # attempt to evaluate basestring def isstr(s): return isinstance(s, basestring) except NameError: def isstr(s): return isinstance(s, str) class ShellError(Exception): def __init__(self, msg, command=None, env=None, outputs=None): self.command = command self.outputs = outputs self.env = env self.message = msg def __str__(self): out, err, errlvl = self.outputs formatted = [] if "\n" in self.command: formatted.append("command:\n%s" % indent(self.command, "| ")) else: formatted.append("command: %r" % self.command) formatted.append("errlvl: %d" % errlvl) if out: if out.endswith('\n'): out = out[:-1] formatted.append("stdout:\n%s" % indent(out, "| ")) if err: if err.endswith('\n'): err = err[:-1] formatted.append("stderr:\n%s" % indent(err, "| ")) formatted = '\n'.join(formatted) return "%s\n%s" % (self.message, indent(formatted, prefix=" ")) def cmd(command, env=None): """Execute a shell command and return (stdout, stdin, errlvl) tuple. This command is synchronous. """ p = Popen(command, shell=isstr(command), stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True, env=env, universal_newlines=False) stdout, stderr = p.communicate() return ShellOutput( stdout.decode(locale.getpreferredencoding()), stderr.decode(locale.getpreferredencoding()), p.returncode) def wrap(command, ignore_errlvls=[0], env=None, strip=True): """Execute a shell command and return stdout as a string Please note that it'll also cast an exception on unexpected errlvl:: >>> wrap('builtin lsdjflk') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ShellError: Wrapped command returned with unexpected errorlevel. 
command: 'builtin lsdjflk' errlvl: 127 stderr: | /bin/sh: 1: builtin: not found The command is passed as-is to the underlying default shell, so you can use builtins, pipes and all the machinery available in your shell:: >>> print(wrap('echo hello | cat'), end='') hello """ res = cmd(command, env=env) if res.errlvl not in ignore_errlvls: raise ShellError( msg="Wrapped command returned with unexpected errorlevel.", command=command, env=env, outputs=res) return res.out.strip() if strip else res.out def set_env(**se_kwargs): def decorator(f): def _wrapped(*args, **kwargs): kenv = kwargs.get("env", {}) env = dict(kenv or os.environ) for key, value in se_kwargs.items(): if key not in kenv: env[key] = value kwargs["env"] = env return f(*args, **kwargs) return _wrapped return decorator
unknown
codeparrot/codeparrot-clean
# coding: utf-8 import logging import flask from google.appengine.api import mail from google.appengine.ext import deferred import config import util ############################################################################### # Helpers ############################################################################### def send_mail_notification(subject, body, to=None, **kwargs): if not config.CONFIG_DB.feedback_email: return brand_name = config.CONFIG_DB.brand_name sender = '%s <%s>' % (brand_name, config.CONFIG_DB.feedback_email) subject = '[%s] %s' % (brand_name, subject) if config.DEVELOPMENT: logging.info( '\n' '######### Deferring to send this email: #############################' '\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n' '#####################################################################', sender, to or sender, subject, body ) deferred.defer(mail.send_mail, sender, to or sender, subject, body, **kwargs) ############################################################################### # Admin Notifications ############################################################################### def new_user_notification(user_db): if not config.CONFIG_DB.notify_on_new_user: return body = 'name: %s\nusername: %s\nemail: %s\n%s\n%s' % ( user_db.name, user_db.username, user_db.email, ''.join([': '.join(('%s\n' % a).split('_')) for a in user_db.auth_ids]), flask.url_for('user_update', user_id=user_db.key.id(), _external=True), ) send_mail_notification('New user: %s' % user_db.name, body) ############################################################################### # User Related ############################################################################### def verify_email_notification(user_db): if not (config.CONFIG_DB.verify_email and user_db.email) or user_db.verified: return user_db.token = util.uuid() user_db.put() to = '%s <%s>' % (user_db.name, user_db.email) body = '''Hello %(name)s, it seems someone (hopefully you) tried to verify your email with %(brand)s. 
In case it was you, please verify it by following this link: %(link)s If it wasn't you, we apologize. You can either ignore this email or reply to it so we can take a look. Best regards, %(brand)s ''' % { 'name': user_db.name, 'link': flask.url_for('user_verify', token=user_db.token, _external=True), 'brand': config.CONFIG_DB.brand_name, } flask.flash( 'A verification link has been sent to your email address.', category='success', ) send_mail_notification('Verify your email.', body, to) def reset_password_notification(user_db): if not user_db.email: return user_db.token = util.uuid() user_db.put() to = '%s <%s>' % (user_db.name, user_db.email) body = '''Hello %(name)s, it seems someone (hopefully you) tried to reset your password with %(brand)s. In case it was you, please reset it by following this link: %(link)s If it wasn't you, we apologize. You can either ignore this email or reply to it so we can take a look. Best regards, %(brand)s ''' % { 'name': user_db.name, 'link': flask.url_for('user_reset', token=user_db.token, _external=True), 'brand': config.CONFIG_DB.brand_name, } flask.flash( 'A reset link has been sent to your email address.', category='success', ) send_mail_notification('Reset your password', body, to) def activate_user_notification(user_db): if not user_db.email: return user_db.token = util.uuid() user_db.put() to = user_db.email body = '''Welcome to %(brand)s. Follow the link below to confirm your email address and activate your account: %(link)s If it wasn't you, we apologize. You can either ignore this email or reply to it so we can take a look. 
Best regards, %(brand)s ''' % { 'link': flask.url_for('user_activate', token=user_db.token, _external=True), 'brand': config.CONFIG_DB.brand_name, } flask.flash( 'An activation link has been sent to your email address.', category='success', ) send_mail_notification('Activate your account', body, to) ############################################################################### # Admin Related ############################################################################### def email_conflict_notification(email): body = 'There is a conflict with %s\n\n%s' % ( email, flask.url_for('user_list', email=email, _external=True), ) send_mail_notification('Conflict with: %s' % email, body)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env bash set -o nounset -o errexit -o xtrace ansible-playbook -i inventory "play.yml" -v "$@"
unknown
github
https://github.com/ansible/ansible
test/integration/targets/error_from_connection/runme.sh
# frozen_string_literal: true module Bundler module SafeMarshal ALLOWED_CLASSES = [ Array, FalseClass, Gem::Specification, Gem::Version, Hash, String, Symbol, Time, TrueClass, ].freeze ERROR = "Unexpected class %s present in marshaled data. Only %s are allowed." PROC = proc do |object| object.tap do unless ALLOWED_CLASSES.include?(object.class) raise TypeError, format(ERROR, object.class, ALLOWED_CLASSES.join(", ")) end end end def self.proc PROC end end end
ruby
github
https://github.com/ruby/ruby
lib/bundler/safe_marshal.rb
/* * Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors. * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. */ package org.jetbrains.kotlin.analysis.api.fir.components import org.jetbrains.kotlin.analysis.api.components.KaCompletionCandidateChecker import org.jetbrains.kotlin.analysis.api.components.KaCompletionExtensionCandidateChecker import org.jetbrains.kotlin.analysis.api.components.KaExtensionApplicabilityResult import org.jetbrains.kotlin.analysis.api.components.KaExtensionApplicabilityResult.* import org.jetbrains.kotlin.analysis.api.fir.KaFirSession import org.jetbrains.kotlin.analysis.api.fir.symbols.KaFirSymbol import org.jetbrains.kotlin.analysis.api.fir.utils.createSubstitutorFromTypeArguments import org.jetbrains.kotlin.analysis.api.impl.base.components.KaBaseSessionComponent import org.jetbrains.kotlin.analysis.api.impl.base.components.withPsiValidityAssertion import org.jetbrains.kotlin.analysis.api.lifetime.KaLifetimeToken import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion import org.jetbrains.kotlin.analysis.api.symbols.KaCallableSymbol import org.jetbrains.kotlin.analysis.api.symbols.KaReceiverParameterSymbol import org.jetbrains.kotlin.analysis.low.level.api.fir.api.getOrBuildFirFile import org.jetbrains.kotlin.analysis.low.level.api.fir.api.getOrBuildFirOfType import org.jetbrains.kotlin.analysis.low.level.api.fir.resolver.ResolutionParameters import org.jetbrains.kotlin.analysis.low.level.api.fir.resolver.SingleCandidateResolutionMode import org.jetbrains.kotlin.analysis.low.level.api.fir.resolver.SingleCandidateResolver import org.jetbrains.kotlin.analysis.low.level.api.fir.sessions.llFirSession import org.jetbrains.kotlin.analysis.low.level.api.fir.util.ContextCollector import org.jetbrains.kotlin.fir.FirSession import org.jetbrains.kotlin.fir.declarations.FirFile import org.jetbrains.kotlin.fir.declarations.FirResolvePhase 
import org.jetbrains.kotlin.fir.expressions.FirCallableReferenceAccess import org.jetbrains.kotlin.fir.expressions.FirExpression import org.jetbrains.kotlin.fir.expressions.FirResolvedQualifier import org.jetbrains.kotlin.fir.expressions.FirSafeCallExpression import org.jetbrains.kotlin.fir.expressions.builder.buildExpressionStub import org.jetbrains.kotlin.fir.resolve.DoubleColonLHS import org.jetbrains.kotlin.fir.resolve.calls.ImplicitReceiverValue import org.jetbrains.kotlin.fir.resolve.calls.candidate.FirErrorReferenceWithCandidate import org.jetbrains.kotlin.fir.symbols.impl.FirCallableSymbol import org.jetbrains.kotlin.fir.symbols.impl.FirVariableSymbol import org.jetbrains.kotlin.fir.symbols.lazyResolveToPhase import org.jetbrains.kotlin.fir.types.receiverType import org.jetbrains.kotlin.psi.* import org.jetbrains.kotlin.psi.psiUtil.getQualifiedExpressionForReceiver import org.jetbrains.kotlin.utils.exceptions.errorWithAttachment import org.jetbrains.kotlin.utils.exceptions.withPsiEntry internal class KaFirCompletionCandidateChecker( override val analysisSessionProvider: () -> KaFirSession ) : KaBaseSessionComponent<KaFirSession>(), KaCompletionCandidateChecker, KaFirSessionComponent { override fun createExtensionCandidateChecker( originalFile: KtFile, nameExpression: KtSimpleNameExpression, explicitReceiver: KtExpression? 
): KaCompletionExtensionCandidateChecker = withPsiValidityAssertion(originalFile, nameExpression, explicitReceiver) { KaLazyCompletionExtensionCandidateChecker(analysisSession.token) { KaFirCompletionExtensionCandidateChecker(analysisSession, nameExpression, explicitReceiver, originalFile) } } } private class KaFirCompletionExtensionCandidateChecker( private val analysisSession: KaFirSession, private val nameExpression: KtSimpleNameExpression, explicitReceiver: KtExpression?, originalFile: KtFile, ) : KaCompletionExtensionCandidateChecker { private val resolutionFacade = analysisSession.resolutionFacade private val implicitReceivers: List<ImplicitReceiverValue<*>> private val firCallSiteSession: FirSession private val firOriginalFile: FirFile private val explicitReceiverInfo: ExplicitReceiverInfo? private val candidateResolver: SingleCandidateResolver private val containingCallableReference: KtCallableReferenceExpression? init { val fakeFile = nameExpression.containingKtFile val firFakeFile = fakeFile.getOrBuildFirFile(resolutionFacade) containingCallableReference = explicitReceiver?.parent as? 
KtCallableReferenceExpression implicitReceivers = computeImplicitReceivers(firFakeFile) firCallSiteSession = firFakeFile.llFirSession firOriginalFile = originalFile.getOrBuildFirFile(resolutionFacade) explicitReceiverInfo = explicitReceiver?.let(::getExplicitReceiverInfo) candidateResolver = SingleCandidateResolver(firCallSiteSession, firOriginalFile) } override val token: KaLifetimeToken get() = analysisSession.token override fun computeApplicability(candidate: KaCallableSymbol): KaExtensionApplicabilityResult = withValidityAssertion { if (candidate is KaReceiverParameterSymbol) { return NonApplicable(token) } require(candidate is KaFirSymbol<*>) val firSymbol = candidate.firSymbol as FirCallableSymbol<*> firSymbol.lazyResolveToPhase(FirResolvePhase.STATUS) val resolutionMode = if (containingCallableReference != null) { SingleCandidateResolutionMode.CHECK_EXTENSION_CALLABlE_REFERENCE_FOR_COMPLETION } else { SingleCandidateResolutionMode.CHECK_EXTENSION_FOR_COMPLETION } fun processReceiver(implicitReceiverValue: ImplicitReceiverValue<*>?): KaExtensionApplicabilityResult? 
{ val resolutionParameters = ResolutionParameters( singleCandidateResolutionMode = resolutionMode, callableSymbol = firSymbol, implicitReceiver = implicitReceiverValue, explicitReceiver = explicitReceiverInfo?.receiverExpression, allowUnsafeCall = true, allowUnstableSmartCast = true, callableReferenceLHS = explicitReceiverInfo?.callableReferenceLHS ) val firResolvedCall = candidateResolver.resolveSingleCandidate(resolutionParameters) ?: return null val substitutor = firResolvedCall.createSubstitutorFromTypeArguments(analysisSession) ?: return null val receiverCastRequired = firResolvedCall.calleeReference is FirErrorReferenceWithCandidate if (firSymbol is FirVariableSymbol<*> && firSymbol.resolvedReturnType.receiverType(firCallSiteSession) != null) { return ApplicableAsFunctionalVariableCall(substitutor, receiverCastRequired, token) } return ApplicableAsExtensionCallable(substitutor, receiverCastRequired, token) } return implicitReceivers.firstNotNullOfOrNull(::processReceiver) ?: processReceiver(null) ?: NonApplicable(token) } private fun computeImplicitReceivers(firFakeFile: FirFile): List<ImplicitReceiverValue<*>> { val elementContext = ContextCollector.process( resolutionFacade = resolutionFacade, file = firFakeFile, targetElement = nameExpression, preferBodyContext = false ) val towerDataContext = elementContext?.towerDataContext ?: errorWithAttachment("Cannot find enclosing declaration for ${nameExpression::class}") { withPsiEntry("fakeNameExpression", nameExpression) } return buildList { addAll(towerDataContext.implicitValueStorage.implicitReceivers) } } /** * Returns a [ExplicitReceiverInfo] matching the given PSI [receiverExpression]. * * @param receiverExpression a qualified expression receiver (e.g., `foo` in `foo?.bar()`, or in `foo.bar`). * * The function unwraps certain receiver expressions. 
For instance, for safe calls direct counterpart to a [KtSafeQualifiedExpression] * is (FirCheckedSafeCallSubject)[org.jetbrains.kotlin.fir.expressions.FirCheckedSafeCallSubject] which requires additional unwrapping * to be used for call resolution. */ private fun getExplicitReceiverInfo(receiverExpression: KtExpression): ExplicitReceiverInfo? { if (receiverExpression is KtStatementExpression) { // FIR for 'KtStatementExpression' is not a 'FirExpression' return null } val parentCall = receiverExpression.getQualifiedExpressionForReceiver() if (parentCall is KtSafeQualifiedExpression) { val firSafeCall = parentCall.getOrBuildFirOfType<FirSafeCallExpression>(resolutionFacade) return ExplicitReceiverInfo(firSafeCall.checkedSubjectRef.value) } val receiverExpressionFir = receiverExpression.getOrBuildFirOfType<FirExpression>(resolutionFacade) val callableReferenceLHS = if (containingCallableReference != null) { val callableReferenceFir = containingCallableReference.getOrBuildFirOfType<FirCallableReferenceAccess>(resolutionFacade) val resolver = SingleCandidateResolver(firCallSiteSession, firOriginalFile) val components = resolver.bodyResolveComponents val context = components.context context.withFile(firOriginalFile, components) { components.doubleColonExpressionResolver.resolveDoubleColonLHS(callableReferenceFir) } } else { null } val refinedReceiverExpression = if (containingCallableReference != null && receiverExpressionFir is FirResolvedQualifier && callableReferenceLHS is DoubleColonLHS.Type ) { /** * If it's a callable reference completion and the LHS is a regular name reference, * we need to create a stub expression with the type of the referenced class. * Otherwise, the type of the receiver would be `Unit`. * The same mechanism is used when creating callable reference info in the compiler. 
* * ```kotlin * class A * * fun A.foo() {} * * val x = A::foo * ``` */ buildExpressionStub { source = receiverExpressionFir.source coneTypeOrNull = callableReferenceLHS.type } } else { receiverExpressionFir } return ExplicitReceiverInfo(refinedReceiverExpression, callableReferenceLHS) } private data class ExplicitReceiverInfo( val receiverExpression: FirExpression?, val callableReferenceLHS: DoubleColonLHS? = null ) } private class KaLazyCompletionExtensionCandidateChecker( override val token: KaLifetimeToken, delegateFactory: () -> KaCompletionExtensionCandidateChecker, ) : KaCompletionExtensionCandidateChecker { private val delegate: KaCompletionExtensionCandidateChecker by lazy(delegateFactory) override fun computeApplicability(candidate: KaCallableSymbol): KaExtensionApplicabilityResult = withValidityAssertion { delegate.computeApplicability(candidate) } }
kotlin
github
https://github.com/JetBrains/kotlin
analysis/analysis-api-fir/src/org/jetbrains/kotlin/analysis/api/fir/components/KaFirCompletionCandidateChecker.kt
{ "kind": "Dashboard", "apiVersion": "dashboard.grafana.app/v1beta1", "metadata": { "name": "v0alpha1.timeseries-formats.v42" }, "spec": { "annotations": { "list": [ { "builtIn": 1, "datasource": { "type": "grafana", "uid": "-- Grafana --" }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations \u0026 Alerts", "type": "dashboard" } ] }, "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, "links": [], "liveNow": false, "panels": [ { "datasource": { "type": "grafana", "uid": "grafana" }, "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, "custom": { "align": "auto", "cellOptions": { "type": "auto" }, "inspect": false }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [ { "matcher": { "id": "byName", "options": "dim1" }, "properties": [ { "id": "custom.width", "value": 80 } ] } ] }, "gridPos": { "h": 8, "w": 8, "x": 0, "y": 0 }, "id": 8, "options": { "cellHeight": "sm", "footer": { "countRows": false, "fields": "", "reducer": [ "sum" ], "show": false }, "frameIndex": 0, "showHeader": true, "showRowNums": false, "sortBy": [] }, "pluginVersion": "10.0.0-pre", "targets": [ { "datasource": { "type": "grafana", "uid": "grafana" }, "queryType": "snapshot", "refId": "A", "snapshot": [ { "data": { "values": [ [ 1677256641358, 1677257007358, 1677257373358, 1677257739358, 1677258105358, 1677258471358, 1677258837358, 1677259203358, 1677259569358, 1677259935358, 1677260301358, 1677260667358, 1677261033358, 1677261399358, 1677261765358, 1677262131358, 1677262497358, 1677262863358, 1677263229358, 1677263595358, 1677263961358, 1677264327358, 1677264693358, 1677265059358, 1677265425358, 1677265791358, 1677266157358, 1677266523358, 1677266889358, 1677267255358, 1677267621358, 1677267987358, 1677268353358, 1677268719358, 1677269085358, 1677269451358, 1677269817358, 1677270183358, 1677270549358, 1677270915358, 1677271281358, 
1677271647358, 1677272013358, 1677272379358, 1677272745358, 1677273111358, 1677273477358, 1677273843358, 1677274209358, 1677274575358, 1677274941358, 1677275307358, 1677275673358, 1677276039358, 1677276405358, 1677276771358, 1677277137358, 1677277503358, 1677277869358, 1677278235358 ], [ 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7, 4, 6, 8, 10, 1, 3, 5, 7 ], [ "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d", "a", "b", "c", "d" ], [ "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y", "x", "y" ] ] }, "schema": { "fields": [ { "config": {}, "labels": {}, "name": "timestamp", "type": "time", "typeInfo": { "frame": "time.Time" } }, { "config": {}, "labels": {}, "name": "numericData", "type": "number", "typeInfo": { "frame": "float64", "nullable": true } }, { "config": {}, "labels": {}, "name": "dim1", "type": "string", "typeInfo": { "frame": "string" } }, { "config": {}, "labels": {}, "name": "dim2", "type": "string", "typeInfo": { "frame": "string" } } ], "meta": { "type": "timeseries-long", "typeVersion": [ 0, 0 ] }, "name": "New Frame", "refId": "A" } }, { "data": { "values": [ [ 1677256641358, 1677257007358, 1677257373358, 1677257739358, 1677258105358, 1677258471358, 1677258837358, 1677259203358, 1677259569358, 1677259935358, 1677260301358, 1677260667358, 1677261033358, 1677261399358, 1677261765358, 1677262131358, 1677262497358, 1677262863358, 1677263229358, 1677263595358, 1677263961358, 
1677264327358, 1677264693358, 1677265059358, 1677265425358, 1677265791358, 1677266157358, 1677266523358, 1677266889358, 1677267255358, 1677267621358, 1677267987358, 1677268353358, 1677268719358, 1677269085358, 1677269451358, 1677269817358, 1677270183358, 1677270549358, 1677270915358, 1677271281358, 1677271647358, 1677272013358, 1677272379358, 1677272745358, 1677273111358, 1677273477358, 1677273843358, 1677274209358, 1677274575358, 1677274941358, 1677275307358, 1677275673358, 1677276039358, 1677276405358, 1677276771358, 1677277137358, 1677277503358, 1677277869358, 1677278235358 ], [ 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2, 3, 5, 6, 3, 1, 2, 3, 2 ], [ "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h", "e", "f", "g", "h" ], [ "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r", "q", "r" ] ] }, "schema": { "fields": [ { "config": {}, "labels": {}, "name": "timestamp", "type": "time", "typeInfo": { "frame": "time.Time" } }, { "config": {}, "labels": {}, "name": "value", "type": "number", "typeInfo": { "frame": "float64", "nullable": true } }, { "config": {}, "labels": {}, "name": "dim3", "type": "string", "typeInfo": { "frame": "string" } }, { "config": {}, "labels": {}, "name": "dim4", "type": "string", "typeInfo": { "frame": "string" } } ], "meta": { "type": "timeseries-long", "typeVersion": [ 0, 0 ] }, "name": "New Frame", "refId": "B" } } ] } ], "title": "timeseries-long", "type": "table" }, { 
"datasource": { "type": "datasource", "uid": "-- Dashboard --" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 11, "x": 8, "y": 0 }, "id": 10, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "datasource", "uid": "-- Dashboard --" }, "panelId": 8, "refId": "A" } ], "title": "Timeseries panel requires a transform to render timeseries-long", "type": "timeseries" }, { "datasource": { "type": "grafana-testdata-datasource" }, "gridPos": { "h": 8, "w": 5, "x": 19, "y": 0 }, "id": 4, "options": { "code": { "language": "plaintext", "showLineNumbers": false, "showMiniMap": false }, "content": "The timeseries panel can not show timeseries-long directly, it must first be converted to `timeseries-wide` or `timeseries-multi` first.\n\nThe UI should show a button indicating this.", "mode": "markdown" }, "pluginVersion": "10.0.0-pre", "title": "Timeseries-long info", "type": "text" }, { "datasource": { "type": "grafana-testdata-datasource" }, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", 
"fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green" }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 8, "w": 8, "x": 0, "y": 8 }, "id": 11, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "single", "sort": "none" } }, "targets": [ { "datasource": { "type": "grafana-testdata-datasource" }, "refId": "A", "scenarioId": "random_walk", "seriesCount": 0 } ], "title": "Expected no-data message (empty series", "type": "timeseries" } ], "refresh": "", "revision": 1, "schemaVersion": 42, "tags": [], "templating": { "list": [] }, "time": { "from": "2023-02-24T16:37:21.358Z", "to": "2023-02-24T22:37:15.358Z" }, "timepicker": {}, "timezone": "", "title": "Panel Tests - Timeseries - Supported input formats", "uid": "f4ca24309dd4", "weekStart": "" }, "status": { "conversion": { "failed": false, "storedVersion": "v0alpha1" } } }
json
github
https://github.com/grafana/grafana
apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/panel-timeseries/v0alpha1.timeseries-formats.v42.v1beta1.json
from django.core.management.base import BaseCommand, CommandError import socket import imaplib import sys import email from django.conf import settings from main.models import Email, EmailOversizeNotified, Leg import pprint from django.core.mail import send_mail # This file is part of https://github.com/cpina/science-cruise-data-management # # This project was programmed in a hurry without any prior Django experience, # while circumnavigating the Antarctic on the ACE expedition, without proper # Internet access, with 150 scientists using the system and doing at the same # cruise other data management and system administration tasks. # # Sadly there aren't unit tests and we didn't have time to refactor the code # during the cruise, which is really needed. # # Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017. class Command(BaseCommand): help = 'Sends an email to users with messages bigger than the settings MAXIMUM_EMAIL_SIZE' def add_arguments(self, parser): parser.add_argument('action', help="[notify|dry-run]", type=str) parser.add_argument('user', help="Specify a username to check if for a specific user. 
Leave it blank or 'all' for all users", type=str) def handle(self, *args, **options): if options['action'] == "dry-run": dry_run = True elif options['action'] == "notify": dry_run = False else: print("Unknown action, should be dry-run or notify") exit(1) if 'user' in options: user = options['user'] else: user = 'all' notify = Notify(dry_run) if user == "all": notify.check_all_users() else: notify.check_user(options['user']) class Notify: def __init__(self, dry_run): self._dry_run = dry_run self._imap = None def _get_message_headers(self, message_uuid, size): resp, data = self._imap.uid('FETCH', message_uuid, '(RFC822.HEADER)') msg = email.message_from_bytes(data[0][1]) information = {} information['From'] = str(email.header.make_header(email.header.decode_header(msg['From']))) information['Date'] = msg['Date'] if 'Subject' not in msg: information['Subject'] = "" else: information['Subject'] = str(email.header.make_header(email.header.decode_header(msg['Subject']))) information['Size'] = "{} KB".format(int(size / 1024)) information['_size_in_bytes'] = size information['_imap_uuid'] = message_uuid information['Uuid'] = message_uuid return information def _notified_for_email(self, headers, email_address): email_object = Email.objects.get(email_address=email_address) # encodes as ascii to avoid problems with an old Mysql subject = headers['Subject'].encode('ascii', 'ignore') from_email = headers['From'].encode('ascii', 'ignore') email_oversized_notified = EmailOversizeNotified.objects.filter(date_string=headers['Date'], size=headers['_size_in_bytes'], #subject=subject, to_email=email_object, #from_email=from_email, imap_uuid=headers['_imap_uuid']) return len(email_oversized_notified) > 0 def _save_headers_as_notified(self, headers_list, email_address): for headers in headers_list: already_notified = self._notified_for_email(headers, email_address) if already_notified: print("Already notified message. 
Not saving headers in the database") continue email_oversize = EmailOversizeNotified() email = Email.objects.get(email_address=email_address) # encodes as ascii to avoid problems with an old Mysql subject = headers['Subject'].encode('ascii', 'ignore') from_email = headers['From'].encode('ascii', 'ignore') email_oversize.to_email = email email_oversize.date_string = headers['Date'] email_oversize.size = headers['_size_in_bytes'] email_oversize.from_email = from_email email_oversize.subject = subject email_oversize.imap_uuid = headers['_imap_uuid'] email_oversize.save() def _notify_user(self, headers_list, email_to_notify): pprint.pprint(headers_list) information = "" for headers in headers_list: headers['_To_Email'] = email_to_notify headers['_To_User'] = email_to_notify.split("@")[0] already_notified = self._notified_for_email(headers, email_to_notify) if already_notified: print("Already notified. Skipping sending notification") continue information += """From: {From} To: {_To_Email} Date: {Date} Size: {Size} Subject: {Subject} UUID: {_To_User} {Uuid} """.format(**headers) if len(information) == 0: # There are no messages to be notified return message_body = """Hello, There are some oversized emails in your mailbox (see the bottom of this email for details). You will not receive the oversized email and the attachment will not be downloaded. We recommend you contact the sender and ask for a smaller version (<300 KB). If it's really crucial to download the attachment, forward the details below for the relevant email, to data@ace-expedition.net and we will try to download it. When it has downloaded, you will receive an email telling you where you can find it. 
{} Data team """.format(information) if self._dry_run == False: send_mail( 'Oversized email', message_body, 'Data team <data@ace-expedition.net>', [email_to_notify], fail_silently=False, ) def _process_mailbox(self, imap, email_to_notify): rv, sizes = imap.uid('FETCH', '1:*', '(RFC822.SIZE)') if rv != 'OK': print("No messages can't be retrieved!") return if sizes == [None]: print("No messages in the mailbox") return headers_for_oversized_messages = [] for message_information_size in sizes: message_information_size = message_information_size.decode() message_information_size = message_information_size.replace("(", "") message_information_size = message_information_size.replace(")", "") (index_number, parenthesis_uid, message_uuid, rfc822_size, size) = message_information_size.split() message_uuid = message_uuid size = int(size) if size > settings.MAXIMUM_EMAIL_SIZE: # if size > 3000: headers = self._get_message_headers(message_uuid, size) headers_for_oversized_messages.append(headers) self._notify_user(headers_for_oversized_messages, email_to_notify) self._save_headers_as_notified(headers_for_oversized_messages, email_to_notify) def _get_imap_password(self, email_address): password = Email.objects.get(email_address=email_address).server_password return password def check_user(self, email_address): username = email_address.split("@")[0] password = self._get_imap_password(email_address) socket.setdefaulttimeout(30) self._imap = imaplib.IMAP4(settings.IMAP_SERVER) try: print("Login") rv, data = self._imap.login(username, password) except imaplib.IMAP4.error: print("Login failed for:", username) return print("Select INBOX") # the readonly=True is to avoid that the FETCH command # to check the sizes marks the messages as read rv, data = self._imap.select("INBOX", readonly=True) if rv == 'OK': print("Processing mailbox...") self._process_mailbox(self._imap, email_address) self._imap.close() else: print("ERROR: Unable to open mailbox", rv) self._imap.logout() def 
check_all_users(self): active_leg = Leg.current_active_leg() emails_active_leg = Email.objects.filter(person__leg=active_leg).order_by("email_address") for (index, email_account) in enumerate(emails_active_leg): while True: try: print("Checking: {} {}/{}".format(email_account.email_address, index+1, len(emails_active_leg))) self.check_user(email_account.email_address) break except ConnectionResetError: print("Connection Reset Error for user: {}. Trying again".format(email_account)) except socket.timeout: print("Connection timeout Error for user: {}. Trying again".format(email_account)) except OSError: print("Probably 'Network is unreachable' error for user {}. Trying again".format(email_account))
unknown
codeparrot/codeparrot-clean
<?php namespace Illuminate\Tests\Routing; use Illuminate\Http\RedirectResponse; use Illuminate\Http\Request; use Illuminate\Routing\Redirector; use Illuminate\Routing\UrlGenerator; use Illuminate\Session\Store; use Mockery as m; use PHPUnit\Framework\TestCase; use Symfony\Component\HttpFoundation\HeaderBag; class RoutingRedirectorTest extends TestCase { protected $headers; protected $request; protected $url; protected $session; protected $redirect; protected function setUp(): void { $this->headers = m::mock(HeaderBag::class); $this->request = m::mock(Request::class); $this->request->shouldReceive('isMethod')->andReturn(true)->byDefault(); $this->request->shouldReceive('method')->andReturn('GET')->byDefault(); $this->request->shouldReceive('route')->andReturn(true)->byDefault(); $this->request->shouldReceive('ajax')->andReturn(false)->byDefault(); $this->request->shouldReceive('expectsJson')->andReturn(false)->byDefault(); $this->request->headers = $this->headers; $this->url = m::mock(UrlGenerator::class); $this->url->shouldReceive('getRequest')->andReturn($this->request); $this->url->shouldReceive('to')->with('bar', [], null)->andReturn('http://foo.com/bar'); $this->url->shouldReceive('to')->with('bar', [], true)->andReturn('https://foo.com/bar'); $this->url->shouldReceive('to')->with('login', [], null)->andReturn('http://foo.com/login'); $this->url->shouldReceive('to')->with('http://foo.com/bar', [], null)->andReturn('http://foo.com/bar'); $this->url->shouldReceive('to')->with('/', [], null)->andReturn('http://foo.com/'); $this->url->shouldReceive('to')->with('http://foo.com/bar?signature=secret', [], null)->andReturn('http://foo.com/bar?signature=secret'); $this->session = m::mock(Store::class); $this->redirect = new Redirector($this->url); $this->redirect->setSession($this->session); } public function testBasicRedirectTo() { $response = $this->redirect->to('bar'); $this->assertInstanceOf(RedirectResponse::class, $response); 
$this->assertSame('http://foo.com/bar', $response->getTargetUrl()); $this->assertEquals(302, $response->getStatusCode()); $this->assertEquals($this->session, $response->getSession()); } public function testComplexRedirectTo() { $response = $this->redirect->to('bar', 303, ['X-RateLimit-Limit' => 60, 'X-RateLimit-Remaining' => 59], true); $this->assertSame('https://foo.com/bar', $response->getTargetUrl()); $this->assertEquals(303, $response->getStatusCode()); $this->assertEquals(60, $response->headers->get('X-RateLimit-Limit')); $this->assertEquals(59, $response->headers->get('X-RateLimit-Remaining')); } public function testGuestPutCurrentUrlInSession() { $this->url->shouldReceive('full')->andReturn('http://foo.com/bar'); $this->session->shouldReceive('put')->once()->with('url.intended', 'http://foo.com/bar'); $response = $this->redirect->guest('login'); $this->assertSame('http://foo.com/login', $response->getTargetUrl()); } public function testGuestPutPreviousUrlInSession() { $this->request->shouldReceive('isMethod')->once()->with('GET')->andReturn(false); $this->session->shouldReceive('put')->once()->with('url.intended', 'http://foo.com/bar'); $this->url->shouldReceive('previous')->once()->andReturn('http://foo.com/bar'); $response = $this->redirect->guest('login'); $this->assertSame('http://foo.com/login', $response->getTargetUrl()); } public function testIntendedRedirectToIntendedUrlInSession() { $this->session->shouldReceive('pull')->with('url.intended', '/')->andReturn('http://foo.com/bar'); $response = $this->redirect->intended(); $this->assertSame('http://foo.com/bar', $response->getTargetUrl()); } public function testIntendedWithoutIntendedUrlInSession() { $this->session->shouldReceive('forget')->with('url.intended'); // without fallback url $this->session->shouldReceive('pull')->with('url.intended', '/')->andReturn('/'); $response = $this->redirect->intended(); $this->assertSame('http://foo.com/', $response->getTargetUrl()); // with a fallback url 
$this->session->shouldReceive('pull')->with('url.intended', 'bar')->andReturn('bar'); $response = $this->redirect->intended('bar'); $this->assertSame('http://foo.com/bar', $response->getTargetUrl()); } public function testRefreshRedirectToCurrentUrl() { $this->request->shouldReceive('path')->andReturn('http://foo.com/bar'); $response = $this->redirect->refresh(); $this->assertSame('http://foo.com/bar', $response->getTargetUrl()); } public function testBackRedirectToHttpReferer() { $this->headers->shouldReceive('has')->with('referer')->andReturn(true); $this->url->shouldReceive('previous')->andReturn('http://foo.com/bar'); $response = $this->redirect->back(); $this->assertSame('http://foo.com/bar', $response->getTargetUrl()); } public function testAwayDoesntValidateTheUrl() { $response = $this->redirect->away('bar'); $this->assertSame('bar', $response->getTargetUrl()); } public function testSecureRedirectToHttpsUrl() { $response = $this->redirect->secure('bar'); $this->assertSame('https://foo.com/bar', $response->getTargetUrl()); } public function testAction() { $this->url->shouldReceive('action')->with('bar@index', [])->andReturn('http://foo.com/bar'); $response = $this->redirect->action('bar@index'); $this->assertSame('http://foo.com/bar', $response->getTargetUrl()); } public function testRoute() { $this->url->shouldReceive('route')->with('home')->andReturn('http://foo.com/bar'); $this->url->shouldReceive('route')->with('home', [])->andReturn('http://foo.com/bar'); $response = $this->redirect->route('home'); $this->assertSame('http://foo.com/bar', $response->getTargetUrl()); } public function testSignedRoute() { $this->url->shouldReceive('signedRoute')->with('home', [], null)->andReturn('http://foo.com/bar?signature=secret'); $response = $this->redirect->signedRoute('home'); $this->assertSame('http://foo.com/bar?signature=secret', $response->getTargetUrl()); } public function testTemporarySignedRoute() { 
$this->url->shouldReceive('temporarySignedRoute')->with('home', 10, [])->andReturn('http://foo.com/bar?signature=secret'); $response = $this->redirect->temporarySignedRoute('home', 10); $this->assertSame('http://foo.com/bar?signature=secret', $response->getTargetUrl()); } public function testItSetsAndGetsValidIntendedUrl() { $this->session->shouldReceive('put')->once()->with('url.intended', 'http://foo.com/bar'); $this->session->shouldReceive('get')->andReturn('http://foo.com/bar'); $result = $this->redirect->setIntendedUrl('http://foo.com/bar'); $this->assertInstanceOf(Redirector::class, $result); $this->assertSame('http://foo.com/bar', $this->redirect->getIntendedUrl()); } }
php
github
https://github.com/laravel/framework
tests/Routing/RoutingRedirectorTest.php
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- $id: http://devicetree.org/schemas/media/nxp,imx8-isi.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: i.MX8 Image Sensing Interface maintainers: - Laurent Pinchart <laurent.pinchart@ideasonboard.com> description: | The Image Sensing Interface (ISI) combines image processing pipelines with DMA engines to process and capture frames originating from a variety of sources. The inputs to the ISI go through Pixel Link interfaces, and their number and nature is SoC-dependent. They cover both capture interfaces (MIPI CSI-2 RX, HDMI RX, ...) and display engine outputs for writeback support. properties: compatible: enum: - fsl,imx8mn-isi - fsl,imx8mp-isi - fsl,imx8ulp-isi - fsl,imx91-isi - fsl,imx93-isi reg: maxItems: 1 clocks: items: - description: The AXI clock - description: The APB clock # TODO: Check if the per-channel ipg_proc_clk clocks need to be specified # as well, in case some SoCs have the ability to control them separately. # This may be the case of the i.MX8[DQ]X(P) clock-names: items: - const: axi - const: apb fsl,blk-ctrl: $ref: /schemas/types.yaml#/definitions/phandle description: A phandle referencing the block control that contains the CSIS to ISI gasket. interrupts: description: Processing pipeline interrupts, one per pipeline minItems: 1 maxItems: 2 power-domains: maxItems: 1 ports: $ref: /schemas/graph.yaml#/properties/ports description: | Ports represent the Pixel Link inputs to the ISI. Their number and assignment are model-dependent. Each port shall have a single endpoint. 
required: - compatible - reg - interrupts - clocks - clock-names - ports allOf: - if: properties: compatible: contains: enum: - fsl,imx8mn-isi - fsl,imx8ulp-isi - fsl,imx91-isi - fsl,imx93-isi then: properties: interrupts: maxItems: 1 ports: properties: port@0: description: MIPI CSI-2 RX port@1: false required: - port@0 - if: properties: compatible: contains: const: fsl,imx8mp-isi then: properties: interrupts: maxItems: 2 ports: properties: port@0: description: MIPI CSI-2 RX 0 port@1: description: MIPI CSI-2 RX 1 required: - port@0 - port@1 - if: properties: compatible: not: contains: const: fsl,imx91-isi then: required: - fsl,blk-ctrl additionalProperties: false examples: - | #include <dt-bindings/clock/imx8mn-clock.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/power/imx8mn-power.h> isi@32e20000 { compatible = "fsl,imx8mn-isi"; reg = <0x32e20000 0x100>; interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_DISP_AXI_ROOT>, <&clk IMX8MN_CLK_DISP_APB_ROOT>; clock-names = "axi", "apb"; fsl,blk-ctrl = <&disp_blk_ctrl>; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_ISI>; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; isi_in: endpoint { remote-endpoint = <&mipi_csi_out>; }; }; }; }; - | #include <dt-bindings/clock/imx8mp-clock.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/interrupt-controller/irq.h> isi@32e00000 { compatible = "fsl,imx8mp-isi"; reg = <0x32e00000 0x4000>; interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>, <&clk IMX8MP_CLK_MEDIA_APB_ROOT>; clock-names = "axi", "apb"; fsl,blk-ctrl = <&media_blk_ctrl>; power-domains = <&mediamix_pd>; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; isi_in_0: endpoint { remote-endpoint = <&mipi_csi_0_out>; }; }; port@1 { reg = <1>; isi_in_1: endpoint { remote-endpoint = 
<&mipi_csi_1_out>; }; }; }; }; ...
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides a seam for user-related services.""" __author__ = 'Sean Lip' import feconf import logging import utils from google.appengine.api import users from google.appengine.ext import ndb def create_login_url(slug): """Creates a login url.""" return users.create_login_url(utils.set_url_query_parameter( feconf.SIGNUP_URL, 'return_url', slug)) def create_logout_url(slug): """Creates a logout url.""" logout_url = utils.set_url_query_parameter('/logout', 'return_url', slug) return logout_url def get_current_user(request): """Returns the current user.""" return users.get_current_user() def is_super_admin(user_id, request): """Checks whether the user with the given user_id owns this app. For GAE, the user in question is also required to be the current user. """ user = users.get_current_user() if user is None: return False return user.user_id() == user_id and users.is_current_user_admin() def get_user_id_from_email(email): """Given an email address, returns a user id. Returns None if the email address does not correspond to a valid user id. 
""" class _FakeUser(ndb.Model): _use_memcache = False _use_cache = False user = ndb.UserProperty(required=True) try: u = users.User(email) except users.UserNotFoundError: logging.error( 'The email address %s does not correspond to a valid user_id' % email) return None key = _FakeUser(id=email, user=u).put() obj = _FakeUser.get_by_id(key.id()) user_id = obj.user.user_id() if user_id: return unicode(user_id) else: return None def get_user_id(user): """ Given an user object, get the user id. """ return user.user_id() def get_user_email(user): """ Given an user object, get the user's email. """ return user.email()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Author: Abhishek Malik <abhishek.malik@intel.com> # Copyright (c) 2017 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
from __future__ import print_function import time, sys, signal, atexit from upm import pyupm_rsc as rsc # Since CI loads each python example, the following would fail if the types # from rsc_regs.h are NOT exposed in the pyupm_rsc module mode = rsc.NORMAL_MODE dr = rsc.N_DR_20_SPS def main(): # Instantiate a Honeywell RSC Pressure sensor on the SPI bus 0 rsc_sensor = rsc.RSC(0, 9, 8); ## Exit handlers ## # This function stops python from printing a stacktrace when you hit control-C def SIGINTHandler(signum, frame): raise SystemExit # This function lets you run code on exit, including functions from abpdrrt005pg2a5 def exitHandler(): print("Exiting") sys.exit(0) # Register exit handlers atexit.register(exitHandler) signal.signal(signal.SIGINT, SIGINTHandler) # Read the value every second and detect the pressure print("Sensor Name: {0}".format(rsc_sensor.getSensorName())) print("Sensor Serial Number: {0}".format(rsc_sensor.getSensorSerialNumber())) while(1): print("Pressure {0}: {1}".format(rsc_sensor.getPressureUnit(), rsc_sensor.getPressure())) print("Temperature C: {0}".format(rsc_sensor.getTemperature())) time.sleep(1) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
//// [tests/cases/compiler/addMoreCallSignaturesToBaseSignature.ts] //// //// [addMoreCallSignaturesToBaseSignature.ts] interface Foo { (): string; } interface Bar extends Foo { (key: string): string; } var a: Bar; var kitty = a(); //// [addMoreCallSignaturesToBaseSignature.js] "use strict"; var a; var kitty = a();
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/addMoreCallSignaturesToBaseSignature.js
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package phases import ( "io" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" ) // a package local type for testing purposes. type testJoinData struct{} // testJoinData must satisfy JoinData. var _ JoinData = &testJoinData{} func (j *testJoinData) CertificateKey() string { return "" } func (j *testJoinData) Cfg() *kubeadmapi.JoinConfiguration { return nil } func (j *testJoinData) TLSBootstrapCfg() (*clientcmdapi.Config, error) { return nil, nil } func (j *testJoinData) InitCfg() (*kubeadmapi.InitConfiguration, error) { return nil, nil } func (j *testJoinData) Client() (clientset.Interface, error) { return nil, nil } func (j *testJoinData) WaitControlPlaneClient() (clientset.Interface, error) { return nil, nil } func (j *testJoinData) IgnorePreflightErrors() sets.Set[string] { return nil } func (j *testJoinData) OutputWriter() io.Writer { return nil } func (j *testJoinData) PatchesDir() string { return "" } func (j *testJoinData) DryRun() bool { return false } func (j *testJoinData) KubeConfigDir() string { return "" } func (j *testJoinData) KubeletDir() string { return "" } func (j *testJoinData) ManifestDir() string { return "" } func (j *testJoinData) CertificateWriteDir() string { return "" }
go
github
https://github.com/kubernetes/kubernetes
cmd/kubeadm/app/cmd/phases/join/data_test.go
import glob import logging import optparse import os import sys import textwrap import pyfits.diff from pyfits.util import fill log = logging.getLogger('fitsdiff') USAGE = """ Compare two FITS image files and report the differences in header keywords and data. fitsdiff [options] filename1 filename2 where filename1 filename2 are the two files to be compared. They may also be wild cards, in such cases, they must be enclosed by double or single quotes, or they may be directory names. If both are directory names, all files in each of the directories will be included; if only one is directory name, then the directory name will be prefixed to the file name(s) specified by the other argument. for example:: fitsdiff "*.fits" "/machine/data1" will compare all FITS files in the current directory to the corresponding files in the directory /machine/data1. """.strip() EPILOG = """ If the two files are identical within the specified conditions, it will report "No difference is found." If the value(s) of -c and -k takes the form '@filename', list is in the text file 'filename', and each line in that text file contains one keyword. Example ------- fitsdiff -k filename,filtnam1 -n 5 -d 1.e-6 test1.fits test2 This command will compare files test1.fits and test2.fits, report maximum of 5 different pixels values per extension, only report data values larger than 1.e-6 relative to each other, and will neglect the different values of keywords FILENAME and FILTNAM1 (or their very existence). fitsdiff commandline arguments can also be set using the environment variable FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present, each argument present will override the corresponding argument on the commandline. This environment variable exists to make it easier to change the behavior of fitsdiff on a global level, such as in a set of regression tests. 
""".strip() class HelpFormatter(optparse.TitledHelpFormatter): def format_epilog(self, epilog): return '\n%s\n' % fill(epilog, self.width) def handle_options(argv=None): # This is a callback--less trouble than actually adding a new action type def store_list(option, opt, value, parser): setattr(parser.values, option.dest, []) # Accept either a comma-separated list or a filename (starting with @) # containing a value on each line if value and value[0] == '@': value = value[1:] if not os.path.exists(value): log.warning('%s argument %s does not exist' % (opt, value)) return try: values = [v.strip() for v in open(value, 'r').readlines()] setattr(parser.values, option.dest, values) except IOError, e: log.warning('reading %s for %s failed: %s; ignoring this ' 'argument' % (value, opt, e)) else: setattr(parser.values, option.dest, [v.strip() for v in value.split(',')]) parser = optparse.OptionParser(usage=USAGE, epilog=EPILOG, formatter=HelpFormatter()) parser.add_option( '-q', '--quiet', action='store_true', help='Produce no output and just return a status code.') parser.add_option( '-n', '--num-diffs', type='int', default=10, dest='numdiffs', metavar='INTEGER', help='Max number of data differences (image pixel or table element) ' 'to report per extension (default %default).') parser.add_option( '-d', '--difference-tolerance', type='float', default=0., dest='tolerance', metavar='NUMBER', help='The relative tolerance for comparison of two numbers, ' 'specifically two floating point numbers. This applies to data ' 'in both images and tables, and to floating point keyword values ' 'in headers (default %default).') parser.add_option( '-b', '--no-ignore-blanks', action='store_false', dest='ignore_blanks', default=True, help="Don't ignore trailing blanks (whitespace) in string values. " "Otherwise trailing blanks both in header keywords/values and in " "table column values) are not treated as significant i.e. 
" "without this option 'ABC ' and 'ABC' are considered " "equivalent.") parser.add_option( '--no-ignore-blank-cards', action='store_false', dest='ignore_blank_cards', default=True, help="Don't ignore entirey blank cards in headers. Normally fitsdiff " "does not consider blank cards when comparing headers, but this " "will ensure that even blank cards match up.") parser.add_option( '-o', '--output-file', metavar='FILE', help='Output results to this file; otherwise results are printed to ' 'stdout.') group = optparse.OptionGroup(parser, 'Header Comparison Options') group.add_option( '-k', '--ignore-keywords', action='callback', callback=store_list, nargs=1, type='str', default=[], dest='ignore_keywords', metavar='KEYWORDS', help='Comma-separated list of keywords not to be compared. Keywords ' 'may contain wildcard patterns. To exclude all keywords, use ' '"*"; make sure to have double or single quotes around the ' 'asterisk.') group.add_option( '-c', '--ignore-comments', action='callback', callback=store_list, nargs=1, type='str', default=[], dest='ignore_comments', metavar='KEYWORDS', help='Comma-separated list of keywords whose comments will not be ' 'compared. Wildcards may be used as with --ignore-keywords.') parser.add_option_group(group) group = optparse.OptionGroup(parser, 'Table Comparison Options') group.add_option( '-f', '--ignore-fields', action='callback', callback=store_list, nargs=1, type='str', default=[], dest='ignore_fields', metavar='COLUMNS', help='Comma-separated list of fields (i.e. columns) not to be ' 'compared. 
All columns may be excluded using "*" as with ' '--ignore-keywords.') parser.add_option_group(group) options, args = parser.parse_args(argv) # Determine which filenames to compare if len(args) != 2: parser.error('\n' + textwrap.fill( 'fitsdiff requires two arguments; see `fitsdiff --help` for more ' 'details.', parser.formatter.width)) return options, args def setup_logging(outfile=None): log.setLevel(logging.INFO) error_handler = logging.StreamHandler(sys.stderr) error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) error_handler.setLevel(logging.WARNING) log.addHandler(error_handler) if outfile is not None: output_handler = logging.FileHandler(outfile) else: output_handler = logging.StreamHandler() class LevelFilter(logging.Filter): """Log only messages matching the specified level.""" def __init__(self, name='', level=logging.NOTSET): logging.Filter.__init__(self, name) self.level = level def filter(self, rec): return rec.levelno == self.level # File output logs all messages, but stdout logs only INFO messages # (since errors are already logged to stderr) output_handler.addFilter(LevelFilter(level=logging.INFO)) output_handler.setFormatter(logging.Formatter('%(message)s')) log.addHandler(output_handler) def match_files(paths): filelists = [] for path in paths: if glob.has_magic(path): files = [os.path.abspath(f) for f in glob.glob(path)] if not files: log.error( 'Wildcard pattern %r did not match any files.' % path) sys.exit(2) filelists.append(files) elif os.path.isdir(path): filelists.append([os.path.abspath(f) for f in os.listdir(path)]) elif os.path.isfile(path): filelists.append([path]) else: log.error( '%r is not an existing file, directory, or wildcard pattern; ' 'see `fitsdiff --help` for more usage help.' 
% path) sys.exit(2) filelists[0].sort() filelists[1].sort() for a, b in [(0, 1), (1, 0)]: if len(filelists[a]) > len(filelists[b]): for extra in filelists[a][len(filelists[b]):]: log.warning('%r has no match in %r' % (extra, paths[b])) filelists[a] = filelists[a][:len(filelists[b])] break return zip(*filelists) def main(): if 'FITSDIFF_SETTINGS' in os.environ: argv = os.environ['FITSDIFF_SETTINGS'].split() + sys.argv[1:] else: argv = sys.argv[1:] opts, args = handle_options(argv) if not opts.quiet: setup_logging(opts.output_file) files = match_files(args) close_file = False if opts.quiet: out_file = None elif opts.output_file: out_file = open(opts.output_file, 'wb') close_file = True else: out_file = sys.stdout identical = [] try: for a, b in files: # TODO: pass in any additonal arguments here too diff = pyfits.diff.FITSDiff( a, b, ignore_keywords=opts.ignore_keywords, ignore_comments=opts.ignore_comments, ignore_fields=opts.ignore_fields, numdiffs=opts.numdiffs, tolerance=opts.tolerance, ignore_blanks=opts.ignore_blanks, ignore_blank_cards=opts.ignore_blank_cards) diff.report(fileobj=out_file) identical.append(diff.identical) return int(not all(identical)) finally: if close_file: out_file.close()
unknown
codeparrot/codeparrot-clean
"""Lightlink channels module for Zigbee Home Automation.""" import asyncio import zigpy.exceptions import zigpy.zcl.clusters.lightlink as lightlink from .. import registries from .base import ChannelStatus, ZigbeeChannel @registries.CHANNEL_ONLY_CLUSTERS.register(lightlink.LightLink.cluster_id) @registries.ZIGBEE_CHANNEL_REGISTRY.register(lightlink.LightLink.cluster_id) class LightLink(ZigbeeChannel): """Lightlink channel.""" async def async_configure(self) -> None: """Add Coordinator to LightLink group .""" if self._ch_pool.skip_configuration: self._status = ChannelStatus.CONFIGURED return application = self._ch_pool.endpoint.device.application try: coordinator = application.get_device(application.ieee) except KeyError: self.warning("Aborting - unable to locate required coordinator device.") return try: _, _, groups = await self.cluster.get_group_identifiers(0) except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as exc: self.warning("Couldn't get list of groups: %s", str(exc)) return if groups: for group in groups: self.debug("Adding coordinator to 0x%04x group id", group.group_id) await coordinator.add_to_group(group.group_id) else: await coordinator.add_to_group(0x0000, name="Default Lightlink Group")
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Standard library imports import sys PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 # Third party imports # Local application / specific library imports # python_2_unicode_compatible try: from django.utils.encoding import python_2_unicode_compatible except ImportError: python_2_unicode_compatible = lambda x: x # Provides string_types if six is not available try: from django.utils.six import string_types except ImportError: if PY3: string_types = str, else: string_types = basestring, # force_str try: from django.utils.encoding import force_str except ImportError: from django.utils.encoding import smart_str as force_str # noqa # get_model try: from django.apps import apps get_model = apps.get_model except ImportError: from django.db.models import get_model # noqa def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" class metaclass(meta): # noqa __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass("NewBase", None, {})
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # Copyright 2014 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Functions for parsing the gypd output from gyp. """ import os def parse_dictionary(var_dict, d, current_target_name, dest_dir): """Helper function to get the meaningful entries in a dictionary. Parse dictionary d, and store unique relevant entries in var_dict. Recursively parses internal dictionaries and files that are referenced. When parsing the 'libraries' list from gyp, entries in the form '-l<name>' get assigned to var_dict.LOCAL_SHARED_LIBRARIES as 'lib<name>', and entries in the form '[lib]<name>.a' get assigned to var_dict.LOCAL_STATIC_LIBRARIES as 'lib<name>'. Args: var_dict: VarsDict object for storing the results of the parsing. d: Dictionary object to parse. current_target_name: The current target being parsed. If this dictionary is a target, this will be its entry 'target_name'. Otherwise, this will be the name of the target which contains this dictionary. dest_dir: Destination for the eventual Android.mk that will be created from this parse, relative to Skia trunk. Used to determine path for source files. """ for source in d.get('sources', []): # Compare against a lowercase version, in case files are named .H or .GYPI lowercase_source = source.lower() if lowercase_source.endswith('.h'): # Android.mk does not need the header files. continue if lowercase_source.endswith('gypi'): # The gypi files are included in sources, but the sources they included # are also included. No need to parse them again. continue # The path is relative to the gyp folder, but Android wants the path # relative to dest_dir. 
rel_source = os.path.relpath(source, os.pardir) rel_source = os.path.relpath(rel_source, dest_dir) var_dict.LOCAL_SRC_FILES.add(rel_source) for lib in d.get('libraries', []): if lib.endswith('.a'): # Remove the '.a' lib = lib[:-2] # Add 'lib', if necessary if not lib.startswith('lib'): lib = 'lib' + lib var_dict.LOCAL_STATIC_LIBRARIES.add(lib) else: # lib will be in the form of '-l<name>'. Change it to 'lib<name>' lib = lib.replace('-l', 'lib', 1) var_dict.LOCAL_SHARED_LIBRARIES.add(lib) for dependency in d.get('dependencies', []): # Each dependency is listed as # <path_to_file>:<target>#target li = dependency.split(':') assert(len(li) <= 2 and len(li) >= 1) sub_targets = [] if len(li) == 2 and li[1] != '*': sub_targets.append(li[1].split('#')[0]) sub_path = li[0] assert(sub_path.endswith('.gyp')) # Although the original reference is to a .gyp, parse the corresponding # gypd file, which was constructed by gyp. sub_path = sub_path + 'd' parse_gypd(var_dict, sub_path, dest_dir, sub_targets) if 'default_configuration' in d: config_name = d['default_configuration'] # default_configuration is meaningless without configurations assert('configurations' in d) config = d['configurations'][config_name] parse_dictionary(var_dict, config, current_target_name, dest_dir) for flag in d.get('cflags', []): var_dict.LOCAL_CFLAGS.add(flag) for flag in d.get('cflags_cc', []): var_dict.LOCAL_CPPFLAGS.add(flag) for include in d.get('include_dirs', []): if include.startswith('external'): # This path is relative to the Android root. Leave it alone. rel_include = include else: # As with source, the input path will be relative to gyp/, but Android # wants relative to dest_dir. rel_include = os.path.relpath(include, os.pardir) rel_include = os.path.relpath(rel_include, dest_dir) # No need to include the base directory. if rel_include is os.curdir: continue rel_include = os.path.join('$(LOCAL_PATH)', rel_include) # Remove a trailing slash, if present. 
if rel_include.endswith('/'): rel_include = rel_include[:-1] var_dict.LOCAL_C_INCLUDES.add(rel_include) # For the top level, libskia, include directories should be exported. # FIXME (scroggo): Do not hard code this. if current_target_name == 'libskia': var_dict.LOCAL_EXPORT_C_INCLUDE_DIRS.add(rel_include) for define in d.get('defines', []): var_dict.DEFINES.add(define) def parse_gypd(var_dict, path, dest_dir, desired_targets=None): """Parse a gypd file. Open a file that consists of python dictionaries representing build targets. Parse those dictionaries using parse_dictionary. Recursively parses referenced files. Args: var_dict: VarsDict object for storing the result of the parse. path: Path to gypd file. dest_dir: Destination for the eventual Android.mk that will be created from this parse, relative to Skia trunk. Used to determine path for source files and include directories. desired_targets: List of targets to be parsed from this file. If empty, parse all targets. """ d = {} with open(path, 'r') as f: # Read the entire file as a dictionary d = eval(f.read()) # The gypd file is structured such that the top level dictionary has an entry # named 'targets' for target in d['targets']: target_name = target['target_name'] if target_name in var_dict.KNOWN_TARGETS: # Avoid circular dependencies continue if desired_targets and target_name not in desired_targets: # Our caller does not depend on this one continue # Add it to our known targets so we don't parse it again var_dict.KNOWN_TARGETS.add(target_name) parse_dictionary(var_dict, target, target_name, dest_dir)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import pipes import random import re from ansible import constants as C from ansible.errors import AnsibleError from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.template import Templar from ansible.utils.boolean import boolean from ansible.utils.unicode import to_unicode __all__ = ['PlayContext'] SU_PROMPT_LOCALIZATIONS = [ 'Password', '암호', 'パスワード', 'Adgangskode', 'Contraseña', 'Contrasenya', 'Hasło', 'Heslo', 'Jelszó', 'Lösenord', 'Mật khẩu', 'Mot de passe', 'Parola', 'Parool', 'Pasahitza', 'Passord', 'Passwort', 'Salasana', 'Sandi', 'Senha', 'Wachtwoord', 'ססמה', 'Лозинка', 'Парола', 'Пароль', 'गुप्तशब्द', 'शब्दकूट', 'సంకేతపదము', 'හස්පදය', '密码', '密碼', ] # the magic variable mapping dictionary below is used to translate # host/inventory variables to fields in the PlayContext # object. The dictionary values are tuples, to account for aliases # in variable names. 
MAGIC_VARIABLE_MAPPING = dict( connection = ('ansible_connection',), remote_addr = ('ansible_ssh_host', 'ansible_host'), remote_user = ('ansible_ssh_user', 'ansible_user'), port = ('ansible_ssh_port', 'ansible_port'), password = ('ansible_ssh_pass', 'ansible_password'), private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'), shell = ('ansible_shell_type',), become = ('ansible_become',), become_method = ('ansible_become_method',), become_user = ('ansible_become_user',), become_pass = ('ansible_become_password','ansible_become_pass'), become_exe = ('ansible_become_exe',), become_flags = ('ansible_become_flags',), sudo = ('ansible_sudo',), sudo_user = ('ansible_sudo_user',), sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'), sudo_exe = ('ansible_sudo_exe',), sudo_flags = ('ansible_sudo_flags',), su = ('ansible_su',), su_user = ('ansible_su_user',), su_pass = ('ansible_su_password', 'ansible_su_pass'), su_exe = ('ansible_su_exe',), su_flags = ('ansible_su_flags',), ) SU_PROMPT_LOCALIZATIONS = [ 'Password', '암호', 'パスワード', 'Adgangskode', 'Contraseña', 'Contrasenya', 'Hasło', 'Heslo', 'Jelszó', 'Lösenord', 'Mật khẩu', 'Mot de passe', 'Parola', 'Parool', 'Pasahitza', 'Passord', 'Passwort', 'Salasana', 'Sandi', 'Senha', 'Wachtwoord', 'ססמה', 'Лозинка', 'Парола', 'Пароль', 'गुप्तशब्द', 'शब्दकूट', 'సంకేతపదము', 'හස්පදය', '密码', '密碼', ] TASK_ATTRIBUTE_OVERRIDES = ( 'become', 'become_user', 'become_pass', 'become_method', 'connection', 'delegate_to', 'no_log', 'remote_user', ) class PlayContext(Base): ''' This class is used to consolidate the connection information for hosts in a play and child tasks, where the task may override some connection/authentication information. 
''' # connection fields, some are inherited from Base: # (connection, port, remote_user, environment, no_log) _remote_addr = FieldAttribute(isa='string') _password = FieldAttribute(isa='string') _private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE) _timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT) _shell = FieldAttribute(isa='string') # privilege escalation fields _become = FieldAttribute(isa='bool') _become_method = FieldAttribute(isa='string') _become_user = FieldAttribute(isa='string') _become_pass = FieldAttribute(isa='string') _become_exe = FieldAttribute(isa='string') _become_flags = FieldAttribute(isa='string') _prompt = FieldAttribute(isa='string') # backwards compatibility fields for sudo/su _sudo_exe = FieldAttribute(isa='string') _sudo_flags = FieldAttribute(isa='string') _sudo_pass = FieldAttribute(isa='string') _su_exe = FieldAttribute(isa='string') _su_flags = FieldAttribute(isa='string') _su_pass = FieldAttribute(isa='string') # general flags _verbosity = FieldAttribute(isa='int', default=0) _only_tags = FieldAttribute(isa='set', default=set()) _skip_tags = FieldAttribute(isa='set', default=set()) _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') _step = FieldAttribute(isa='bool', default=False) _diff = FieldAttribute(isa='bool', default=False) def __init__(self, play=None, options=None, passwords=None): super(PlayContext, self).__init__() if passwords is None: passwords = {} self.password = passwords.get('conn_pass','') self.become_pass = passwords.get('become_pass','') # set options before play to allow play to override them if options: self.set_options(options) if play: self.set_play(play) def set_play(self, play): ''' Configures this connection information instance with data from the play class. 
''' if play.connection: self.connection = play.connection if play.remote_user: self.remote_user = play.remote_user if play.port: self.port = int(play.port) if play.become is not None: self.become = play.become if play.become_method: self.become_method = play.become_method if play.become_user: self.become_user = play.become_user # non connection related self.no_log = play.no_log if play.force_handlers is not None: self.force_handlers = play.force_handlers def set_options(self, options): ''' Configures this connection information instance with data from options specified by the user on the command line. These have a lower precedence than those set on the play or host. ''' if options.connection: self.connection = options.connection self.remote_user = options.remote_user self.private_key_file = options.private_key_file # privilege escalation self.become = options.become self.become_method = options.become_method self.become_user = options.become_user # general flags (should we move out?) if options.verbosity: self.verbosity = options.verbosity #if options.no_log: # self.no_log = boolean(options.no_log) if options.check: self.check_mode = boolean(options.check) if hasattr(options, 'force_handlers') and options.force_handlers: self.force_handlers = boolean(options.force_handlers) if hasattr(options, 'step') and options.step: self.step = boolean(options.step) if hasattr(options, 'start_at_task') and options.start_at_task: self.start_at_task = to_unicode(options.start_at_task) if hasattr(options, 'diff') and options.diff: self.diff = boolean(options.diff) # get the tag info from options, converting a comma-separated list # of values into a proper list if need be. 
We check to see if the # options have the attribute, as it is not always added via the CLI if hasattr(options, 'tags'): if isinstance(options.tags, list): self.only_tags.update(options.tags) elif isinstance(options.tags, basestring): self.only_tags.update(options.tags.split(',')) if len(self.only_tags) == 0: self.only_tags = set(['all']) if hasattr(options, 'skip_tags'): if isinstance(options.skip_tags, list): self.skip_tags.update(options.skip_tags) elif isinstance(options.skip_tags, basestring): self.skip_tags.update(options.skip_tags.split(',')) def set_task_and_variable_override(self, task, variables): ''' Sets attributes from the task if they are set, which will override those from the play. ''' new_info = self.copy() # loop through a subset of attributes on the task object and set # connection fields based on their values for attr in TASK_ATTRIBUTE_OVERRIDES: if hasattr(task, attr): attr_val = getattr(task, attr) if attr_val is not None: setattr(new_info, attr, attr_val) # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this # connection info object with 'magic' variables from the variable list for (attr, variable_names) in MAGIC_VARIABLE_MAPPING.iteritems(): for variable_name in variable_names: if variable_name in variables: setattr(new_info, attr, variables[variable_name]) # make sure we get port defaults if needed if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None: new_info.port = int(C.DEFAULT_REMOTE_PORT) # become legacy updates if not new_info.become_pass: if new_info.become_method == 'sudo' and new_info.sudo_pass: setattr(new_info, 'become_pass', new_info.sudo_pass) elif new_info.become_method == 'su' and new_info.su_pass: setattr(new_info, 'become_pass', new_info.su_pass) return new_info def make_become_cmd(self, cmd, executable=None): """ helper function to create privilege escalation commands """ prompt = None success_key = None if executable is None: executable = C.DEFAULT_EXECUTABLE if self.become: becomecmd = None 
randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) success_key = 'BECOME-SUCCESS-%s' % randbits #executable = executable or '$SHELL' success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd)) if self.become_method == 'sudo': # Rather than detect if sudo wants a password this time, -k makes sudo always ask for # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted # string to the user's shell. We loop reading output until we see the randomly-generated # sudo prompt set with the -p option. prompt = '[sudo via ansible, key=%s] password: ' % randbits exe = self.become_exe or self.sudo_exe or 'sudo' flags = self.become_flags or self.sudo_flags or '' becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd) elif self.become_method == 'su': def detect_su_prompt(data): SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' 
for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE) return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data)) prompt = detect_su_prompt exe = self.become_exe or self.su_exe or 'su' flags = self.become_flags or self.su_flags or '' becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd) elif self.become_method == 'pbrun': prompt='assword:' exe = self.become_exe or 'pbrun' flags = self.become_flags or '' becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd) elif self.become_method == 'pfexec': exe = self.become_exe or 'pfexec' flags = self.become_flags or '' # No user as it uses it's own exec_attr to figure it out becomecmd = '%s %s "%s"' % (exe, flags, success_cmd) else: raise AnsibleError("Privilege escalation method not found: %s" % self.become_method) self.prompt = prompt self.success_key = success_key return ('%s -c ' % executable) + pipes.quote(becomecmd) return cmd def update_vars(self, variables): ''' Adds 'magic' variables relating to connections to the variable dictionary provided. In case users need to access from the play, this is a legacy from runner. ''' #FIXME: remove password? possibly add become/sudo settings for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file']: if special_var not in variables: for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): if special_var in varnames: variables[special_var] = getattr(self, prop)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 """MXNet: a concise, fast and flexible framework for deep learning.""" from __future__ import absolute_import from .context import Context, current_context, cpu, gpu, cpu_pinned from . import engine from .base import MXNetError from . import base from . import contrib from . import ndarray from . import ndarray as nd from . import name # use mx.sym as short for symbol from . import symbol as sym from . import symbol from . import symbol_doc from . import io from . import recordio from . import operator # use mx.rnd as short for mx.random from . import random as rnd from . import random from . import optimizer from . import model from . import metric from . import notebook from . import initializer # use mx.init as short for mx.initializer from . import initializer as init from . import visualization # use viz as short for mx.ndarray from . import visualization as viz from . import callback # from . import misc from . import lr_scheduler # use mx.kv as short for kvstore from . import kvstore as kv from . import kvstore_server # Runtime compile module from . import rtc # Attribute scope to add attributes to symbolic graphs from .attribute import AttrScope from . 
import monitor from . import monitor as mon from . import torch from . import torch as th from . import profiler from . import log from . import module from . import module as mod from . import image from . import image as img from . import test_utils from . import rnn from . import gluon __version__ = base.__version__
unknown
codeparrot/codeparrot-clean
// This is the implementation of Python atomic operations using C++11 or C11 // atomics. Note that the pyatomic_gcc.h implementation is preferred for GCC // compatible compilers, even if they support C++11 atomics. #ifndef Py_ATOMIC_STD_H # error "this header file must not be included directly" #endif #ifdef __cplusplus extern "C++" { # include <atomic> } # define _Py_USING_STD using namespace std # define _Atomic(tp) atomic<tp> #else # define _Py_USING_STD # include <stdatomic.h> #endif // --- _Py_atomic_add -------------------------------------------------------- static inline int _Py_atomic_add_int(int *obj, int value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(int)*)obj, value); } static inline int8_t _Py_atomic_add_int8(int8_t *obj, int8_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(int8_t)*)obj, value); } static inline int16_t _Py_atomic_add_int16(int16_t *obj, int16_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(int16_t)*)obj, value); } static inline int32_t _Py_atomic_add_int32(int32_t *obj, int32_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(int32_t)*)obj, value); } static inline int64_t _Py_atomic_add_int64(int64_t *obj, int64_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(int64_t)*)obj, value); } static inline intptr_t _Py_atomic_add_intptr(intptr_t *obj, intptr_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(intptr_t)*)obj, value); } static inline unsigned int _Py_atomic_add_uint(unsigned int *obj, unsigned int value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(unsigned int)*)obj, value); } static inline uint8_t _Py_atomic_add_uint8(uint8_t *obj, uint8_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(uint8_t)*)obj, value); } static inline uint16_t _Py_atomic_add_uint16(uint16_t *obj, uint16_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(uint16_t)*)obj, value); } static inline uint32_t _Py_atomic_add_uint32(uint32_t *obj, uint32_t value) { _Py_USING_STD; 
return atomic_fetch_add((_Atomic(uint32_t)*)obj, value); } static inline uint64_t _Py_atomic_add_uint64(uint64_t *obj, uint64_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(uint64_t)*)obj, value); } static inline uintptr_t _Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(uintptr_t)*)obj, value); } static inline Py_ssize_t _Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value) { _Py_USING_STD; return atomic_fetch_add((_Atomic(Py_ssize_t)*)obj, value); } // --- _Py_atomic_compare_exchange ------------------------------------------- static inline int _Py_atomic_compare_exchange_int(int *obj, int *expected, int desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(int)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(int8_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(int16_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(int32_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(int64_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(intptr_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(unsigned 
int)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(uint8_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(uint16_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(uint32_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(uint64_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(uintptr_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(Py_ssize_t)*)obj, expected, desired); } static inline int _Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired) { _Py_USING_STD; return atomic_compare_exchange_strong((_Atomic(void *)*)obj, (void **)expected, desired); } // --- _Py_atomic_exchange --------------------------------------------------- static inline int _Py_atomic_exchange_int(int *obj, int value) { _Py_USING_STD; return atomic_exchange((_Atomic(int)*)obj, value); } static inline int8_t _Py_atomic_exchange_int8(int8_t *obj, int8_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(int8_t)*)obj, value); } static inline int16_t _Py_atomic_exchange_int16(int16_t *obj, int16_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(int16_t)*)obj, value); } 
static inline int32_t _Py_atomic_exchange_int32(int32_t *obj, int32_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(int32_t)*)obj, value); } static inline int64_t _Py_atomic_exchange_int64(int64_t *obj, int64_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(int64_t)*)obj, value); } static inline intptr_t _Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(intptr_t)*)obj, value); } static inline unsigned int _Py_atomic_exchange_uint(unsigned int *obj, unsigned int value) { _Py_USING_STD; return atomic_exchange((_Atomic(unsigned int)*)obj, value); } static inline uint8_t _Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(uint8_t)*)obj, value); } static inline uint16_t _Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(uint16_t)*)obj, value); } static inline uint32_t _Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(uint32_t)*)obj, value); } static inline uint64_t _Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(uint64_t)*)obj, value); } static inline uintptr_t _Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(uintptr_t)*)obj, value); } static inline Py_ssize_t _Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value) { _Py_USING_STD; return atomic_exchange((_Atomic(Py_ssize_t)*)obj, value); } static inline void* _Py_atomic_exchange_ptr(void *obj, void *value) { _Py_USING_STD; return atomic_exchange((_Atomic(void *)*)obj, value); } // --- _Py_atomic_and -------------------------------------------------------- static inline uint8_t _Py_atomic_and_uint8(uint8_t *obj, uint8_t value) { _Py_USING_STD; return atomic_fetch_and((_Atomic(uint8_t)*)obj, value); } static inline uint16_t _Py_atomic_and_uint16(uint16_t *obj, 
uint16_t value) { _Py_USING_STD; return atomic_fetch_and((_Atomic(uint16_t)*)obj, value); } static inline uint32_t _Py_atomic_and_uint32(uint32_t *obj, uint32_t value) { _Py_USING_STD; return atomic_fetch_and((_Atomic(uint32_t)*)obj, value); } static inline uint64_t _Py_atomic_and_uint64(uint64_t *obj, uint64_t value) { _Py_USING_STD; return atomic_fetch_and((_Atomic(uint64_t)*)obj, value); } static inline uintptr_t _Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; return atomic_fetch_and((_Atomic(uintptr_t)*)obj, value); } // --- _Py_atomic_or --------------------------------------------------------- static inline uint8_t _Py_atomic_or_uint8(uint8_t *obj, uint8_t value) { _Py_USING_STD; return atomic_fetch_or((_Atomic(uint8_t)*)obj, value); } static inline uint16_t _Py_atomic_or_uint16(uint16_t *obj, uint16_t value) { _Py_USING_STD; return atomic_fetch_or((_Atomic(uint16_t)*)obj, value); } static inline uint32_t _Py_atomic_or_uint32(uint32_t *obj, uint32_t value) { _Py_USING_STD; return atomic_fetch_or((_Atomic(uint32_t)*)obj, value); } static inline uint64_t _Py_atomic_or_uint64(uint64_t *obj, uint64_t value) { _Py_USING_STD; return atomic_fetch_or((_Atomic(uint64_t)*)obj, value); } static inline uintptr_t _Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; return atomic_fetch_or((_Atomic(uintptr_t)*)obj, value); } // --- _Py_atomic_load ------------------------------------------------------- static inline int _Py_atomic_load_int(const int *obj) { _Py_USING_STD; return atomic_load((const _Atomic(int)*)obj); } static inline int8_t _Py_atomic_load_int8(const int8_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(int8_t)*)obj); } static inline int16_t _Py_atomic_load_int16(const int16_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(int16_t)*)obj); } static inline int32_t _Py_atomic_load_int32(const int32_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(int32_t)*)obj); } static inline int64_t 
_Py_atomic_load_int64(const int64_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(int64_t)*)obj); } static inline intptr_t _Py_atomic_load_intptr(const intptr_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(intptr_t)*)obj); } static inline uint8_t _Py_atomic_load_uint8(const uint8_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(uint8_t)*)obj); } static inline uint16_t _Py_atomic_load_uint16(const uint16_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(uint32_t)*)obj); } static inline uint32_t _Py_atomic_load_uint32(const uint32_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(uint32_t)*)obj); } static inline uint64_t _Py_atomic_load_uint64(const uint64_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(uint64_t)*)obj); } static inline uintptr_t _Py_atomic_load_uintptr(const uintptr_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(uintptr_t)*)obj); } static inline unsigned int _Py_atomic_load_uint(const unsigned int *obj) { _Py_USING_STD; return atomic_load((const _Atomic(unsigned int)*)obj); } static inline Py_ssize_t _Py_atomic_load_ssize(const Py_ssize_t *obj) { _Py_USING_STD; return atomic_load((const _Atomic(Py_ssize_t)*)obj); } static inline void* _Py_atomic_load_ptr(const void *obj) { _Py_USING_STD; return atomic_load((const _Atomic(void*)*)obj); } // --- _Py_atomic_load_relaxed ----------------------------------------------- static inline int _Py_atomic_load_int_relaxed(const int *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(int)*)obj, memory_order_relaxed); } static inline char _Py_atomic_load_char_relaxed(const char *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(char)*)obj, memory_order_relaxed); } static inline unsigned char _Py_atomic_load_uchar_relaxed(const unsigned char *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(unsigned char)*)obj, memory_order_relaxed); } static inline short _Py_atomic_load_short_relaxed(const short 
*obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(short)*)obj, memory_order_relaxed); } static inline unsigned short _Py_atomic_load_ushort_relaxed(const unsigned short *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(unsigned short)*)obj, memory_order_relaxed); } static inline long _Py_atomic_load_long_relaxed(const long *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(long)*)obj, memory_order_relaxed); } static inline float _Py_atomic_load_float_relaxed(const float *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(float)*)obj, memory_order_relaxed); } static inline double _Py_atomic_load_double_relaxed(const double *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(double)*)obj, memory_order_relaxed); } static inline int8_t _Py_atomic_load_int8_relaxed(const int8_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(int8_t)*)obj, memory_order_relaxed); } static inline int16_t _Py_atomic_load_int16_relaxed(const int16_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(int16_t)*)obj, memory_order_relaxed); } static inline int32_t _Py_atomic_load_int32_relaxed(const int32_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(int32_t)*)obj, memory_order_relaxed); } static inline int64_t _Py_atomic_load_int64_relaxed(const int64_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(int64_t)*)obj, memory_order_relaxed); } static inline intptr_t _Py_atomic_load_intptr_relaxed(const intptr_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(intptr_t)*)obj, memory_order_relaxed); } static inline uint8_t _Py_atomic_load_uint8_relaxed(const uint8_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uint8_t)*)obj, memory_order_relaxed); } static inline uint16_t _Py_atomic_load_uint16_relaxed(const uint16_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uint16_t)*)obj, 
memory_order_relaxed); } static inline uint32_t _Py_atomic_load_uint32_relaxed(const uint32_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uint32_t)*)obj, memory_order_relaxed); } static inline uint64_t _Py_atomic_load_uint64_relaxed(const uint64_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uint64_t)*)obj, memory_order_relaxed); } static inline uintptr_t _Py_atomic_load_uintptr_relaxed(const uintptr_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uintptr_t)*)obj, memory_order_relaxed); } static inline unsigned int _Py_atomic_load_uint_relaxed(const unsigned int *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(unsigned int)*)obj, memory_order_relaxed); } static inline Py_ssize_t _Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj, memory_order_relaxed); } static inline void* _Py_atomic_load_ptr_relaxed(const void *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(void*)*)obj, memory_order_relaxed); } static inline unsigned long long _Py_atomic_load_ullong_relaxed(const unsigned long long *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(unsigned long long)*)obj, memory_order_relaxed); } static inline long long _Py_atomic_load_llong_relaxed(const long long *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(long long)*)obj, memory_order_relaxed); } // --- _Py_atomic_store ------------------------------------------------------ static inline void _Py_atomic_store_int(int *obj, int value) { _Py_USING_STD; atomic_store((_Atomic(int)*)obj, value); } static inline void _Py_atomic_store_int8(int8_t *obj, int8_t value) { _Py_USING_STD; atomic_store((_Atomic(int8_t)*)obj, value); } static inline void _Py_atomic_store_int16(int16_t *obj, int16_t value) { _Py_USING_STD; atomic_store((_Atomic(int16_t)*)obj, value); } static inline void _Py_atomic_store_int32(int32_t *obj, 
int32_t value) { _Py_USING_STD; atomic_store((_Atomic(int32_t)*)obj, value); } static inline void _Py_atomic_store_int64(int64_t *obj, int64_t value) { _Py_USING_STD; atomic_store((_Atomic(int64_t)*)obj, value); } static inline void _Py_atomic_store_intptr(intptr_t *obj, intptr_t value) { _Py_USING_STD; atomic_store((_Atomic(intptr_t)*)obj, value); } static inline void _Py_atomic_store_uint8(uint8_t *obj, uint8_t value) { _Py_USING_STD; atomic_store((_Atomic(uint8_t)*)obj, value); } static inline void _Py_atomic_store_uint16(uint16_t *obj, uint16_t value) { _Py_USING_STD; atomic_store((_Atomic(uint16_t)*)obj, value); } static inline void _Py_atomic_store_uint32(uint32_t *obj, uint32_t value) { _Py_USING_STD; atomic_store((_Atomic(uint32_t)*)obj, value); } static inline void _Py_atomic_store_uint64(uint64_t *obj, uint64_t value) { _Py_USING_STD; atomic_store((_Atomic(uint64_t)*)obj, value); } static inline void _Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; atomic_store((_Atomic(uintptr_t)*)obj, value); } static inline void _Py_atomic_store_uint(unsigned int *obj, unsigned int value) { _Py_USING_STD; atomic_store((_Atomic(unsigned int)*)obj, value); } static inline void _Py_atomic_store_ptr(void *obj, void *value) { _Py_USING_STD; atomic_store((_Atomic(void*)*)obj, value); } static inline void _Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value) { _Py_USING_STD; atomic_store((_Atomic(Py_ssize_t)*)obj, value); } // --- _Py_atomic_store_relaxed ---------------------------------------------- static inline void _Py_atomic_store_int_relaxed(int *obj, int value) { _Py_USING_STD; atomic_store_explicit((_Atomic(int)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(int8_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value) { _Py_USING_STD; 
atomic_store_explicit((_Atomic(int16_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(int32_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(int64_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(intptr_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uint8_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uint16_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uint32_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uint64_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uintptr_t)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value) { _Py_USING_STD; atomic_store_explicit((_Atomic(unsigned int)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_ptr_relaxed(void *obj, void *value) { _Py_USING_STD; atomic_store_explicit((_Atomic(void*)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, 
value, memory_order_relaxed); } static inline void _Py_atomic_store_ullong_relaxed(unsigned long long *obj, unsigned long long value) { _Py_USING_STD; atomic_store_explicit((_Atomic(unsigned long long)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_char_relaxed(char *obj, char value) { _Py_USING_STD; atomic_store_explicit((_Atomic(char)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_uchar_relaxed(unsigned char *obj, unsigned char value) { _Py_USING_STD; atomic_store_explicit((_Atomic(unsigned char)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_short_relaxed(short *obj, short value) { _Py_USING_STD; atomic_store_explicit((_Atomic(short)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_ushort_relaxed(unsigned short *obj, unsigned short value) { _Py_USING_STD; atomic_store_explicit((_Atomic(unsigned short)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_long_relaxed(long *obj, long value) { _Py_USING_STD; atomic_store_explicit((_Atomic(long)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_float_relaxed(float *obj, float value) { _Py_USING_STD; atomic_store_explicit((_Atomic(float)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_double_relaxed(double *obj, double value) { _Py_USING_STD; atomic_store_explicit((_Atomic(double)*)obj, value, memory_order_relaxed); } static inline void _Py_atomic_store_llong_relaxed(long long *obj, long long value) { _Py_USING_STD; atomic_store_explicit((_Atomic(long long)*)obj, value, memory_order_relaxed); } // --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------ static inline void * _Py_atomic_load_ptr_acquire(const void *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(void*)*)obj, memory_order_acquire); } static inline uintptr_t _Py_atomic_load_uintptr_acquire(const uintptr_t *obj) { _Py_USING_STD; return 
atomic_load_explicit((const _Atomic(uintptr_t)*)obj, memory_order_acquire); } static inline void _Py_atomic_store_ptr_release(void *obj, void *value) { _Py_USING_STD; atomic_store_explicit((_Atomic(void*)*)obj, value, memory_order_release); } static inline void _Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uintptr_t)*)obj, value, memory_order_release); } static inline void _Py_atomic_store_int_release(int *obj, int value) { _Py_USING_STD; atomic_store_explicit((_Atomic(int)*)obj, value, memory_order_release); } static inline void _Py_atomic_store_int8_release(int8_t *obj, int8_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(int8_t)*)obj, value, memory_order_release); } static inline void _Py_atomic_store_uint_release(unsigned int *obj, unsigned int value) { _Py_USING_STD; atomic_store_explicit((_Atomic(unsigned int)*)obj, value, memory_order_release); } static inline void _Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value, memory_order_release); } static inline int _Py_atomic_load_int_acquire(const int *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(int)*)obj, memory_order_acquire); } static inline void _Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uint32_t)*)obj, value, memory_order_release); } static inline void _Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value) { _Py_USING_STD; atomic_store_explicit((_Atomic(uint64_t)*)obj, value, memory_order_release); } static inline uint64_t _Py_atomic_load_uint64_acquire(const uint64_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uint64_t)*)obj, memory_order_acquire); } static inline uint32_t _Py_atomic_load_uint32_acquire(const uint32_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(uint32_t)*)obj, memory_order_acquire); } static 
inline Py_ssize_t _Py_atomic_load_ssize_acquire(const Py_ssize_t *obj) { _Py_USING_STD; return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj, memory_order_acquire); } // --- _Py_atomic_fence ------------------------------------------------------ static inline void _Py_atomic_fence_seq_cst(void) { _Py_USING_STD; atomic_thread_fence(memory_order_seq_cst); } static inline void _Py_atomic_fence_acquire(void) { _Py_USING_STD; atomic_thread_fence(memory_order_acquire); } static inline void _Py_atomic_fence_release(void) { _Py_USING_STD; atomic_thread_fence(memory_order_release); }
c
github
https://github.com/python/cpython
Include/cpython/pyatomic_std.h
class Neuron: 'Represents a neuron' currentVal = 0 threshold = 1 connections = [] identity = 0 def displayVal(self): print(self.identity,":",self.currentVal) for c in self.connections: print(self.identity," connected to ",c.end.identity) def addSynapse(self,destination): print(self.identity," connecting to ",destination.identity) connection = Synapse() connection.start = self connection.end = destination self.connections.append(connection) def fire(self): if self.connections.__len__()==0 : print(self.currentVal) else: for connection in self.connections: print(self.identity," firing on ",connection.end.identity) connection.end.currentVal+=connection.modifier*self.currentVal self.currentVal = self.currentVal/2 def go(self): if(self.currentVal>self.threshold): self.fire() self.currentVal-=.05 if(self.currentVal<0): self.currentVal=0 class Synapse: start = Neuron() end = Neuron() modifier = .75 numLayers = 5 nodesPerLayer = 3 layers = [] first = Neuron() firstlayer = [first] layers.append(firstlayer) for i in range(1, numLayers-1, 1): hiddenLayer = [] for j in range(0, nodesPerLayer, 1): new = Neuron() new.identity = i*nodesPerLayer+j hiddenLayer.append(new) print(layers[len(layers)-1]) for k in layers[i - 1]: k.addSynapse(new) layers.append(hiddenLayer) finaLayer = [] final = Neuron() final.identity=numLayers*nodesPerLayer+1 finaLayer.append(final) for k in layers[layers.__len__()-1]: k.addSynapse(final) layers.append(finaLayer) all = [] for i in range(0,layers.__len__(),1): for j in layers[i]: all.append(j) for i in range(0,10,1): first.currentVal+= input("Thing:") for n in all: n.go() n.displayVal()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_vcmp_guest short_description: Manages vCMP guests on a BIG-IP description: - Manages vCMP guests on a BIG-IP. This functionality only exists on actual hardware and must be enabled by provisioning C(vcmp) with the C(bigip_provision) module. version_added: 2.5 options: name: description: - The name of the vCMP guest to manage. required: True vlans: description: - VLANs that the guest uses to communicate with other guests, the host, and with the external network. The available VLANs in the list are those that are currently configured on the vCMP host. - The order of these VLANs is not important; in fact, it's ignored. This module will order the VLANs for you automatically. Therefore, if you deliberately re-order them in subsequent tasks, you will find that this module will B(not) register a change. initial_image: description: - Specifies the base software release ISO image file for installing the TMOS hypervisor instance and any licensed BIG-IP modules onto the guest's virtual disk. When creating a new guest, this parameter is required. mgmt_network: description: - Specifies the method by which the management address is used in the vCMP guest. - When C(bridged), specifies that the guest can communicate with the vCMP host's management network. - When C(isolated), specifies that the guest is isolated from the vCMP host's management network. In this case, the only way that a guest can communicate with the vCMP host is through the console port or through a self IP address on the guest that allows traffic through port 22. 
- When C(host only), prevents the guest from installing images and hotfixes other than those provided by the hypervisor. - If the guest setting is C(isolated) or C(host only), the C(mgmt_address) does not apply. - Concerning mode changing, changing C(bridged) to C(isolated) causes the vCMP host to remove all of the guest's management interfaces from its bridged management network. This immediately disconnects the guest's VMs from the physical management network. Changing C(isolated) to C(bridged) causes the vCMP host to dynamically add the guest's management interfaces to the bridged management network. This immediately connects all of the guest's VMs to the physical management network. Changing this property while the guest is in the C(configured) or C(provisioned) state has no immediate effect. choices: - bridged - isolated - host only delete_virtual_disk: description: - When C(state) is C(absent), will additionally delete the virtual disk associated with the vCMP guest. By default, this value is C(no). type: bool default: no mgmt_address: description: - Specifies the IP address, and subnet or subnet mask that you use to access the guest when you want to manage a module running within the guest. This parameter is required if the C(mgmt_network) parameter is C(bridged). - When creating a new guest, if you do not specify a network or network mask, a default of C(/24) (C(255.255.255.0)) will be assumed. mgmt_route: description: - Specifies the gateway address for the C(mgmt_address). - If this value is not specified when creating a new guest, it is set to C(none). - The value C(none) can be used during an update to remove this value. state: description: - The state of the vCMP guest on the system. Each state implies the actions of all states before it. - When C(configured), guarantees that the vCMP guest exists with the provided attributes. Additionally, ensures that the vCMP guest is turned off. 
- When C(disabled), behaves the same as C(configured) the name of this state is just a convenience for the user that is more understandable. - When C(provisioned), will ensure that the guest is created and installed. This state will not start the guest; use C(deployed) for that. This state is one step beyond C(present) as C(present) will not install the guest; only setup the configuration for it to be installed. - When C(present), ensures the guest is properly provisioned and starts the guest so that it is in a running state. - When C(absent), removes the vCMP from the system. default: "present" choices: - configured - disabled - provisioned - present - absent cores_per_slot: description: - Specifies the number of cores that the system allocates to the guest. - Each core represents a portion of CPU and memory. Therefore, the amount of memory allocated per core is directly tied to the amount of CPU. This amount of memory varies per hardware platform type. - The number you can specify depends on the type of hardware you have. - In the event of a reboot, the system persists the guest to the same slot on which it ran prior to the reboot. partition: description: - Device partition to manage resources on. default: Common number_of_slots: description: - Specifies the number of slots for the system to use for creating the guest. - This value dictates how many cores a guest is allocated from each slot that it is assigned to. - Possible values are dependent on the type of blades being used in this cluster. - The default value depends on the type of blades being used in this cluster. version_added: 2.7 min_number_of_slots: description: - Specifies the minimum number of slots that the guest must be assigned to in order to deploy. - This field dictates the number of slots that the guest must be assigned to. - If at the end of any allocation attempt the guest is not assigned to at least this many slots, the attempt fails and the change that initiated it is reverted. 
- A guest's C(min_number_of_slots) value cannot be greater than its C(number_of_slots). version_added: 2.7 allowed_slots: description: - Contains those slots that the guest is allowed to be assigned to. - When the host determines which slots this guest should be assigned to, only slots in this list will be considered. - This is a good way to force guests to be assigned only to particular slots, or, by configuring disjoint C(allowed_slots) on two guests, that those guests are never assigned to the same slot. - By default this list includes every available slot in the cluster. This means, by default, the guest may be assigned to any slot. version_added: 2.7 notes: - This module can take a lot of time to deploy vCMP guests. This is an intrinsic limitation of the vCMP system because it is booting real VMs on the BIG-IP device. This boot time is very similar in length to the time it takes to boot VMs on any other virtualization platform; public or private. - When BIG-IP starts, the VMs are booted sequentially; not in parallel. This means that it is not unusual for a vCMP host with many guests to take a long time (60+ minutes) to reboot and bring all the guests online. The BIG-IP chassis will be available before all vCMP guests are online. 
extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create a vCMP guest bigip_vcmp_guest: name: foo password: secret server: lb.mydomain.com state: present user: admin mgmt_network: bridge mgmt_address: 10.20.30.40/24 delegate_to: localhost - name: Create a vCMP guest with specific VLANs bigip_vcmp_guest: name: foo password: secret server: lb.mydomain.com state: present user: admin mgmt_network: bridge mgmt_address: 10.20.30.40/24 vlans: - vlan1 - vlan2 delegate_to: localhost - name: Remove vCMP guest and disk bigip_vcmp_guest: name: guest1 state: absent delete_virtual_disk: yes register: result ''' RETURN = r''' vlans: description: The VLANs assigned to the vCMP guest, in their full path format. returned: changed type: list sample: ['/Common/vlan1', '/Common/vlan2'] ''' import time from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from collections import namedtuple try: from library.module_utils.network.f5.bigip import HAS_F5SDK from library.module_utils.network.f5.bigip import F5Client from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.ipaddress import is_valid_ip from library.module_utils.compat.ipaddress import ip_interface try: from library.module_utils.network.f5.common import iControlUnexpectedHTTPError from f5.utils.responses.handlers import Stats except ImportError: HAS_F5SDK = False except ImportError: from ansible.module_utils.network.f5.bigip import HAS_F5SDK from ansible.module_utils.network.f5.bigip import F5Client from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import 
AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.ipaddress import is_valid_ip from ansible.module_utils.compat.ipaddress import ip_interface try: from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError from f5.utils.responses.handlers import Stats except ImportError: HAS_F5SDK = False class Parameters(AnsibleF5Parameters): api_map = { 'managementGw': 'mgmt_route', 'managementNetwork': 'mgmt_network', 'managementIp': 'mgmt_address', 'initialImage': 'initial_image', 'virtualDisk': 'virtual_disk', 'coresPerSlot': 'cores_per_slot', 'slots': 'number_of_slots', 'minSlots': 'min_number_of_slots', 'allowedSlots': 'allowed_slots', } api_attributes = [ 'vlans', 'managementNetwork', 'managementIp', 'initialImage', 'managementGw', 'state', 'coresPerSlot', 'slots', 'minSlots', 'allowedSlots', ] returnables = [ 'vlans', 'mgmt_network', 'mgmt_address', 'initial_image', 'mgmt_route', 'name', 'cores_per_slot', 'number_of_slots', 'min_number_of_slots', 'allowed_slots', ] updatables = [ 'vlans', 'mgmt_network', 'mgmt_address', 'initial_image', 'mgmt_route', 'state', 'cores_per_slot', 'number_of_slots', 'min_number_of_slots', 'allowed_slots', ] def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result @property def mgmt_route(self): if self._values['mgmt_route'] is None: return None elif self._values['mgmt_route'] == 'none': return 'none' if is_valid_ip(self._values['mgmt_route']): return self._values['mgmt_route'] else: raise F5ModuleError( "The specified 'mgmt_route' is not a valid IP address" ) @property def mgmt_address(self): if self._values['mgmt_address'] is None: return None try: addr = ip_interface(u'%s' % 
str(self._values['mgmt_address'])) return str(addr.with_prefixlen) except ValueError: raise F5ModuleError( "The specified 'mgmt_address' is not a valid IP address" ) @property def mgmt_tuple(self): result = None Destination = namedtuple('Destination', ['ip', 'subnet']) try: parts = self._values['mgmt_address'].split('/') if len(parts) == 2: result = Destination(ip=parts[0], subnet=parts[1]) elif len(parts) < 2: result = Destination(ip=parts[0], subnet=None) else: raise F5ModuleError( "The provided mgmt_address is malformed." ) except ValueError: result = Destination(ip=None, subnet=None) return result @property def state(self): if self._values['state'] == 'present': return 'deployed' elif self._values['state'] in ['configured', 'disabled']: return 'configured' return self._values['state'] @property def vlans(self): if self._values['vlans'] is None: return None result = [fq_name(self.partition, x) for x in self._values['vlans']] result.sort() return result @property def initial_image(self): if self._values['initial_image'] is None: return None if self.initial_image_exists(self._values['initial_image']): return self._values['initial_image'] raise F5ModuleError( "The specified 'initial_image' does not exist on the remote device" ) def initial_image_exists(self, image): collection = self.client.api.tm.sys.software.images.get_collection() for resource in collection: if resource.name.startswith(image): return True return False @property def allowed_slots(self): if self._values['allowed_slots'] is None: return None result = self._values['allowed_slots'] result.sort() return result class ApiParameters(Parameters): pass class ModuleParameters(Parameters): pass class Changes(Parameters): pass class UsableChanges(Parameters): pass class ReportableChanges(Parameters): pass class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return 
self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def mgmt_address(self): want = self.want.mgmt_tuple if want.subnet is None: raise F5ModuleError( "A subnet must be specified when changing the mgmt_address" ) if self.want.mgmt_address != self.have.mgmt_address: return self.want.mgmt_address @property def allowed_slots(self): if self.want.allowed_slots is None: return None if self.have.allowed_slots is None: return self.want.allowed_slots if set(self.want.allowed_slots) != set(self.have.allowed_slots): return self.want.allowed_slots class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(client=self.client, params=self.module.params) self.changes = Changes() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): # lgtm [py/similar-function] diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): changed = False result = dict() state = self.want.state try: if state in ['configured', 'provisioned', 'deployed']: changed = self.present() elif state == "absent": changed = self.absent() except iControlUnexpectedHTTPError as e: raise F5ModuleError(str(e)) changes = self.changes.to_return() result.update(**changes) result.update(dict(changed=changed)) 
self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): result = self.client.api.tm.vcmp.guests.guest.exists( name=self.want.name ) return result def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True if self.changes.cores_per_slot: if not self.is_configured(): self.configure() self.update_on_device() if self.want.state == 'provisioned': self.provision() elif self.want.state == 'deployed': self.deploy() elif self.want.state == 'configured': self.configure() return True def remove(self): if self.module.check_mode: return True if self.want.delete_virtual_disk: self.have = self.read_current_from_device() self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") if self.want.delete_virtual_disk: self.remove_virtual_disk() return True def create(self): self._set_changed_options() if self.module.check_mode: return True if self.want.mgmt_tuple.subnet is None: self.want.update(dict( mgmt_address='{0}/255.255.255.0'.format(self.want.mgmt_tuple.ip) )) self.create_on_device() if self.want.state == 'provisioned': self.provision() elif self.want.state == 'deployed': self.deploy() elif self.want.state == 'configured': self.configure() return True def create_on_device(self): params = self.want.api_params() self.client.api.tm.vcmp.guests.guest.create( name=self.want.name, **params ) def update_on_device(self): params = self.changes.api_params() resource = self.client.api.tm.vcmp.guests.guest.load( name=self.want.name ) resource.modify(**params) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): resource = 
self.client.api.tm.vcmp.guests.guest.load( name=self.want.name ) if resource: resource.delete() def read_current_from_device(self): resource = self.client.api.tm.vcmp.guests.guest.load( name=self.want.name ) result = resource.attrs return ApiParameters(params=result) def remove_virtual_disk(self): if self.virtual_disk_exists(): return self.remove_virtual_disk_from_device() return False def virtual_disk_exists(self): """Checks if a virtual disk exists for a guest The virtual disk names can differ based on the device vCMP is installed on. For instance, on a shuttle-series device with no slots, you will see disks that resemble the following guest1.img On an 8-blade Viprion with slots though, you will see guest1.img/1 The "/1" in this case is the slot that it is a part of. This method looks for the virtual-disk without the trailing slot. Returns: bool: True on success. False otherwise. """ collection = self.client.api.tm.vcmp.virtual_disks.get_collection() for resource in collection: check = '{0}'.format(self.have.virtual_disk) if resource.name.startswith(check): return True return False def remove_virtual_disk_from_device(self): collection = self.client.api.tm.vcmp.virtual_disks.get_collection() for resource in collection: check = '{0}'.format(self.have.virtual_disk) if resource.name.startswith(check): resource.delete() return True return False def is_configured(self): """Checks to see if guest is disabled A disabled guest is fully disabled once their Stats go offline. Until that point they are still in the process of disabling. 
:return: """ try: res = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name) Stats(res.stats.load()) return False except iControlUnexpectedHTTPError as ex: if 'Object not found - ' in str(ex): return True raise def is_provisioned(self): try: res = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name) stats = Stats(res.stats.load()) if stats.stat['requestedState']['description'] == 'provisioned': if stats.stat['vmStatus']['description'] == 'stopped': return True except iControlUnexpectedHTTPError: pass return False def is_deployed(self): try: res = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name) stats = Stats(res.stats.load()) if stats.stat['requestedState']['description'] == 'deployed': if stats.stat['vmStatus']['description'] == 'running': return True except iControlUnexpectedHTTPError: pass return False def configure(self): if self.is_configured(): return False self.configure_on_device() self.wait_for_configured() return True def configure_on_device(self): resource = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name) resource.modify(state='configured') def wait_for_configured(self): nops = 0 while nops < 3: if self.is_configured(): nops += 1 time.sleep(1) def provision(self): if self.is_provisioned(): return False self.provision_on_device() self.wait_for_provisioned() def provision_on_device(self): resource = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name) resource.modify(state='provisioned') def wait_for_provisioned(self): nops = 0 while nops < 3: if self.is_provisioned(): nops += 1 time.sleep(1) def deploy(self): if self.is_deployed(): return False self.deploy_on_device() self.wait_for_deployed() def deploy_on_device(self): resource = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name) resource.modify(state='deployed') def wait_for_deployed(self): nops = 0 while nops < 3: if self.is_deployed(): nops += 1 time.sleep(1) class ArgumentSpec(object): def __init__(self): self.supports_check_mode 
= True argument_spec = dict( name=dict(required=True), vlans=dict(type='list'), mgmt_network=dict(choices=['bridged', 'isolated', 'host only']), mgmt_address=dict(), mgmt_route=dict(), initial_image=dict(), state=dict( default='present', choices=['configured', 'disabled', 'provisioned', 'absent', 'present'] ), delete_virtual_disk=dict( type='bool', default='no' ), cores_per_slot=dict(type='int'), number_of_slots=dict(type='int'), min_number_of_slots=dict(type='int'), allowed_slots=dict(type='list'), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.required_if = [ ['mgmt_network', 'bridged', ['mgmt_address']] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode ) if not HAS_F5SDK: module.fail_json(msg="The python f5-sdk module is required") try: client = F5Client(**module.params) mm = ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) module.exit_json(**results) except F5ModuleError as ex: cleanup_tokens(client) module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.internals; import org.apache.kafka.common.TopicPartition; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; /** * This class is a useful building block for doing fetch requests where topic partitions have to be rotated via * round-robin to ensure fairness and some level of determinism given the existence of a limit on the fetch response * size. Because the serialization of fetch requests is more efficient if all partitions for the same topic are grouped * together, we do such grouping in the method `set`. * * As partitions are moved to the end, the same topic may be repeated more than once. In the optimal case, a single * topic would "wrap around" and appear twice. However, as partitions are fetched in different orders and partition * leadership changes, we will deviate from the optimal. If this turns out to be an issue in practice, we can improve * it by tracking the partitions per node or calling `set` every so often. 
* * Note that this class is not thread-safe with the exception of {@link #size()} which returns the number of * partitions currently tracked. */ public class PartitionStates<S> { private final LinkedHashMap<TopicPartition, S> map = new LinkedHashMap<>(); private final Set<TopicPartition> partitionSetView = Collections.unmodifiableSet(map.keySet()); /* the number of partitions that are currently assigned available in a thread safe manner */ private volatile int size = 0; public PartitionStates() {} public void moveToEnd(TopicPartition topicPartition) { S state = map.remove(topicPartition); if (state != null) map.put(topicPartition, state); } public void updateAndMoveToEnd(TopicPartition topicPartition, S state) { map.remove(topicPartition); map.put(topicPartition, state); updateSize(); } public void update(TopicPartition topicPartition, S state) { map.put(topicPartition, state); updateSize(); } public void remove(TopicPartition topicPartition) { map.remove(topicPartition); updateSize(); } /** * Returns an unmodifiable view of the partitions in random order. * changes to this PartitionStates instance will be reflected in this view. */ public Set<TopicPartition> partitionSet() { return partitionSetView; } public void clear() { map.clear(); updateSize(); } public boolean contains(TopicPartition topicPartition) { return map.containsKey(topicPartition); } public Iterator<S> stateIterator() { return map.values().iterator(); } public void forEach(BiConsumer<TopicPartition, S> biConsumer) { map.forEach(biConsumer); } public Map<TopicPartition, S> partitionStateMap() { return Collections.unmodifiableMap(map); } /** * Returns the partition state values in order. */ public List<S> partitionStateValues() { return new ArrayList<>(map.values()); } public S stateValue(TopicPartition topicPartition) { return map.get(topicPartition); } /** * Get the number of partitions that are currently being tracked. This is thread-safe. 
*/ public int size() { return size; } /** * Update the builder to have the received map as its state (i.e. the previous state is cleared). The builder will * "batch by topic", so if we have a, b and c, each with two partitions, we may end up with something like the * following (the order of topics and partitions within topics is dependent on the iteration order of the received * map): a0, a1, b1, b0, c0, c1. */ public void set(Map<TopicPartition, S> partitionToState) { map.clear(); update(partitionToState); updateSize(); } private void updateSize() { size = map.size(); } private void update(Map<TopicPartition, S> partitionToState) { LinkedHashMap<String, List<TopicPartition>> topicToPartitions = new LinkedHashMap<>(); for (TopicPartition tp : partitionToState.keySet()) { List<TopicPartition> partitions = topicToPartitions.computeIfAbsent(tp.topic(), k -> new ArrayList<>()); partitions.add(tp); } for (Map.Entry<String, List<TopicPartition>> entry : topicToPartitions.entrySet()) { for (TopicPartition tp : entry.getValue()) { S state = partitionToState.get(tp); map.put(tp, state); } } } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java
""" Provides a function for importing a git repository into the lms instance when using a mongo modulestore """ import os import re import StringIO import subprocess import logging from django.conf import settings from django.core import management from django.core.management.base import CommandError from django.utils import timezone from django.utils.translation import ugettext as _ import mongoengine from dashboard.models import CourseImportLog from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from opaque_keys.edx.locations import SlashSeparatedCourseKey log = logging.getLogger(__name__) GIT_REPO_DIR = getattr(settings, 'GIT_REPO_DIR', '/edx/var/app/edxapp/course_repos') GIT_IMPORT_STATIC = getattr(settings, 'GIT_IMPORT_STATIC', True) class GitImportError(Exception): """ Exception class for handling the typical errors in a git import. """ NO_DIR = _("Path {0} doesn't exist, please create it, " "or configure a different path with " "GIT_REPO_DIR").format(GIT_REPO_DIR) URL_BAD = _('Non usable git url provided. Expecting something like:' ' git@github.com:mitocw/edx4edx_lite.git') BAD_REPO = _('Unable to get git log') CANNOT_PULL = _('git clone or pull failed!') XML_IMPORT_FAILED = _('Unable to run import command.') UNSUPPORTED_STORE = _('The underlying module store does not support import.') # Translators: This is an error message when they ask for a # particular version of a git repository and that version isn't # available from the remote source they specified REMOTE_BRANCH_MISSING = _('The specified remote branch is not available.') # Translators: Error message shown when they have asked for a git # repository branch, a specific version within a repository, that # doesn't exist, or there is a problem changing to it. CANNOT_BRANCH = _('Unable to switch to specified branch. Please check ' 'your branch name.') def cmd_log(cmd, cwd): """ Helper function to redirect stderr to stdout and log the command used along with the output. 
Will raise subprocess.CalledProcessError if command doesn't return 0, and returns the command's output. """ output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT) log.debug(u'Command was: %r. Working directory was: %r', ' '.join(cmd), cwd) log.debug(u'Command output was: %r', output) return output def switch_branch(branch, rdir): """ This will determine how to change the branch of the repo, and then use the appropriate git commands to do so. Raises an appropriate GitImportError exception if there is any issues with changing branches. """ # Get the latest remote try: cmd_log(['git', 'fetch', ], rdir) except subprocess.CalledProcessError as ex: log.exception('Unable to fetch remote: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) # Check if the branch is available from the remote. cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ] try: output = cmd_log(cmd, rdir) except subprocess.CalledProcessError as ex: log.exception('Getting a list of remote branches failed: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) if branch not in output: raise GitImportError(GitImportError.REMOTE_BRANCH_MISSING) # Check it the remote branch has already been made locally cmd = ['git', 'branch', '-a', ] try: output = cmd_log(cmd, rdir) except subprocess.CalledProcessError as ex: log.exception('Getting a list of local branches failed: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) branches = [] for line in output.split('\n'): branches.append(line.replace('*', '').strip()) if branch not in branches: # Checkout with -b since it is remote only cmd = ['git', 'checkout', '--force', '--track', '-b', branch, 'origin/{0}'.format(branch), ] try: cmd_log(cmd, rdir) except subprocess.CalledProcessError as ex: log.exception('Unable to checkout remote branch: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) # Go ahead and reset hard to the newest version of the branch now that we know 
# it is local. try: cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir) except subprocess.CalledProcessError as ex: log.exception('Unable to reset to branch: %r', ex.output) raise GitImportError(GitImportError.CANNOT_BRANCH) def add_repo(repo, rdir_in, branch=None): """ This will add a git repo into the mongo modulestore. If branch is left as None, it will fetch the most recent version of the current branch. """ # pylint: disable=too-many-statements # Set defaults even if it isn't defined in settings mongo_db = { 'host': 'localhost', 'port': 27017, 'user': '', 'password': '', 'db': 'xlog', } # Allow overrides if hasattr(settings, 'MONGODB_LOG'): for config_item in ['host', 'user', 'password', 'db', 'port']: mongo_db[config_item] = settings.MONGODB_LOG.get( config_item, mongo_db[config_item]) if not os.path.isdir(GIT_REPO_DIR): raise GitImportError(GitImportError.NO_DIR) # pull from git if not (repo.endswith('.git') or repo.startswith(('http:', 'https:', 'git:', 'file:'))): raise GitImportError(GitImportError.URL_BAD) if rdir_in: rdir = os.path.basename(rdir_in) else: rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0] log.debug('rdir = %s', rdir) rdirp = '{0}/{1}'.format(GIT_REPO_DIR, rdir) if os.path.exists(rdirp): log.info('directory already exists, doing a git pull instead ' 'of git clone') cmd = ['git', 'pull', ] cwd = rdirp else: cmd = ['git', 'clone', repo, ] cwd = GIT_REPO_DIR cwd = os.path.abspath(cwd) try: ret_git = cmd_log(cmd, cwd=cwd) except subprocess.CalledProcessError as ex: log.exception('Error running git pull: %r', ex.output) raise GitImportError(GitImportError.CANNOT_PULL) if branch: switch_branch(branch, rdirp) # get commit id cmd = ['git', 'log', '-1', '--format=%H', ] try: commit_id = cmd_log(cmd, cwd=rdirp) except subprocess.CalledProcessError as ex: log.exception('Unable to get git log: %r', ex.output) raise GitImportError(GitImportError.BAD_REPO) ret_git += '\nCommit ID: {0}'.format(commit_id) # get branch cmd = ['git', 
'symbolic-ref', '--short', 'HEAD', ] try: branch = cmd_log(cmd, cwd=rdirp) except subprocess.CalledProcessError as ex: # I can't discover a way to excercise this, but git is complex # so still logging and raising here in case. log.exception('Unable to determine branch: %r', ex.output) raise GitImportError(GitImportError.BAD_REPO) ret_git += '{0}Branch: {1}'.format(' \n', branch) # Get XML logging logger and capture debug to parse results output = StringIO.StringIO() import_log_handler = logging.StreamHandler(output) import_log_handler.setLevel(logging.DEBUG) logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course', 'xmodule.modulestore.xml', 'xmodule.seq_module', ] loggers = [] for logger_name in logger_names: logger = logging.getLogger(logger_name) logger.setLevel(logging.DEBUG) logger.addHandler(import_log_handler) loggers.append(logger) try: management.call_command('import', GIT_REPO_DIR, rdir, nostatic=not GIT_IMPORT_STATIC) except CommandError: raise GitImportError(GitImportError.XML_IMPORT_FAILED) except NotImplementedError: raise GitImportError(GitImportError.UNSUPPORTED_STORE) ret_import = output.getvalue() # Remove handler hijacks for logger in loggers: logger.setLevel(logging.NOTSET) logger.removeHandler(import_log_handler) course_key = None location = 'unknown' # extract course ID from output of import-command-run and make symlink # this is needed in order for custom course scripts to work match = re.search(r'(?ms)===> IMPORTING courselike (\S+)', ret_import) if match: course_id = match.group(1) try: course_key = CourseKey.from_string(course_id) except InvalidKeyError: course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) cdir = '{0}/{1}'.format(GIT_REPO_DIR, course_key.course) log.debug('Studio course dir = %s', cdir) if os.path.exists(cdir) and not os.path.islink(cdir): log.debug(' -> exists, but is not symlink') log.debug(subprocess.check_output(['ls', '-l', ], cwd=os.path.abspath(cdir))) try: 
os.rmdir(os.path.abspath(cdir)) except OSError: log.exception('Failed to remove course directory') if not os.path.exists(cdir): log.debug(' -> creating symlink between %s and %s', rdirp, cdir) try: os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir)) except OSError: log.exception('Unable to create course symlink') log.debug(subprocess.check_output(['ls', '-l', ], cwd=os.path.abspath(cdir))) # store import-command-run output in mongo mongouri = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(**mongo_db) try: if mongo_db['user'] and mongo_db['password']: mdb = mongoengine.connect(mongo_db['db'], host=mongouri) else: mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'], port=mongo_db['port']) except mongoengine.connection.ConnectionError: log.exception('Unable to connect to mongodb to save log, please ' 'check MONGODB_LOG settings') cil = CourseImportLog( course_id=course_key, location=location, repo_dir=rdir, created=timezone.now(), import_log=ret_import, git_log=ret_git, ) cil.save() log.debug('saved CourseImportLog for %s', cil.course_id) mdb.disconnect()
unknown
codeparrot/codeparrot-clean
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS rdatasets (an rdataset is a set of rdatas of a given type and class)""" import random from io import StringIO import struct import dns.exception import dns.rdatatype import dns.rdataclass import dns.rdata import dns.set from ._compat import string_types # define SimpleSet here for backwards compatibility SimpleSet = dns.set.Set class DifferingCovers(dns.exception.DNSException): """An attempt was made to add a DNS SIG/RRSIG whose covered type is not the same as that of the other rdatas in the rdataset.""" class IncompatibleTypes(dns.exception.DNSException): """An attempt was made to add DNS RR data of an incompatible type.""" class Rdataset(dns.set.Set): """A DNS rdataset. @ivar rdclass: The class of the rdataset @type rdclass: int @ivar rdtype: The type of the rdataset @type rdtype: int @ivar covers: The covered type. Usually this value is dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or dns.rdatatype.RRSIG, then the covers value will be the rdata type the SIG/RRSIG covers. The library treats the SIG and RRSIG types as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). 
This makes RRSIGs much easier to work with than if RRSIGs covering different rdata types were aggregated into a single RRSIG rdataset. @type covers: int @ivar ttl: The DNS TTL (Time To Live) value @type ttl: int """ __slots__ = ['rdclass', 'rdtype', 'covers', 'ttl'] def __init__(self, rdclass, rdtype, covers=dns.rdatatype.NONE): """Create a new rdataset of the specified class and type. @see: the description of the class instance variables for the meaning of I{rdclass} and I{rdtype}""" super(Rdataset, self).__init__() self.rdclass = rdclass self.rdtype = rdtype self.covers = covers self.ttl = 0 def _clone(self): obj = super(Rdataset, self)._clone() obj.rdclass = self.rdclass obj.rdtype = self.rdtype obj.covers = self.covers obj.ttl = self.ttl return obj def update_ttl(self, ttl): """Set the TTL of the rdataset to be the lesser of the set's current TTL or the specified TTL. If the set contains no rdatas, set the TTL to the specified TTL. @param ttl: The TTL @type ttl: int""" if len(self) == 0: self.ttl = ttl elif ttl < self.ttl: self.ttl = ttl def add(self, rd, ttl=None): """Add the specified rdata to the rdataset. If the optional I{ttl} parameter is supplied, then self.update_ttl(ttl) will be called prior to adding the rdata. @param rd: The rdata @type rd: dns.rdata.Rdata object @param ttl: The TTL @type ttl: int""" # # If we're adding a signature, do some special handling to # check that the signature covers the same type as the # other rdatas in this rdataset. If this is the first rdata # in the set, initialize the covers field. 
# if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype: raise IncompatibleTypes if ttl is not None: self.update_ttl(ttl) if self.rdtype == dns.rdatatype.RRSIG or \ self.rdtype == dns.rdatatype.SIG: covers = rd.covers() if len(self) == 0 and self.covers == dns.rdatatype.NONE: self.covers = covers elif self.covers != covers: raise DifferingCovers if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0: self.clear() super(Rdataset, self).add(rd) def union_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).union_update(other) def intersection_update(self, other): self.update_ttl(other.ttl) super(Rdataset, self).intersection_update(other) def update(self, other): """Add all rdatas in other to self. @param other: The rdataset from which to update @type other: dns.rdataset.Rdataset object""" self.update_ttl(other.ttl) super(Rdataset, self).update(other) def __repr__(self): if self.covers == 0: ctext = '' else: ctext = '(' + dns.rdatatype.to_text(self.covers) + ')' return '<DNS ' + dns.rdataclass.to_text(self.rdclass) + ' ' + \ dns.rdatatype.to_text(self.rdtype) + ctext + ' rdataset>' def __str__(self): return self.to_text() def __eq__(self, other): """Two rdatasets are equal if they have the same class, type, and covers, and contain the same rdata. @rtype: bool""" if not isinstance(other, Rdataset): return False if self.rdclass != other.rdclass or \ self.rdtype != other.rdtype or \ self.covers != other.covers: return False return super(Rdataset, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def to_text(self, name=None, origin=None, relativize=True, override_rdclass=None, **kw): """Convert the rdataset into DNS master file format. @see: L{dns.name.Name.choose_relativity} for more information on how I{origin} and I{relativize} determine the way names are emitted. Any additional keyword arguments are passed on to the rdata to_text() method. @param name: If name is not None, emit a RRs with I{name} as the owner name. 
@type name: dns.name.Name object @param origin: The origin for relative names, or None. @type origin: dns.name.Name object @param relativize: True if names should names be relativized @type relativize: bool""" if name is not None: name = name.choose_relativity(origin, relativize) ntext = str(name) pad = ' ' else: ntext = '' pad = '' s = StringIO() if override_rdclass is not None: rdclass = override_rdclass else: rdclass = self.rdclass if len(self) == 0: # # Empty rdatasets are used for the question section, and in # some dynamic updates, so we don't need to print out the TTL # (which is meaningless anyway). # s.write(u'%s%s%s %s\n' % (ntext, pad, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype))) else: for rd in self: s.write(u'%s%s%d %s %s %s\n' % (ntext, pad, self.ttl, dns.rdataclass.to_text(rdclass), dns.rdatatype.to_text(self.rdtype), rd.to_text(origin=origin, relativize=relativize, **kw))) # # We strip off the final \n for the caller's convenience in printing # return s.getvalue()[:-1] def to_wire(self, name, file, compress=None, origin=None, override_rdclass=None, want_shuffle=True): """Convert the rdataset to wire format. @param name: The owner name of the RRset that will be emitted @type name: dns.name.Name object @param file: The file to which the wire format data will be appended @type file: file @param compress: The compression table to use; the default is None. @type compress: dict @param origin: The origin to be appended to any relative names when they are emitted. The default is None. 
@returns: the number of records emitted @rtype: int """ if override_rdclass is not None: rdclass = override_rdclass want_shuffle = False else: rdclass = self.rdclass file.seek(0, 2) if len(self) == 0: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, 0, 0) file.write(stuff) return 1 else: if want_shuffle: l = list(self) random.shuffle(l) else: l = self for rd in l: name.to_wire(file, compress, origin) stuff = struct.pack("!HHIH", self.rdtype, rdclass, self.ttl, 0) file.write(stuff) start = file.tell() rd.to_wire(file, compress, origin) end = file.tell() assert end - start < 65536 file.seek(start - 2) stuff = struct.pack("!H", end - start) file.write(stuff) file.seek(0, 2) return len(self) def match(self, rdclass, rdtype, covers): """Returns True if this rdataset matches the specified class, type, and covers""" if self.rdclass == rdclass and \ self.rdtype == rdtype and \ self.covers == covers: return True return False def from_text_list(rdclass, rdtype, ttl, text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified list of rdatas in text format. @rtype: dns.rdataset.Rdataset object """ if isinstance(rdclass, string_types): rdclass = dns.rdataclass.from_text(rdclass) if isinstance(rdtype, string_types): rdtype = dns.rdatatype.from_text(rdtype) r = Rdataset(rdclass, rdtype) r.update_ttl(ttl) for t in text_rdatas: rd = dns.rdata.from_text(r.rdclass, r.rdtype, t) r.add(rd) return r def from_text(rdclass, rdtype, ttl, *text_rdatas): """Create an rdataset with the specified class, type, and TTL, and with the specified rdatas in text format. @rtype: dns.rdataset.Rdataset object """ return from_text_list(rdclass, rdtype, ttl, text_rdatas) def from_rdata_list(ttl, rdatas): """Create an rdataset with the specified TTL, and with the specified list of rdata objects. 
@rtype: dns.rdataset.Rdataset object """ if len(rdatas) == 0: raise ValueError("rdata list must not be empty") r = None for rd in rdatas: if r is None: r = Rdataset(rd.rdclass, rd.rdtype) r.update_ttl(ttl) r.add(rd) return r def from_rdata(ttl, *rdatas): """Create an rdataset with the specified TTL, and with the specified rdata objects. @rtype: dns.rdataset.Rdataset object """ return from_rdata_list(ttl, rdatas)
unknown
codeparrot/codeparrot-clean
use crate::config::native_libs::{NativeLibParts, split_native_lib_value}; #[test] fn split() { // This is a unit test for some implementation details, so consider deleting // it if it gets in the way. use NativeLibParts as P; let examples = &[ ("", P { kind: None, modifiers: None, name: "", new_name: None }), ("foo", P { kind: None, modifiers: None, name: "foo", new_name: None }), ("foo:", P { kind: None, modifiers: None, name: "foo", new_name: Some("") }), ("foo:bar", P { kind: None, modifiers: None, name: "foo", new_name: Some("bar") }), (":bar", P { kind: None, modifiers: None, name: "", new_name: Some("bar") }), ("kind=foo", P { kind: Some("kind"), modifiers: None, name: "foo", new_name: None }), (":mods=foo", P { kind: Some(""), modifiers: Some("mods"), name: "foo", new_name: None }), ( ":mods=:bar", P { kind: Some(""), modifiers: Some("mods"), name: "", new_name: Some("bar") }, ), ( "kind=foo:bar", P { kind: Some("kind"), modifiers: None, name: "foo", new_name: Some("bar") }, ), ( "kind:mods=foo", P { kind: Some("kind"), modifiers: Some("mods"), name: "foo", new_name: None }, ), ( "kind:mods=foo:bar", P { kind: Some("kind"), modifiers: Some("mods"), name: "foo", new_name: Some("bar") }, ), ("::==::", P { kind: Some(""), modifiers: Some(":"), name: "=", new_name: Some(":") }), ("==::==", P { kind: Some(""), modifiers: None, name: "=", new_name: Some(":==") }), ]; for &(value, ref expected) in examples { println!("{value:?}"); let actual = split_native_lib_value(value); assert_eq!(&actual, expected); } }
rust
github
https://github.com/rust-lang/rust
compiler/rustc_session/src/config/native_libs/tests.rs
// // Helper library for querying WMI using its COM-based query API. // // Copyright (c) Microsoft Corporation // Licensed to PSF under a contributor agreement // // Version history // 2022-08: Initial contribution (Steve Dower) // clinic/_wmimodule.cpp.h uses internal pycore_modsupport.h API #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #define _WIN32_DCOM #include <Windows.h> #include <comdef.h> #include <Wbemidl.h> #include <propvarutil.h> #include <Python.h> #if _MSVC_LANG >= 202002L // We can use clinic directly when the C++ compiler supports C++20 #include "clinic/_wmimodule.cpp.h" #else // Cannot use clinic because of missing C++20 support, so create a simpler // API instead. This won't impact releases, so fine to omit the docstring. static PyObject *_wmi_exec_query_impl(PyObject *module, PyObject *query); #define _WMI_EXEC_QUERY_METHODDEF {"exec_query", _wmi_exec_query_impl, METH_O, NULL}, #endif /*[clinic input] module _wmi [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=7ca95dad1453d10d]*/ struct _query_data { LPCWSTR query; HANDLE writePipe; HANDLE readPipe; HANDLE initEvent; HANDLE connectEvent; }; static DWORD WINAPI _query_thread(LPVOID param) { IWbemLocator *locator = NULL; IWbemServices *services = NULL; IEnumWbemClassObject* enumerator = NULL; HRESULT hr = S_OK; BSTR bstrQuery = NULL; _query_data data = *(struct _query_data*)param; // gh-125315: Copy the query string first, so that if the main thread gives // up on waiting we aren't left with a dangling pointer (and a likely crash) bstrQuery = SysAllocString(data.query); if (!bstrQuery) { hr = HRESULT_FROM_WIN32(ERROR_NOT_ENOUGH_MEMORY); } if (SUCCEEDED(hr)) { hr = CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED); } if (FAILED(hr)) { CloseHandle(data.writePipe); if (bstrQuery) { SysFreeString(bstrQuery); } return (DWORD)hr; } hr = CoInitializeSecurity( NULL, -1, NULL, NULL, RPC_C_AUTHN_LEVEL_DEFAULT, RPC_C_IMP_LEVEL_IMPERSONATE, 
NULL, EOAC_NONE, NULL ); // gh-96684: CoInitializeSecurity will fail if another part of the app has // already called it. Hopefully they passed lenient enough settings that we // can complete the WMI query, so keep going. if (hr == RPC_E_TOO_LATE) { hr = 0; } if (SUCCEEDED(hr)) { hr = CoCreateInstance( CLSID_WbemLocator, 0, CLSCTX_INPROC_SERVER, IID_IWbemLocator, (LPVOID *)&locator ); } if (SUCCEEDED(hr) && !SetEvent(data.initEvent)) { hr = HRESULT_FROM_WIN32(GetLastError()); } if (SUCCEEDED(hr)) { hr = locator->ConnectServer( bstr_t(L"ROOT\\CIMV2"), NULL, NULL, 0, NULL, 0, 0, &services ); } if (SUCCEEDED(hr) && !SetEvent(data.connectEvent)) { hr = HRESULT_FROM_WIN32(GetLastError()); } if (SUCCEEDED(hr)) { hr = CoSetProxyBlanket( services, RPC_C_AUTHN_WINNT, RPC_C_AUTHZ_NONE, NULL, RPC_C_AUTHN_LEVEL_CALL, RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_NONE ); } if (SUCCEEDED(hr)) { hr = services->ExecQuery( bstr_t("WQL"), bstrQuery, WBEM_FLAG_FORWARD_ONLY | WBEM_FLAG_RETURN_IMMEDIATELY, NULL, &enumerator ); } // Okay, after all that, at this stage we should have an enumerator // to the query results and can start writing them to the pipe! 
IWbemClassObject *value = NULL; int startOfEnum = TRUE; int endOfEnum = FALSE; while (SUCCEEDED(hr) && !endOfEnum) { ULONG got = 0; DWORD written; hr = enumerator->Next(WBEM_INFINITE, 1, &value, &got); if (hr == WBEM_S_FALSE) { // Could be at the end, but still got a result this time endOfEnum = TRUE; hr = 0; break; } if (FAILED(hr) || got != 1 || !value) { continue; } if (!startOfEnum && !WriteFile(data.writePipe, (LPVOID)L"\0", 2, &written, NULL)) { hr = HRESULT_FROM_WIN32(GetLastError()); break; } startOfEnum = FALSE; // Okay, now we have each resulting object it's time to // enumerate its members hr = value->BeginEnumeration(0); if (FAILED(hr)) { value->Release(); break; } while (SUCCEEDED(hr)) { BSTR propName; VARIANT propValue; long flavor; hr = value->Next(0, &propName, &propValue, NULL, &flavor); if (hr == WBEM_S_NO_MORE_DATA) { hr = 0; break; } if (SUCCEEDED(hr) && (flavor & WBEM_FLAVOR_MASK_ORIGIN) != WBEM_FLAVOR_ORIGIN_SYSTEM) { WCHAR propStr[8192]; hr = VariantToString(propValue, propStr, sizeof(propStr) / sizeof(propStr[0])); if (SUCCEEDED(hr)) { DWORD cbStr1, cbStr2; cbStr1 = (DWORD)(wcslen(propName) * sizeof(propName[0])); cbStr2 = (DWORD)(wcslen(propStr) * sizeof(propStr[0])); if (!WriteFile(data.writePipe, propName, cbStr1, &written, NULL) || !WriteFile(data.writePipe, (LPVOID)L"=", 2, &written, NULL) || !WriteFile(data.writePipe, propStr, cbStr2, &written, NULL) || !WriteFile(data.writePipe, (LPVOID)L"\0", 2, &written, NULL) ) { hr = HRESULT_FROM_WIN32(GetLastError()); } } VariantClear(&propValue); SysFreeString(propName); } } value->EndEnumeration(); value->Release(); } if (bstrQuery) { SysFreeString(bstrQuery); } if (enumerator) { enumerator->Release(); } if (services) { services->Release(); } if (locator) { locator->Release(); } CoUninitialize(); CloseHandle(data.writePipe); return (DWORD)hr; } static DWORD wait_event(HANDLE event, DWORD timeout) { DWORD err = 0; switch (WaitForSingleObject(event, timeout)) { case WAIT_OBJECT_0: break; case 
WAIT_TIMEOUT: err = WAIT_TIMEOUT; break; default: err = GetLastError(); break; } return err; } /*[clinic input] @permit_long_docstring_body _wmi.exec_query query: unicode Runs a WMI query against the local machine. This returns a single string with 'name=value' pairs in a flat array separated by null characters. [clinic start generated code]*/ static PyObject * _wmi_exec_query_impl(PyObject *module, PyObject *query) /*[clinic end generated code: output=a62303d5bb5e003f input=621f5c50c56d06d0]*/ /*[clinic end generated code]*/ { HANDLE hThread = NULL; int err = 0; WCHAR buffer[8192]; DWORD offset = 0; DWORD bytesRead; struct _query_data data = {0}; if (PySys_Audit("_wmi.exec_query", "O", query) < 0) { return NULL; } data.query = PyUnicode_AsWideCharString(query, NULL); if (!data.query) { return NULL; } if (0 != _wcsnicmp(data.query, L"select ", 7)) { PyMem_Free((void *)data.query); PyErr_SetString(PyExc_ValueError, "only SELECT queries are supported"); return NULL; } Py_BEGIN_ALLOW_THREADS data.initEvent = CreateEvent(NULL, TRUE, FALSE, NULL); data.connectEvent = CreateEvent(NULL, TRUE, FALSE, NULL); if (!data.initEvent || !data.connectEvent || !CreatePipe(&data.readPipe, &data.writePipe, NULL, 0)) { err = GetLastError(); } else { hThread = CreateThread(NULL, 0, _query_thread, (LPVOID*)&data, 0, NULL); if (!hThread) { err = GetLastError(); // Normally the thread proc closes this handle, but since we never started // we need to close it here. CloseHandle(data.writePipe); } } // gh-112278: If current user doesn't have permission to query the WMI, the // function IWbemLocator::ConnectServer will hang for 5 seconds, and there // is no way to specify the timeout. So we use an Event object to simulate // a timeout. The initEvent will be set after COM initialization, it will // take a longer time when first initialized. The connectEvent will be set // after connected to WMI. 
if (!err) { err = wait_event(data.initEvent, 1000); if (!err) { err = wait_event(data.connectEvent, 100); } } while (!err) { if (ReadFile( data.readPipe, (LPVOID)&buffer[offset / sizeof(buffer[0])], sizeof(buffer) - offset, &bytesRead, NULL )) { offset += bytesRead; if (offset >= sizeof(buffer)) { err = ERROR_MORE_DATA; } } else { err = GetLastError(); } } if (data.readPipe) { CloseHandle(data.readPipe); } if (hThread) { // Allow the thread some time to clean up int thread_err; switch (WaitForSingleObject(hThread, 100)) { case WAIT_OBJECT_0: // Thread ended cleanly if (!GetExitCodeThread(hThread, (LPDWORD)&thread_err)) { thread_err = GetLastError(); } break; case WAIT_TIMEOUT: // Probably stuck - there's not much we can do, unfortunately thread_err = WAIT_TIMEOUT; break; default: thread_err = GetLastError(); break; } // An error on our side is more likely to be relevant than one from // the thread, but if we don't have one on our side we'll take theirs. if (err == 0 || err == ERROR_BROKEN_PIPE) { err = thread_err; } CloseHandle(hThread); } CloseHandle(data.initEvent); CloseHandle(data.connectEvent); hThread = NULL; Py_END_ALLOW_THREADS PyMem_Free((void *)data.query); if (err == ERROR_MORE_DATA) { PyErr_Format(PyExc_OSError, "Query returns more than %zd characters", Py_ARRAY_LENGTH(buffer)); return NULL; } else if (err) { PyErr_SetFromWindowsErr(err); return NULL; } if (!offset) { return PyUnicode_FromStringAndSize(NULL, 0); } return PyUnicode_FromWideChar(buffer, offset / sizeof(buffer[0]) - 1); } static PyMethodDef wmi_functions[] = { _WMI_EXEC_QUERY_METHODDEF { NULL, NULL, 0, NULL } }; static PyModuleDef_Slot wmi_slots[] = { {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL}, }; static PyModuleDef wmi_def = { PyModuleDef_HEAD_INIT, "_wmi", NULL, // doc 0, // m_size wmi_functions, // m_methods wmi_slots, // m_slots }; extern "C" { PyMODINIT_FUNC PyInit__wmi(void) { return PyModuleDef_Init(&wmi_def); } }
cpp
github
https://github.com/python/cpython
PC/_wmimodule.cpp
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals # Allow direct execution import os import sys import unittest sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # Various small unit tests import io import json import xml.etree.ElementTree from youtube_dl.utils import ( age_restricted, args_to_str, clean_html, DateRange, detect_exe_version, encodeFilename, escape_rfc3986, escape_url, ExtractorError, find_xpath_attr, fix_xml_ampersands, InAdvancePagedList, intlist_to_bytes, is_html, js_to_json, limit_length, OnDemandPagedList, orderedSet, parse_duration, parse_filesize, parse_iso8601, read_batch_urls, sanitize_filename, sanitize_path, prepend_extension, replace_extension, shell_quote, smuggle_url, str_to_int, strip_jsonp, struct_unpack, timeconvert, unescapeHTML, unified_strdate, unsmuggle_url, uppercase_escape, lowercase_escape, url_basename, urlencode_postdata, version_tuple, xpath_with_ns, xpath_text, render_table, match_str, parse_dfxp_time_expr, dfxp2srt, ) class TestUtil(unittest.TestCase): def test_timeconvert(self): self.assertTrue(timeconvert('') is None) self.assertTrue(timeconvert('bougrg') is None) def test_sanitize_filename(self): self.assertEqual(sanitize_filename('abc'), 'abc') self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e') self.assertEqual(sanitize_filename('123'), '123') self.assertEqual('abc_de', sanitize_filename('abc/de')) self.assertFalse('/' in sanitize_filename('abc/de///')) self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de')) self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|')) self.assertEqual('yes no', sanitize_filename('yes? 
no')) self.assertEqual('this - that', sanitize_filename('this: that')) self.assertEqual(sanitize_filename('AT&T'), 'AT&T') aumlaut = 'ä' self.assertEqual(sanitize_filename(aumlaut), aumlaut) tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430' self.assertEqual(sanitize_filename(tests), tests) self.assertEqual( sanitize_filename('New World record at 0:12:34'), 'New World record at 0_12_34') self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf') self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf') self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf') self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf') forbidden = '"\0\\/' for fc in forbidden: for fbc in forbidden: self.assertTrue(fbc not in sanitize_filename(fc)) def test_sanitize_filename_restricted(self): self.assertEqual(sanitize_filename('abc', restricted=True), 'abc') self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e') self.assertEqual(sanitize_filename('123', restricted=True), '123') self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True)) self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True)) self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True)) self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True)) self.assertEqual('yes_no', sanitize_filename('yes? 
no', restricted=True)) self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True)) tests = 'a\xe4b\u4e2d\u56fd\u7684c' self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c') self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#' for fc in forbidden: for fbc in forbidden: self.assertTrue(fbc not in sanitize_filename(fc, restricted=True)) # Handle a common case more neatly self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song') self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech') # .. but make sure the file name is never empty self.assertTrue(sanitize_filename('-', restricted=True) != '') self.assertTrue(sanitize_filename(':', restricted=True) != '') def test_sanitize_ids(self): self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw') self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw') self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI') def test_sanitize_path(self): if sys.platform != 'win32': return self.assertEqual(sanitize_path('abc'), 'abc') self.assertEqual(sanitize_path('abc/def'), 'abc\\def') self.assertEqual(sanitize_path('abc\\def'), 'abc\\def') self.assertEqual(sanitize_path('abc|def'), 'abc#def') self.assertEqual(sanitize_path('<>:"|?*'), '#######') self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def') self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def') self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc') self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc') self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc') self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc') self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f') 
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc') self.assertEqual( sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'), 'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s') self.assertEqual( sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'), 'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part') self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#') self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def') self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#') self.assertEqual(sanitize_path('../abc'), '..\\abc') self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc') self.assertEqual(sanitize_path('./abc'), 'abc') self.assertEqual(sanitize_path('./../abc'), '..\\abc') def test_prepend_extension(self): self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext') self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext') self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp') self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp') self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext') def test_replace_extension(self): self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp') self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp') self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp') self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp') self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp') self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp') def test_ordered_set(self): self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) self.assertEqual(orderedSet([]), []) 
self.assertEqual(orderedSet([1]), [1]) # keep the list ordered self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) def test_unescape_html(self): self.assertEqual(unescapeHTML('%20;'), '%20;') self.assertEqual(unescapeHTML('&#x2F;'), '/') self.assertEqual(unescapeHTML('&#47;'), '/') self.assertEqual( unescapeHTML('&eacute;'), 'é') def test_daterange(self): _20century = DateRange("19000101", "20000101") self.assertFalse("17890714" in _20century) _ac = DateRange("00010101") self.assertTrue("19690721" in _ac) _firstmilenium = DateRange(end="10000101") self.assertTrue("07110427" in _firstmilenium) def test_unified_dates(self): self.assertEqual(unified_strdate('December 21, 2010'), '20101221') self.assertEqual(unified_strdate('8/7/2009'), '20090708') self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214') self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011') self.assertEqual(unified_strdate('1968 12 10'), '19681210') self.assertEqual(unified_strdate('1968-12-10'), '19681210') self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128') self.assertEqual( unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False), '20141126') self.assertEqual( unified_strdate('2/2/2015 6:47:40 PM', day_first=False), '20150202') self.assertEqual(unified_strdate('25-09-2014'), '20140925') def test_find_xpath_attr(self): testxml = '''<root> <node/> <node x="a"/> <node x="a" y="c" /> <node x="b" y="d" /> <node x="" /> </root>''' doc = xml.etree.ElementTree.fromstring(testxml) self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None) self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None) self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None) self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None) self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3]) 
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2]) self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2]) self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3]) self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4]) def test_xpath_with_ns(self): testxml = '''<root xmlns:media="http://example.com/"> <media:song> <media:author>The Author</media:author> <url>http://server.com/download.mp3</url> </media:song> </root>''' doc = xml.etree.ElementTree.fromstring(testxml) find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'})) self.assertTrue(find('media:song') is not None) self.assertEqual(find('media:song/media:author').text, 'The Author') self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3') def test_xpath_text(self): testxml = '''<root> <div> <p>Foo</p> </div> </root>''' doc = xml.etree.ElementTree.fromstring(testxml) self.assertEqual(xpath_text(doc, 'div/p'), 'Foo') self.assertTrue(xpath_text(doc, 'div/bar') is None) self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True) def test_smuggle_url(self): data = {"ö": "ö", "abc": [3]} url = 'https://foo.bar/baz?x=y#a' smug_url = smuggle_url(url, data) unsmug_url, unsmug_data = unsmuggle_url(smug_url) self.assertEqual(url, unsmug_url) self.assertEqual(data, unsmug_data) res_url, res_data = unsmuggle_url(url) self.assertEqual(res_url, url) self.assertEqual(res_data, None) def test_shell_quote(self): args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')] self.assertEqual(shell_quote(args), """ffmpeg -i 'ñ€ß'"'"'.mp4'""") def test_str_to_int(self): self.assertEqual(str_to_int('123,456'), 123456) self.assertEqual(str_to_int('123.456'), 123456) def test_url_basename(self): self.assertEqual(url_basename('http://foo.de/'), '') self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz') self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz') 
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz') self.assertEqual( url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'), 'trailer.mp4') def test_parse_duration(self): self.assertEqual(parse_duration(None), None) self.assertEqual(parse_duration(False), None) self.assertEqual(parse_duration('invalid'), None) self.assertEqual(parse_duration('1'), 1) self.assertEqual(parse_duration('1337:12'), 80232) self.assertEqual(parse_duration('9:12:43'), 33163) self.assertEqual(parse_duration('12:00'), 720) self.assertEqual(parse_duration('00:01:01'), 61) self.assertEqual(parse_duration('x:y'), None) self.assertEqual(parse_duration('3h11m53s'), 11513) self.assertEqual(parse_duration('3h 11m 53s'), 11513) self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513) self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513) self.assertEqual(parse_duration('62m45s'), 3765) self.assertEqual(parse_duration('6m59s'), 419) self.assertEqual(parse_duration('49s'), 49) self.assertEqual(parse_duration('0h0m0s'), 0) self.assertEqual(parse_duration('0m0s'), 0) self.assertEqual(parse_duration('0s'), 0) self.assertEqual(parse_duration('01:02:03.05'), 3723.05) self.assertEqual(parse_duration('T30M38S'), 1838) self.assertEqual(parse_duration('5 s'), 5) self.assertEqual(parse_duration('3 min'), 180) self.assertEqual(parse_duration('2.5 hours'), 9000) self.assertEqual(parse_duration('02:03:04'), 7384) self.assertEqual(parse_duration('01:02:03:04'), 93784) self.assertEqual(parse_duration('1 hour 3 minutes'), 3780) self.assertEqual(parse_duration('87 Min.'), 5220) def test_fix_xml_ampersands(self): self.assertEqual( fix_xml_ampersands('"&x=y&z=a'), '"&amp;x=y&amp;z=a') self.assertEqual( fix_xml_ampersands('"&amp;x=y&wrong;&z=a'), '"&amp;x=y&amp;wrong;&amp;z=a') self.assertEqual( fix_xml_ampersands('&amp;&apos;&gt;&lt;&quot;'), '&amp;&apos;&gt;&lt;&quot;') self.assertEqual( fix_xml_ampersands('&#1234;&#x1abC;'), '&#1234;&#x1abC;') 
self.assertEqual(fix_xml_ampersands('&#&#'), '&amp;#&amp;#') def test_paged_list(self): def testPL(size, pagesize, sliceargs, expected): def get_page(pagenum): firstid = pagenum * pagesize upto = min(size, pagenum * pagesize + pagesize) for i in range(firstid, upto): yield i pl = OnDemandPagedList(get_page, pagesize) got = pl.getslice(*sliceargs) self.assertEqual(got, expected) iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize) got = iapl.getslice(*sliceargs) self.assertEqual(got, expected) testPL(5, 2, (), [0, 1, 2, 3, 4]) testPL(5, 2, (1,), [1, 2, 3, 4]) testPL(5, 2, (2,), [2, 3, 4]) testPL(5, 2, (4,), [4]) testPL(5, 2, (0, 3), [0, 1, 2]) testPL(5, 2, (1, 4), [1, 2, 3]) testPL(5, 2, (2, 99), [2, 3, 4]) testPL(5, 2, (20, 99), []) def test_struct_unpack(self): self.assertEqual(struct_unpack('!B', b'\x00'), (0,)) def test_read_batch_urls(self): f = io.StringIO('''\xef\xbb\xbf foo bar\r baz # More after this line\r ; or after this bam''') self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam']) def test_urlencode_postdata(self): data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'}) self.assertTrue(isinstance(data, bytes)) def test_parse_iso8601(self): self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266) self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266) def test_strip_jsonp(self): stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);') d = json.loads(stripped) self.assertEqual(d, [{"id": "532cb", "x": 3}]) stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc') d = json.loads(stripped) self.assertEqual(d, {'STATUS': 'OK'}) def test_uppercase_escape(self): self.assertEqual(uppercase_escape('aä'), 'aä') self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐') def test_lowercase_escape(self): 
self.assertEqual(lowercase_escape('aä'), 'aä') self.assertEqual(lowercase_escape('\\u0026'), '&') def test_limit_length(self): self.assertEqual(limit_length(None, 12), None) self.assertEqual(limit_length('foo', 12), 'foo') self.assertTrue( limit_length('foo bar baz asd', 12).startswith('foo bar')) self.assertTrue('...' in limit_length('foo bar baz asd', 12)) def test_escape_rfc3986(self): reserved = "!*'();:@&=+$,/?#[]" unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~' self.assertEqual(escape_rfc3986(reserved), reserved) self.assertEqual(escape_rfc3986(unreserved), unreserved) self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82') self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82') self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar') self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar') def test_escape_url(self): self.assertEqual( escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'), 'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4' ) self.assertEqual( escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'), 'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290' ) self.assertEqual( escape_url('http://тест.рф/фрагмент'), 'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82' ) self.assertEqual( escape_url('http://тест.рф/абв?абв=абв#абв'), 'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2' ) self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0') def test_js_to_json_realworld(self): inp = '''{ 'clip':{'provider':'pseudo'} }''' self.assertEqual(js_to_json(inp), '''{ "clip":{"provider":"pseudo"} }''') json.loads(js_to_json(inp)) inp 
= '''{ 'playlist':[{'controls':{'all':null}}] }''' self.assertEqual(js_to_json(inp), '''{ "playlist":[{"controls":{"all":null}}] }''') inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"' json_code = js_to_json(inp) self.assertEqual(json.loads(json_code), json.loads(inp)) def test_js_to_json_edgecases(self): on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}") self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"}) on = js_to_json('{"abc": true}') self.assertEqual(json.loads(on), {'abc': True}) # Ignore JavaScript code as well on = js_to_json('''{ "x": 1, y: "a", z: some.code }''') d = json.loads(on) self.assertEqual(d['x'], 1) self.assertEqual(d['y'], 'a') on = js_to_json('["abc", "def",]') self.assertEqual(json.loads(on), ['abc', 'def']) on = js_to_json('{"abc": "def",}') self.assertEqual(json.loads(on), {'abc': 'def'}) def test_clean_html(self): self.assertEqual(clean_html('a:\nb'), 'a: b') self.assertEqual(clean_html('a:\n "b"'), 'a: "b"') def test_intlist_to_bytes(self): self.assertEqual( intlist_to_bytes([0, 1, 127, 128, 255]), b'\x00\x01\x7f\x80\xff') def test_args_to_str(self): self.assertEqual( args_to_str(['foo', 'ba/r', '-baz', '2 be', '']), 'foo ba/r -baz \'2 be\' \'\'' ) def test_parse_filesize(self): self.assertEqual(parse_filesize(None), None) self.assertEqual(parse_filesize(''), None) self.assertEqual(parse_filesize('91 B'), 91) self.assertEqual(parse_filesize('foobar'), None) self.assertEqual(parse_filesize('2 MiB'), 2097152) self.assertEqual(parse_filesize('5 GB'), 5000000000) self.assertEqual(parse_filesize('1.2Tb'), 1200000000000) self.assertEqual(parse_filesize('1,24 KB'), 1240) def test_version_tuple(self): self.assertEqual(version_tuple('1'), (1,)) self.assertEqual(version_tuple('10.23.344'), (10, 23, 344)) self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style def test_detect_exe_version(self): self.assertEqual(detect_exe_version('''ffmpeg version 
1.2.1 built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4) configuration: --prefix=/usr --extra-'''), '1.2.1') self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685 built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685') self.assertEqual(detect_exe_version('''X server found. dri2 connection failed! Trying to open render node... Success at /dev/dri/renderD128. ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4') def test_age_restricted(self): self.assertFalse(age_restricted(None, 10)) # unrestricted content self.assertFalse(age_restricted(1, None)) # unrestricted policy self.assertFalse(age_restricted(8, 10)) self.assertTrue(age_restricted(18, 14)) self.assertFalse(age_restricted(18, 18)) def test_is_html(self): self.assertFalse(is_html(b'\x49\x44\x43<html')) self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa')) self.assertTrue(is_html( # UTF-8 with BOM b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa')) self.assertTrue(is_html( # UTF-16-LE b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00' )) self.assertTrue(is_html( # UTF-16-BE b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4' )) self.assertTrue(is_html( # UTF-32-BE b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4')) self.assertTrue(is_html( # UTF-32-LE b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00')) def test_render_table(self): self.assertEqual( render_table( ['a', 'bcd'], [[123, 4], [9999, 51]]), 'a bcd\n' '123 4\n' '9999 51') def test_match_str(self): self.assertRaises(ValueError, match_str, 'xy>foobar', {}) self.assertFalse(match_str('xy', {'x': 1200})) self.assertTrue(match_str('!xy', {'x': 1200})) self.assertTrue(match_str('x', {'x': 1200})) self.assertFalse(match_str('!x', {'x': 1200})) self.assertTrue(match_str('x', {'x': 0})) self.assertFalse(match_str('x>0', {'x': 0})) self.assertFalse(match_str('x>0', {})) 
self.assertTrue(match_str('x>?0', {})) self.assertTrue(match_str('x>1K', {'x': 1200})) self.assertFalse(match_str('x>2K', {'x': 1200})) self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200})) self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200})) self.assertFalse(match_str('y=a212', {'y': 'foobar42'})) self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'})) self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'})) self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'})) self.assertFalse(match_str( 'like_count > 100 & dislike_count <? 50 & description', {'like_count': 90, 'description': 'foo'})) self.assertTrue(match_str( 'like_count > 100 & dislike_count <? 50 & description', {'like_count': 190, 'description': 'foo'})) self.assertFalse(match_str( 'like_count > 100 & dislike_count <? 50 & description', {'like_count': 190, 'dislike_count': 60, 'description': 'foo'})) self.assertFalse(match_str( 'like_count > 100 & dislike_count <? 50 & description', {'like_count': 190, 'dislike_count': 10})) def test_parse_dfxp_time_expr(self): self.assertEqual(parse_dfxp_time_expr(None), 0.0) self.assertEqual(parse_dfxp_time_expr(''), 0.0) self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1) self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1) self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0) self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1) def test_dfxp2srt(self): dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?> <tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter"> <body> <div xml:lang="en"> <p begin="0" end="1">The following line contains Chinese characters and special symbols</p> <p begin="1" end="2">第二行<br/>♪♪</p> <p begin="2" dur="1"><span>Third<br/>Line</span></p> </div> </body> </tt>''' srt_data = '''1 00:00:00,000 --> 00:00:01,000 The following line contains Chinese characters and special symbols 2 00:00:01,000 --> 00:00:02,000 第二行 ♪♪ 3 00:00:02,000 --> 00:00:03,000 Third Line ''' 
self.assertEqual(dfxp2srt(dfxp_data), srt_data) dfxp_data_no_default_namespace = '''<?xml version="1.0" encoding="UTF-8"?> <tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter"> <body> <div xml:lang="en"> <p begin="0" end="1">The first line</p> </div> </body> </tt>''' srt_data = '''1 00:00:00,000 --> 00:00:01,000 The first line ''' self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# coding: utf-8 # # Copyright (C) 2013 Google Inc. # # This file is part of YouCompleteMe. # # YouCompleteMe is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # YouCompleteMe is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>. from nose.tools import eq_, ok_, with_setup from mock import MagicMock from ycm.test_utils import MockVimModule vim_mock = MockVimModule() from ycm import base from ycm import vimsupport import sys # column is 0-based def SetVimCurrentColumnAndLineValue( column, line_value ): vimsupport.CurrentColumn = MagicMock( return_value = column ) vimsupport.CurrentLineContents = MagicMock( return_value = line_value ) def Setup(): sys.modules[ 'ycm.vimsupport' ] = MagicMock() vimsupport.CurrentFiletypes = MagicMock( return_value = [''] ) vimsupport.CurrentColumn = MagicMock( return_value = 1 ) vimsupport.CurrentLineContents = MagicMock( return_value = '' ) @with_setup( Setup ) def AdjustCandidateInsertionText_Basic_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar' ) eq_( [ { 'abbr': 'foobar', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ 'foobar' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_ParenInTextAfterCursor_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar(zoo' ) eq_( [ { 'abbr': 'foobar', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ 'foobar' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_PlusInTextAfterCursor_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 
'bar+zoo' ) eq_( [ { 'abbr': 'foobar', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ 'foobar' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_WhitespaceInTextAfterCursor_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar zoo' ) eq_( [ { 'abbr': 'foobar', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ 'foobar' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_MoreThanWordMatchingAfterCursor_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar.h' ) eq_( [ { 'abbr': 'foobar.h', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ 'foobar.h' ] ) ) vimsupport.TextAfterCursor = MagicMock( return_value = 'bar(zoo' ) eq_( [ { 'abbr': 'foobar(zoo', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ 'foobar(zoo' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_NotSuffix_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar' ) eq_( [ { 'abbr': 'foofoo', 'word': 'foofoo' } ], base.AdjustCandidateInsertionText( [ 'foofoo' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_NothingAfterCursor_test(): vimsupport.TextAfterCursor = MagicMock( return_value = '' ) eq_( [ 'foofoo', 'zobar' ], base.AdjustCandidateInsertionText( [ 'foofoo', 'zobar' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_MultipleStrings_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar' ) eq_( [ { 'abbr': 'foobar', 'word': 'foo' }, { 'abbr': 'zobar', 'word': 'zo' }, { 'abbr': 'qbar', 'word': 'q' }, { 'abbr': 'bar', 'word': '' }, ], base.AdjustCandidateInsertionText( [ 'foobar', 'zobar', 'qbar', 'bar' ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_DictInput_test(): vimsupport.TextAfterCursor = MagicMock( return_value = 'bar' ) eq_( [ { 'abbr': 'foobar', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ { 'word': 'foobar' } ] ) ) @with_setup( Setup ) def AdjustCandidateInsertionText_DontTouchAbbr_test(): vimsupport.TextAfterCursor = MagicMock( 
return_value = 'bar' ) eq_( [ { 'abbr': '1234', 'word': 'foo' } ], base.AdjustCandidateInsertionText( [ { 'abbr': '1234', 'word': 'foobar' } ] ) ) @with_setup( Setup ) def OverlapLength_Basic_test(): eq_( 3, base.OverlapLength( 'foo bar', 'bar zoo' ) ) eq_( 3, base.OverlapLength( 'foobar', 'barzoo' ) ) @with_setup( Setup ) def OverlapLength_BasicWithUnicode_test(): eq_( 3, base.OverlapLength( u'bar fäö', u'fäö bar' ) ) eq_( 3, base.OverlapLength( u'zoofäö', u'fäözoo' ) ) @with_setup( Setup ) def OverlapLength_OneCharOverlap_test(): eq_( 1, base.OverlapLength( 'foo b', 'b zoo' ) ) @with_setup( Setup ) def OverlapLength_SameStrings_test(): eq_( 6, base.OverlapLength( 'foobar', 'foobar' ) ) @with_setup( Setup ) def OverlapLength_Substring_test(): eq_( 6, base.OverlapLength( 'foobar', 'foobarzoo' ) ) eq_( 6, base.OverlapLength( 'zoofoobar', 'foobar' ) ) @with_setup( Setup ) def OverlapLength_LongestOverlap_test(): eq_( 7, base.OverlapLength( 'bar foo foo', 'foo foo bar' ) ) @with_setup( Setup ) def OverlapLength_EmptyInput_test(): eq_( 0, base.OverlapLength( '', 'goobar' ) ) eq_( 0, base.OverlapLength( 'foobar', '' ) ) eq_( 0, base.OverlapLength( '', '' ) ) @with_setup( Setup ) def OverlapLength_NoOverlap_test(): eq_( 0, base.OverlapLength( 'foobar', 'goobar' ) ) eq_( 0, base.OverlapLength( 'foobar', '(^($@#$#@' ) ) eq_( 0, base.OverlapLength( 'foo bar zoo', 'foo zoo bar' ) ) @with_setup( Setup ) def LastEnteredCharIsIdentifierChar_Basic_test(): SetVimCurrentColumnAndLineValue( 3, 'abc' ) ok_( base.LastEnteredCharIsIdentifierChar() ) SetVimCurrentColumnAndLineValue( 2, 'abc' ) ok_( base.LastEnteredCharIsIdentifierChar() ) SetVimCurrentColumnAndLineValue( 1, 'abc' ) ok_( base.LastEnteredCharIsIdentifierChar() ) @with_setup( Setup ) def LastEnteredCharIsIdentifierChar_FiletypeHtml_test(): SetVimCurrentColumnAndLineValue( 3, 'ab-' ) vimsupport.CurrentFiletypes = MagicMock( return_value = ['html'] ) ok_( base.LastEnteredCharIsIdentifierChar() ) @with_setup( Setup ) def 
LastEnteredCharIsIdentifierChar_ColumnIsZero_test(): SetVimCurrentColumnAndLineValue( 0, 'abc' ) ok_( not base.LastEnteredCharIsIdentifierChar() ) @with_setup( Setup ) def LastEnteredCharIsIdentifierChar_LineEmpty_test(): SetVimCurrentColumnAndLineValue( 3, '' ) ok_( not base.LastEnteredCharIsIdentifierChar() ) SetVimCurrentColumnAndLineValue( 0, '' ) ok_( not base.LastEnteredCharIsIdentifierChar() ) @with_setup( Setup ) def LastEnteredCharIsIdentifierChar_NotIdentChar_test(): SetVimCurrentColumnAndLineValue( 3, 'ab;' ) ok_( not base.LastEnteredCharIsIdentifierChar() ) SetVimCurrentColumnAndLineValue( 1, ';' ) ok_( not base.LastEnteredCharIsIdentifierChar() ) SetVimCurrentColumnAndLineValue( 3, 'ab-' ) ok_( not base.LastEnteredCharIsIdentifierChar() ) @with_setup( Setup ) def CurrentIdentifierFinished_Basic_test(): SetVimCurrentColumnAndLineValue( 3, 'ab;' ) ok_( base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 2, 'ab;' ) ok_( not base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 1, 'ab;' ) ok_( not base.CurrentIdentifierFinished() ) @with_setup( Setup ) def CurrentIdentifierFinished_NothingBeforeColumn_test(): SetVimCurrentColumnAndLineValue( 0, 'ab;' ) ok_( base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 0, '' ) ok_( base.CurrentIdentifierFinished() ) @with_setup( Setup ) def CurrentIdentifierFinished_InvalidColumn_test(): SetVimCurrentColumnAndLineValue( 5, '' ) ok_( not base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 5, 'abc' ) ok_( not base.CurrentIdentifierFinished() ) @with_setup( Setup ) def CurrentIdentifierFinished_InMiddleOfLine_test(): SetVimCurrentColumnAndLineValue( 4, 'bar.zoo' ) ok_( base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 4, 'bar(zoo' ) ok_( base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 4, 'bar-zoo' ) ok_( base.CurrentIdentifierFinished() ) @with_setup( Setup ) def CurrentIdentifierFinished_Html_test(): 
SetVimCurrentColumnAndLineValue( 4, 'bar-zoo' ) vimsupport.CurrentFiletypes = MagicMock( return_value = ['html'] ) ok_( not base.CurrentIdentifierFinished() ) @with_setup( Setup ) def CurrentIdentifierFinished_WhitespaceOnly_test(): SetVimCurrentColumnAndLineValue( 1, '\n' ) ok_( base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 3, '\n ' ) ok_( base.CurrentIdentifierFinished() ) SetVimCurrentColumnAndLineValue( 3, '\t\t\t\t' ) ok_( base.CurrentIdentifierFinished() )
unknown
codeparrot/codeparrot-clean
"""Tests for the Diophantine equation solvers in sympy.solvers.diophantine."""
from sympy import (Add, factor_list, igcd, Matrix, Mul, S, simplify, Symbol,
    symbols, Eq, pi, factorint, oo, powsimp)
from sympy.core.function import _mexpand
from sympy.core.compatibility import range
from sympy.functions.elementary.trigonometric import sin
from sympy.solvers.diophantine import (descent, diop_bf_DN, diop_DN,
    diop_solve, diophantine, divisible, equivalent, find_DN, ldescent, length,
    reconstruct, partition, power_representation,
    prime_as_sum_of_two_squares, square_factor, sum_of_four_squares,
    sum_of_three_squares, transformation_to_DN, transformation_to_normal,
    classify_diop, base_solution_linear, cornacchia, sqf_normal,
    diop_ternary_quadratic_normal, _diop_ternary_quadratic_normal,
    gaussian_reduce, holzer, diop_general_pythagorean,
    _diop_general_sum_of_squares, _nint_or_floor, _odd, _even, _remove_gcd,
    check_param, parametrize_ternary_quadratic, diop_ternary_quadratic,
    diop_linear, diop_quadratic, diop_general_sum_of_squares,
    sum_of_powers, sum_of_squares, diop_general_sum_of_even_powers,
    _can_do_sum_of_squares)
from sympy.utilities import default_sort_key
from sympy.utilities.pytest import slow, raises, XFAIL

a, b, c, d, p, q, x, y, z, w, t, u, v, X, Y, Z = symbols(
    "a, b, c, d, p, q, x, y, z, w, t, u, v, X, Y, Z", integer=True)
t_0, t_1, t_2, t_3, t_4, t_5, t_6 = symbols("t_:7", integer=True)
m1, m2, m3 = symbols('m1:4', integer=True)
n1 = symbols('n1', integer=True)


def diop_simplify(eq):
    # expand/powsimp/expand so symbolic solutions compare to zero reliably
    return _mexpand(powsimp(_mexpand(eq)))


def test_input_format():
    raises(TypeError, lambda: diophantine(sin(x)))
    raises(TypeError, lambda: diophantine(3))
    raises(TypeError, lambda: diophantine(x/pi - 3))


def test_univariate():
    assert diop_solve((x - 1)*(x - 2)**2) == set([(1,), (2,)])
    assert diop_solve((x - 1)*(x - 2)) == set([(1,), (2,)])


def test_classify_diop():
    raises(TypeError, lambda: classify_diop(x**2/3 - 1))
    raises(ValueError, lambda: classify_diop(1))
    raises(NotImplementedError, lambda: classify_diop(w*x*y*z - 1))
    assert classify_diop(14*x**2 + 15*x - 42) == (
        [x], {1: -42, x: 15, x**2: 14}, 'univariate')
    assert classify_diop(x*y + z) == (
        [x, y, z], {x*y: 1, z: 1}, 'inhomogeneous_ternary_quadratic')
    assert classify_diop(x*y + z + w + x**2) == (
        [w, x, y, z], {x*y: 1, w: 1, x**2: 1, z: 1}, 'inhomogeneous_general_quadratic')
    assert classify_diop(x*y + x*z + x**2 + 1) == (
        [x, y, z], {x*y: 1, x*z: 1, x**2: 1, 1: 1}, 'inhomogeneous_general_quadratic')
    assert classify_diop(x*y + z + w + 42) == (
        [w, x, y, z], {x*y: 1, w: 1, 1: 42, z: 1}, 'inhomogeneous_general_quadratic')
    assert classify_diop(x*y + z*w) == (
        [w, x, y, z], {x*y: 1, w*z: 1}, 'homogeneous_general_quadratic')
    assert classify_diop(x*y**2 + 1) == (
        [x, y], {x*y**2: 1, 1: 1}, 'cubic_thue')


def test_linear():
    assert diop_solve(x) == (0,)
    assert diop_solve(1*x) == (0,)
    assert diop_solve(3*x) == (0,)
    assert diop_solve(x + 1) == (-1,)
    assert diop_solve(2*x + 1) == (None,)
    assert diop_solve(2*x + 4) == (-2,)
    assert diop_solve(y + x) == (t_0, -t_0)
    assert diop_solve(y + x + 0) == (t_0, -t_0)
    assert diop_solve(y + x - 0) == (t_0, -t_0)
    assert diop_solve(0*x - y - 5) == (-5,)
    assert diop_solve(3*y + 2*x - 5) == (3*t_0 - 5, -2*t_0 + 5)
    assert diop_solve(2*x - 3*y - 5) == (3*t_0 - 5, 2*t_0 - 5)
    assert diop_solve(-2*x - 3*y - 5) == (3*t_0 + 5, -2*t_0 - 5)
    assert diop_solve(7*x + 5*y) == (5*t_0, -7*t_0)
    assert diop_solve(2*x + 4*y) == (2*t_0, -t_0)
    assert diop_solve(4*x + 6*y - 4) == (3*t_0 - 2, -2*t_0 + 2)
    assert diop_solve(4*x + 6*y - 3) == (None, None)
    assert diop_solve(0*x + 3*y - 4*z + 5) == (4*t_0 + 5, 3*t_0 + 5)
    assert diop_solve(4*x + 3*y - 4*z + 5) == (t_0, 8*t_0 + 4*t_1 + 5, 7*t_0 + 3*t_1 + 5)
    assert diop_solve(4*x + 3*y - 4*z + 5, None) == (0, 5, 5)
    assert diop_solve(4*x + 2*y + 8*z - 5) == (None, None, None)
    assert diop_solve(5*x + 7*y - 2*z - 6) == (t_0, -3*t_0 + 2*t_1 + 6, -8*t_0 + 7*t_1 + 18)
    assert diop_solve(3*x - 6*y + 12*z - 9) == (2*t_0 + 3, t_0 + 2*t_1, t_1)
    assert diop_solve(6*w + 9*x + 20*y - z) == (t_0, t_1, t_1 + t_2, 6*t_0 + 29*t_1 + 20*t_2)

    # to ignore constant factors, use diophantine
    raises(TypeError, lambda: diop_solve(x/2))


def test_quadratic_simple_hyperbolic_case():
    # Simple Hyperbolic case: A = C = 0 and B != 0
    assert diop_solve(3*x*y + 34*x - 12*y + 1) == \
        set([(-133, -11), (5, -57)])
    assert diop_solve(6*x*y + 2*x + 3*y + 1) == set([])
    assert diop_solve(-13*x*y + 2*x - 4*y - 54) == set([(27, 0)])
    assert diop_solve(-27*x*y - 30*x - 12*y - 54) == set([(-14, -1)])
    assert diop_solve(2*x*y + 5*x + 56*y + 7) == set([(-161, -3),
        (-47, -6), (-35, -12), (-29, -69),
        (-27, 64), (-21, 7), (-9, 1),
        (105, -2)])
    assert diop_solve(6*x*y + 9*x + 2*y + 3) == set([])
    assert diop_solve(x*y + x + y + 1) == set([(-1, t), (t, -1)])
    assert diophantine(48*x*y)


def test_quadratic_elliptical_case():
    # Elliptical case: B**2 - 4AC < 0
    # Two test cases highlighted require lot of memory due to quadratic_congruence() method.
    # This above method should be replaced by Pernici's square_mod() method when his PR gets merged.

    #assert diop_solve(42*x**2 + 8*x*y + 15*y**2 + 23*x + 17*y - 4915) == set([(-11, -1)])
    assert diop_solve(4*x**2 + 3*y**2 + 5*x - 11*y + 12) == set([])
    assert diop_solve(x**2 + y**2 + 2*x + 2*y + 2) == set([(-1, -1)])
    #assert diop_solve(15*x**2 - 9*x*y + 14*y**2 - 23*x - 14*y - 4950) == set([(-15, 6)])
    assert diop_solve(10*x**2 + 12*x*y + 12*y**2 - 34) == \
        set([(1, -2), (-1, -1), (1, 1), (-1, 2)])


def test_quadratic_parabolic_case():
    # Parabolic case: B**2 - 4AC = 0
    assert check_solutions(8*x**2 - 24*x*y + 18*y**2 + 5*x + 7*y + 16)
    assert check_solutions(8*x**2 - 24*x*y + 18*y**2 + 6*x + 12*y - 6)
    assert check_solutions(8*x**2 + 24*x*y + 18*y**2 + 4*x + 6*y - 7)
    assert check_solutions(x**2 + 2*x*y + y**2 + 2*x + 2*y + 1)
    assert check_solutions(x**2 - 2*x*y + y**2 + 2*x + 2*y + 1)
    assert check_solutions(y**2 - 41*x + 40)


def test_quadratic_perfect_square():
    # B**2 - 4*A*C > 0
    # B**2 - 4*A*C is a perfect square
    assert check_solutions(48*x*y)
    assert check_solutions(4*x**2 - 5*x*y + y**2 + 2)
    assert check_solutions(-2*x**2 - 3*x*y + 2*y**2 - 2*x - 17*y + 25)
    assert check_solutions(12*x**2 + 13*x*y + 3*y**2 - 2*x + 3*y - 12)
    assert check_solutions(8*x**2 + 10*x*y + 2*y**2 - 32*x - 13*y - 23)
    assert check_solutions(4*x**2 - 4*x*y - 3*y - 8*x - 3)
    assert check_solutions(-4*x*y - 4*y**2 - 3*y - 5*x - 10)
    assert check_solutions(x**2 - y**2 - 2*x - 2*y)
    assert check_solutions(x**2 - 9*y**2 - 2*x - 6*y)
    assert check_solutions(4*x**2 - 9*y**2 - 4*x - 12*y - 3)


def test_quadratic_non_perfect_square():
    # B**2 - 4*A*C is not a perfect square
    # Used check_solutions() since the solutions are complex expressions involving
    # square roots and exponents
    assert check_solutions(x**2 - 2*x - 5*y**2)
    assert check_solutions(3*x**2 - 2*y**2 - 2*x - 2*y)
    assert check_solutions(x**2 - x*y - y**2 - 3*y)
    assert check_solutions(x**2 - 9*y**2 - 2*x - 6*y)


def test_issue_9106():
    eq = -48 - 2*x*(3*x - 1) + y*(3*y - 1)
    v = (x, y)
    for sol in diophantine(eq):
        assert not diop_simplify(eq.xreplace(dict(zip(v, sol))))


@slow
def test_quadratic_non_perfect_slow():
    assert check_solutions(8*x**2 + 10*x*y - 2*y**2 - 32*x - 13*y - 23)
    # This leads to very large numbers.
    # assert check_solutions(5*x**2 - 13*x*y + y**2 - 4*x - 4*y - 15)
    assert check_solutions(-3*x**2 - 2*x*y + 7*y**2 - 5*x - 7)
    assert check_solutions(-4 - x + 4*x**2 - y - 3*x*y - 4*y**2)
    assert check_solutions(1 + 2*x + 2*x**2 + 2*y + x*y - 2*y**2)


def test_DN():
    # Most of the test cases were adapted from,
    # Solving the generalized Pell equation x**2 - D*y**2 = N, John P. Robertson, July 31, 2004.
    # http://www.jpr2718.org/pell.pdf
    # others are verified using Wolfram Alpha.

    # Covers cases where D <= 0 or D > 0 and D is a square or N = 0
    # Solutions are straightforward in these cases.
    assert diop_DN(3, 0) == [(0, 0)]
    assert diop_DN(-17, -5) == []
    assert diop_DN(-19, 23) == [(2, 1)]
    assert diop_DN(-13, 17) == [(2, 1)]
    assert diop_DN(-15, 13) == []
    assert diop_DN(0, 5) == []
    assert diop_DN(0, 9) == [(3, t)]
    assert diop_DN(9, 0) == [(3*t, t)]
    assert diop_DN(16, 24) == []
    assert diop_DN(9, 180) == [(18, 4)]
    assert diop_DN(9, -180) == [(12, 6)]
    assert diop_DN(7, 0) == [(0, 0)]

    # When equation is x**2 + y**2 = N
    # Solutions are interchangeable
    assert diop_DN(-1, 5) == [(1, 2)]
    assert diop_DN(-1, 169) == [(5, 12), (0, 13)]

    # D > 0 and D is not a square

    # N = 1
    assert diop_DN(13, 1) == [(649, 180)]
    assert diop_DN(980, 1) == [(51841, 1656)]
    assert diop_DN(981, 1) == [(158070671986249, 5046808151700)]
    assert diop_DN(986, 1) == [(49299, 1570)]
    assert diop_DN(991, 1) == [(379516400906811930638014896080, 12055735790331359447442538767)]
    assert diop_DN(17, 1) == [(33, 8)]
    assert diop_DN(19, 1) == [(170, 39)]

    # N = -1
    assert diop_DN(13, -1) == [(18, 5)]
    assert diop_DN(991, -1) == []
    assert diop_DN(41, -1) == [(32, 5)]
    assert diop_DN(290, -1) == [(17, 1)]
    assert diop_DN(21257, -1) == [(13913102721304, 95427381109)]
    assert diop_DN(32, -1) == []

    # |N| > 1
    # Some tests were created using calculator at
    # http://www.numbertheory.org/php/patz.html

    assert diop_DN(13, -4) == [(3, 1), (393, 109), (36, 10)]
    # Source I referred returned (3, 1), (393, 109) and (-3, 1) as fundamental solutions
    # So (-3, 1) and (393, 109) should be in the same equivalent class
    assert equivalent(-3, 1, 393, 109, 13, -4) == True

    assert diop_DN(13, 27) == [(220, 61), (40, 11), (768, 213), (12, 3)]
    assert set(diop_DN(157, 12)) == \
        set([(13, 1), (10663, 851), (579160, 46222),
        (483790960, 38610722), (26277068347, 2097138361), (21950079635497, 1751807067011)])
    assert diop_DN(13, 25) == [(3245, 900)]
    assert diop_DN(192, 18) == []
    assert diop_DN(23, 13) == [(-6, 1), (6, 1)]
    assert diop_DN(167, 2) == [(13, 1)]
    assert diop_DN(167, -2) == []

    assert diop_DN(123, -2) == [(11, 1)]
    # One calculator returned [(11, 1), (-11, 1)] but both of these are in
    # the same equivalence class
    assert equivalent(11, 1, -11, 1, 123, -2)

    assert diop_DN(123, -23) == [(-10, 1), (10, 1)]

    assert diop_DN(0, 0, t) == [(0, t)]
    assert diop_DN(0, -1, t) == []


def test_bf_pell():
    assert diop_bf_DN(13, -4) == [(3, 1), (-3, 1), (36, 10)]
    assert diop_bf_DN(13, 27) == [(12, 3), (-12, 3), (40, 11), (-40, 11)]
    assert diop_bf_DN(167, -2) == []
    assert diop_bf_DN(1729, 1) == [(44611924489705, 1072885712316)]
    assert diop_bf_DN(89, -8) == [(9, 1), (-9, 1)]
    assert diop_bf_DN(21257, -1) == [(13913102721304, 95427381109)]
    assert diop_bf_DN(340, -4) == [(756, 41)]
    assert diop_bf_DN(-1, 0, t) == [(0, 0)]
    assert diop_bf_DN(0, 0, t) == [(0, t)]
    assert diop_bf_DN(4, 0, t) == [(2*t, t), (-2*t, t)]
    assert diop_bf_DN(3, 0, t) == [(0, 0)]
    assert diop_bf_DN(1, -2, t) == []


def test_length():
    assert length(2, 1, 0) == 1
    assert length(-2, 4, 5) == 3
    assert length(-5, 4, 17) == 5
    assert length(0, 4, 13) == 6
    assert length(-31, 8, 613) == 69
    assert length(7, 13, 11) == 23
    assert length(-40, 5, 23) == 4
    assert length(1, 6, 4) == 2


def is_pell_transformation_ok(eq):
    """
    Test whether X*Y, X, or Y terms are present in the equation
    after transforming the equation using the transformation returned
    by transformation_to_pell(). If they are not present we are good.
    Moreover, coefficient of X**2 should be a divisor of coefficient of
    Y**2 and the constant term.
    """
    A, B = transformation_to_DN(eq)
    u = (A*Matrix([X, Y]) + B)[0]
    v = (A*Matrix([X, Y]) + B)[1]
    simplified = diop_simplify(eq.subs(zip((x, y), (u, v))))

    coeff = dict([reversed(t.as_independent(*[X, Y])) for t in simplified.args])

    for term in [X*Y, X, Y]:
        if term in coeff.keys():
            return False

    for term in [X**2, Y**2, 1]:
        if term not in coeff.keys():
            coeff[term] = 0

    if coeff[X**2] != 0:
        return divisible(coeff[Y**2], coeff[X**2]) and \
            divisible(coeff[1], coeff[X**2])

    return True


def test_transformation_to_pell():
    assert is_pell_transformation_ok(-13*x**2 - 7*x*y + y**2 + 2*x - 2*y - 14)
    assert is_pell_transformation_ok(-17*x**2 + 19*x*y - 7*y**2 - 5*x - 13*y - 23)
    assert is_pell_transformation_ok(x**2 - y**2 + 17)
    assert is_pell_transformation_ok(-x**2 + 7*y**2 - 23)
    assert is_pell_transformation_ok(25*x**2 - 45*x*y + 5*y**2 - 5*x - 10*y + 5)
    assert is_pell_transformation_ok(190*x**2 + 30*x*y + y**2 - 3*y - 170*x - 130)
    assert is_pell_transformation_ok(x**2 - 2*x*y - 190*y**2 - 7*y - 23*x - 89)
    assert is_pell_transformation_ok(15*x**2 - 9*x*y + 14*y**2 - 23*x - 14*y - 4950)


def test_find_DN():
    assert find_DN(x**2 - 2*x - y**2) == (1, 1)
    assert find_DN(x**2 - 3*y**2 - 5) == (3, 5)
    assert find_DN(x**2 - 2*x*y - 4*y**2 - 7) == (5, 7)
    assert find_DN(4*x**2 - 8*x*y - y**2 - 9) == (20, 36)
    assert find_DN(7*x**2 - 2*x*y - y**2 - 12) == (8, 84)
    assert find_DN(-3*x**2 + 4*x*y - y**2) == (1, 0)
    assert find_DN(-13*x**2 - 7*x*y + y**2 + 2*x - 2*y - 14) == (101, -7825480)


def test_ldescent():
    # Equations which have solutions
    u = ([(13, 23), (3, -11), (41, -113), (4, -7), (-7, 4), (91, -3),
        (1, 1), (1, -1), (4, 32), (17, 13), (123689, 1), (19, -570)])
    for a, b in u:
        w, x, y = ldescent(a, b)
        assert a*x**2 + b*y**2 == w**2
    assert ldescent(-1, -1) is None


def test_diop_ternary_quadratic_normal():
    assert check_solutions(234*x**2 - 65601*y**2 - z**2)
    assert check_solutions(23*x**2 + 616*y**2 - z**2)
    assert check_solutions(5*x**2 + 4*y**2 - z**2)
    assert check_solutions(3*x**2 + 6*y**2 - 3*z**2)
    assert check_solutions(x**2 + 3*y**2 - z**2)
    assert check_solutions(4*x**2 + 5*y**2 - z**2)
    assert check_solutions(x**2 + y**2 - z**2)
    assert check_solutions(16*x**2 + y**2 - 25*z**2)
    assert check_solutions(6*x**2 - y**2 + 10*z**2)
    assert check_solutions(213*x**2 + 12*y**2 - 9*z**2)
    assert check_solutions(34*x**2 - 3*y**2 - 301*z**2)
    assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)


def is_normal_transformation_ok(eq):
    A = transformation_to_normal(eq)
    X, Y, Z = A*Matrix([x, y, z])
    simplified = diop_simplify(eq.subs(zip((x, y, z), (X, Y, Z))))

    coeff = dict([reversed(t.as_independent(*[X, Y, Z])) for t in simplified.args])
    for term in [X*Y, Y*Z, X*Z]:
        if term in coeff.keys():
            return False

    return True


def test_transformation_to_normal():
    assert is_normal_transformation_ok(x**2 + 3*y**2 + z**2 - 13*x*y - 16*y*z + 12*x*z)
    assert is_normal_transformation_ok(x**2 + 3*y**2 - 100*z**2)
    assert is_normal_transformation_ok(x**2 + 23*y*z)
    assert is_normal_transformation_ok(3*y**2 - 100*z**2 - 12*x*y)
    assert is_normal_transformation_ok(x**2 + 23*x*y - 34*y*z + 12*x*z)
    assert is_normal_transformation_ok(z**2 + 34*x*y - 23*y*z + x*z)
    assert is_normal_transformation_ok(x**2 + y**2 + z**2 - x*y - y*z - x*z)
    assert is_normal_transformation_ok(x**2 + 2*y*z + 3*z**2)
    assert is_normal_transformation_ok(x*y + 2*x*z + 3*y*z)
    assert is_normal_transformation_ok(2*x*z + 3*y*z)


def test_diop_ternary_quadratic():
    assert check_solutions(2*x**2 + z**2 + y**2 - 4*x*y)
    assert check_solutions(x**2 - y**2 - z**2 - x*y - y*z)
    assert check_solutions(3*x**2 - x*y - y*z - x*z)
    assert check_solutions(x**2 - y*z - x*z)
    assert check_solutions(5*x**2 - 3*x*y - x*z)
    assert check_solutions(4*x**2 - 5*y**2 - x*z)
    assert check_solutions(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
    assert check_solutions(8*x**2 - 12*y*z)
    assert check_solutions(45*x**2 - 7*y**2 - 8*x*y - z**2)
    assert check_solutions(x**2 - 49*y**2 - z**2 + 13*z*y - 8*x*y)
    assert check_solutions(90*x**2 + 3*y**2 + 5*x*y + 2*z*y + 5*x*z)
    assert check_solutions(x**2 + 3*y**2 + z**2 - x*y - 17*y*z)
    assert check_solutions(x**2 + 3*y**2 + z**2 - x*y - 16*y*z + 12*x*z)
    assert check_solutions(x**2 + 3*y**2 + z**2 - 13*x*y - 16*y*z + 12*x*z)
    assert check_solutions(x*y - 7*y*z + 13*x*z)

    assert diop_ternary_quadratic_normal(x**2 + y**2 + z**2) == (None, None, None)
    assert diop_ternary_quadratic_normal(x**2 + y**2) is None
    raises(ValueError, lambda:
        _diop_ternary_quadratic_normal((x, y, z),
        {x*y: 1, x**2: 2, y**2: 3, z**2: 0}))
    eq = -2*x*y - 6*x*z + 7*y**2 - 3*y*z + 4*z**2
    assert diop_ternary_quadratic(eq) == (7, 2, 0)
    assert diop_ternary_quadratic_normal(4*x**2 + 5*y**2 - z**2) == \
        (1, 0, 2)
    assert diop_ternary_quadratic(x*y + 2*y*z) == \
        (-2, 0, n1)
    eq = -5*x*y - 8*x*z - 3*y*z + 8*z**2
    assert parametrize_ternary_quadratic(eq) == \
        (64*p**2 - 24*p*q, -64*p*q + 64*q**2, 40*p*q)
    # this cannot be tested with diophantine because it will
    # factor into a product
    assert diop_solve(x*y + 2*y*z) == (-4*p*q, -2*n1*p**2 + 2*p**2, 2*p*q)


def test_square_factor():
    assert square_factor(1) == square_factor(-1) == 1
    assert square_factor(0) == 1
    assert square_factor(5) == square_factor(-5) == 1
    assert square_factor(4) == square_factor(-4) == 2
    assert square_factor(12) == square_factor(-12) == 2
    assert square_factor(6) == 1
    assert square_factor(18) == 3
    assert square_factor(52) == 2
    assert square_factor(49) == 7
    assert square_factor(392) == 14
    assert square_factor(factorint(-12)) == 2


def test_parametrize_ternary_quadratic():
    assert check_solutions(x**2 + y**2 - z**2)
    assert check_solutions(x**2 + 2*x*y + z**2)
    assert check_solutions(234*x**2 - 65601*y**2 - z**2)
    assert check_solutions(3*x**2 + 2*y**2 - z**2 - 2*x*y + 5*y*z - 7*y*z)
    assert check_solutions(x**2 - y**2 - z**2)
    assert check_solutions(x**2 - 49*y**2 - z**2 + 13*z*y - 8*x*y)
    assert check_solutions(8*x*y + z**2)
    assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)
    assert check_solutions(236*x**2 - 225*y**2 - 11*x*y - 13*y*z - 17*x*z)
    assert check_solutions(90*x**2 + 3*y**2 + 5*x*y + 2*z*y + 5*x*z)
    assert check_solutions(124*x**2 - 30*y**2 - 7729*z**2)


def test_no_square_ternary_quadratic():
    assert check_solutions(2*x*y + y*z - 3*x*z)
    assert check_solutions(189*x*y - 345*y*z - 12*x*z)
    assert check_solutions(23*x*y + 34*y*z)
    assert check_solutions(x*y + y*z + z*x)
    assert check_solutions(23*x*y + 23*y*z + 23*x*z)


def test_descent():

    u = ([(13, 23), (3, -11), (41, -113), (91, -3), (1, 1), (1, -1),
        (17, 13), (123689, 1), (19, -570)])
    for a, b in u:
        w, x, y = descent(a, b)
        assert a*x**2 + b*y**2 == w**2
    # the docstring warns against bad input, so these are expected results
    # - can't both be negative
    raises(TypeError, lambda: descent(-1, -3))
    # A can't be zero unless B != 1
    raises(ZeroDivisionError, lambda: descent(0, 3))
    # supposed to be square-free
    raises(TypeError, lambda: descent(4, 3))


def test_diophantine():
    assert check_solutions((x - y)*(y - z)*(z - x))
    assert check_solutions((x - y)*(x**2 + y**2 - z**2))
    assert check_solutions((x - 3*y + 7*z)*(x**2 + y**2 - z**2))
    assert check_solutions((x**2 - 3*y**2 - 1))
    assert check_solutions(y**2 + 7*x*y)
    assert check_solutions(x**2 - 3*x*y + y**2)
    assert check_solutions(z*(x**2 - y**2 - 15))
    assert check_solutions(x*(2*y - 2*z + 5))
    assert check_solutions((x**2 - 3*y**2 - 1)*(x**2 - y**2 - 15))
    assert check_solutions((x**2 - 3*y**2 - 1)*(y - 7*z))
    assert check_solutions((x**2 + y**2 - z**2)*(x - 7*y - 3*z + 4*w))
    # Following test case caused problems in parametric representation
    # But this can be solved by factroing out y.
    # No need to use methods for ternary quadratic equations.
    assert check_solutions(y**2 - 7*x*y + 4*y*z)
    assert check_solutions(x**2 - 2*x + 1)

    assert diophantine(x - y) == diophantine(Eq(x, y))
    assert diophantine(3*x*pi - 2*y*pi) == set([(2*t_0, 3*t_0)])
    assert diophantine(x**2 + y**2 + z**2 - 14) == set([(1, 2, 3)])
    assert diophantine(x**2 + 15*x/14 - 3) == set()
    # test issue 11049
    eq = 92*x**2 - 99*y**2 - z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (9, 7, 51)
    assert diophantine(eq) == set([(
        891*p**2 + 9*q**2, -693*p**2 - 102*p*q + 7*q**2,
        5049*p**2 - 1386*p*q - 51*q**2)])
    eq = 2*x**2 + 2*y**2 - z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (1, 1, 2)
    assert diophantine(eq) == set([(
        2*p**2 - q**2, -2*p**2 + 4*p*q - q**2,
        4*p**2 - 4*p*q + 2*q**2)])
    eq = 411*x**2 + 57*y**2 - 221*z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (2021, 2645, 3066)
    assert diophantine(eq) == \
        set([(115197*p**2 - 446641*q**2, -150765*p**2 + 1355172*p*q -
        584545*q**2, 174762*p**2 - 301530*p*q + 677586*q**2)])
    eq = 573*x**2 + 267*y**2 - 984*z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (49, 233, 127)
    assert diophantine(eq) == \
        set([(4361*p**2 - 16072*q**2, -20737*p**2 + 83312*p*q - 76424*q**2,
        11303*p**2 - 41474*p*q + 41656*q**2)])
    # this produces factors during reconstruction
    eq = x**2 + 3*y**2 - 12*z**2
    coeff = eq.as_coefficients_dict()
    assert _diop_ternary_quadratic_normal((x, y, z), coeff) == \
        (0, 2, 1)
    assert diophantine(eq) == \
        set([(24*p*q, 2*p**2 - 24*q**2, p**2 + 12*q**2)])
    # solvers have not been written for every type
    raises(NotImplementedError, lambda: diophantine(x*y**2 + 1))

    # rational expressions
    assert diophantine(1/x) == set()
    # BUG FIX: the '==' was missing here, so the expected solution set on the
    # next line was a dead no-op expression and the assert only checked
    # truthiness of diophantine()'s result.
    assert diophantine(1/x + 1/y - S.Half) == \
        set([(6, 3), (-2, 1), (4, 4), (1, -2), (3, 6)])


def test_general_pythagorean():
    from sympy.abc import a, b, c, d, e

    assert check_solutions(a**2 + b**2 + c**2 - d**2)
    assert check_solutions(a**2 + 4*b**2 + 4*c**2 - d**2)
    assert check_solutions(9*a**2 + 4*b**2 + 4*c**2 - d**2)
    assert check_solutions(9*a**2 + 4*b**2 - 25*d**2 + 4*c**2 )
    assert check_solutions(9*a**2 - 16*d**2 + 4*b**2 + 4*c**2)
    assert check_solutions(-e**2 + 9*a**2 + 4*b**2 + 4*c**2 + 25*d**2)
    assert check_solutions(16*a**2 - b**2 + 9*c**2 + d**2 + 25*e**2)


def test_diop_general_sum_of_squares_quick():
    for i in range(3, 10):
        assert check_solutions(sum(i**2 for i in symbols(':%i' % i)) - i)
    raises(ValueError, lambda: _diop_general_sum_of_squares((x, y), 2))
    assert _diop_general_sum_of_squares((x, y, z), -2) == set()
    eq = x**2 + y**2 + z**2 - (1 + 4 + 9)
    assert diop_general_sum_of_squares(eq) == \
        set([(1, 2, 3)])
    eq = u**2 + v**2 + x**2 + y**2 + z**2 - 1313
    assert len(diop_general_sum_of_squares(eq, 3)) == 3
    # issue 11016
    var = symbols(':5') + (symbols('6', negative=True),)
    eq = Add(*[i**2 for i in var]) - 112
    assert diophantine(eq) == set(
        [(0, 1, 1, 5, 6, -7), (1, 1, 1, 3, 6, -8), (2, 3, 3, 4, 5, -7),
        (0, 1, 1, 1, 3, -10), (0, 0, 4, 4, 4, -8), (1, 2, 3, 3, 5, -8),
        (0, 1, 2, 3, 7, -7), (2, 2, 4, 4, 6, -6), (1, 1, 3, 4, 6, -7),
        (0, 2, 3, 3, 3, -9), (0, 0, 2, 2, 2, -10), (1, 1, 2, 3, 4, -9),
        (0, 1, 1, 2, 5, -9), (0, 0, 2, 6, 6, -6), (1, 3, 4, 5, 5, -6),
        (0, 2, 2, 2, 6, -8), (0, 3, 3, 3, 6, -7), (0, 2, 3, 5, 5, -7),
        (0, 1, 5, 5, 5, -6)])
    # handle negated squares with signsimp
    assert diophantine(12 - x**2 - y**2 - z**2) == set([(2, 2, 2)])
    # diophantine handles simplification, so classify_diop should
    # not have to look for additional patterns that are removed
    # by diophantine
    eq = a**2 + b**2 + c**2 + d**2 - 4
    raises(NotImplementedError, lambda: classify_diop(-eq))


def test_diop_partition():
    for n in [8, 10]:
        for k in range(1, 8):
            for p in partition(n, k):
                assert len(p) == k
    assert [p for p in partition(3, 5)] == []
    assert [list(p) for p in partition(3, 5, 1)] == [
        [0, 0, 0, 0, 3], [0, 0, 0, 1, 2], [0, 0, 1, 1, 1]]
    assert list(partition(0)) == [()]
    assert list(partition(1, 0)) == [()]
    assert [list(i) for i in partition(3)] == [[1, 1, 1], [1, 2], [3]]


def test_prime_as_sum_of_two_squares():
    for i in [5, 13, 17, 29, 37, 41, 2341, 3557, 34841, 64601]:
        a, b = prime_as_sum_of_two_squares(i)
        assert a**2 + b**2 == i
    assert prime_as_sum_of_two_squares(7) is None
    ans = prime_as_sum_of_two_squares(800029)
    assert ans == (450, 773) and type(ans[0]) is int


def test_sum_of_three_squares():
    for i in [0, 1, 2, 34, 123, 34304595905, 34304595905394941, 343045959052344,
              800, 801, 802, 803, 804, 805, 806]:
        a, b, c = sum_of_three_squares(i)
        assert a**2 + b**2 + c**2 == i

    assert sum_of_three_squares(7) is None
    assert sum_of_three_squares((4**5)*15) is None
    assert sum_of_three_squares(25) == (5, 0, 0)
    assert sum_of_three_squares(4) == (0, 0, 2)


def test_sum_of_four_squares():
    from random import randint

    # this should never fail
    n = randint(1, 100000000000000)
    assert sum(i**2 for i in sum_of_four_squares(n)) == n

    assert sum_of_four_squares(0) == (0, 0, 0, 0)
    assert sum_of_four_squares(14) == (0, 1, 2, 3)
    assert sum_of_four_squares(15) == (1, 1, 2, 3)
    assert sum_of_four_squares(18) == (1, 2, 2, 3)
    assert sum_of_four_squares(19) == (0, 1, 3, 3)
    assert sum_of_four_squares(48) == (0, 4, 4, 4)


def test_power_representation():
    tests = [(1729, 3, 2), (234, 2, 4), (2, 1, 2), (3, 1, 3),
             (5, 2, 2), (12352, 2, 4), (32760, 2, 3)]
    for test in tests:
        n, p, k = test
        f = power_representation(n, p, k)
        while True:
            try:
                l = next(f)
                assert len(l) == k

                chk_sum = 0
                for l_i in l:
                    chk_sum = chk_sum + l_i**p
                assert chk_sum == n

            except StopIteration:
                break

    assert list(power_representation(20, 2, 4, True)) == \
        [(1, 1, 3, 3), (0, 0, 2, 4)]
    raises(ValueError, lambda: list(power_representation(1.2, 2, 2)))
    raises(ValueError, lambda: list(power_representation(2, 0, 2)))
    raises(ValueError, lambda: list(power_representation(2, 2, 0)))
    assert list(power_representation(-1, 2, 2)) == []
    assert list(power_representation(1, 1, 1)) == [(1,)]
    assert list(power_representation(3, 2, 1)) == []
    assert list(power_representation(4, 2, 1)) == [(2,)]
    assert list(power_representation(3**4, 4, 6, zeros=True)) == \
        [(1, 2, 2, 2, 2, 2), (0, 0, 0, 0, 0, 3)]
    assert list(power_representation(3**4, 4, 5, zeros=False)) == []
    assert list(power_representation(-2, 3, 2)) == [(-1, -1)]
    assert list(power_representation(-2, 4, 2)) == []
    assert list(power_representation(0, 3, 2, True)) == [(0, 0)]
    assert list(power_representation(0, 3, 2, False)) == []
    # when we are dealing with squares, do feasibility checks
    assert len(list(power_representation(4**10*(8*10 + 7), 2, 3))) == 0
    # there will be a recursion error if these aren't recognized
    big = 2**30
    for i in [13, 10, 7, 5, 4, 2, 1]:
        assert list(sum_of_powers(big, 2, big - i)) == []


def test_assumptions():
    """
    Test whether diophantine respects the assumptions.
    """
    #Test case taken from the below so question regarding assumptions in diophantine module
    #http://stackoverflow.com/questions/23301941/how-can-i-declare-natural-symbols-with-sympy
    m, n = symbols('m n', integer=True, positive=True)
    diof = diophantine(n ** 2 + m * n - 500)
    assert diof == set([(5, 20), (40, 10), (95, 5), (121, 4), (248, 2), (499, 1)])

    a, b = symbols('a b', integer=True, positive=False)
    diof = diophantine(a*b + 2*a + 3*b - 6)
    assert diof == set([(-15, -3), (-9, -4), (-7, -5), (-6, -6), (-5, -8), (-4, -14)])


def check_solutions(eq):
    """
    Determines whether solutions returned by diophantine() satisfy the
    original equation. Hope to generalize this so we can remove functions
    like check_ternay_quadratic, check_solutions_normal, check_solutions()
    """
    s = diophantine(eq)

    factors = Mul.make_args(eq)

    var = list(eq.free_symbols)
    var.sort(key=default_sort_key)

    while s:
        solution = s.pop()
        for f in factors:
            # a solution is valid if it kills at least one factor of eq
            if diop_simplify(f.subs(zip(var, solution))) == 0:
                break
        else:
            return False
    return True


def test_diopcoverage():
    eq = (2*x + y + 1)**2
    assert diop_solve(eq) == set([(t_0, -2*t_0 - 1)])
    eq = 2*x**2 + 6*x*y + 12*x + 4*y**2 + 18*y + 18
    assert diop_solve(eq) == set([(t_0, -t_0 - 3), (2*t_0 - 3, -t_0)])
    assert diop_quadratic(x + y**2 - 3) == set([(-t**2 + 3, -t)])

    assert diop_linear(x + y - 3) == (t_0, 3 - t_0)

    assert base_solution_linear(0, 1, 2, t=None) == (0, 0)
    ans = (3*t - 1, -2*t + 1)
    assert base_solution_linear(4, 8, 12, t) == ans
    assert base_solution_linear(4, 8, 12, t=None) == tuple(_.subs(t, 0) for _ in ans)

    assert cornacchia(1, 1, 20) is None
    assert cornacchia(1, 1, 5) == set([(1, 2)])
    assert cornacchia(1, 2, 17) == set([(3, 2)])

    raises(ValueError, lambda: reconstruct(4, 20, 1))

    assert gaussian_reduce(4, 1, 3) == (1, 1)
    eq = -w**2 - x**2 - y**2 + z**2

    assert diop_general_pythagorean(eq) == \
        diop_general_pythagorean(-eq) == \
        (m1**2 + m2**2 - m3**2, 2*m1*m3, 2*m2*m3, m1**2 + m2**2 + m3**2)

    assert check_param(S(3) + x/3, S(4) + x/2, S(2), x) == (None, None)
    assert check_param(S(3)/2, S(4) + x, S(2), x) == (None, None)
    assert check_param(S(4) + x, S(3)/2, S(2), x) == (None, None)

    assert _nint_or_floor(16, 10) == 2
    assert _odd(1) == (not _even(1)) == True
    assert _odd(0) == (not _even(0)) == False
    assert _remove_gcd(2, 4, 6) == (1, 2, 3)
    raises(TypeError, lambda: _remove_gcd((2, 4, 6)))
    assert sqf_normal(2 * 3**2 * 5, 2 * 5 * 11, 2 * 7**2 * 11) == \
        (11, 1, 5)

    # it's ok if these pass some day when the solvers are implemented
    raises(NotImplementedError, lambda: diophantine(x**2 + y**2 + x*y + 2*y*z - 12))
    raises(NotImplementedError, lambda: diophantine(x**3 + y**2))


def test_holzer():
    # if the input is good, don't let it diverge in holzer()
    # (but see test_fail_holzer below)
    assert holzer(2, 7, 13, 4, 79, 23) == (2, 7, 13)

    # None in uv condition met; solution is not Holzer reduced
    # so this will hopefully change but is here for coverage
    assert holzer(2, 6, 2, 1, 1, 10) == (2, 6, 2)

    raises(ValueError, lambda: holzer(2, 7, 14, 4, 79, 23))


@XFAIL
def test_fail_holzer():
    eq = lambda x, y, z: a*x**2 + b*y**2 - c*z**2
    a, b, c = 4, 79, 23
    x, y, z = xyz = 26, 1, 11
    X, Y, Z = ans = 2, 7, 13
    assert eq(*xyz) == 0
    assert eq(*ans) == 0
    assert max(a*x**2, b*y**2, c*z**2) <= a*b*c
    assert max(a*X**2, b*Y**2, c*Z**2) <= a*b*c
    h = holzer(x, y, z, a, b, c)
    assert h == ans  # it would be nice to get the smaller soln


def test_issue_9539():
    assert diophantine(6*w + 9*y + 20*x - z) == \
        set([(t_0, t_1, t_1 + t_2, 6*t_0 + 29*t_1 + 9*t_2)])


def test_issue_8943():
    assert diophantine(
        (3*(x**2 + y**2 + z**2) - 14*(x*y + y*z + z*x))) == \
        set([(0, 0, 0)])


def test_diop_sum_of_even_powers():
    eq = x**4 + y**4 + z**4 - 2673
    assert diop_solve(eq) == set([(3, 6, 6), (2, 4, 7)])
    assert diop_general_sum_of_even_powers(eq, 2) == set(
        [(3, 6, 6), (2, 4, 7)])
    raises(NotImplementedError, lambda: diop_general_sum_of_even_powers(-eq, 2))
    neg = symbols('neg', negative=True)
    eq = x**4 + y**4 + neg**4 - 2673
    assert diop_general_sum_of_even_powers(eq) == set([(-3, 6, 6)])
    assert diophantine(x**4 + y**4 + 2) == set()
    assert diop_general_sum_of_even_powers(x**4 + y**4 - 2, limit=0) == set()


def test_sum_of_squares_powers():
    tru = set([
        (0, 0, 1, 1, 11), (0, 0, 5, 7, 7), (0, 1, 3, 7, 8), (0, 1, 4, 5, 9),
        (0, 3, 4, 7, 7), (0, 3, 5, 5, 8), (1, 1, 2, 6, 9), (1, 1, 6, 6, 7),
        (1, 2, 3, 3, 10), (1, 3, 4, 4, 9), (1, 5, 5, 6, 6), (2, 2, 3, 5, 9),
        (2, 3, 5, 6, 7), (3, 3, 4, 5, 8)])
    eq = u**2 + v**2 + x**2 + y**2 + z**2 - 123
    ans = diop_general_sum_of_squares(eq, oo)  # allow oo to be used
    assert len(ans) == 14
    # BUG FIX: `tru` was built but never compared; check the full answer set.
    assert ans == tru

    raises(ValueError, lambda: list(sum_of_squares(10, -1)))
    assert list(sum_of_squares(-10, 2)) == []
    assert list(sum_of_squares(2, 3)) == []
    assert list(sum_of_squares(0, 3, True)) == [(0, 0, 0)]
    assert list(sum_of_squares(0, 3)) == []
    assert list(sum_of_squares(4, 1)) == [(2,)]
    assert list(sum_of_squares(5, 1)) == []
    assert list(sum_of_squares(50, 2)) == [(5, 5), (1, 7)]
    assert list(sum_of_squares(11, 5, True)) == [
        (1, 1, 1, 2, 2), (0, 0, 1, 1, 3)]
    assert list(sum_of_squares(8, 8)) == [(1, 1, 1, 1, 1, 1, 1, 1)]

    assert [len(list(sum_of_squares(i, 5, True))) for i in range(30)] == [
        1, 1, 1, 1, 2,
        2, 1, 1, 2, 2,
        2, 2, 2, 3, 2,
        1, 3, 3, 3, 3,
        4, 3, 3, 2, 2,
        4, 4, 4, 4, 5]
    assert [len(list(sum_of_squares(i, 5))) for i in range(30)] == [
        0, 0, 0, 0, 0,
        1, 0, 0, 1, 0,
        0, 1, 0, 1, 1,
        0, 1, 1, 0, 1,
        2, 1, 1, 1, 1,
        1, 1, 1, 1, 3]
    for i in range(30):
        s1 = set(sum_of_squares(i, 5, True))
        assert not s1 or all(sum(j**2 for j in t) == i for t in s1)
        s2 = set(sum_of_squares(i, 5))
        assert all(sum(j**2 for j in t) == i for t in s2)

    raises(ValueError, lambda: list(sum_of_powers(2, -1, 1)))
    raises(ValueError, lambda: list(sum_of_powers(2, 1, -1)))
    assert list(sum_of_powers(-2, 3, 2)) == [(-1, -1)]
    assert list(sum_of_powers(-2, 4, 2)) == []
    assert list(sum_of_powers(2, 1, 1)) == [(2,)]
    assert list(sum_of_powers(2, 1, 3, True)) == [(0, 0, 2), (0, 1, 1)]
    assert list(sum_of_powers(5, 1, 2, True)) == [(0, 5), (1, 4), (2, 3)]
    assert list(sum_of_powers(6, 2, 2)) == []
    assert list(sum_of_powers(3**5, 3, 1)) == []
    assert list(sum_of_powers(3**6, 3, 1)) == [(9,)] and (9**3 == 3**6)
    assert list(sum_of_powers(2**1000, 5, 2)) == []


def test__can_do_sum_of_squares():
    assert _can_do_sum_of_squares(3, -1) is False
    assert _can_do_sum_of_squares(-3, 1) is False
    assert _can_do_sum_of_squares(0, 1)
    assert _can_do_sum_of_squares(4, 1)
    assert _can_do_sum_of_squares(1, 2)
    assert _can_do_sum_of_squares(2, 2)
    assert _can_do_sum_of_squares(3, 2) is False


def test_issue_9538():
    eq = x - 3*y + 2
    assert diophantine(eq, syms=[y, x]) == set([(t_0, 3*t_0 - 2)])
    raises(TypeError, lambda: diophantine(eq, syms=set([y, x])))
unknown
codeparrot/codeparrot-clean
# (c) 2016, Matt Martz <matt@sivel.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'json' def __init__(self, display=None): super(CallbackModule, self).__init__(display) self.results = [] def _new_play(self, play): return { 'play': { 'name': play.name, 'id': str(play._uuid) }, 'tasks': [] } def _new_task(self, task): return { 'task': { 'name': task.name, 'id': str(task._uuid) }, 'hosts': {} } def v2_playbook_on_play_start(self, play): self.results.append(self._new_play(play)) def v2_playbook_on_task_start(self, task, is_conditional): self.results[-1]['tasks'].append(self._new_task(task)) def v2_runner_on_ok(self, result, **kwargs): host = result._host self.results[-1]['tasks'][-1]['hosts'][host.name] = result._result def v2_playbook_on_stats(self, stats): """Display info about playbook statistics""" hosts = sorted(stats.processed.keys()) summary = {} for h in hosts: s = stats.summarize(h) summary[h] = s output = { 'plays': self.results, 'stats': summary } print(json.dumps(output, indent=4, sort_keys=True)) v2_runner_on_failed = v2_runner_on_ok v2_runner_on_unreachable = v2_runner_on_ok 
v2_runner_on_skipped = v2_runner_on_ok
unknown
codeparrot/codeparrot-clean
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Counts words in UTF8 encoded, '\n' delimited text received from the network every second. Usage: network_wordcount.py <hostname> <port> <hostname> and <port> describe the TCP server that Spark Streaming would connect to receive data. To run this on your local machine, you need to first run a Netcat server `$ nc -lk 9999` and then run the example `$ bin/spark-submit examples/src/main/python/streaming/network_wordcount.py localhost 9999` """ from __future__ import print_function import sys from pyspark import SparkContext from pyspark.streaming import StreamingContext if __name__ == "__main__": if len(sys.argv) != 3: print("Usage: network_wordcount.py <hostname> <port>", file=sys.stderr) exit(-1) sc = SparkContext(appName="PythonStreamingNetworkWordCount") ssc = StreamingContext(sc, 1) lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2])) counts = lines.flatMap(lambda line: line.split(" "))\ .map(lambda word: (word, 1))\ .reduceByKey(lambda a, b: a+b) counts.pprint() ssc.start() ssc.awaitTermination()
unknown
codeparrot/codeparrot-clean
import functools import json import logging import random import re import string # pylint: disable=deprecated-module import fnmatch import unicodedata import urllib from textwrap import dedent from external_auth.models import ExternalAuthMap from external_auth.djangostore import DjangoOpenIDStore from django.conf import settings from django.contrib.auth import REDIRECT_FIELD_NAME, authenticate, login from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.core.validators import validate_email from django.core.exceptions import ValidationError if settings.FEATURES.get('AUTH_USE_CAS'): from django_cas.views import login as django_cas_login from student.helpers import get_next_url_for_login_page from student.models import UserProfile from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden from django.utils.http import urlquote, is_safe_url from django.shortcuts import redirect from django.utils.translation import ugettext as _ from edxmako.shortcuts import render_to_response, render_to_string try: from django.views.decorators.csrf import csrf_exempt except ImportError: from django.contrib.csrf.middleware import csrf_exempt from django.views.decorators.csrf import ensure_csrf_cookie import django_openid_auth.views as openid_views from django_openid_auth import auth as openid_auth from openid.consumer.consumer import SUCCESS from openid.server.server import Server, ProtocolError, UntrustedReturnURL from openid.server.trustroot import TrustRoot from openid.extensions import ax, sreg from ratelimitbackend.exceptions import RateLimitException import student.views from xmodule.modulestore.django import modulestore from opaque_keys.edx.locations import SlashSeparatedCourseKey log = logging.getLogger("edx.external_auth") AUDIT_LOG = logging.getLogger("audit") SHIBBOLETH_DOMAIN_PREFIX = settings.SHIBBOLETH_DOMAIN_PREFIX OPENID_DOMAIN_PREFIX = settings.OPENID_DOMAIN_PREFIX # 
----------------------------------------------------------------------------- # OpenID Common # ----------------------------------------------------------------------------- @csrf_exempt def default_render_failure(request, message, status=403, template_name='extauth_failure.html', exception=None): """Render an Openid error page to the user""" log.debug("In openid_failure " + message) data = render_to_string(template_name, dict(message=message, exception=exception)) return HttpResponse(data, status=status) # ----------------------------------------------------------------------------- # OpenID Authentication # ----------------------------------------------------------------------------- def generate_password(length=12, chars=string.letters + string.digits): """Generate internal password for externally authenticated user""" choice = random.SystemRandom().choice return ''.join([choice(chars) for _i in range(length)]) @csrf_exempt def openid_login_complete(request, redirect_field_name=REDIRECT_FIELD_NAME, render_failure=None): """Complete the openid login process""" render_failure = (render_failure or default_render_failure) openid_response = openid_views.parse_openid_response(request) if not openid_response: return render_failure(request, 'This is an OpenID relying party endpoint.') if openid_response.status == SUCCESS: external_id = openid_response.identity_url oid_backend = openid_auth.OpenIDBackend() details = oid_backend._extract_user_details(openid_response) log.debug('openid success, details=%s', details) url = getattr(settings, 'OPENID_SSO_SERVER_URL', None) external_domain = "{0}{1}".format(OPENID_DOMAIN_PREFIX, url) fullname = '%s %s' % (details.get('first_name', ''), details.get('last_name', '')) return _external_login_or_signup( request, external_id, external_domain, details, details.get('email', ''), fullname, retfun=functools.partial(redirect, get_next_url_for_login_page(request)), ) return render_failure(request, 'Openid failure') def 
_external_login_or_signup(request, external_id, external_domain, credentials, email, fullname, retfun=None): """Generic external auth login or signup""" # see if we have a map from this external_id to an edX username try: eamap = ExternalAuthMap.objects.get(external_id=external_id, external_domain=external_domain) log.debug(u'Found eamap=%s', eamap) except ExternalAuthMap.DoesNotExist: # go render form for creating edX user eamap = ExternalAuthMap(external_id=external_id, external_domain=external_domain, external_credentials=json.dumps(credentials)) eamap.external_email = email eamap.external_name = fullname eamap.internal_password = generate_password() log.debug(u'Created eamap=%s', eamap) eamap.save() log.info(u"External_Auth login_or_signup for %s : %s : %s : %s", external_domain, external_id, email, fullname) uses_shibboleth = settings.FEATURES.get('AUTH_USE_SHIB') and external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX) uses_certs = settings.FEATURES.get('AUTH_USE_CERTIFICATES') internal_user = eamap.user if internal_user is None: if uses_shibboleth: # If we are using shib, try to link accounts # For Stanford shib, the email the idp returns is actually under the control of the user. # Since the id the idps return is not user-editable, and is of the from "username@stanford.edu", # use the id to link accounts instead. try: link_user = User.objects.get(email=eamap.external_id) if not ExternalAuthMap.objects.filter(user=link_user).exists(): # if there's no pre-existing linked eamap, we link the user eamap.user = link_user eamap.save() internal_user = link_user log.info(u'SHIB: Linking existing account for %s', eamap.external_id) # now pass through to log in else: # otherwise, there must have been an error, b/c we've already linked a user with these external # creds failure_msg = _( "You have already created an account using " "an external login like WebAuth or Shibboleth. " "Please contact {tech_support_email} for support." 
).format( tech_support_email=settings.TECH_SUPPORT_EMAIL, ) return default_render_failure(request, failure_msg) except User.DoesNotExist: log.info(u'SHIB: No user for %s yet, doing signup', eamap.external_email) return _signup(request, eamap, retfun) else: log.info(u'No user for %s yet. doing signup', eamap.external_email) return _signup(request, eamap, retfun) # We trust shib's authentication, so no need to authenticate using the password again uname = internal_user.username if uses_shibboleth: user = internal_user # Assuming this 'AUTHENTICATION_BACKENDS' is set in settings, which I think is safe if settings.AUTHENTICATION_BACKENDS: auth_backend = settings.AUTHENTICATION_BACKENDS[0] else: auth_backend = 'django.contrib.auth.backends.ModelBackend' user.backend = auth_backend if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info(u'Linked user.id: {0} logged in via Shibboleth'.format(user.id)) else: AUDIT_LOG.info(u'Linked user "{0}" logged in via Shibboleth'.format(user.email)) elif uses_certs: # Certificates are trusted, so just link the user and log the action user = internal_user user.backend = 'django.contrib.auth.backends.ModelBackend' if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info(u'Linked user_id {0} logged in via SSL certificate'.format(user.id)) else: AUDIT_LOG.info(u'Linked user "{0}" logged in via SSL certificate'.format(user.email)) else: user = authenticate(username=uname, password=eamap.internal_password, request=request) if user is None: # we want to log the failure, but don't want to log the password attempted: if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u'External Auth Login failed') else: AUDIT_LOG.warning(u'External Auth Login failed for "{0}"'.format(uname)) return _signup(request, eamap, retfun) if not user.is_active: if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'): # if BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH, we trust external auth and activate any users # that aren't already active 
user.is_active = True user.save() if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info(u'Activating user {0} due to external auth'.format(user.id)) else: AUDIT_LOG.info(u'Activating user "{0}" due to external auth'.format(uname)) else: if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning(u'User {0} is not active after external login'.format(user.id)) else: AUDIT_LOG.warning(u'User "{0}" is not active after external login'.format(uname)) # TODO: improve error page msg = 'Account not yet activated: please look for link in your email' return default_render_failure(request, msg) login(request, user) request.session.set_expiry(0) if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info(u"Login success - user.id: {0}".format(user.id)) else: AUDIT_LOG.info(u"Login success - {0} ({1})".format(user.username, user.email)) if retfun is None: return redirect('/') return retfun() def _flatten_to_ascii(txt): """ Flattens possibly unicode txt to ascii (django username limitation) @param name: @return: the flattened txt (in the same type as was originally passed in) """ if isinstance(txt, str): txt = txt.decode('utf-8') return unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore') else: return unicode(unicodedata.normalize('NFKD', txt).encode('ASCII', 'ignore')) @ensure_csrf_cookie def _signup(request, eamap, retfun=None): """ Present form to complete for signup via external authentication. Even though the user has external credentials, he/she still needs to create an account on the edX system, and fill in the user registration form. eamap is an ExternalAuthMap object, specifying the external user for which to complete the signup. retfun is a function to execute for the return value, if immediate signup is used. That allows @ssl_login_shortcut() to work. 
""" # save this for use by student.views.create_account request.session['ExternalAuthMap'] = eamap if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP', ''): # do signin immediately, by calling create_account, instead of asking # student to fill in form. MIT students already have information filed. username = eamap.external_email.split('@', 1)[0] username = username.replace('.', '_') post_vars = dict(username=username, honor_code=u'true', terms_of_service=u'true') log.info(u'doing immediate signup for %s, params=%s', username, post_vars) student.views.create_account(request, post_vars) # should check return content for successful completion before if retfun is not None: return retfun() else: return redirect('/') # default conjoin name, no spaces, flattened to ascii b/c django can't handle unicode usernames, sadly # but this only affects username, not fullname username = re.sub(r'\s', '', _flatten_to_ascii(eamap.external_name), flags=re.UNICODE) context = {'has_extauth_info': True, 'show_signup_immediately': True, 'extauth_domain': eamap.external_domain, 'extauth_id': eamap.external_id, 'extauth_email': eamap.external_email, 'extauth_username': username, 'extauth_name': eamap.external_name, 'ask_for_tos': True, } # Some openEdX instances can't have terms of service for shib users, like # according to Stanford's Office of General Counsel uses_shibboleth = (settings.FEATURES.get('AUTH_USE_SHIB') and eamap.external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)) if uses_shibboleth and settings.FEATURES.get('SHIB_DISABLE_TOS'): context['ask_for_tos'] = False # detect if full name is blank and ask for it from user context['ask_for_fullname'] = eamap.external_name.strip() == '' # validate provided mail and if it's not valid ask the user try: validate_email(eamap.external_email) context['ask_for_email'] = False except ValidationError: context['ask_for_email'] = True log.info(u'EXTAUTH: Doing signup for %s', eamap.external_id) return 
student.views.register_user(request, extra_context=context) # ----------------------------------------------------------------------------- # MIT SSL # ----------------------------------------------------------------------------- def _ssl_dn_extract_info(dn_string): """ Extract username, email address (may be anyuser@anydomain.com) and full name from the SSL DN string. Return (user,email,fullname) if successful, and None otherwise. """ ss = re.search('/emailAddress=(.*)@([^/]+)', dn_string) if ss: user = ss.group(1) email = "%s@%s" % (user, ss.group(2)) else: return None ss = re.search('/CN=([^/]+)/', dn_string) if ss: fullname = ss.group(1) else: return None return (user, email, fullname) def ssl_get_cert_from_request(request): """ Extract user information from certificate, if it exists, returning (user, email, fullname). Else return None. """ certkey = "SSL_CLIENT_S_DN" # specify the request.META field to use cert = request.META.get(certkey, '') if not cert: cert = request.META.get('HTTP_' + certkey, '') if not cert: try: # try the direct apache2 SSL key cert = request._req.subprocess_env.get(certkey, '') except Exception: return '' return cert def ssl_login_shortcut(fn): """ Python function decorator for login procedures, to allow direct login based on existing ExternalAuth record and MIT ssl certificate. """ def wrapped(*args, **kwargs): """ This manages the function wrapping, by determining whether to inject the _external signup or just continuing to the internal function call. 
""" if not settings.FEATURES['AUTH_USE_CERTIFICATES']: return fn(*args, **kwargs) request = args[0] if request.user and request.user.is_authenticated(): # don't re-authenticate return fn(*args, **kwargs) cert = ssl_get_cert_from_request(request) if not cert: # no certificate information - show normal login window return fn(*args, **kwargs) def retfun(): """Wrap function again for call by _external_login_or_signup""" return fn(*args, **kwargs) (_user, email, fullname) = _ssl_dn_extract_info(cert) return _external_login_or_signup( request, external_id=email, external_domain="ssl:MIT", credentials=cert, email=email, fullname=fullname, retfun=retfun ) return wrapped @csrf_exempt def ssl_login(request): """ This is called by branding.views.index when FEATURES['AUTH_USE_CERTIFICATES'] = True Used for MIT user authentication. This presumes the web server (nginx) has been configured to require specific client certificates. If the incoming protocol is HTTPS (SSL) then authenticate via client certificate. The certificate provides user email and fullname; this populates the ExternalAuthMap. The user is nevertheless still asked to complete the edX signup. Else continues on with student.views.index, and no authentication. 
""" # Just to make sure we're calling this only at MIT: if not settings.FEATURES['AUTH_USE_CERTIFICATES']: return HttpResponseForbidden() cert = ssl_get_cert_from_request(request) if not cert: # no certificate information - go onward to main index return student.views.index(request) (_user, email, fullname) = _ssl_dn_extract_info(cert) redirect_to = get_next_url_for_login_page(request) retfun = functools.partial(redirect, redirect_to) return _external_login_or_signup( request, external_id=email, external_domain="ssl:MIT", credentials=cert, email=email, fullname=fullname, retfun=retfun ) # ----------------------------------------------------------------------------- # CAS (Central Authentication Service) # ----------------------------------------------------------------------------- def cas_login(request, next_page=None, required=False): """ Uses django_cas for authentication. CAS is a common authentcation method pioneered by Yale. See http://en.wikipedia.org/wiki/Central_Authentication_Service Does normal CAS login then generates user_profile if nonexistent, and if login was successful. We assume that user details are maintained by the central service, and thus an empty user profile is appropriate. """ ret = django_cas_login(request, next_page, required) if request.user.is_authenticated(): user = request.user if not UserProfile.objects.filter(user=user): user_profile = UserProfile(name=user.username, user=user) user_profile.save() return ret # ----------------------------------------------------------------------------- # Shibboleth (Stanford and others. Uses *Apache* environment variables) # ----------------------------------------------------------------------------- def shib_login(request): """ Uses Apache's REMOTE_USER environment variable as the external id. This in turn typically uses EduPersonPrincipalName http://www.incommonfederation.org/attributesummary.html#eduPersonPrincipal but the configuration is in the shibboleth software. 
""" shib_error_msg = _(dedent( """ Your university identity server did not return your ID information to us. Please try logging in again. (You may need to restart your browser.) """)) if not request.META.get('REMOTE_USER'): log.error(u"SHIB: no REMOTE_USER found in request.META") return default_render_failure(request, shib_error_msg) elif not request.META.get('Shib-Identity-Provider'): log.error(u"SHIB: no Shib-Identity-Provider in request.META") return default_render_failure(request, shib_error_msg) else: # If we get here, the user has authenticated properly shib = {attr: request.META.get(attr, '').decode('utf-8') for attr in ['REMOTE_USER', 'givenName', 'sn', 'mail', 'Shib-Identity-Provider', 'displayName']} # Clean up first name, last name, and email address # TODO: Make this less hardcoded re: format, but split will work # even if ";" is not present, since we are accessing 1st element shib['sn'] = shib['sn'].split(";")[0].strip().capitalize() shib['givenName'] = shib['givenName'].split(";")[0].strip().capitalize() # TODO: should we be logging creds here, at info level? log.info(u"SHIB creds returned: %r", shib) fullname = shib['displayName'] if shib['displayName'] else u'%s %s' % (shib['givenName'], shib['sn']) redirect_to = get_next_url_for_login_page(request) retfun = functools.partial(_safe_postlogin_redirect, redirect_to, request.get_host()) return _external_login_or_signup( request, external_id=shib['REMOTE_USER'], external_domain=SHIBBOLETH_DOMAIN_PREFIX + shib['Shib-Identity-Provider'], credentials=shib, email=shib['mail'], fullname=fullname, retfun=retfun ) def _safe_postlogin_redirect(redirect_to, safehost, default_redirect='/'): """ If redirect_to param is safe (not off this host), then perform the redirect. Otherwise just redirect to '/'. 
Basically copied from django.contrib.auth.views.login @param redirect_to: user-supplied redirect url @param safehost: which host is safe to redirect to @return: an HttpResponseRedirect """ if is_safe_url(url=redirect_to, host=safehost): return redirect(redirect_to) return redirect(default_redirect) def course_specific_login(request, course_id): """ Dispatcher function for selecting the specific login method required by the course """ course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) course = modulestore().get_course(course_key) if not course: # couldn't find the course, will just return vanilla signin page return redirect_with_get('signin_user', request.GET) # now the dispatching conditionals. Only shib for now if ( settings.FEATURES.get('AUTH_USE_SHIB') and course.enrollment_domain and course.enrollment_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX) ): return redirect_with_get('shib-login', request.GET) # Default fallthrough to normal signin page return redirect_with_get('signin_user', request.GET) def course_specific_register(request, course_id): """ Dispatcher function for selecting the specific registration method required by the course """ course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) course = modulestore().get_course(course_key) if not course: # couldn't find the course, will just return vanilla registration page return redirect_with_get('register_user', request.GET) # now the dispatching conditionals. 
Only shib for now if ( settings.FEATURES.get('AUTH_USE_SHIB') and course.enrollment_domain and course.enrollment_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX) ): # shib-login takes care of both registration and login flows return redirect_with_get('shib-login', request.GET) # Default fallthrough to normal registration page return redirect_with_get('register_user', request.GET) def redirect_with_get(view_name, get_querydict, do_reverse=True): """ Helper function to carry over get parameters across redirects Using urlencode(safe='/') because the @login_required decorator generates 'next' queryparams with '/' unencoded """ if do_reverse: url = reverse(view_name) else: url = view_name if get_querydict: return redirect("%s?%s" % (url, get_querydict.urlencode(safe='/'))) return redirect(view_name) # ----------------------------------------------------------------------------- # OpenID Provider # ----------------------------------------------------------------------------- def get_xrds_url(resource, request): """ Return the XRDS url for a resource """ host = request.get_host() location = host + '/openid/provider/' + resource + '/' if request.is_secure(): return 'https://' + location else: return 'http://' + location def add_openid_simple_registration(request, response, data): sreg_data = {} sreg_request = sreg.SRegRequest.fromOpenIDRequest(request) sreg_fields = sreg_request.allRequestedFields() # if consumer requested simple registration fields, add them if sreg_fields: for field in sreg_fields: if field == 'email' and 'email' in data: sreg_data['email'] = data['email'] elif field == 'fullname' and 'fullname' in data: sreg_data['fullname'] = data['fullname'] elif field == 'nickname' and 'nickname' in data: sreg_data['nickname'] = data['nickname'] # construct sreg response sreg_response = sreg.SRegResponse.extractResponse(sreg_request, sreg_data) sreg_response.toMessage(response.fields) def add_openid_attribute_exchange(request, response, data): try: ax_request = 
ax.FetchRequest.fromOpenIDRequest(request) except ax.AXError: # not using OpenID attribute exchange extension pass else: ax_response = ax.FetchResponse() # if consumer requested attribute exchange fields, add them if ax_request and ax_request.requested_attributes: for type_uri in ax_request.requested_attributes.iterkeys(): email_schema = 'http://axschema.org/contact/email' name_schema = 'http://axschema.org/namePerson' if type_uri == email_schema and 'email' in data: ax_response.addValue(email_schema, data['email']) elif type_uri == name_schema and 'fullname' in data: ax_response.addValue(name_schema, data['fullname']) # construct ax response ax_response.toMessage(response.fields) def provider_respond(server, request, response, data): """ Respond to an OpenID request """ # get and add extensions add_openid_simple_registration(request, response, data) add_openid_attribute_exchange(request, response, data) # create http response from OpenID response webresponse = server.encodeResponse(response) http_response = HttpResponse(webresponse.body) http_response.status_code = webresponse.code # add OpenID headers to response for k, v in webresponse.headers.iteritems(): http_response[k] = v return http_response def validate_trust_root(openid_request): """ Only allow OpenID requests from valid trust roots """ trusted_roots = getattr(settings, 'OPENID_PROVIDER_TRUSTED_ROOT', None) if not trusted_roots: # not using trusted roots return True # don't allow empty trust roots if (not hasattr(openid_request, 'trust_root') or not openid_request.trust_root): log.error('no trust_root') return False # ensure trust root parses cleanly (one wildcard, of form *.foo.com, etc.) 
trust_root = TrustRoot.parse(openid_request.trust_root) if not trust_root: log.error('invalid trust_root') return False # don't allow empty return tos if (not hasattr(openid_request, 'return_to') or not openid_request.return_to): log.error('empty return_to') return False # ensure return to is within trust root if not trust_root.validateURL(openid_request.return_to): log.error('invalid return_to') return False # check that the root matches the ones we trust if not any(r for r in trusted_roots if fnmatch.fnmatch(trust_root, r)): log.error('non-trusted root') return False return True @csrf_exempt def provider_login(request): """ OpenID login endpoint """ # make and validate endpoint endpoint = get_xrds_url('login', request) if not endpoint: return default_render_failure(request, "Invalid OpenID request") # initialize store and server store = DjangoOpenIDStore() server = Server(store, endpoint) # first check to see if the request is an OpenID request. # If so, the client will have specified an 'openid.mode' as part # of the request. querydict = dict(request.REQUEST.items()) error = False if 'openid.mode' in request.GET or 'openid.mode' in request.POST: # decode request try: openid_request = server.decodeRequest(querydict) except (UntrustedReturnURL, ProtocolError): openid_request = None if not openid_request: return default_render_failure(request, "Invalid OpenID request") # don't allow invalid and non-trusted trust roots if not validate_trust_root(openid_request): return default_render_failure(request, "Invalid OpenID trust root") # checkid_immediate not supported, require user interaction if openid_request.mode == 'checkid_immediate': return provider_respond(server, openid_request, openid_request.answer(False), {}) # checkid_setup, so display login page # (by falling through to the provider_login at the # bottom of this method). 
elif openid_request.mode == 'checkid_setup': if openid_request.idSelect(): # remember request and original path request.session['openid_setup'] = { 'request': openid_request, 'url': request.get_full_path(), 'post_params': request.POST, } # user failed login on previous attempt if 'openid_error' in request.session: error = True del request.session['openid_error'] # OpenID response else: return provider_respond(server, openid_request, server.handleRequest(openid_request), {}) # handle login redirection: these are also sent to this view function, # but are distinguished by lacking the openid mode. We also know that # they are posts, because they come from the popup elif request.method == 'POST' and 'openid_setup' in request.session: # get OpenID request from session openid_setup = request.session['openid_setup'] openid_request = openid_setup['request'] openid_request_url = openid_setup['url'] post_params = openid_setup['post_params'] # We need to preserve the parameters, and the easiest way to do this is # through the URL url_post_params = { param: post_params[param] for param in post_params if param.startswith('openid') } encoded_params = urllib.urlencode(url_post_params) if '?' not in openid_request_url: openid_request_url = openid_request_url + '?' + encoded_params else: openid_request_url = openid_request_url + '&' + encoded_params del request.session['openid_setup'] # don't allow invalid trust roots if not validate_trust_root(openid_request): return default_render_failure(request, "Invalid OpenID trust root") # check if user with given email exists # Failure is redirected to this method (by using the original URL), # which will bring up the login dialog. 
email = request.POST.get('email', None) try: user = User.objects.get(email=email) except User.DoesNotExist: request.session['openid_error'] = True if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning("OpenID login failed - Unknown user email") else: msg = "OpenID login failed - Unknown user email: {0}".format(email) AUDIT_LOG.warning(msg) return HttpResponseRedirect(openid_request_url) # attempt to authenticate user (but not actually log them in...) # Failure is again redirected to the login dialog. username = user.username password = request.POST.get('password', None) try: user = authenticate(username=username, password=password, request=request) except RateLimitException: AUDIT_LOG.warning('OpenID - Too many failed login attempts.') return HttpResponseRedirect(openid_request_url) if user is None: request.session['openid_error'] = True if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning("OpenID login failed - invalid password") else: msg = "OpenID login failed - password for {0} is invalid".format(email) AUDIT_LOG.warning(msg) return HttpResponseRedirect(openid_request_url) # authentication succeeded, so fetch user information # that was requested if user is not None and user.is_active: # remove error from session since login succeeded if 'openid_error' in request.session: del request.session['openid_error'] if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.info("OpenID login success - user.id: {0}".format(user.id)) else: AUDIT_LOG.info("OpenID login success - {0} ({1})".format( user.username, user.email)) # redirect user to return_to location url = endpoint + urlquote(user.username) response = openid_request.answer(True, None, url) # Note too that this is hardcoded, and not really responding to # the extensions that were registered in the first place. 
results = { 'nickname': user.username, 'email': user.email, 'fullname': user.profile.name, } # the request succeeded: return provider_respond(server, openid_request, response, results) # the account is not active, so redirect back to the login page: request.session['openid_error'] = True if settings.FEATURES['SQUELCH_PII_IN_LOGS']: AUDIT_LOG.warning("Login failed - Account not active for user.id {0}".format(user.id)) else: msg = "Login failed - Account not active for user {0}".format(username) AUDIT_LOG.warning(msg) return HttpResponseRedirect(openid_request_url) # determine consumer domain if applicable return_to = '' if 'openid.return_to' in request.REQUEST: return_to = request.REQUEST['openid.return_to'] matches = re.match(r'\w+:\/\/([\w\.-]+)', return_to) return_to = matches.group(1) # display login page response = render_to_response('provider_login.html', { 'error': error, 'return_to': return_to }) # add custom XRDS header necessary for discovery process response['X-XRDS-Location'] = get_xrds_url('xrds', request) return response def provider_identity(request): """ XRDS for identity discovery """ response = render_to_response('identity.xml', {'url': get_xrds_url('login', request)}, mimetype='text/xml') # custom XRDS header necessary for discovery process response['X-XRDS-Location'] = get_xrds_url('identity', request) return response def provider_xrds(request): """ XRDS for endpoint discovery """ response = render_to_response('xrds.xml', {'url': get_xrds_url('login', request)}, mimetype='text/xml') # custom XRDS header necessary for discovery process response['X-XRDS-Location'] = get_xrds_url('xrds', request) return response
unknown
codeparrot/codeparrot-clean
// Copyright 2021 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package sql import ( "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb" "github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb" "github.com/cockroachdb/cockroach/pkg/util/mon" ) // SchemaChangerState is state associated with the new schema changer. // It is used to capture the state of an ongoing schema changer in the // transaction inside extraTnState field of a `conn_executor` (or an // `internal_executor`). type SchemaChangerState struct { mode sessiondatapb.NewSchemaChangerMode state scpb.CurrentState // jobID contains the ID of the schema changer job, if it is to be created. jobID jobspb.JobID // stmts contains the SQL statements involved in the schema change. This is // the bare minimum of statement information we need for testing, but in the // future we may want sql.Statement or something. stmts []string // memAcc tracks memory usage of this schema changer state. memAcc mon.BoundAccount }
go
github
https://github.com/cockroachdb/cockroach
pkg/sql/schema_changer_state.go
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.aop.target; import org.apache.commons.pool2.ObjectPool; import org.apache.commons.pool2.PooledObject; import org.apache.commons.pool2.PooledObjectFactory; import org.apache.commons.pool2.impl.DefaultPooledObject; import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.commons.pool2.impl.GenericObjectPoolConfig; import org.jspecify.annotations.Nullable; import org.springframework.util.Assert; /** * {@link org.springframework.aop.TargetSource} implementation that holds * objects in a configurable Apache Commons2 Pool. * * <p>By default, an instance of {@code GenericObjectPool} is created. * Subclasses may change the type of {@code ObjectPool} used by * overriding the {@code createObjectPool()} method. * * <p>Provides many configuration properties mirroring those of the Commons Pool * {@code GenericObjectPool} class; these properties are passed to the * {@code GenericObjectPool} during construction. If creating a subclass of this * class to change the {@code ObjectPool} implementation type, pass in the values * of configuration properties that are relevant to your chosen implementation. * * <p>The {@code testOnBorrow}, {@code testOnReturn} and {@code testWhileIdle} * properties are explicitly not mirrored because the implementation of * {@code PoolableObjectFactory} used by this class does not implement * meaningful validation. 
All exposed Commons Pool properties use the * corresponding Commons Pool defaults. * * @author Rod Johnson * @author Rob Harrop * @author Juergen Hoeller * @author Stephane Nicoll * @author Kazuki Shimizu * @since 4.2 * @see GenericObjectPool * @see #createObjectPool() * @see #setMaxSize * @see #setMaxIdle * @see #setMinIdle * @see #setMaxWait * @see #setTimeBetweenEvictionRunsMillis * @see #setMinEvictableIdleTimeMillis */ @SuppressWarnings({"rawtypes", "unchecked", "serial", "deprecation"}) public class CommonsPool2TargetSource extends AbstractPoolingTargetSource implements PooledObjectFactory<Object> { private int maxIdle = GenericObjectPoolConfig.DEFAULT_MAX_IDLE; private int minIdle = GenericObjectPoolConfig.DEFAULT_MIN_IDLE; private long maxWait = GenericObjectPoolConfig.DEFAULT_MAX_WAIT_MILLIS; private long timeBetweenEvictionRunsMillis = GenericObjectPoolConfig.DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS; private long minEvictableIdleTimeMillis = GenericObjectPoolConfig.DEFAULT_MIN_EVICTABLE_IDLE_TIME_MILLIS; private boolean blockWhenExhausted = GenericObjectPoolConfig.DEFAULT_BLOCK_WHEN_EXHAUSTED; /** * The Apache Commons {@code ObjectPool} used to pool target objects. */ private @Nullable ObjectPool pool; /** * Create a CommonsPoolTargetSource with default settings. * Default maximum size of the pool is 8. * @see #setMaxSize * @see GenericObjectPoolConfig#setMaxTotal */ public CommonsPool2TargetSource() { setMaxSize(GenericObjectPoolConfig.DEFAULT_MAX_TOTAL); } /** * Set the maximum number of idle objects in the pool. * Default is 8. * @see GenericObjectPool#setMaxIdle */ public void setMaxIdle(int maxIdle) { this.maxIdle = maxIdle; } /** * Return the maximum number of idle objects in the pool. */ public int getMaxIdle() { return this.maxIdle; } /** * Set the minimum number of idle objects in the pool. * Default is 0. 
* @see GenericObjectPool#setMinIdle */ public void setMinIdle(int minIdle) { this.minIdle = minIdle; } /** * Return the minimum number of idle objects in the pool. */ public int getMinIdle() { return this.minIdle; } /** * Set the maximum waiting time for fetching an object from the pool. * Default is -1, waiting forever. * @see GenericObjectPool#setMaxWaitMillis */ public void setMaxWait(long maxWait) { this.maxWait = maxWait; } /** * Return the maximum waiting time for fetching an object from the pool. */ public long getMaxWait() { return this.maxWait; } /** * Set the time between eviction runs that check idle objects whether * they have been idle for too long or have become invalid. * Default is -1, not performing any eviction. * @see GenericObjectPool#setTimeBetweenEvictionRunsMillis */ public void setTimeBetweenEvictionRunsMillis(long timeBetweenEvictionRunsMillis) { this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis; } /** * Return the time between eviction runs that check idle objects. */ public long getTimeBetweenEvictionRunsMillis() { return this.timeBetweenEvictionRunsMillis; } /** * Set the minimum time that an idle object can sit in the pool before * it becomes subject to eviction. Default is 1800000 (30 minutes). * <p>Note that eviction runs need to be performed to take this * setting into effect. * @see #setTimeBetweenEvictionRunsMillis * @see GenericObjectPool#setMinEvictableIdleTimeMillis */ public void setMinEvictableIdleTimeMillis(long minEvictableIdleTimeMillis) { this.minEvictableIdleTimeMillis = minEvictableIdleTimeMillis; } /** * Return the minimum time that an idle object can sit in the pool. */ public long getMinEvictableIdleTimeMillis() { return this.minEvictableIdleTimeMillis; } /** * Set whether the call should block when the pool is exhausted. */ public void setBlockWhenExhausted(boolean blockWhenExhausted) { this.blockWhenExhausted = blockWhenExhausted; } /** * Specify if the call should block when the pool is exhausted. 
*/ public boolean isBlockWhenExhausted() { return this.blockWhenExhausted; } /** * Creates and holds an ObjectPool instance. * @see #createObjectPool() */ @Override protected final void createPool() { logger.debug("Creating Commons object pool"); this.pool = createObjectPool(); } /** * Subclasses can override this if they want to return a specific Commons pool. * They should apply any configuration properties to the pool here. * <p>Default is a GenericObjectPool instance with the given pool size. * @return an empty Commons {@code ObjectPool}. * @see GenericObjectPool * @see #setMaxSize */ protected ObjectPool createObjectPool() { GenericObjectPoolConfig config = new GenericObjectPoolConfig(); config.setMaxTotal(getMaxSize()); config.setMaxIdle(getMaxIdle()); config.setMinIdle(getMinIdle()); config.setMaxWaitMillis(getMaxWait()); config.setTimeBetweenEvictionRunsMillis(getTimeBetweenEvictionRunsMillis()); config.setMinEvictableIdleTimeMillis(getMinEvictableIdleTimeMillis()); config.setBlockWhenExhausted(isBlockWhenExhausted()); return new GenericObjectPool(this, config); } /** * Borrows an object from the {@code ObjectPool}. */ @Override public Object getTarget() throws Exception { Assert.state(this.pool != null, "No Commons ObjectPool available"); return this.pool.borrowObject(); } /** * Returns the specified object to the underlying {@code ObjectPool}. */ @Override public void releaseTarget(Object target) throws Exception { if (this.pool != null) { this.pool.returnObject(target); } } @Override public int getActiveCount() throws UnsupportedOperationException { return (this.pool != null ? this.pool.getNumActive() : 0); } @Override public int getIdleCount() throws UnsupportedOperationException { return (this.pool != null ? this.pool.getNumIdle() : 0); } /** * Closes the underlying {@code ObjectPool} when destroying this object. 
*/ @Override public void destroy() throws Exception { if (this.pool != null) { logger.debug("Closing Commons ObjectPool"); this.pool.close(); } } //---------------------------------------------------------------------------- // Implementation of org.apache.commons.pool2.PooledObjectFactory interface //---------------------------------------------------------------------------- @Override public PooledObject<Object> makeObject() throws Exception { return new DefaultPooledObject<>(newPrototypeInstance()); } @Override public void destroyObject(PooledObject<Object> p) throws Exception { destroyPrototypeInstance(p.getObject()); } @Override public boolean validateObject(PooledObject<Object> p) { return true; } @Override public void activateObject(PooledObject<Object> p) throws Exception { } @Override public void passivateObject(PooledObject<Object> p) throws Exception { } }
java
github
https://github.com/spring-projects/spring-framework
spring-aop/src/main/java/org/springframework/aop/target/CommonsPool2TargetSource.java
<!--- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --> # Apache Hadoop Changelog ## Release 0.6.1 - 2006-09-13 ### BUG FIXES: | JIRA | Summary | Priority | Component | Reporter | Contributor | |:---- |:---- | :--- |:---- |:---- |:---- | | [HADOOP-520](https://issues.apache.org/jira/browse/HADOOP-520) | libhdfs returns success even when writing to a file fails | Major | . | Christian Kunz | Arun C Murthy | | [HADOOP-523](https://issues.apache.org/jira/browse/HADOOP-523) | TextInputformat .isSplittable() fails with NullPointerException with hadoop 0.6.1 | Major | . | Sanjay Dahiya | Owen O'Malley | | [HADOOP-521](https://issues.apache.org/jira/browse/HADOOP-521) | classloader problem for clients | Major | io | Christian Kunz | Owen O'Malley | | [HADOOP-526](https://issues.apache.org/jira/browse/HADOOP-526) | datanode lock message causes NullPointerException | Major | . | Owen O'Malley | Milind Bhandarkar | | [HADOOP-529](https://issues.apache.org/jira/browse/HADOOP-529) | SequenceFile fails task with NullPointerException in codec initialization | Blocker | . | Sanjay Dahiya | Owen O'Malley |
unknown
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/site/markdown/release/0.6.1/CHANGELOG.0.6.1.md
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Metrics module includes the different type of metrics to measure distance among RAVEN dataobjects """ from __future__ import absolute_import ## These lines ensure that we do not have to do something like: ## 'from OutStreamManagers.OutStreamPlot import OutStreamPlot' outside ## of this submodule from .Metric import Metric from .DTW import DTW from .SklMetric import SKL from .PairwiseMetric import PairwiseMetric from .CDFAreaDifference import CDFAreaDifference from .PDFCommonArea import PDFCommonArea from .ScipyMetric import ScipyMetric from .Factory import knownTypes from .Factory import returnInstance from .Factory import returnClass __all__ = ['DTW','SKL','PairwiseMetric','CDFAreaDifference','PDFCommonArea','ScipyMetric']
unknown
codeparrot/codeparrot-clean
"""Default tags used by the template system, available to all templates.""" from __future__ import unicode_literals import re import sys import warnings from datetime import datetime from itertools import cycle as itertools_cycle, groupby from django.conf import settings from django.utils import six, timezone from django.utils.encoding import force_text, smart_text from django.utils.html import conditional_escape, format_html from django.utils.lorem_ipsum import paragraphs, words from django.utils.safestring import mark_safe from .base import ( BLOCK_TAG_END, BLOCK_TAG_START, COMMENT_TAG_END, COMMENT_TAG_START, SINGLE_BRACE_END, SINGLE_BRACE_START, VARIABLE_ATTRIBUTE_SEPARATOR, VARIABLE_TAG_END, VARIABLE_TAG_START, Context, Node, NodeList, TemplateSyntaxError, VariableDoesNotExist, kwarg_re, render_value_in_context, token_kwargs, ) from .defaultfilters import date from .library import Library from .smartif import IfParser, Literal register = Library() class AutoEscapeControlNode(Node): """Implements the actions of the autoescape tag.""" def __init__(self, setting, nodelist): self.setting, self.nodelist = setting, nodelist def render(self, context): old_setting = context.autoescape context.autoescape = self.setting output = self.nodelist.render(context) context.autoescape = old_setting if self.setting: return mark_safe(output) else: return output class CommentNode(Node): def render(self, context): return '' class CsrfTokenNode(Node): def render(self, context): csrf_token = context.get('csrf_token') if csrf_token: if csrf_token == 'NOTPROVIDED': return format_html("") else: return format_html("<input type='hidden' name='csrfmiddlewaretoken' value='{}' />", csrf_token) else: # It's very probable that the token is missing because of # misconfiguration, so we raise a warning if settings.DEBUG: warnings.warn( "A {% csrf_token %} was used in a template, but the context " "did not provide the value. This is usually caused by not " "using RequestContext." 
) return '' class CycleNode(Node): def __init__(self, cyclevars, variable_name=None, silent=False): self.cyclevars = cyclevars self.variable_name = variable_name self.silent = silent def render(self, context): if self not in context.render_context: # First time the node is rendered in template context.render_context[self] = itertools_cycle(self.cyclevars) cycle_iter = context.render_context[self] value = next(cycle_iter).resolve(context) if self.variable_name: context[self.variable_name] = value if self.silent: return '' return render_value_in_context(value, context) class DebugNode(Node): def render(self, context): from pprint import pformat output = [force_text(pformat(val)) for val in context] output.append('\n\n') output.append(force_text(pformat(sys.modules))) return ''.join(output) class FilterNode(Node): def __init__(self, filter_expr, nodelist): self.filter_expr, self.nodelist = filter_expr, nodelist def render(self, context): output = self.nodelist.render(context) # Apply filters. 
with context.push(var=output): return self.filter_expr.resolve(context) class FirstOfNode(Node): def __init__(self, variables, asvar=None): self.vars = variables self.asvar = asvar def render(self, context): for var in self.vars: value = var.resolve(context, True) if value: first = render_value_in_context(value, context) if self.asvar: context[self.asvar] = first return '' return first return '' class ForNode(Node): child_nodelists = ('nodelist_loop', 'nodelist_empty') def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None): self.loopvars, self.sequence = loopvars, sequence self.is_reversed = is_reversed self.nodelist_loop = nodelist_loop if nodelist_empty is None: self.nodelist_empty = NodeList() else: self.nodelist_empty = nodelist_empty def __repr__(self): reversed_text = ' reversed' if self.is_reversed else '' return "<For Node: for %s in %s, tail_len: %d%s>" % \ (', '.join(self.loopvars), self.sequence, len(self.nodelist_loop), reversed_text) def __iter__(self): for node in self.nodelist_loop: yield node for node in self.nodelist_empty: yield node def render(self, context): if 'forloop' in context: parentloop = context['forloop'] else: parentloop = {} with context.push(): try: values = self.sequence.resolve(context, True) except VariableDoesNotExist: values = [] if values is None: values = [] if not hasattr(values, '__len__'): values = list(values) len_values = len(values) if len_values < 1: return self.nodelist_empty.render(context) nodelist = [] if self.is_reversed: values = reversed(values) num_loopvars = len(self.loopvars) unpack = num_loopvars > 1 # Create a forloop value in the context. We'll update counters on each # iteration just below. loop_dict = context['forloop'] = {'parentloop': parentloop} for i, item in enumerate(values): # Shortcuts for current loop iteration number. loop_dict['counter0'] = i loop_dict['counter'] = i + 1 # Reverse counter iteration numbers. 
loop_dict['revcounter'] = len_values - i loop_dict['revcounter0'] = len_values - i - 1 # Boolean values designating first and last times through loop. loop_dict['first'] = (i == 0) loop_dict['last'] = (i == len_values - 1) pop_context = False if unpack: # If there are multiple loop variables, unpack the item into # them. # To complete this deprecation, remove from here to the # try/except block as well as the try/except itself, # leaving `unpacked_vars = ...` and the "else" statements. if not isinstance(item, (list, tuple)): len_item = 1 else: len_item = len(item) # Check loop variable count before unpacking if num_loopvars != len_item: raise ValueError( "Need {} values to unpack in for loop; got {}. " .format(num_loopvars, len_item), ) try: unpacked_vars = dict(zip(self.loopvars, item)) except TypeError: pass else: pop_context = True context.update(unpacked_vars) else: context[self.loopvars[0]] = item for node in self.nodelist_loop: nodelist.append(node.render_annotated(context)) if pop_context: # The loop variables were pushed on to the context so pop them # off again. This is necessary because the tag lets the length # of loopvars differ to the length of each set of items and we # don't want to leave any vars from the previous loop on the # context. context.pop() return mark_safe(''.join(force_text(n) for n in nodelist)) class IfChangedNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, nodelist_true, nodelist_false, *varlist): self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self._varlist = varlist def render(self, context): # Init state storage state_frame = self._get_context_stack_frame(context) if self not in state_frame: state_frame[self] = None nodelist_true_output = None try: if self._varlist: # Consider multiple parameters. This automatically behaves # like an OR evaluation of the multiple variables. 
compare_to = [var.resolve(context, True) for var in self._varlist] else: # The "{% ifchanged %}" syntax (without any variables) compares the rendered output. compare_to = nodelist_true_output = self.nodelist_true.render(context) except VariableDoesNotExist: compare_to = None if compare_to != state_frame[self]: state_frame[self] = compare_to # render true block if not already rendered return nodelist_true_output or self.nodelist_true.render(context) elif self.nodelist_false: return self.nodelist_false.render(context) return '' def _get_context_stack_frame(self, context): # The Context object behaves like a stack where each template tag can create a new scope. # Find the place where to store the state to detect changes. if 'forloop' in context: # Ifchanged is bound to the local for loop. # When there is a loop-in-loop, the state is bound to the inner loop, # so it resets when the outer loop continues. return context['forloop'] else: # Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'. 
return context.render_context class IfEqualNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, var1, var2, nodelist_true, nodelist_false, negate): self.var1, self.var2 = var1, var2 self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self.negate = negate def __repr__(self): return "<IfEqualNode>" def render(self, context): val1 = self.var1.resolve(context, True) val2 = self.var2.resolve(context, True) if (self.negate and val1 != val2) or (not self.negate and val1 == val2): return self.nodelist_true.render(context) return self.nodelist_false.render(context) class IfNode(Node): def __init__(self, conditions_nodelists): self.conditions_nodelists = conditions_nodelists def __repr__(self): return "<IfNode>" def __iter__(self): for _, nodelist in self.conditions_nodelists: for node in nodelist: yield node @property def nodelist(self): return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist) def render(self, context): for condition, nodelist in self.conditions_nodelists: if condition is not None: # if / elif clause try: match = condition.eval(context) except VariableDoesNotExist: match = None else: # else clause match = True if match: return nodelist.render(context) return '' class LoremNode(Node): def __init__(self, count, method, common): self.count, self.method, self.common = count, method, common def render(self, context): try: count = int(self.count.resolve(context)) except (ValueError, TypeError): count = 1 if self.method == 'w': return words(count, common=self.common) else: paras = paragraphs(count, common=self.common) if self.method == 'p': paras = ['<p>%s</p>' % p for p in paras] return '\n\n'.join(paras) class RegroupNode(Node): def __init__(self, target, expression, var_name): self.target, self.expression = target, expression self.var_name = var_name def resolve_expression(self, obj, context): # This method is called for each object in self.target. 
See regroup() # for the reason why we temporarily put the object in the context. context[self.var_name] = obj return self.expression.resolve(context, True) def render(self, context): obj_list = self.target.resolve(context, True) if obj_list is None: # target variable wasn't found in context; fail silently. context[self.var_name] = [] return '' # List of dictionaries in the format: # {'grouper': 'key', 'list': [list of contents]}. context[self.var_name] = [ {'grouper': key, 'list': list(val)} for key, val in groupby(obj_list, lambda obj: self.resolve_expression(obj, context)) ] return '' class LoadNode(Node): def render(self, context): return '' class NowNode(Node): def __init__(self, format_string, asvar=None): self.format_string = format_string self.asvar = asvar def render(self, context): tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None formatted = date(datetime.now(tz=tzinfo), self.format_string) if self.asvar: context[self.asvar] = formatted return '' else: return formatted class SpacelessNode(Node): def __init__(self, nodelist): self.nodelist = nodelist def render(self, context): from django.utils.html import strip_spaces_between_tags return strip_spaces_between_tags(self.nodelist.render(context).strip()) class TemplateTagNode(Node): mapping = {'openblock': BLOCK_TAG_START, 'closeblock': BLOCK_TAG_END, 'openvariable': VARIABLE_TAG_START, 'closevariable': VARIABLE_TAG_END, 'openbrace': SINGLE_BRACE_START, 'closebrace': SINGLE_BRACE_END, 'opencomment': COMMENT_TAG_START, 'closecomment': COMMENT_TAG_END, } def __init__(self, tagtype): self.tagtype = tagtype def render(self, context): return self.mapping.get(self.tagtype, '') class URLNode(Node): def __init__(self, view_name, args, kwargs, asvar): self.view_name = view_name self.args = args self.kwargs = kwargs self.asvar = asvar def render(self, context): from django.urls import reverse, NoReverseMatch args = [arg.resolve(context) for arg in self.args] kwargs = { smart_text(k, 'ascii'): 
v.resolve(context) for k, v in self.kwargs.items() } view_name = self.view_name.resolve(context) try: current_app = context.request.current_app except AttributeError: try: current_app = context.request.resolver_match.namespace except AttributeError: current_app = None # Try to look up the URL. If it fails, raise NoReverseMatch unless the # {% url ... as var %} construct is used, in which case return nothing. url = '' try: url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app) except NoReverseMatch: if self.asvar is None: raise if self.asvar: context[self.asvar] = url return '' else: if context.autoescape: url = conditional_escape(url) return url class VerbatimNode(Node): def __init__(self, content): self.content = content def render(self, context): return self.content class WidthRatioNode(Node): def __init__(self, val_expr, max_expr, max_width, asvar=None): self.val_expr = val_expr self.max_expr = max_expr self.max_width = max_width self.asvar = asvar def render(self, context): try: value = self.val_expr.resolve(context) max_value = self.max_expr.resolve(context) max_width = int(self.max_width.resolve(context)) except VariableDoesNotExist: return '' except (ValueError, TypeError): raise TemplateSyntaxError("widthratio final argument must be a number") try: value = float(value) max_value = float(max_value) ratio = (value / max_value) * max_width result = str(int(round(ratio))) except ZeroDivisionError: return '0' except (ValueError, TypeError, OverflowError): return '' if self.asvar: context[self.asvar] = result return '' else: return result class WithNode(Node): def __init__(self, var, name, nodelist, extra_context=None): self.nodelist = nodelist # var and name are legacy attributes, being left in case they are used # by third-party subclasses of this Node. 
self.extra_context = extra_context or {} if name: self.extra_context[name] = var def __repr__(self): return "<WithNode>" def render(self, context): values = {key: val.resolve(context) for key, val in six.iteritems(self.extra_context)} with context.push(**values): return self.nodelist.render(context) @register.tag def autoescape(parser, token): """ Force autoescape behavior for this block. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 2: raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.") arg = args[1] if arg not in ('on', 'off'): raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'") nodelist = parser.parse(('endautoescape',)) parser.delete_first_token() return AutoEscapeControlNode((arg == 'on'), nodelist) @register.tag def comment(parser, token): """ Ignores everything between ``{% comment %}`` and ``{% endcomment %}``. """ parser.skip_past('endcomment') return CommentNode() @register.tag def cycle(parser, token): """ Cycles among the given strings each time this tag is encountered. Within a loop, cycles among the given strings each time through the loop:: {% for o in some_list %} <tr class="{% cycle 'row1' 'row2' %}"> ... </tr> {% endfor %} Outside of a loop, give the values a unique name the first time you call it, then use that name each successive time through:: <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> You can use any number of values, separated by spaces. Commas can also be used to separate values; if a comma is used, the cycle values are interpreted as literal strings. 
The optional flag "silent" can be used to prevent the cycle declaration from returning any value:: {% for o in some_list %} {% cycle 'row1' 'row2' as rowcolors silent %} <tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr> {% endfor %} """ # Note: This returns the exact same node on each {% cycle name %} call; # that is, the node object returned from {% cycle a b c as name %} and the # one returned from {% cycle name %} are the exact same object. This # shouldn't cause problems (heh), but if it does, now you know. # # Ugly hack warning: This stuffs the named template dict into parser so # that names are only unique within each template (as opposed to using # a global variable, which would make cycle names have to be unique across # *all* templates. args = token.split_contents() if len(args) < 2: raise TemplateSyntaxError("'cycle' tag requires at least two arguments") if len(args) == 2: # {% cycle foo %} case. name = args[1] if not hasattr(parser, '_namedCycleNodes'): raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name) if name not in parser._namedCycleNodes: raise TemplateSyntaxError("Named cycle '%s' does not exist" % name) return parser._namedCycleNodes[name] as_form = False if len(args) > 4: # {% cycle ... as foo [silent] %} case. if args[-3] == "as": if args[-1] != "silent": raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." 
% args[-1]) as_form = True silent = True args = args[:-1] elif args[-2] == "as": as_form = True silent = False if as_form: name = args[-1] values = [parser.compile_filter(arg) for arg in args[1:-2]] node = CycleNode(values, name, silent=silent) if not hasattr(parser, '_namedCycleNodes'): parser._namedCycleNodes = {} parser._namedCycleNodes[name] = node else: values = [parser.compile_filter(arg) for arg in args[1:]] node = CycleNode(values) return node @register.tag def csrf_token(parser, token): return CsrfTokenNode() @register.tag def debug(parser, token): """ Outputs a whole load of debugging information, including the current context and imported modules. Sample usage:: <pre> {% debug %} </pre> """ return DebugNode() @register.tag('filter') def do_filter(parser, token): """ Filters the contents of the block through variable filters. Filters can also be piped through each other, and they can have arguments -- just like in variable syntax. Sample usage:: {% filter force_escape|lower %} This text will be HTML-escaped, and will appear in lowercase. {% endfilter %} Note that the ``escape`` and ``safe`` filters are not acceptable arguments. Instead, use the ``autoescape`` tag to manage autoescaping for blocks of template code. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments _, rest = token.contents.split(None, 1) filter_expr = parser.compile_filter("var|%s" % (rest)) for func, unused in filter_expr.filters: filter_name = getattr(func, '_filter_name', None) if filter_name in ('escape', 'safe'): raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % filter_name) nodelist = parser.parse(('endfilter',)) parser.delete_first_token() return FilterNode(filter_expr, nodelist) @register.tag def firstof(parser, token): """ Outputs the first variable passed that is not False, without escaping. Outputs nothing if all the passed variables are False. 
Sample usage:: {% firstof var1 var2 var3 as myvar %} This is equivalent to:: {% if var1 %} {{ var1|safe }} {% elif var2 %} {{ var2|safe }} {% elif var3 %} {{ var3|safe }} {% endif %} but obviously much cleaner! You can also use a literal string as a fallback value in case all passed variables are False:: {% firstof var1 var2 var3 "fallback value" %} If you want to disable auto-escaping of variables you can use:: {% autoescape off %} {% firstof var1 var2 var3 "<strong>fallback value</strong>" %} {% autoescape %} Or if only some variables should be escaped, you can use:: {% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %} """ bits = token.split_contents()[1:] asvar = None if len(bits) < 1: raise TemplateSyntaxError("'firstof' statement requires at least one argument") if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] return FirstOfNode([parser.compile_filter(bit) for bit in bits], asvar) @register.tag('for') def do_for(parser, token): """ Loops over each item in an array. For example, to display a list of athletes given ``athlete_list``:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} </ul> You can loop over a list in reverse by using ``{% for obj in list reversed %}``. 
You can also unpack multiple values from a two-dimensional array:: {% for key,value in dict.items %} {{ key }}: {{ value }} {% endfor %} The ``for`` tag can take an optional ``{% empty %}`` clause that will be displayed if the given array is empty or could not be found:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% empty %} <li>Sorry, no athletes in this list.</li> {% endfor %} <ul> The above is equivalent to -- but shorter, cleaner, and possibly faster than -- the following:: <ul> {% if athlete_list %} {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} {% else %} <li>Sorry, no athletes in this list.</li> {% endif %} </ul> The for loop sets a number of variables available within the loop: ========================== ================================================ Variable Description ========================== ================================================ ``forloop.counter`` The current iteration of the loop (1-indexed) ``forloop.counter0`` The current iteration of the loop (0-indexed) ``forloop.revcounter`` The number of iterations from the end of the loop (1-indexed) ``forloop.revcounter0`` The number of iterations from the end of the loop (0-indexed) ``forloop.first`` True if this is the first time through the loop ``forloop.last`` True if this is the last time through the loop ``forloop.parentloop`` For nested loops, this is the loop "above" the current one ========================== ================================================ """ bits = token.split_contents() if len(bits) < 4: raise TemplateSyntaxError("'for' statements should have at least four" " words: %s" % token.contents) is_reversed = bits[-1] == 'reversed' in_index = -3 if is_reversed else -2 if bits[in_index] != 'in': raise TemplateSyntaxError("'for' statements should use the format" " 'for x in y': %s" % token.contents) loopvars = re.split(r' *, *', ' '.join(bits[1:in_index])) for var in loopvars: if not var or ' ' in var: raise 
TemplateSyntaxError("'for' tag received an invalid argument:" " %s" % token.contents) sequence = parser.compile_filter(bits[in_index + 1]) nodelist_loop = parser.parse(('empty', 'endfor',)) token = parser.next_token() if token.contents == 'empty': nodelist_empty = parser.parse(('endfor',)) parser.delete_first_token() else: nodelist_empty = None return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty) def do_ifequal(parser, token, negate): bits = list(token.split_contents()) if len(bits) != 3: raise TemplateSyntaxError("%r takes two arguments" % bits[0]) end_tag = 'end' + bits[0] nodelist_true = parser.parse(('else', end_tag)) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse((end_tag,)) parser.delete_first_token() else: nodelist_false = NodeList() val1 = parser.compile_filter(bits[1]) val2 = parser.compile_filter(bits[2]) return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate) @register.tag def ifequal(parser, token): """ Outputs the contents of the block if the two arguments equal each other. Examples:: {% ifequal user.id comment.user_id %} ... {% endifequal %} {% ifnotequal user.id comment.user_id %} ... {% else %} ... {% endifnotequal %} """ return do_ifequal(parser, token, False) @register.tag def ifnotequal(parser, token): """ Outputs the contents of the block if the two arguments are not equal. See ifequal. 
""" return do_ifequal(parser, token, True) class TemplateLiteral(Literal): def __init__(self, value, text): self.value = value self.text = text # for better error messages def display(self): return self.text def eval(self, context): return self.value.resolve(context, ignore_failures=True) class TemplateIfParser(IfParser): error_class = TemplateSyntaxError def __init__(self, parser, *args, **kwargs): self.template_parser = parser super(TemplateIfParser, self).__init__(*args, **kwargs) def create_var(self, value): return TemplateLiteral(self.template_parser.compile_filter(value), value) @register.tag('if') def do_if(parser, token): """ The ``{% if %}`` tag evaluates a variable, and if that variable is "true" (i.e., exists, is not empty, and is not a false boolean value), the contents of the block are output: :: {% if athlete_list %} Number of athletes: {{ athlete_list|count }} {% elif athlete_in_locker_room_list %} Athletes should be out of the locker room soon! {% else %} No athletes. {% endif %} In the above, if ``athlete_list`` is not empty, the number of athletes will be displayed by the ``{{ athlete_list|count }}`` variable. As you can see, the ``if`` tag may take one or several `` {% elif %}`` clauses, as well as an ``{% else %}`` clause that will be displayed if all previous conditions fail. These clauses are optional. ``if`` tags may use ``or``, ``and`` or ``not`` to test a number of variables or to negate a given variable:: {% if not athlete_list %} There are no athletes. {% endif %} {% if athlete_list or coach_list %} There are some athletes or some coaches. {% endif %} {% if athlete_list and coach_list %} Both athletes and coaches are available. {% endif %} {% if not athlete_list or coach_list %} There are no athletes, or there are some coaches. {% endif %} {% if athlete_list and not coach_list %} There are some athletes and absolutely no coaches. 
{% endif %} Comparison operators are also available, and the use of filters is also allowed, for example:: {% if articles|length >= 5 %}...{% endif %} Arguments and operators _must_ have a space between them, so ``{% if 1>2 %}`` is not a valid if tag. All supported operators are: ``or``, ``and``, ``in``, ``not in`` ``==``, ``!=``, ``>``, ``>=``, ``<`` and ``<=``. Operator precedence follows Python. """ # {% if ... %} bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists = [(condition, nodelist)] token = parser.next_token() # {% elif ... %} (repeatable) while token.contents.startswith('elif'): bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists.append((condition, nodelist)) token = parser.next_token() # {% else %} (optional) if token.contents == 'else': nodelist = parser.parse(('endif',)) conditions_nodelists.append((None, nodelist)) token = parser.next_token() # {% endif %} assert token.contents == 'endif' return IfNode(conditions_nodelists) @register.tag def ifchanged(parser, token): """ Checks if a value has changed from the last iteration of a loop. The ``{% ifchanged %}`` block tag is used within a loop. It has two possible uses. 1. Checks its own rendered contents against its previous state and only displays the content if it has changed. For example, this displays a list of days, only displaying the month if it changes:: <h1>Archive for {{ year }}</h1> {% for date in days %} {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %} <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a> {% endfor %} 2. If given one or more variables, check whether any variable has changed. 
For example, the following shows the date every time it changes, while showing the hour if either the hour or the date has changed:: {% for date in days %} {% ifchanged date.date %} {{ date.date }} {% endifchanged %} {% ifchanged date.hour date.date %} {{ date.hour }} {% endifchanged %} {% endfor %} """ bits = token.split_contents() nodelist_true = parser.parse(('else', 'endifchanged')) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse(('endifchanged',)) parser.delete_first_token() else: nodelist_false = NodeList() values = [parser.compile_filter(bit) for bit in bits[1:]] return IfChangedNode(nodelist_true, nodelist_false, *values) def find_library(parser, name): try: return parser.libraries[name] except KeyError: raise TemplateSyntaxError( "'%s' is not a registered tag library. Must be one of:\n%s" % ( name, "\n".join(sorted(parser.libraries.keys())), ), ) def load_from_library(library, label, names): """ Return a subset of tags and filters from a library. """ subset = Library() for name in names: found = False if name in library.tags: found = True subset.tags[name] = library.tags[name] if name in library.filters: found = True subset.filters[name] = library.filters[name] if found is False: raise TemplateSyntaxError( "'%s' is not a valid tag or filter in tag library '%s'" % ( name, label, ), ) return subset @register.tag def load(parser, token): """ Loads a custom template tag library into the parser. 
For example, to load the template tags in ``django/templatetags/news/photos.py``:: {% load news.photos %} Can also be used to load an individual tag/filter from a library:: {% load byline from news %} """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) >= 4 and bits[-2] == "from": # from syntax is used; load individual tags from the library name = bits[-1] lib = find_library(parser, name) subset = load_from_library(lib, name, bits[1:-2]) parser.add_library(subset) else: # one or more libraries are specified; load and add them to the parser for name in bits[1:]: lib = find_library(parser, name) parser.add_library(lib) return LoadNode() @register.tag def lorem(parser, token): """ Creates random Latin text useful for providing test data in templates. Usage format:: {% lorem [count] [method] [random] %} ``count`` is a number (or variable) containing the number of paragraphs or words to generate (default is 1). ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for plain-text paragraph blocks (default is ``b``). ``random`` is the word ``random``, which if given, does not use the common paragraph (starting "Lorem ipsum dolor sit amet, consectetuer..."). 
Examples: * ``{% lorem %}`` will output the common "lorem ipsum" paragraph * ``{% lorem 3 p %}`` will output the common "lorem ipsum" paragraph and two random paragraphs each wrapped in HTML ``<p>`` tags * ``{% lorem 2 w random %}`` will output two random latin words """ bits = list(token.split_contents()) tagname = bits[0] # Random bit common = bits[-1] != 'random' if not common: bits.pop() # Method bit if bits[-1] in ('w', 'p', 'b'): method = bits.pop() else: method = 'b' # Count bit if len(bits) > 1: count = bits.pop() else: count = '1' count = parser.compile_filter(count) if len(bits) != 1: raise TemplateSyntaxError("Incorrect format for %r tag" % tagname) return LoremNode(count, method, common) @register.tag def now(parser, token): """ Displays the date, formatted according to the given string. Uses the same format as PHP's ``date()`` function; see http://php.net/date for all the possible values. Sample usage:: It is {% now "jS F Y H:i" %} """ bits = token.split_contents() asvar = None if len(bits) == 4 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] if len(bits) != 2: raise TemplateSyntaxError("'now' statement takes one argument") format_string = bits[1][1:-1] return NowNode(format_string, asvar) @register.tag def regroup(parser, token): """ Regroups a list of alike objects by a common attribute. 
This complex tag is best illustrated by use of an example: say that ``people`` is a list of ``Person`` objects that have ``first_name``, ``last_name``, and ``gender`` attributes, and you'd like to display a list that looks like: * Male: * George Bush * Bill Clinton * Female: * Margaret Thatcher * Colendeeza Rice * Unknown: * Pat Smith The following snippet of template code would accomplish this dubious task:: {% regroup people by gender as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for item in group.list %} <li>{{ item }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female`` and ``Unknown``, and ``list`` is the list of people with those genders. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of people was not sorted by gender, you'd need to make sure it is sorted before using it, i.e.:: {% regroup people|dictsort:"gender" by gender as grouped %} """ bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must" " be 'as'") var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. 
This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag def spaceless(parser, token): """ Removes whitespace between HTML tags, including tab and newline characters. Example usage:: {% spaceless %} <p> <a href="foo/">Foo</a> </p> {% endspaceless %} This example would return this HTML:: <p><a href="foo/">Foo</a></p> Only space between *tags* is normalized -- not space between tags and text. In this example, the space around ``Hello`` won't be stripped:: {% spaceless %} <strong> Hello </strong> {% endspaceless %} """ nodelist = parser.parse(('endspaceless',)) parser.delete_first_token() return SpacelessNode(nodelist) @register.tag def templatetag(parser, token): """ Outputs one of the bits used to compose template tags. Since the template system has no concept of "escaping", to display one of the bits used in template tags, you must use the ``{% templatetag %}`` tag. The argument tells which template bit to output: ================== ======= Argument Outputs ================== ======= ``openblock`` ``{%`` ``closeblock`` ``%}`` ``openvariable`` ``{{`` ``closevariable`` ``}}`` ``openbrace`` ``{`` ``closebrace`` ``}`` ``opencomment`` ``{#`` ``closecomment`` ``#}`` ================== ======= """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) != 2: raise TemplateSyntaxError("'templatetag' statement takes one argument") tag = bits[1] if tag not in TemplateTagNode.mapping: raise TemplateSyntaxError("Invalid templatetag argument: '%s'." " Must be one of: %s" % (tag, list(TemplateTagNode.mapping))) return TemplateTagNode(tag) @register.tag def url(parser, token): """ Return an absolute URL matching the given view with its parameters. 
This is a way to define links that aren't tied to a particular URL configuration:: {% url "url_name" arg1 arg2 %} or {% url "url_name" name1=value1 name2=value2 %} The first argument is a django.conf.urls.url() name. Other arguments are space-separated values that will be filled in place of positional and keyword arguments in the URL. Don't mix positional and keyword arguments. All arguments for the URL must be present. For example, if you have a view ``app_name.views.client_details`` taking the client's id and the corresponding line in a URLconf looks like this:: url('^client/(\d+)/$', views.client_details, name='client-detail-view') and this app's URLconf is included into the project's URLconf under some path:: url('^clients/', include('app_name.urls')) then in a template you can create a link for a certain client like this:: {% url "client-detail-view" client.id %} The URL will look like ``/clients/client/123/``. The first argument may also be the name of a template variable that will be evaluated to obtain the view name or the URL name, e.g.:: {% with url_name="client-detail-view" %} {% url url_name client.id %} {% endwith %} """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument, the name of a url()." % bits[0]) viewname = parser.compile_filter(bits[1]) args = [] kwargs = {} asvar = None bits = bits[2:] if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] if len(bits): for bit in bits: match = kwarg_re.match(bit) if not match: raise TemplateSyntaxError("Malformed arguments to url tag") name, value = match.groups() if name: kwargs[name] = parser.compile_filter(value) else: args.append(parser.compile_filter(value)) return URLNode(viewname, args, kwargs, asvar) @register.tag def verbatim(parser, token): """ Stops the template engine from rendering the contents of this block tag. 
Usage:: {% verbatim %} {% don't process this %} {% endverbatim %} You can also designate a specific closing tag block (allowing the unrendered use of ``{% endverbatim %}``):: {% verbatim myblock %} ... {% endverbatim myblock %} """ nodelist = parser.parse(('endverbatim',)) parser.delete_first_token() return VerbatimNode(nodelist.render(Context())) @register.tag def widthratio(parser, token): """ For creating bar charts and such, this tag calculates the ratio of a given value to a maximum value, and then applies that ratio to a constant. For example:: <img src="bar.png" alt="Bar" height="10" width="{% widthratio this_value max_value max_width %}" /> If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100, the image in the above example will be 88 pixels wide (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88). In some cases you might want to capture the result of widthratio in a variable. It can be useful for instance in a blocktrans like this:: {% widthratio this_value max_value max_width as width %} {% blocktrans %}The width is: {{ width }}{% endblocktrans %} """ bits = token.split_contents() if len(bits) == 4: tag, this_value_expr, max_value_expr, max_width = bits asvar = None elif len(bits) == 6: tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits if as_ != 'as': raise TemplateSyntaxError("Invalid syntax in widthratio tag. Expecting 'as' keyword") else: raise TemplateSyntaxError("widthratio takes at least three arguments") return WidthRatioNode(parser.compile_filter(this_value_expr), parser.compile_filter(max_value_expr), parser.compile_filter(max_width), asvar=asvar) @register.tag('with') def do_with(parser, token): """ Adds one or more values to the context (inside of this block) for caching and easy access. For example:: {% with total=person.some_sql_method %} {{ total }} object{{ total|pluralize }} {% endwith %} Multiple values can be added to the context:: {% with foo=1 bar=2 %} ... 
{% endwith %} The legacy format of ``{% with person.some_sql_method as total %}`` is still accepted. """ bits = token.split_contents() remaining_bits = bits[1:] extra_context = token_kwargs(remaining_bits, parser, support_legacy=True) if not extra_context: raise TemplateSyntaxError("%r expected at least one variable " "assignment" % bits[0]) if remaining_bits: raise TemplateSyntaxError("%r received an invalid token: %r" % (bits[0], remaining_bits[0])) nodelist = parser.parse(('endwith',)) parser.delete_first_token() return WithNode(None, None, nodelist, extra_context=extra_context)
unknown
codeparrot/codeparrot-clean
import logging

from pip.basecommand import Command
from pip.operations.check import check_requirements
from pip.utils import get_installed_distributions

logger = logging.getLogger(__name__)


class CheckCommand(Command):
    """Verify installed packages have compatible dependencies."""
    name = 'check'
    usage = """
      %prog [options]"""
    summary = 'Verify installed packages have compatible dependencies.'

    def run(self, options, args):
        # Include globally-installed and editable dists; skip nothing so the
        # whole environment is checked.
        dists = get_installed_distributions(local_only=False, skip=())
        missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists)

        for dist in dists:
            key = '%s==%s' % (dist.project_name, dist.version)

            # Requirements declared by `dist` that are absent entirely.
            for requirement in missing_reqs_dict.get(key, []):
                logger.info(
                    "%s %s requires %s, which is not installed.",
                    dist.project_name, dist.version, requirement.project_name)

            # Requirements present but at a version outside the specifier.
            for requirement, actual in incompatible_reqs_dict.get(key, []):
                logger.info(
                    "%s %s has requirement %s, but you have %s %s.",
                    dist.project_name, dist.version, requirement,
                    actual.project_name, actual.version)

        # Non-zero exit status when any problem was found.
        if missing_reqs_dict or incompatible_reqs_dict:
            return 1
        else:
            logger.info("No broken requirements found.")
unknown
codeparrot/codeparrot-clean
"""Tests for aiohttp/protocol.py""" import unittest import unittest.mock import asyncio import zlib from aiohttp import hdrs, protocol class TestHttpMessage(unittest.TestCase): def setUp(self): self.transport = unittest.mock.Mock() asyncio.set_event_loop(None) def test_start_request(self): msg = protocol.Request( self.transport, 'GET', '/index.html', close=True) self.assertIs(msg.transport, self.transport) self.assertIsNone(msg.status) self.assertTrue(msg.closing) self.assertEqual(msg.status_line, 'GET /index.html HTTP/1.1\r\n') def test_start_response(self): msg = protocol.Response(self.transport, 200, close=True) self.assertIs(msg.transport, self.transport) self.assertEqual(msg.status, 200) self.assertEqual(msg.reason, "OK") self.assertTrue(msg.closing) self.assertEqual(msg.status_line, 'HTTP/1.1 200 OK\r\n') def test_start_response_with_reason(self): msg = protocol.Response(self.transport, 333, close=True, reason="My Reason") self.assertEqual(msg.status, 333) self.assertEqual(msg.reason, "My Reason") self.assertEqual(msg.status_line, 'HTTP/1.1 333 My Reason\r\n') def test_start_response_with_unknown_reason(self): msg = protocol.Response(self.transport, 777, close=True) self.assertEqual(msg.status, 777) self.assertEqual(msg.reason, "777") self.assertEqual(msg.status_line, 'HTTP/1.1 777 777\r\n') def test_force_close(self): msg = protocol.Response(self.transport, 200) self.assertFalse(msg.closing) msg.force_close() self.assertTrue(msg.closing) def test_force_chunked(self): msg = protocol.Response(self.transport, 200) self.assertFalse(msg.chunked) msg.enable_chunked_encoding() self.assertTrue(msg.chunked) def test_keep_alive(self): msg = protocol.Response(self.transport, 200, close=True) self.assertFalse(msg.keep_alive()) msg.keepalive = True self.assertTrue(msg.keep_alive()) msg.force_close() self.assertFalse(msg.keep_alive()) def test_keep_alive_http10(self): msg = protocol.Response(self.transport, 200, http_version=(1, 0)) self.assertFalse(msg.keepalive) 
self.assertFalse(msg.keep_alive()) msg = protocol.Response(self.transport, 200, http_version=(1, 1)) self.assertIsNone(msg.keepalive) def test_add_header(self): msg = protocol.Response(self.transport, 200) self.assertEqual([], list(msg.headers)) msg.add_header('content-type', 'plain/html') self.assertEqual( [('CONTENT-TYPE', 'plain/html')], list(msg.headers.items())) def test_add_header_with_spaces(self): msg = protocol.Response(self.transport, 200) self.assertEqual([], list(msg.headers)) msg.add_header('content-type', ' plain/html ') self.assertEqual( [('CONTENT-TYPE', 'plain/html')], list(msg.headers.items())) def test_add_header_non_ascii(self): msg = protocol.Response(self.transport, 200) self.assertEqual([], list(msg.headers)) with self.assertRaises(AssertionError): msg.add_header('тип-контента', 'текст/плейн') def test_add_header_invalid_value_type(self): msg = protocol.Response(self.transport, 200) self.assertEqual([], list(msg.headers)) with self.assertRaises(AssertionError): msg.add_header('content-type', {'test': 'plain'}) with self.assertRaises(AssertionError): msg.add_header(list('content-type'), 'text/plain') def test_add_headers(self): msg = protocol.Response(self.transport, 200) self.assertEqual([], list(msg.headers)) msg.add_headers(('content-type', 'plain/html')) self.assertEqual( [('CONTENT-TYPE', 'plain/html')], list(msg.headers.items())) def test_add_headers_length(self): msg = protocol.Response(self.transport, 200) self.assertIsNone(msg.length) msg.add_headers(('content-length', '42')) self.assertEqual(42, msg.length) def test_add_headers_upgrade(self): msg = protocol.Response(self.transport, 200) self.assertFalse(msg.upgrade) msg.add_headers(('connection', 'upgrade')) self.assertTrue(msg.upgrade) def test_add_headers_upgrade_websocket(self): msg = protocol.Response(self.transport, 200) msg.add_headers(('upgrade', 'test')) self.assertEqual([], list(msg.headers)) msg.add_headers(('upgrade', 'websocket')) self.assertEqual( [('UPGRADE', 
'websocket')], list(msg.headers.items())) def test_add_headers_connection_keepalive(self): msg = protocol.Response(self.transport, 200) msg.add_headers(('connection', 'keep-alive')) self.assertEqual([], list(msg.headers)) self.assertTrue(msg.keepalive) msg.add_headers(('connection', 'close')) self.assertFalse(msg.keepalive) def test_add_headers_hop_headers(self): msg = protocol.Response(self.transport, 200) msg.HOP_HEADERS = (hdrs.TRANSFER_ENCODING,) msg.add_headers(('connection', 'test'), ('transfer-encoding', 't')) self.assertEqual([], list(msg.headers)) def test_default_headers(self): msg = protocol.Response(self.transport, 200) msg._add_default_headers() headers = [r for r, _ in msg.headers.items()] self.assertIn('DATE', headers) self.assertIn('CONNECTION', headers) def test_default_headers_server(self): msg = protocol.Response(self.transport, 200) msg._add_default_headers() self.assertIn('SERVER', msg.headers) def test_default_headers_useragent(self): msg = protocol.Request(self.transport, 'GET', '/') msg._add_default_headers() self.assertNotIn('SERVER', msg.headers) self.assertIn('USER-AGENT', msg.headers) def test_default_headers_useragent_custom(self): msg = protocol.Request(self.transport, 'GET', '/') msg.add_headers(('user-agent', 'my custom agent')) msg._add_default_headers() headers = [r for r, _ in msg.headers.items() if r.lower() == 'user-agent'] self.assertEqual(len(headers), 1) def test_default_headers_chunked(self): msg = protocol.Response(self.transport, 200) msg._add_default_headers() headers = [r for r, _ in msg.headers.items()] self.assertNotIn('TRANSFER-ENCODING', headers) msg = protocol.Response(self.transport, 200) msg.enable_chunked_encoding() msg.send_headers() headers = [r for r, _ in msg.headers.items()] self.assertIn('TRANSFER-ENCODING', headers) def test_default_headers_connection_upgrade(self): msg = protocol.Response(self.transport, 200) msg.upgrade = True msg._add_default_headers() headers = [r for r in msg.headers.items() if r[0] 
== 'CONNECTION'] self.assertEqual([('CONNECTION', 'upgrade')], headers) def test_default_headers_connection_close(self): msg = protocol.Response(self.transport, 200) msg.force_close() msg._add_default_headers() headers = [r for r in msg.headers.items() if r[0] == 'CONNECTION'] self.assertEqual([('CONNECTION', 'close')], headers) def test_default_headers_connection_keep_alive(self): msg = protocol.Response(self.transport, 200) msg.keepalive = True msg._add_default_headers() headers = [r for r in msg.headers.items() if r[0] == 'CONNECTION'] self.assertEqual([('CONNECTION', 'keep-alive')], headers) def test_send_headers(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.add_headers(('content-type', 'plain/html')) self.assertFalse(msg.is_headers_sent()) msg.send_headers() content = b''.join([arg[1][0] for arg in list(write.mock_calls)]) self.assertTrue(content.startswith(b'HTTP/1.1 200 OK\r\n')) self.assertIn(b'CONTENT-TYPE: plain/html', content) self.assertTrue(msg.headers_sent) self.assertTrue(msg.is_headers_sent()) # cleanup msg.writer.close() def test_send_headers_non_ascii(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.add_headers(('x-header', 'текст')) self.assertFalse(msg.is_headers_sent()) msg.send_headers() content = b''.join([arg[1][0] for arg in list(write.mock_calls)]) self.assertTrue(content.startswith(b'HTTP/1.1 200 OK\r\n')) self.assertIn(b'X-HEADER: \xd1\x82\xd0\xb5\xd0\xba\xd1\x81\xd1\x82', content) self.assertTrue(msg.headers_sent) self.assertTrue(msg.is_headers_sent()) # cleanup msg.writer.close() def test_send_headers_nomore_add(self): msg = protocol.Response(self.transport, 200) msg.add_headers(('content-type', 'plain/html')) msg.send_headers() self.assertRaises(AssertionError, msg.add_header, 'content-type', 'plain/html') # cleanup msg.writer.close() def test_prepare_length(self): msg = protocol.Response(self.transport, 200) w_l_p 
= msg._write_length_payload = unittest.mock.Mock() w_l_p.return_value = iter([1, 2, 3]) msg.add_headers(('content-length', '42')) msg.send_headers() self.assertTrue(w_l_p.called) self.assertEqual((42,), w_l_p.call_args[0]) def test_prepare_chunked_force(self): msg = protocol.Response(self.transport, 200) msg.enable_chunked_encoding() chunked = msg._write_chunked_payload = unittest.mock.Mock() chunked.return_value = iter([1, 2, 3]) msg.add_headers(('content-length', '42')) msg.send_headers() self.assertTrue(chunked.called) def test_prepare_chunked_no_length(self): msg = protocol.Response(self.transport, 200) chunked = msg._write_chunked_payload = unittest.mock.Mock() chunked.return_value = iter([1, 2, 3]) msg.send_headers() self.assertTrue(chunked.called) def test_prepare_eof(self): msg = protocol.Response(self.transport, 200, http_version=(1, 0)) eof = msg._write_eof_payload = unittest.mock.Mock() eof.return_value = iter([1, 2, 3]) msg.send_headers() self.assertTrue(eof.called) def test_write_auto_send_headers(self): msg = protocol.Response(self.transport, 200, http_version=(1, 0)) msg._send_headers = True msg.write(b'data1') self.assertTrue(msg.headers_sent) # cleanup msg.writer.close() def test_write_payload_eof(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200, http_version=(1, 0)) msg.send_headers() msg.write(b'data1') self.assertTrue(msg.headers_sent) msg.write(b'data2') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertEqual( b'data1data2', content.split(b'\r\n\r\n', 1)[-1]) def test_write_payload_chunked(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.enable_chunked_encoding() msg.send_headers() msg.write(b'data') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertEqual( b'4\r\ndata\r\n0\r\n\r\n', content.split(b'\r\n\r\n', 1)[-1]) def 
test_write_payload_chunked_multiple(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.enable_chunked_encoding() msg.send_headers() msg.write(b'data1') msg.write(b'data2') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertEqual( b'5\r\ndata1\r\n5\r\ndata2\r\n0\r\n\r\n', content.split(b'\r\n\r\n', 1)[-1]) def test_write_payload_length(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.add_headers(('content-length', '2')) msg.send_headers() msg.write(b'd') msg.write(b'ata') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertEqual( b'da', content.split(b'\r\n\r\n', 1)[-1]) def test_write_payload_chunked_filter(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.send_headers() msg.add_chunking_filter(2) msg.write(b'data') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertTrue(content.endswith(b'2\r\nda\r\n2\r\nta\r\n0\r\n\r\n')) def test_write_payload_chunked_filter_mutiple_chunks(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.send_headers() msg.add_chunking_filter(2) msg.write(b'data1') msg.write(b'data2') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertTrue(content.endswith( b'2\r\nda\r\n2\r\nta\r\n2\r\n1d\r\n2\r\nat\r\n' b'2\r\na2\r\n0\r\n\r\n')) def test_write_payload_chunked_large_chunk(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.send_headers() msg.add_chunking_filter(1024) msg.write(b'data') msg.write_eof() content = b''.join([c[1][0] for c in list(write.mock_calls)]) self.assertTrue(content.endswith(b'4\r\ndata\r\n0\r\n\r\n')) _comp = zlib.compressobj(wbits=-zlib.MAX_WBITS) _COMPRESSED = 
b''.join([_comp.compress(b'data'), _comp.flush()]) def test_write_payload_deflate_filter(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.add_headers(('content-length', '{}'.format(len(self._COMPRESSED)))) msg.send_headers() msg.add_compression_filter('deflate') msg.write(b'data') msg.write_eof() chunks = [c[1][0] for c in list(write.mock_calls)] self.assertTrue(all(chunks)) content = b''.join(chunks) self.assertEqual( self._COMPRESSED, content.split(b'\r\n\r\n', 1)[-1]) def test_write_payload_deflate_and_chunked(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.send_headers() msg.add_compression_filter('deflate') msg.add_chunking_filter(2) msg.write(b'data') msg.write_eof() chunks = [c[1][0] for c in list(write.mock_calls)] self.assertTrue(all(chunks)) content = b''.join(chunks) self.assertEqual( b'2\r\nKI\r\n2\r\n,I\r\n2\r\n\x04\x00\r\n0\r\n\r\n', content.split(b'\r\n\r\n', 1)[-1]) def test_write_payload_chunked_and_deflate(self): write = self.transport.write = unittest.mock.Mock() msg = protocol.Response(self.transport, 200) msg.add_headers(('content-length', '{}'.format(len(self._COMPRESSED)))) msg.add_chunking_filter(2) msg.add_compression_filter('deflate') msg.send_headers() msg.write(b'data') msg.write_eof() chunks = [c[1][0] for c in list(write.mock_calls)] self.assertTrue(all(chunks)) content = b''.join(chunks) self.assertEqual( self._COMPRESSED, content.split(b'\r\n\r\n', 1)[-1]) def test_write_drain(self): msg = protocol.Response(self.transport, 200, http_version=(1, 0)) msg._send_headers = True msg.write(b'1' * (64 * 1024 * 2)) self.assertFalse(self.transport.drain.called) msg.write(b'1', drain=True) self.assertTrue(self.transport.drain.called) self.assertEqual(msg._output_size, 0) def test_dont_override_request_headers_with_default_values(self): msg = protocol.Request( self.transport, 'GET', '/index.html', close=True) 
msg.add_header('USER-AGENT', 'custom') msg._add_default_headers() self.assertEqual('custom', msg.headers['USER-AGENT']) def test_dont_override_response_headers_with_default_values(self): msg = protocol.Response(self.transport, 200, http_version=(1, 0)) msg.add_header('DATE', 'now') msg.add_header('SERVER', 'custom') msg._add_default_headers() self.assertEqual('custom', msg.headers['SERVER']) self.assertEqual('now', msg.headers['DATE'])
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras built-in metrics functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Metrics functions. from tensorflow.python.keras._impl.keras.metrics import binary_accuracy from tensorflow.python.keras._impl.keras.metrics import binary_crossentropy from tensorflow.python.keras._impl.keras.metrics import categorical_accuracy from tensorflow.python.keras._impl.keras.metrics import categorical_crossentropy from tensorflow.python.keras._impl.keras.metrics import cosine_proximity from tensorflow.python.keras._impl.keras.metrics import hinge from tensorflow.python.keras._impl.keras.metrics import kullback_leibler_divergence from tensorflow.python.keras._impl.keras.metrics import mean_absolute_error from tensorflow.python.keras._impl.keras.metrics import mean_absolute_percentage_error from tensorflow.python.keras._impl.keras.metrics import mean_squared_error from tensorflow.python.keras._impl.keras.metrics import mean_squared_logarithmic_error from tensorflow.python.keras._impl.keras.metrics import poisson from tensorflow.python.keras._impl.keras.metrics import sparse_categorical_crossentropy from tensorflow.python.keras._impl.keras.metrics import sparse_top_k_categorical_accuracy from tensorflow.python.keras._impl.keras.metrics 
import squared_hinge from tensorflow.python.keras._impl.keras.metrics import top_k_categorical_accuracy # Auxiliary utils. # pylint: disable=g-bad-import-order from tensorflow.python.keras._impl.keras.metrics import deserialize from tensorflow.python.keras._impl.keras.metrics import serialize from tensorflow.python.keras._impl.keras.metrics import get del absolute_import del division del print_function
unknown
codeparrot/codeparrot-clean
# example.js ```javascript _{{example.js}}_ ``` # dist/output.js ```javascript _{{dist/output.js}}_ ``` # dist/my own chunk.output.js ```javascript _{{dist/my own chunk.output.js}}_ ``` # dist/node_modules_b_js-node_modules_d_js.output.js ```javascript _{{dist/node_modules_b_js-node_modules_d_js.output.js}}_ ``` # Info ## Unoptimized ``` _{{stdout}}_ ``` ## Production mode ``` _{{production:stdout}}_ ```
unknown
github
https://github.com/webpack/webpack
examples/named-chunks/template.md
package kotlinx.coroutines.rx2 import kotlinx.coroutines.testing.* import kotlinx.coroutines.* import kotlinx.coroutines.channels.* import kotlinx.coroutines.selects.* import org.junit.Test import kotlin.onSuccess import kotlin.test.* class ObservableSubscriptionSelectTest : TestBase() { @Test fun testSelect() = runTest { // source with n ints val n = 1000 * stressTestMultiplier val source = rxObservable { repeat(n) { send(it) } } var a = 0 var b = 0 // open two subs val channelA = source.toChannel() val channelB = source.toChannel() loop@ while (true) { val done: Int = select { channelA.onReceiveCatching { result -> result.onSuccess { assertEquals(a++, it) } if (result.isSuccess) 1 else 0 } channelB.onReceiveCatching { result -> result.onSuccess { assertEquals(b++, it) } if (result.isSuccess) 2 else 0 } } when (done) { 0 -> break@loop 1 -> { val r = channelB.receiveCatching().getOrNull() if (r != null) assertEquals(b++, r) } 2 -> { val r = channelA.receiveCatching().getOrNull() if (r != null) assertEquals(a++, r) } } } channelA.cancel() channelB.cancel() // should receive one of them fully assertTrue(a == n || b == n) } }
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
reactive/kotlinx-coroutines-rx2/test/ObservableSubscriptionSelectTest.kt
#!/usr/bin/python # # Copyright (c) 2016 Matt Davis, <mdavis@ansible.com> # Chris Houseknecht, <house@redhat.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: azure_rm_virtualnetwork_facts version_added: "2.1" short_description: Get virtual network facts. description: - Get facts for a specific virtual network or all virtual networks within a resource group. options: name: description: - Only show results for a specific security group. default: null required: false resource_group: description: - Limit results by resource group. Required when filtering by name. default: null required: false tags: description: - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'. default: null required: false extends_documentation_fragment: - azure authors: - "Chris Houseknecht house@redhat.com" - "Matt Davis mdavis@redhat.com" ''' EXAMPLES = ''' - name: Get facts for one virtual network azure_rm_virtualnetwork_facts: resource_group: Testing name: secgroup001 - name: Get facts for all virtual networks azure_rm_virtualnetwork_facts: resource_group: Testing - name: Get facts by tags azure_rm_virtualnetwork_facts: tags: - testing ''' RETURN = ''' azure_virtualnetworks: description: List of virtual network dicts. 
returned: always type: list example: [{ "etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"', "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/vnet2001", "location": "eastus2", "name": "vnet2001", "properties": { "addressSpace": { "addressPrefixes": [ "10.10.0.0/16" ] }, "provisioningState": "Succeeded", "resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612", "subnets": [] }, "type": "Microsoft.Network/virtualNetworks" }] ''' from ansible.module_utils.basic import * from ansible.module_utils.azure_rm_common import * try: from msrestazure.azure_exceptions import CloudError from azure.common import AzureMissingResourceHttpError, AzureHttpError except: # This is handled in azure_rm_common pass AZURE_OBJECT_CLASS = 'VirtualNetwork' class AzureRMNetworkInterfaceFacts(AzureRMModuleBase): def __init__(self): self.module_arg_spec = dict( name=dict(type='str'), resource_group=dict(type='str'), tags=dict(type='list'), ) self.results = dict( changed=False, ansible_facts=dict(azure_virtualnetworks=[]) ) self.name = None self.resource_group = None self.tags = None super(AzureRMNetworkInterfaceFacts, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True) def exec_module(self, **kwargs): for key in self.module_arg_spec: setattr(self, key, kwargs[key]) if self.name is not None: self.results['ansible_facts']['azure_virtualnetworks'] = self.get_item() else: self.results['ansible_facts']['azure_virtualnetworks'] = self.list_items() return self.results def get_item(self): self.log('Get properties for {0}'.format(self.name)) item = None results = [] try: item = self.network_client.virtual_networks.get(self.resource_group, self.name) except CloudError: pass if item and self.has_tags(item.tags, self.tags): results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)] return results def list_resource_group(self): self.log('List items for resource group') try: response = 
self.network_client.virtual_networks.list(self.resource_group) except AzureHttpError as exc: self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) return results def list_items(self): self.log('List all for items') try: response = self.network_client.virtual_networks.list_all() except AzureHttpError as exc: self.fail("Failed to list all items - {0}".format(str(exc))) results = [] for item in response: if self.has_tags(item.tags, self.tags): results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS)) return results def main(): AzureRMNetworkInterfaceFacts() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_EASILYSWAPPABLEPARAMETERSCHECK_H #define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_EASILYSWAPPABLEPARAMETERSCHECK_H #include "../ClangTidyCheck.h" namespace clang::tidy::bugprone { /// Finds function definitions where parameters of convertible types follow /// each other directly, making call sites prone to calling the function with /// swapped (or badly ordered) arguments. /// /// For the user-facing documentation see: /// https://clang.llvm.org/extra/clang-tidy/checks/bugprone/easily-swappable-parameters.html class EasilySwappableParametersCheck : public ClangTidyCheck { public: EasilySwappableParametersCheck(StringRef Name, ClangTidyContext *Context); void registerMatchers(ast_matchers::MatchFinder *Finder) override; void check(const ast_matchers::MatchFinder::MatchResult &Result) override; void storeOptions(ClangTidyOptions::OptionMap &Opts) override; /// The minimum length of an adjacent swappable parameter range required for /// a diagnostic. const std::size_t MinimumLength; /// The parameter names (as written in the source text) to be ignored. const std::vector<StringRef> IgnoredParameterNames; /// The parameter typename suffixes (as written in the source code) to be /// ignored. const std::vector<StringRef> IgnoredParameterTypeSuffixes; /// Whether to consider differently qualified versions of the same type /// mixable. const bool QualifiersMix; /// Whether to model implicit conversions "in full" (conditions apply) /// during analysis and consider types that are implicitly convertible to /// one another mixable. 
const bool ModelImplicitConversions; /// If enabled, diagnostics for parameters that are used together in a /// similar way are not emitted. const bool SuppressParametersUsedTogether; /// The number of characters two parameter names might be dissimilar at /// either end for the report about the parameters to be silenced. /// E.g. the names "LHS" and "RHS" are 1-dissimilar suffixes of each other, /// while "Text1" and "Text2" are 1-dissimilar prefixes of each other. const std::size_t NamePrefixSuffixSilenceDissimilarityThreshold; }; } // namespace clang::tidy::bugprone #endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_EASILYSWAPPABLEPARAMETERSCHECK_H
c
github
https://github.com/llvm/llvm-project
clang-tools-extra/clang-tidy/bugprone/EasilySwappableParametersCheck.h
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat import unittest from units.compat.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError from ansible.module_utils.common._collections_compat import Container from ansible.playbook.block import Block from units.mock.loader import DictDataLoader from units.mock.path import mock_unfrackpath_noop from ansible.playbook.role import Role from ansible.playbook.role.include import RoleInclude from ansible.playbook.role import hash_params class TestHashParams(unittest.TestCase): def test(self): params = {'foo': 'bar'} res = hash_params(params) self._assert_set(res) self._assert_hashable(res) def _assert_hashable(self, res): a_dict = {} try: a_dict[res] = res except TypeError as e: self.fail('%s is not hashable: %s' % (res, e)) def _assert_set(self, res): self.assertIsInstance(res, frozenset) def test_dict_tuple(self): params = {'foo': (1, 'bar',)} res = hash_params(params) self._assert_set(res) def test_tuple(self): params = (1, None, 'foo') res = hash_params(params) self._assert_hashable(res) def test_tuple_dict(self): params = ({'foo': 'bar'}, 37) res = hash_params(params) self._assert_hashable(res) def 
test_list(self): params = ['foo', 'bar', 1, 37, None] res = hash_params(params) self._assert_set(res) self._assert_hashable(res) def test_dict_with_list_value(self): params = {'foo': [1, 4, 'bar']} res = hash_params(params) self._assert_set(res) self._assert_hashable(res) def test_empty_set(self): params = set([]) res = hash_params(params) self._assert_hashable(res) self._assert_set(res) def test_generator(self): def my_generator(): for i in ['a', 1, None, {}]: yield i params = my_generator() res = hash_params(params) self._assert_hashable(res) def test_container_but_not_iterable(self): # This is a Container that is not iterable, which is unlikely but... class MyContainer(Container): def __init__(self, some_thing): self.data = [] self.data.append(some_thing) def __contains__(self, item): return item in self.data def __hash__(self): return hash(self.data) def __len__(self): return len(self.data) def __call__(self): return False foo = MyContainer('foo bar') params = foo self.assertRaises(TypeError, hash_params, params) def test_param_dict_dupe_values(self): params1 = {'foo': False} params2 = {'bar': False} res1 = hash_params(params1) res2 = hash_params(params2) hash1 = hash(res1) hash2 = hash(res2) self.assertNotEqual(res1, res2) self.assertNotEqual(hash1, hash2) def test_param_dupe(self): params1 = { # 'from_files': {}, 'tags': [], u'testvalue': False, u'testvalue2': True, # 'when': [] } params2 = { # 'from_files': {}, 'tags': [], u'testvalue': True, u'testvalue2': False, # 'when': [] } res1 = hash_params(params1) res2 = hash_params(params2) self.assertNotEqual(hash(res1), hash(res2)) self.assertNotEqual(res1, res2) foo = {} foo[res1] = 'params1' foo[res2] = 'params2' self.assertEqual(len(foo), 2) del foo[res2] self.assertEqual(len(foo), 1) for key in foo: self.assertTrue(key in foo) self.assertIn(key, foo) class TestRole(unittest.TestCase): @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_tasks(self): fake_loader 
= DictDataLoader({ "/etc/ansible/roles/foo_tasks/tasks/main.yml": """ - shell: echo 'hello world' """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(str(r), 'foo_tasks') self.assertEqual(len(r._task_blocks), 1) assert isinstance(r._task_blocks[0], Block) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_tasks_dir_vs_file(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_tasks/tasks/custom_main/foo.yml": """ - command: bar """, "/etc/ansible/roles/foo_tasks/tasks/custom_main.yml": """ - command: baz """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play, from_files=dict(tasks='custom_main')) self.assertEqual(r._task_blocks[0]._ds[0]['command'], 'baz') @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_handlers(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_handlers/handlers/main.yml": """ - name: test handler shell: echo 'hello world' """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_handlers', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(len(r._handler_blocks), 1) assert isinstance(r._handler_blocks[0], Block) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_vars(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_vars/defaults/main.yml": """ foo: bar """, "/etc/ansible/roles/foo_vars/vars/main.yml": """ foo: bam """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(r._default_vars, dict(foo='bar')) self.assertEqual(r._role_vars, dict(foo='bam')) 
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_vars_dirs(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_vars/defaults/main/foo.yml": """ foo: bar """, "/etc/ansible/roles/foo_vars/vars/main/bar.yml": """ foo: bam """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(r._default_vars, dict(foo='bar')) self.assertEqual(r._role_vars, dict(foo='bam')) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_vars_nested_dirs(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """ foo: bar """, "/etc/ansible/roles/foo_vars/vars/main/bar/foo.yml": """ foo: bam """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(r._default_vars, dict(foo='bar')) self.assertEqual(r._role_vars, dict(foo='bam')) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_vars_nested_dirs_combined(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """ foo: bar a: 1 """, "/etc/ansible/roles/foo_vars/defaults/main/bar/foo.yml": """ foo: bam b: 2 """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(r._default_vars, dict(foo='bar', a=1, b=2)) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_vars_dir_vs_file(self): fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_vars/vars/main/foo.yml": """ foo: bar """, "/etc/ansible/roles/foo_vars/vars/main.yml": """ foo: bam """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = 
RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(r._role_vars, dict(foo='bam')) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_with_metadata(self): fake_loader = DictDataLoader({ '/etc/ansible/roles/foo_metadata/meta/main.yml': """ allow_duplicates: true dependencies: - bar_metadata galaxy_info: a: 1 b: 2 c: 3 """, '/etc/ansible/roles/bar_metadata/meta/main.yml': """ dependencies: - baz_metadata """, '/etc/ansible/roles/baz_metadata/meta/main.yml': """ dependencies: - bam_metadata """, '/etc/ansible/roles/bam_metadata/meta/main.yml': """ dependencies: [] """, '/etc/ansible/roles/bad1_metadata/meta/main.yml': """ 1 """, '/etc/ansible/roles/bad2_metadata/meta/main.yml': """ foo: bar """, '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """ dependencies: ['recursive2_metadata'] """, '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """ dependencies: ['recursive1_metadata'] """, }) mock_play = MagicMock() mock_play.collections = None mock_play.ROLE_CACHE = {} i = RoleInclude.load('foo_metadata', play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) role_deps = r.get_direct_dependencies() self.assertEqual(len(role_deps), 1) self.assertEqual(type(role_deps[0]), Role) self.assertEqual(len(role_deps[0].get_parents()), 1) self.assertEqual(role_deps[0].get_parents()[0], r) self.assertEqual(r._metadata.allow_duplicates, True) self.assertEqual(r._metadata.galaxy_info, dict(a=1, b=2, c=3)) all_deps = r.get_all_dependencies() self.assertEqual(len(all_deps), 3) self.assertEqual(all_deps[0].get_name(), 'bam_metadata') self.assertEqual(all_deps[1].get_name(), 'baz_metadata') self.assertEqual(all_deps[2].get_name(), 'bar_metadata') i = RoleInclude.load('bad1_metadata', play=mock_play, loader=fake_loader) self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play) i = RoleInclude.load('bad2_metadata', play=mock_play, 
loader=fake_loader) self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play) # TODO: re-enable this test once Ansible has proper role dep cycle detection # that doesn't rely on stack overflows being recoverable (as they aren't in Py3.7+) # see https://github.com/ansible/ansible/issues/61527 # i = RoleInclude.load('recursive1_metadata', play=mock_play, loader=fake_loader) # self.assertRaises(AnsibleError, Role.load, i, play=mock_play) @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop) def test_load_role_complex(self): # FIXME: add tests for the more complex uses of # params and tags/when statements fake_loader = DictDataLoader({ "/etc/ansible/roles/foo_complex/tasks/main.yml": """ - shell: echo 'hello world' """, }) mock_play = MagicMock() mock_play.ROLE_CACHE = {} i = RoleInclude.load(dict(role='foo_complex'), play=mock_play, loader=fake_loader) r = Role.load(i, play=mock_play) self.assertEqual(r.get_name(), "foo_complex")
unknown
codeparrot/codeparrot-clean
''' Created on Jan 9, 2013 Copyright © 2013 The Board of Trustees of The Leland Stanford Junior University. All Rights Reserved Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: dstrauss ''' import numpy as np D = {'solverType':'phaseSplit', 'flavor':'TE', 'numRuns':100, 'expt':'goBig', 'numProcs':16} def getMyVars(parseNumber, D): '''routine to return the parameters to test at the current iteration.''' # noFreqs,noPhis,bkg = np.meshgrid(range(1,7), range(1,7), range(100)) D['freqs'] = np.round(np.logspace(np.log10(1000), np.log10(50000), 100)) D['inc'] = [75.0*np.pi/180.0] # one freq. D['numSensors'] = 20 D['bkgNo'] = parseNumber+100; D['numProcs'] = 100 D['rho'] = 1e-3 D['xi'] = 1e-12 return D
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from django.conf.urls import url, patterns from tcms.testcases.views import SimpleTestCaseView from tcms.testcases.views import TestCaseCaseRunDetailPanelView from tcms.testcases.views import TestCaseCaseRunListPaneView from tcms.testcases.views import TestCaseReviewPaneView from tcms.testcases.views import TestCaseSimpleCaseRunView urlpatterns = patterns('tcms.testcases.views', url(r'^(?P<case_id>\d+)/$', 'get'), url(r'^(?P<case_id>\d+)/edit/$', 'edit'), url(r'^(?P<case_id>\d+)/history/$', 'text_history'), url(r'^(?P<case_id>\d+)/attachment/$', 'attachment'), url(r'^(?P<case_id>\d+)/log/$', 'get_log'), url(r'^(?P<case_id>\d+)/bug/$', 'bug'), url(r'^(?P<case_id>\d+)/plan/$', 'plan'), url(r'^(?P<case_id>\d+)/readonly-pane/$', SimpleTestCaseView.as_view(), name='case-readonly-pane'), url(r'^(?P<case_id>\d+)/review-pane/$', TestCaseReviewPaneView.as_view(), name='case-review-pane'), url(r'^(?P<case_id>\d+)/caserun-list-pane/$', TestCaseCaseRunListPaneView.as_view(), name='caserun-list-pane'), url(r'^(?P<case_id>\d+)/caserun-simple-pane/$', TestCaseSimpleCaseRunView.as_view(), name='caserun-simple-pane'), url(r'^(?P<case_id>\d+)/caserun-detail-pane/$', TestCaseCaseRunDetailPanelView.as_view(), name='caserun-detail-pane'), ) urlpatterns += patterns('tcms.testruns.views', url(r'^(?P<plan_id>\d+)/runs/$', 'load_runs_of_one_plan', name='load_runs_of_one_plan_url'), )
unknown
codeparrot/codeparrot-clean
/* * Copyright (C) 2011 The Guava Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package com.google.common.primitives; import static com.google.common.primitives.ReflectionFreeAssertThrows.assertThrows; import static com.google.common.primitives.UnsignedInts.max; import static com.google.common.primitives.UnsignedInts.min; import static com.google.common.truth.Truth.assertThat; import com.google.common.annotations.GwtCompatible; import com.google.common.annotations.GwtIncompatible; import com.google.common.annotations.J2ktIncompatible; import com.google.common.collect.testing.Helpers; import com.google.common.testing.NullPointerTester; import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Random; import junit.framework.TestCase; import org.jspecify.annotations.NullUnmarked; /** * Tests for UnsignedInts * * @author Louis Wasserman */ @GwtCompatible @NullUnmarked public class UnsignedIntsTest extends TestCase { private static final long[] UNSIGNED_INTS = { 0L, 1L, 2L, 3L, 0x12345678L, 0x5a4316b8L, 0x6cf78a4bL, 0xff1a618bL, 0xfffffffdL, 0xfffffffeL, 0xffffffffL }; private static final int LEAST = (int) 0L; private static final int GREATEST = (int) 0xffffffffL; public void testCheckedCast() { for (long value : UNSIGNED_INTS) { assertThat(UnsignedInts.toLong(UnsignedInts.checkedCast(value))).isEqualTo(value); } assertCastFails(1L << 32); assertCastFails(-1L); assertCastFails(Long.MAX_VALUE); assertCastFails(Long.MIN_VALUE); } private 
static void assertCastFails(long value) { try { UnsignedInts.checkedCast(value); fail("Cast to int should have failed: " + value); } catch (IllegalArgumentException ex) { assertThat(ex).hasMessageThat().contains(String.valueOf(value)); } } public void testSaturatedCast() { for (long value : UNSIGNED_INTS) { assertThat(UnsignedInts.toLong(UnsignedInts.saturatedCast(value))).isEqualTo(value); } assertThat(UnsignedInts.saturatedCast(1L << 32)).isEqualTo(GREATEST); assertThat(UnsignedInts.saturatedCast(-1L)).isEqualTo(LEAST); assertThat(UnsignedInts.saturatedCast(Long.MAX_VALUE)).isEqualTo(GREATEST); assertThat(UnsignedInts.saturatedCast(Long.MIN_VALUE)).isEqualTo(LEAST); } public void testToLong() { for (long a : UNSIGNED_INTS) { assertThat(UnsignedInts.toLong((int) a)).isEqualTo(a); } } public void testCompare() { for (long a : UNSIGNED_INTS) { for (long b : UNSIGNED_INTS) { int cmpAsLongs = Long.compare(a, b); int cmpAsUInt = UnsignedInts.compare((int) a, (int) b); assertThat(Integer.signum(cmpAsUInt)).isEqualTo(Integer.signum(cmpAsLongs)); } } } public void testMax_noArgs() { assertThrows(IllegalArgumentException.class, () -> max()); } public void testMax() { assertThat(max(LEAST)).isEqualTo(LEAST); assertThat(max(GREATEST)).isEqualTo(GREATEST); assertThat( max( (int) 8L, (int) 6L, (int) 7L, (int) 0x12345678L, (int) 0x5a4316b8L, (int) 0xff1a618bL, (int) 0L)) .isEqualTo((int) 0xff1a618bL); } public void testMin_noArgs() { assertThrows(IllegalArgumentException.class, () -> min()); } public void testMin() { assertThat(min(LEAST)).isEqualTo(LEAST); assertThat(min(GREATEST)).isEqualTo(GREATEST); assertThat( min( (int) 8L, (int) 6L, (int) 7L, (int) 0x12345678L, (int) 0x5a4316b8L, (int) 0xff1a618bL, (int) 0L)) .isEqualTo((int) 0L); } public void testLexicographicalComparator() { List<int[]> ordered = Arrays.asList( new int[] {}, new int[] {LEAST}, new int[] {LEAST, LEAST}, new int[] {LEAST, (int) 1L}, new int[] {(int) 1L}, new int[] {(int) 1L, LEAST}, new int[] {GREATEST, 
(GREATEST - (int) 1L)}, new int[] {GREATEST, GREATEST}, new int[] {GREATEST, GREATEST, GREATEST}); Comparator<int[]> comparator = UnsignedInts.lexicographicalComparator(); Helpers.testComparator(comparator, ordered); } public void testSort() { testSort(new int[] {}, new int[] {}); testSort(new int[] {2}, new int[] {2}); testSort(new int[] {2, 1, 0}, new int[] {0, 1, 2}); testSort(new int[] {2, GREATEST, 1, LEAST}, new int[] {LEAST, 1, 2, GREATEST}); } static void testSort(int[] input, int[] expected) { input = Arrays.copyOf(input, input.length); UnsignedInts.sort(input); assertThat(input).isEqualTo(expected); } static void testSort(int[] input, int from, int to, int[] expected) { input = Arrays.copyOf(input, input.length); UnsignedInts.sort(input, from, to); assertThat(input).isEqualTo(expected); } public void testSortIndexed() { testSort(new int[] {}, 0, 0, new int[] {}); testSort(new int[] {2}, 0, 1, new int[] {2}); testSort(new int[] {2, 1, 0}, 0, 2, new int[] {1, 2, 0}); testSort(new int[] {2, GREATEST, 1, LEAST}, 1, 4, new int[] {2, LEAST, 1, GREATEST}); } public void testSortDescending() { testSortDescending(new int[] {}, new int[] {}); testSortDescending(new int[] {1}, new int[] {1}); testSortDescending(new int[] {1, 2}, new int[] {2, 1}); testSortDescending(new int[] {1, 3, 1}, new int[] {3, 1, 1}); testSortDescending( new int[] {GREATEST - 1, 1, GREATEST - 2, 2}, new int[] {GREATEST - 1, GREATEST - 2, 2, 1}); } private static void testSortDescending(int[] input, int[] expectedOutput) { input = Arrays.copyOf(input, input.length); UnsignedInts.sortDescending(input); assertThat(input).isEqualTo(expectedOutput); } private static void testSortDescending( int[] input, int fromIndex, int toIndex, int[] expectedOutput) { input = Arrays.copyOf(input, input.length); UnsignedInts.sortDescending(input, fromIndex, toIndex); assertThat(input).isEqualTo(expectedOutput); } public void testSortDescendingIndexed() { testSortDescending(new int[] {}, 0, 0, new int[] {}); 
testSortDescending(new int[] {1}, 0, 1, new int[] {1}); testSortDescending(new int[] {1, 2}, 0, 2, new int[] {2, 1}); testSortDescending(new int[] {1, 3, 1}, 0, 2, new int[] {3, 1, 1}); testSortDescending(new int[] {1, 3, 1}, 0, 1, new int[] {1, 3, 1}); testSortDescending( new int[] {GREATEST - 1, 1, GREATEST - 2, 2}, 1, 3, new int[] {GREATEST - 1, GREATEST - 2, 1, 2}); } public void testDivide() { for (long a : UNSIGNED_INTS) { for (long b : UNSIGNED_INTS) { try { assertThat(UnsignedInts.divide((int) a, (int) b)).isEqualTo((int) (a / b)); assertThat(b).isNotEqualTo(0); } catch (ArithmeticException e) { assertThat(b).isEqualTo(0); } } } } public void testRemainder() { for (long a : UNSIGNED_INTS) { for (long b : UNSIGNED_INTS) { try { assertThat(UnsignedInts.remainder((int) a, (int) b)).isEqualTo((int) (a % b)); assertThat(b).isNotEqualTo(0); } catch (ArithmeticException e) { assertThat(b).isEqualTo(0); } } } } @GwtIncompatible // Too slow in GWT (~3min fully optimized) public void testDivideRemainderEuclideanProperty() { // Use a seed so that the test is deterministic: Random r = new Random(0L); for (int i = 0; i < 1000000; i++) { int dividend = r.nextInt(); int divisor = r.nextInt(); // Test that the Euclidean property is preserved: assertThat( dividend - (divisor * UnsignedInts.divide(dividend, divisor) + UnsignedInts.remainder(dividend, divisor))) .isEqualTo(0); } } public void testParseInt() { for (long a : UNSIGNED_INTS) { assertThat(UnsignedInts.parseUnsignedInt(Long.toString(a))).isEqualTo((int) a); } } public void testParseIntFail() { assertThrows( NumberFormatException.class, () -> UnsignedInts.parseUnsignedInt(Long.toString(1L << 32))); } public void testParseIntWithRadix() { for (long a : UNSIGNED_INTS) { for (int radix = Character.MIN_RADIX; radix <= Character.MAX_RADIX; radix++) { assertThat(UnsignedInts.parseUnsignedInt(Long.toString(a, radix), radix)) .isEqualTo((int) a); } } } public void testParseIntWithRadixLimits() { // loops through all legal 
radix values. for (int r = Character.MIN_RADIX; r <= Character.MAX_RADIX; r++) { int radix = r; // tests can successfully parse a number string with this radix. String maxAsString = Long.toString((1L << 32) - 1, radix); assertThat(UnsignedInts.parseUnsignedInt(maxAsString, radix)).isEqualTo(-1); assertThrows( NumberFormatException.class, () -> { long overflow = 1L << 32; String overflowAsString = Long.toString(overflow, radix); UnsignedInts.parseUnsignedInt(overflowAsString, radix); }); } } public void testParseIntThrowsExceptionForInvalidRadix() { // Valid radix values are Character.MIN_RADIX to Character.MAX_RADIX, // inclusive. // // Note: According to the spec, a NumberFormatException is thrown for a number that is not // parseable, but the spec doesn't seem to say which exception is thrown for an invalid radix. // In contrast to the JVM, Kotlin native throws an Illegal argument exception in this case // (which seems to make more sense). try { UnsignedInts.parseUnsignedInt("0", Character.MIN_RADIX - 1); fail(); } catch (NumberFormatException expected) { } catch (IllegalArgumentException expected) { // Kotlin native, see above } try { UnsignedInts.parseUnsignedInt("0", Character.MAX_RADIX + 1); fail(); } catch (NumberFormatException expected) { } catch (IllegalArgumentException expected) { // Kotlin native, see above } // The radix is used as an array index, so try a negative value. 
try { UnsignedInts.parseUnsignedInt("0", -1); fail(); } catch (NumberFormatException expected) { } catch (IllegalArgumentException expected) { // Kotlin native, see above } } public void testDecodeInt() { assertThat(UnsignedInts.decode("0xffffffff")).isEqualTo(0xffffffff); assertThat(UnsignedInts.decode("01234567")).isEqualTo(01234567); // octal assertThat(UnsignedInts.decode("#12345678")).isEqualTo(0x12345678); assertThat(UnsignedInts.decode("76543210")).isEqualTo(76543210); assertThat(UnsignedInts.decode("0x13579135")).isEqualTo(0x13579135); assertThat(UnsignedInts.decode("0X13579135")).isEqualTo(0x13579135); assertThat(UnsignedInts.decode("0")).isEqualTo(0); } public void testDecodeIntFails() { assertThrows(NumberFormatException.class, () -> UnsignedInts.decode("0xfffffffff")); assertThrows(NumberFormatException.class, () -> UnsignedInts.decode("-5")); assertThrows(NumberFormatException.class, () -> UnsignedInts.decode("-0x5")); assertThrows(NumberFormatException.class, () -> UnsignedInts.decode("-05")); } public void testToString() { int[] bases = {2, 5, 7, 8, 10, 16}; for (long a : UNSIGNED_INTS) { for (int base : bases) { assertThat(Long.toString(a, base)).isEqualTo(UnsignedInts.toString((int) a, base)); } } } public void testJoin() { assertThat(join()).isEmpty(); assertThat(join(1)).isEqualTo("1"); assertThat(join(1, 2)).isEqualTo("1,2"); assertThat(join(-1, Integer.MIN_VALUE)).isEqualTo("4294967295,2147483648"); assertThat(UnsignedInts.join("", 1, 2, 3)).isEqualTo("123"); } private static String join(int... values) { return UnsignedInts.join(",", values); } @J2ktIncompatible @GwtIncompatible // NullPointerTester public void testNulls() { new NullPointerTester().testAllPublicStaticMethods(UnsignedInts.class); } }
java
github
https://github.com/google/guava
android/guava-tests/test/com/google/common/primitives/UnsignedIntsTest.java
import re import pytest from langchain_classic.evaluation import RegexMatchStringEvaluator @pytest.fixture def regex_match_string_evaluator() -> RegexMatchStringEvaluator: """Create a RegexMatchStringEvaluator with default configuration.""" return RegexMatchStringEvaluator() @pytest.fixture def regex_match_string_evaluator_ignore_case() -> RegexMatchStringEvaluator: """Create a RegexMatchStringEvaluator with IGNORECASE flag.""" return RegexMatchStringEvaluator(flags=re.IGNORECASE) def test_default_regex_matching( regex_match_string_evaluator: RegexMatchStringEvaluator, ) -> None: prediction = "Mindy is the CTO" reference = "^Mindy.*CTO$" result = regex_match_string_evaluator.evaluate_strings( prediction=prediction, reference=reference, ) assert result["score"] == 1.0 reference = "^Mike.*CEO$" result = regex_match_string_evaluator.evaluate_strings( prediction=prediction, reference=reference, ) assert result["score"] == 0.0 def test_regex_matching_with_ignore_case( regex_match_string_evaluator_ignore_case: RegexMatchStringEvaluator, ) -> None: prediction = "Mindy is the CTO" reference = "^mindy.*cto$" result = regex_match_string_evaluator_ignore_case.evaluate_strings( prediction=prediction, reference=reference, ) assert result["score"] == 1.0
python
github
https://github.com/langchain-ai/langchain
libs/langchain/tests/unit_tests/evaluation/regex_match/test_base.py
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsXmlUtils. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Matthias Kuhn' __date__ = '18/11/2016' __copyright__ = 'Copyright 2016, The QGIS Project' import qgis # NOQA switch sip api from qgis.core import (QgsXmlUtils, QgsProperty, QgsGeometry, QgsCoordinateReferenceSystem) from qgis.PyQt.QtXml import QDomDocument from qgis.PyQt.QtGui import QColor from qgis.testing import start_app, unittest start_app() class TestQgsXmlUtils(unittest.TestCase): def test_invalid(self): """ Test that invalid attributes are correctly loaded and written """ doc = QDomDocument("properties") elem = QgsXmlUtils.writeVariant(None, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertIsNone(prop2) def test_integer(self): """ Test that maps are correctly loaded and written """ doc = QDomDocument("properties") my_properties = {'a': 1, 'b': 2, 'c': 3, 'd': -1} elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_long(self): """ Test that maps are correctly loaded and written """ doc = QDomDocument("properties") # not sure if this actually does map to a long? 
my_properties = {'a': 9223372036854775808} elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_string(self): """ Test that strings are correctly loaded and written """ doc = QDomDocument("properties") my_properties = {'a': 'a', 'b': 'b', 'c': 'something_else', 'empty': ''} elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_double(self): """ Test that maps are correctly loaded and written """ doc = QDomDocument("properties") my_properties = {'a': 0.27, 'b': 1.0, 'c': 5} elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_boolean(self): """ Test that maps are correctly loaded and written """ doc = QDomDocument("properties") my_properties = {'a': True, 'b': False} elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_list(self): """ Test that lists are correctly loaded and written """ doc = QDomDocument("properties") my_properties = [1, 4, 'a', 'test', 7.9] elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_complex(self): """ Test that maps are correctly loaded and written """ doc = QDomDocument("properties") my_properties = {'boolean': True, 'integer': False, 'map': {'a': 1}} elem = QgsXmlUtils.writeVariant(my_properties, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(my_properties, prop2) def test_property(self): """ Test that QgsProperty values are correctly loaded and written """ doc = QDomDocument("properties") prop = QgsProperty.fromValue(1001) elem = QgsXmlUtils.writeVariant(prop, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(prop, prop2) prop = QgsProperty.fromExpression('1+2=5') elem = 
QgsXmlUtils.writeVariant(prop, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(prop, prop2) prop = QgsProperty.fromField('oid') elem = QgsXmlUtils.writeVariant(prop, doc) prop2 = QgsXmlUtils.readVariant(elem) self.assertEqual(prop, prop2) def test_crs(self): """ Test that QgsCoordinateReferenceSystem values are correctly loaded and written """ doc = QDomDocument("properties") crs = QgsCoordinateReferenceSystem('epsg:3111') elem = QgsXmlUtils.writeVariant(crs, doc) crs2 = QgsXmlUtils.readVariant(elem) self.assertTrue(crs2.isValid()) self.assertEqual(crs2.authid(), 'EPSG:3111') crs = QgsCoordinateReferenceSystem() elem = QgsXmlUtils.writeVariant(crs, doc) crs2 = QgsXmlUtils.readVariant(elem) self.assertFalse(crs2.isValid()) def test_geom(self): """ Test that QgsGeometry values are correctly loaded and written """ doc = QDomDocument("properties") g = QgsGeometry.fromWkt('Point(3 4)') elem = QgsXmlUtils.writeVariant(g, doc) g2 = QgsXmlUtils.readVariant(elem) self.assertEqual(g2.asWkt(), 'Point (3 4)') def test_color(self): """ Test that QColor values are correctly loaded and written """ doc = QDomDocument("properties") elem = QgsXmlUtils.writeVariant(QColor(100, 200, 210), doc) c = QgsXmlUtils.readVariant(elem) self.assertEqual(c, QColor(100, 200, 210)) elem = QgsXmlUtils.writeVariant(QColor(100, 200, 210, 50), doc) c = QgsXmlUtils.readVariant(elem) self.assertEqual(c, QColor(100, 200, 210, 50)) elem = QgsXmlUtils.writeVariant(QColor(), doc) c = QgsXmlUtils.readVariant(elem) self.assertFalse(c.isValid()) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from django.conf.urls import patterns from django.conf.urls import url import openstack_dashboard.contrib.sahara.content.data_processing. \ clusters.views as views urlpatterns = patterns('', url(r'^$', views.ClustersView.as_view(), name='index'), url(r'^$', views.ClustersView.as_view(), name='clusters'), url(r'^create-cluster$', views.CreateClusterView.as_view(), name='create-cluster'), url(r'^configure-cluster$', views.ConfigureClusterView.as_view(), name='configure-cluster'), url(r'^(?P<cluster_id>[^/]+)$', views.ClusterDetailsView.as_view(), name='details'), url(r'^(?P<cluster_id>[^/]+)/scale$', views.ScaleClusterView.as_view(), name='scale'))
unknown
codeparrot/codeparrot-clean
import time from proboscis.asserts import assert_equal from proboscis.asserts import assert_not_equal from proboscis.asserts import assert_false from proboscis import SkipTest from proboscis import test from proboscis import before_class from proboscis import after_class from json import loads from modules.logger import Log from modules.amqp import AMQPWorker from modules.worker import WorkerThread, WorkerTasks from config.api1_1_config import * from config.amqp import * from on_http_api1_1 import NodesApi as Nodes from on_http_api1_1 import WorkflowApi as Workflows from tests.api.v1_1.discovery_tests import DiscoveryTests from tests.api.v1_1.poller_tests import PollerTests from tests.api.v1_1.workflows_tests import WorkflowsTests from benchmark.tests import ansible_ctl from benchmark.utils import parser from benchmark.utils.case_recorder import caseRecorder LOG = Log(__name__) class BenchmarkTests(object): def __init__(self, name): ansible_ctl.render_case_name(name) self.__data_path = ansible_ctl.get_data_path_per_case() self.case_recorder = caseRecorder(self.__data_path) self.client = config.api_client self.__node_count = 0 self.__finished = 0 self.__graph_name = None def _prepare_case_env(self): self.__node_count = self.__check_compute_count() self.case_recorder.write_interval(ansible_ctl.get_data_interval()) self.case_recorder.write_start() self.case_recorder.write_node_number(self.__node_count) assert_equal(True, ansible_ctl.start_daemon(), \ message='Failed to start data collection daemon!') def _collect_case_data(self): assert_equal(True, ansible_ctl.collect_data(), message='Failed to collect footprint data!') self.case_recorder.write_end() LOG.info('Parse log and generate html reports') try: parser.parse(self.__data_path) except RuntimeError as err: LOG.warning('Error on parsing log or generating reports: ') LOG.warning(err) def _wait_until_graph_finish(self, graph_name, timevalue): self.__graph_name = graph_name self.__task = 
WorkerThread(AMQPWorker(queue=QUEUE_GRAPH_FINISH, \ callbacks=[self.__handle_graph_finish]), \ graph_name) def start(worker, id): worker.start() tasks = WorkerTasks(tasks=[self.__task], func=start) tasks.run() tasks.wait_for_completion(timeout_sec=timevalue) assert_false(self.__task.timeout, \ message='timeout waiting for task {0}'.format(self.__task.id)) def __handle_graph_finish(self, body, message): routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1] Workflows().workflows_get() workflows = loads(self.client.last_response.data) message.ack() for w in workflows: definition = w['definition'] injectableName = definition.get('injectableName') if injectableName == self.__graph_name: graphId = w['context'].get('graphId') if graphId == routeId: nodeid = w['context'].get('target') if nodeid == None: nodeid = w['definition']['options']['defaults'].get('nodeId','') status = body.get('status') if status == 'succeeded': self.__finished += 1 self.case_recorder.write_event('finish {0} on node {1} {2}' .format(self.__graph_name, self.__finished, nodeid)) break if self.__node_count == self.__finished: self.__task.worker.stop() self.__task.running = False self.__finished = 0 self._collect_case_data() LOG.info('Fetch {0} log finished'.format(self.__graph_name)) def __check_compute_count(self): Nodes().nodes_get() nodes = loads(self.client.last_response.data) count = 0 for n in nodes: type = n.get('type') if type == 'compute': count += 1 return count @test(groups=["benchmark.poller"]) class BenchmarkPollerTests(BenchmarkTests): def __init__(self): BenchmarkTests.__init__(self, 'poller') @test(groups=["test-bm-poller"], depends_on_groups=["test-node-poller"]) def test_poller(self): """ Wait for 15 mins to let RackHD run pollers """ self._prepare_case_env() time.sleep(900) self._collect_case_data() LOG.info('Fetch poller log finished') @test(groups=["benchmark.discovery"]) class BenchmarkDiscoveryTests(BenchmarkTests): def __init__(self): 
BenchmarkTests.__init__(self, 'discovery') @test(groups=["test-bm-discovery-prepare"], depends_on_groups=["test-node-poller"]) def test_prepare_discovery(self): """ Prepare discovery """ self._prepare_case_env() @test(groups=["test-bm-discovery"], depends_on_groups=["test-bm-discovery-prepare", "test_discovery_delete_node"]) def test_discovery(self): """ Wait for discovery finished """ self.case_recorder.write_event('start all discovery') self._wait_until_graph_finish('Graph.SKU.Discovery', 1200) @test(groups=["test-bm-discovery-post"], depends_on_groups=["test_discovery_add_obm"]) def test_discovery_post(self): pass @test(groups=["benchmark.bootstrap"]) class BenchmarkBootstrapTests(BenchmarkTests): def __init__(self): BenchmarkTests.__init__(self, 'bootstrap') self.__base = defaults.get('RACKHD_BASE_REPO_URL', \ 'http://{0}:{1}'.format(HOST_IP, HOST_PORT)) self.__os_repo = defaults.get('RACKHD_CENTOS_REPO_PATH', \ self.__base + '/repo/centos/7') @test(groups=["test-bm-bootstrap-prepare"], depends_on_groups=["test-node-poller"]) def test_prepare_bootstrap(self): """ Prepare bootstrap """ self._prepare_case_env() @test(groups=['test-bm-bootstrap-post-centos7'], depends_on_groups=["test-bm-bootstrap-prepare"]) def test_install_centos7(self): """ Testing CentOS 7 Installer Workflow """ self.case_recorder.write_event('start all bootstrap') body = { "options": { "defaults": { "version": "7", "repo": self.__os_repo } } } WorkflowsTests().post_workflows("Graph.InstallCentOS", nodes=[], data=body, run_now=False) @test(groups=["test-bm-bootstrap"], depends_on_groups=["test-bm-bootstrap-prepare", "test-bm-bootstrap-post-centos7"]) def test_bootstrap_centos(self): """ Wait for bootstrap finished """ self.case_recorder.write_event('start all bootstrap') self._wait_until_graph_finish('Graph.InstallCentOS', -1)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A lightweight wrapper around MySQLdb.""" import copy import MySQLdb import MySQLdb.constants import MySQLdb.converters import MySQLdb.cursors import itertools import logging class Connection(object): """A lightweight wrapper around MySQLdb DB-API connections. The main value we provide is wrapping rows in a dict/object so that columns can be accessed by name. Typical usage: db = database.Connection("localhost", "mydatabase") for article in db.query("SELECT * FROM articles"): print article.title Cursors are hidden by the implementation, but other than that, the methods are very similar to the DB-API. We explicitly set the timezone to UTC and the character encoding to UTF-8 on all connections to avoid time zone and encoding errors. 
""" def __init__(self, host, database, user=None, password=None): self.host = host self.database = database args = dict(conv=CONVERSIONS, use_unicode=True, charset="utf8", db=database, init_command='SET time_zone = "+0:00"', sql_mode="TRADITIONAL") if user is not None: args["user"] = user if password is not None: args["passwd"] = password # We accept a path to a MySQL socket file or a host(:port) string if "/" in host: args["unix_socket"] = host else: self.socket = None pair = host.split(":") if len(pair) == 2: args["host"] = pair[0] args["port"] = int(pair[1]) else: args["host"] = host args["port"] = 3306 self._db = None self._db_args = args try: self.reconnect() except: logging.error("Cannot connect to MySQL on %s", self.host, exc_info=True) def __del__(self): self.close() def close(self): """Closes this database connection.""" if self._db is not None: self._db.close() self._db = None def reconnect(self): """Closes the existing database connection and re-opens it.""" self.close() self._db = MySQLdb.connect(**self._db_args) self._db.autocommit(True) def iter(self, query, *parameters): """Returns an iterator for the given query and parameters.""" if self._db is None: self.reconnect() cursor = MySQLdb.cursors.SSCursor(self._db) try: self._execute(cursor, query, parameters) column_names = [d[0] for d in cursor.description] for row in cursor: yield Row(zip(column_names, row)) finally: cursor.close() def query(self, query, *parameters): """Returns a row list for the given query and parameters.""" cursor = self._cursor() try: self._execute(cursor, query, parameters) column_names = [d[0] for d in cursor.description] return [Row(itertools.izip(column_names, row)) for row in cursor] finally: cursor.close() def get(self, query, *parameters): """Returns the first row returned for the given query.""" rows = self.query(query, *parameters) if not rows: return None elif len(rows) > 1: raise Exception("Multiple rows returned for Database.get() query") else: return rows[0] def 
execute(self, query, *parameters): """Executes the given query, returning the lastrowid from the query.""" cursor = self._cursor() try: self._execute(cursor, query, parameters) return cursor.lastrowid finally: cursor.close() def executemany(self, query, parameters): """Executes the given query against all the given param sequences. We return the lastrowid from the query. """ cursor = self._cursor() try: cursor.executemany(query, parameters) return cursor.lastrowid finally: cursor.close() def _cursor(self): if self._db is None: self.reconnect() return self._db.cursor() def _execute(self, cursor, query, parameters): try: return cursor.execute(query, parameters) except OperationalError: logging.error("Error connecting to MySQL on %s", self.host) self.close() raise class Row(dict): """A dict that allows for object-like property access syntax.""" def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) # Fix the access conversions to properly recognize unicode/binary FIELD_TYPE = MySQLdb.constants.FIELD_TYPE FLAG = MySQLdb.constants.FLAG CONVERSIONS = copy.deepcopy(MySQLdb.converters.conversions) for field_type in \ [FIELD_TYPE.BLOB, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING] + \ ([FIELD_TYPE.VARCHAR] if 'VARCHAR' in vars(FIELD_TYPE) else []): CONVERSIONS[field_type].insert(0, (FLAG.BINARY, str)) # Alias some common MySQL exceptions IntegrityError = MySQLdb.IntegrityError OperationalError = MySQLdb.OperationalError
unknown
codeparrot/codeparrot-clean
# validateNoDerivedComputationsInEffects ## File `src/Validation/ValidateNoDerivedComputationsInEffects.ts` ## Purpose Validates that `useEffect` is not used for derived computations that could and should be performed during render. This catches a common anti-pattern where developers use effects to synchronize derived state, which causes unnecessary re-renders and complexity. See: https://react.dev/learn/you-might-not-need-an-effect#updating-state-based-on-props-or-state ## Input Invariants - Operates on HIRFunction (pre-reactive scope inference) - Effect hooks must be identified (`isUseEffectHookType`) - setState functions must be identified (`isSetStateType`) ## Validation Rules The pass detects when an effect: 1. Has a dependency array (2nd argument) 2. The effect function only captures the dependencies and setState functions 3. The effect calls setState with a value derived solely from the dependencies 4. The effect has no control flow (loops with back edges) When detected, it produces: ``` Error: Values derived from props and state should be calculated during render, not in an effect. (https://react.dev/learn/you-might-not-need-an-effect#updating-state-based-on-props-or-state) ``` ## Algorithm 1. **Collection Phase**: Traverse all instructions to collect: - `candidateDependencies`: Map of ArrayExpression identifiers (potential deps arrays) - `functions`: Map of FunctionExpression identifiers (potential effect callbacks) - `locals`: Map of LoadLocal sources for identifier resolution 2. **Detection Phase**: When a `useEffect` call is found with 2 arguments: - Look up the effect function and dependencies array - Verify all dependency array elements are identifiers - Call `validateEffect()` on the effect function 3. 
**Effect Validation** (`validateEffect`): - Check that the effect only captures dependencies or setState functions - Check that all dependencies are actually used in the effect - Skip if any block has a back edge (loop) - Track data flow through instructions: - `LoadLocal`: Propagate dependency tracking - `PropertyLoad`, `BinaryExpression`, `TemplateLiteral`, `CallExpression`, `MethodCall`: Aggregate dependencies from operands - When `setState` is called with a single argument that depends on ALL effect dependencies, record the location - If any dependency is used in a terminal operand (control flow), abort validation - Push errors for all recorded setState locations ### Value Tracking The pass maintains a `values` map from `IdentifierId` to `Array<IdentifierId>` tracking which effect dependencies each value derives from. When setState is called, if the argument derives from all dependencies, it's flagged as a derived computation. ## Edge Cases ### Allowed: Effects with side effects ```javascript // Valid - effect captures external values, not just deps useEffect(() => { logToServer(firstName); setFullName(firstName); }, [firstName]); ``` ### Allowed: Effects with loops ```javascript // Valid - has control flow, not a simple derivation useEffect(() => { let result = ''; for (const item of items) { result += item; } setResult(result); }, [items]); ``` ### Allowed: Effects with conditional setState ```javascript // Valid - setState is conditional on control flow useEffect(() => { if (condition) { setFullName(firstName + lastName); } }, [firstName, lastName]); ``` ### Not detected: Subset of dependencies ```javascript // Not flagged - only uses firstName, not lastName useEffect(() => { setResult(firstName); }, [firstName, lastName]); ``` ## TODOs None in source code. 
## Example ### Fixture: `error.invalid-derived-computation-in-effect.js` **Input:** ```javascript // @validateNoDerivedComputationsInEffects import {useEffect, useState} from 'react'; function BadExample() { const [firstName, setFirstName] = useState('Taylor'); const [lastName, setLastName] = useState('Swift'); // Avoid: redundant state and unnecessary Effect const [fullName, setFullName] = useState(''); useEffect(() => { setFullName(firstName + ' ' + lastName); }, [firstName, lastName]); return <div>{fullName}</div>; } ``` **Error:** ``` Found 1 error: Error: Values derived from props and state should be calculated during render, not in an effect. (https://react.dev/learn/you-might-not-need-an-effect#updating-state-based-on-props-or-state) error.invalid-derived-computation-in-effect.ts:11:4 9 | const [fullName, setFullName] = useState(''); 10 | useEffect(() => { > 11 | setFullName(firstName + ' ' + lastName); | ^^^^^^^^^^^ Values derived from props and state should be calculated during render, not in an effect. 12 | }, [firstName, lastName]); 13 | 14 | return <div>{fullName}</div>; ``` **Why it fails:** The effect computes `fullName` purely from `firstName` and `lastName` (the dependencies) and then sets state. This is a derived computation that should be calculated during render: ```javascript // Correct approach const fullName = firstName + ' ' + lastName; ```
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/docs/passes/45-validateNoDerivedComputationsInEffects.md
# -*- coding: utf-8 -*- """ tipfyext.jinja2 ~~~~~~~~~~~~~~~ Jinja2 template support for Tipfy. Learn more about Jinja2 at http://jinja.pocoo.org/2/ :copyright: 2011 by tipfy.org. :license: BSD, see LICENSE.txt for more details. """ import blinker from jinja2 import Environment, FileSystemLoader, ModuleLoader from werkzeug import cached_property, import_string from tipfy.local import get_request from tipfy.routing import url_for #: Default configuration values for this module. Keys are: #: #: templates_dir #: Directory for templates. Default is `templates`. #: #: templates_compiled_target #: Target for compiled templates. If set, uses the loader for compiled #: templates in production. If it ends with a '.zip' it will be treated #: as a zip file. Default is None. #: #: force_use_compiled #: Forces the use of compiled templates even in the development server. #: #: environment_args #: Keyword arguments used to instantiate the Jinja2 environment. By #: default autoescaping is enabled and two extensions are set: #: 'jinja2.ext.autoescape' and 'jinja2.ext.with_'. For production it may #: be a godd idea to set 'auto_reload' to False -- we don't need to check #: if templates changed after deployed. #: #: after_environment_created #: [DEPRECATED: use the environment_created hook instead] #: A function called after the environment is created. Can also be defined #: as a string to be imported dynamically. Use this to set extra filters, #: global variables, extensions etc. It is called passing the environment #: as argument. 
default_config = { 'templates_dir': 'templates', 'templates_compiled_target': None, 'force_use_compiled': False, 'environment_args': { 'autoescape': True, 'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_'], }, 'after_environment_created': None, } class Jinja2(object): def __init__(self, app, _globals=None, filters=None): self.app = app config = app.config[__name__] kwargs = config['environment_args'].copy() enable_i18n = 'jinja2.ext.i18n' in kwargs.get('extensions', []) if not kwargs.get('loader'): templates_compiled_target = config['templates_compiled_target'] use_compiled = not app.debug or config['force_use_compiled'] if templates_compiled_target and use_compiled: # Use precompiled templates loaded from a module or zip. kwargs['loader'] = ModuleLoader(templates_compiled_target) else: # Parse templates for every new environment instances. kwargs['loader'] = FileSystemLoader(config['templates_dir']) # Initialize the environment. env = Environment(**kwargs) if _globals: env.globals.update(_globals) if filters: env.filters.update(filters) if enable_i18n: # Install i18n. from tipfy import i18n env.install_gettext_callables( lambda x: get_request().i18n.translations.ugettext(x), lambda s, p, n: get_request().i18n.translations.ungettext(s, p, n), newstyle=True) format_functions = { 'format_date': i18n.format_date, 'format_time': i18n.format_time, 'format_datetime': i18n.format_datetime, 'format_timedelta': i18n.format_timedelta, } env.globals.update(format_functions) env.filters.update(format_functions) env.globals['url_for'] = url_for after_creation_func = config['after_environment_created'] if after_creation_func: if isinstance(after_creation_func, basestring): after_creation_func = import_string(after_creation_func) after_creation_func(env) environment_created.send(self, environment=env) self.environment = env def render(self, _filename, **context): """Renders a template and returns a response object. 
:param _filename: The template filename, related to the templates directory. :param context: Keyword arguments used as variables in the rendered template. These will override values set in the request context. :returns: A rendered template. """ res = self.environment.get_template(_filename).render(**context) template_rendered.send(self, template=_filename, context=context, result=res) return res def render_template(self, _handler, _filename, **context): """Renders a template and returns a response object. :param _filename: The template filename, related to the templates directory. :param context: Keyword arguments used as variables in the rendered template. These will override values set in the request context. :returns: A rendered template. """ ctx = _handler.context.copy() ctx.update(context) return self.render(_filename, **ctx) def render_response(self, _handler, _filename, **context): """Returns a response object with a rendered template. :param _filename: The template filename, related to the templates directory. :param context: Keyword arguments used as variables in the rendered template. These will override values set in the request context. """ res = self.render_template(_handler, _filename, **context) return self.app.response_class(res) def get_template_attribute(self, filename, attribute): """Loads a macro (or variable) a template exports. This can be used to invoke a macro from within Python code. If you for example have a template named `_foo.html` with the following contents: .. sourcecode:: html+jinja {% macro hello(name) %}Hello {{ name }}!{% endmacro %} You can access this from Python code like this:: hello = get_template_attribute('_foo.html', 'hello') return hello('World') This function is borrowed from `Flask`. :param filename: The template filename. :param attribute: The name of the variable of macro to acccess. 
""" template = self.environment.get_template(filename) return getattr(template.module, attribute) @classmethod def factory(cls, _app, _name, **kwargs): if _name not in _app.registry: _app.registry[_name] = cls(_app, **kwargs) return _app.registry[_name] class Jinja2Mixin(object): """Mixin that adds ``render_template`` and ``render_response`` methods to a :class:`tipfy.RequestHandler`. It will use the request context to render templates. """ # The Jinja2 creator. jinja2_class = Jinja2 @cached_property def jinja2(self): return self.jinja2_class.factory(self.app, 'jinja2') def render_template(self, _filename, **context): return self.jinja2.render_template(self, _filename, **context) def render_response(self, _filename, **context): return self.jinja2.render_response(self, _filename, **context) _signals = blinker.Namespace() environment_created = _signals.signal('environment-created') template_rendered = _signals.signal('template-rendered')
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # Copyright (C) 2014 IBM Corporation and Others. All Rights Reserved. # # @author Steven R. Loomis <srl@icu-project.org> # # This tool slims down an ICU data (.dat) file according to a config file. # # See: http://bugs.icu-project.org/trac/ticket/10922 # # Usage: # Use "-h" to get help options. import sys import shutil # for utf-8 reload(sys) sys.setdefaultencoding("utf-8") import optparse import os import json import re endian=sys.byteorder parser = optparse.OptionParser(usage="usage: mkdir tmp ; %prog -D ~/Downloads/icudt53l.dat -T tmp -F trim_en.json -O icudt53l.dat" ) parser.add_option("-P","--tool-path", action="store", dest="toolpath", help="set the prefix directory for ICU tools") parser.add_option("-D","--input-file", action="store", dest="datfile", help="input data file (icudt__.dat)", ) # required parser.add_option("-F","--filter-file", action="store", dest="filterfile", help="filter file (JSON format)", ) # required parser.add_option("-T","--tmp-dir", action="store", dest="tmpdir", help="working directory.", ) # required parser.add_option("--delete-tmp", action="count", dest="deltmpdir", help="delete working directory.", default=0) parser.add_option("-O","--outfile", action="store", dest="outfile", help="outfile (NOT a full path)", ) # required parser.add_option("-v","--verbose", action="count", default=0) parser.add_option('-L',"--locales", action="store", dest="locales", help="sets the 'locales.only' variable", default=None) parser.add_option('-e', '--endian', action='store', dest='endian', help='endian, big, little or host, your default is "%s".' 
% endian, default=endian, metavar='endianness') (options, args) = parser.parse_args() optVars = vars(options) for opt in [ "datfile", "filterfile", "tmpdir", "outfile" ]: if optVars[opt] is None: print "Missing required option: %s" % opt sys.exit(1) if options.verbose>0: print "Options: "+str(options) if (os.path.isdir(options.tmpdir) and options.deltmpdir): if options.verbose>1: print "Deleting tmp dir %s.." % (options.tmpdir) shutil.rmtree(options.tmpdir) if not (os.path.isdir(options.tmpdir)): os.mkdir(options.tmpdir) else: print "Please delete tmpdir %s before beginning." % options.tmpdir sys.exit(1) if options.endian not in ("big","little","host"): print "Unknown endianness: %s" % options.endian sys.exit(1) if options.endian is "host": options.endian = endian if not os.path.isdir(options.tmpdir): print "Error, tmpdir not a directory: %s" % (options.tmpdir) sys.exit(1) if not os.path.isfile(options.filterfile): print "Filterfile doesn't exist: %s" % (options.filterfile) sys.exit(1) if not os.path.isfile(options.datfile): print "Datfile doesn't exist: %s" % (options.datfile) sys.exit(1) if not options.datfile.endswith(".dat"): print "Datfile doesn't end with .dat: %s" % (options.datfile) sys.exit(1) outfile = os.path.join(options.tmpdir, options.outfile) if os.path.isfile(outfile): print "Error, output file does exist: %s" % (outfile) sys.exit(1) if not options.outfile.endswith(".dat"): print "Outfile doesn't end with .dat: %s" % (options.outfile) sys.exit(1) dataname=options.outfile[0:-4] ## TODO: need to improve this. Quotes, etc. 
def runcmd(tool, cmd, doContinue=False): if(options.toolpath): cmd = os.path.join(options.toolpath, tool) + " " + cmd else: cmd = tool + " " + cmd if(options.verbose>4): print "# " + cmd rc = os.system(cmd) if rc is not 0 and not doContinue: print "FAILED: %s" % cmd sys.exit(1) return rc ## STEP 0 - read in json config fi= open(options.filterfile, "rb") config=json.load(fi) fi.close() if (options.locales): if not config.has_key("variables"): config["variables"] = {} if not config["variables"].has_key("locales"): config["variables"]["locales"] = {} config["variables"]["locales"]["only"] = options.locales.split(',') if (options.verbose > 6): print config if(config.has_key("comment")): print "%s: %s" % (options.filterfile, config["comment"]) ## STEP 1 - copy the data file, swapping endianness ## The first letter of endian_letter will be 'b' or 'l' for big or little endian_letter = options.endian[0] runcmd("icupkg", "-t%s %s %s""" % (endian_letter, options.datfile, outfile)) ## STEP 2 - get listing listfile = os.path.join(options.tmpdir,"icudata.lst") runcmd("icupkg", "-l %s > %s""" % (outfile, listfile)) fi = open(listfile, 'rb') items = fi.readlines() items = [items[i].strip() for i in range(len(items))] fi.close() itemset = set(items) if (options.verbose>1): print "input file: %d items" % (len(items)) # list of all trees trees = {} RES_INDX = "res_index.res" remove = None # remove - always remove these if config.has_key("remove"): remove = set(config["remove"]) else: remove = set() # keep - always keep these if config.has_key("keep"): keep = set(config["keep"]) else: keep = set() def queueForRemoval(tree): global remove if not config.has_key("trees"): # no config return if not config["trees"].has_key(tree): return mytree = trees[tree] if(options.verbose>0): print "* %s: %d items" % (tree, len(mytree["locs"])) # do varible substitution for this tree here if type(config["trees"][tree]) == str or type(config["trees"][tree]) == unicode: treeStr = config["trees"][tree] 
if(options.verbose>5): print " Substituting $%s for tree %s" % (treeStr, tree) if(not config.has_key("variables") or not config["variables"].has_key(treeStr)): print " ERROR: no variable: variables.%s for tree %s" % (treeStr, tree) sys.exit(1) config["trees"][tree] = config["variables"][treeStr] myconfig = config["trees"][tree] if(options.verbose>4): print " Config: %s" % (myconfig) # Process this tree if(len(myconfig)==0 or len(mytree["locs"])==0): if(options.verbose>2): print " No processing for %s - skipping" % (tree) else: only = None if myconfig.has_key("only"): only = set(myconfig["only"]) if (len(only)==0) and (mytree["treeprefix"] != ""): thePool = "%spool.res" % (mytree["treeprefix"]) if (thePool in itemset): if(options.verbose>0): print "Removing %s because tree %s is empty." % (thePool, tree) remove.add(thePool) else: print "tree %s - no ONLY" for l in range(len(mytree["locs"])): loc = mytree["locs"][l] if (only is not None) and not loc in only: # REMOVE loc toRemove = "%s%s%s" % (mytree["treeprefix"], loc, mytree["extension"]) if(options.verbose>6): print "Queueing for removal: %s" % toRemove remove.add(toRemove) def addTreeByType(tree, mytree): if(options.verbose>1): print "(considering %s): %s" % (tree, mytree) trees[tree] = mytree mytree["locs"]=[] for i in range(len(items)): item = items[i] if item.startswith(mytree["treeprefix"]) and item.endswith(mytree["extension"]): mytree["locs"].append(item[len(mytree["treeprefix"]):-4]) # now, process queueForRemoval(tree) addTreeByType("converters",{"treeprefix":"", "extension":".cnv"}) addTreeByType("stringprep",{"treeprefix":"", "extension":".spp"}) addTreeByType("translit",{"treeprefix":"translit/", "extension":".res"}) addTreeByType("brkfiles",{"treeprefix":"brkitr/", "extension":".brk"}) addTreeByType("brkdict",{"treeprefix":"brkitr/", "extension":"dict"}) addTreeByType("confusables",{"treeprefix":"", "extension":".cfu"}) for i in range(len(items)): item = items[i] if item.endswith(RES_INDX): treeprefix 
= item[0:item.rindex(RES_INDX)] tree = None if treeprefix == "": tree = "ROOT" else: tree = treeprefix[0:-1] if(options.verbose>6): print "procesing %s" % (tree) trees[tree] = { "extension": ".res", "treeprefix": treeprefix, "hasIndex": True } # read in the resource list for the tree treelistfile = os.path.join(options.tmpdir,"%s.lst" % tree) runcmd("iculslocs", "-i %s -N %s -T %s -l > %s" % (outfile, dataname, tree, treelistfile)) fi = open(treelistfile, 'rb') treeitems = fi.readlines() trees[tree]["locs"] = [treeitems[i].strip() for i in range(len(treeitems))] fi.close() if(not config.has_key("trees") or not config["trees"].has_key(tree)): print " Warning: filter file %s does not mention trees.%s - will be kept as-is" % (options.filterfile, tree) else: queueForRemoval(tree) def removeList(count=0): # don't allow "keep" items to creep in here. global remove remove = remove - keep if(count > 10): print "Giving up - %dth attempt at removal." % count sys.exit(1) if(options.verbose>1): print "%d items to remove - try #%d" % (len(remove),count) if(len(remove)>0): oldcount = len(remove) hackerrfile=os.path.join(options.tmpdir, "REMOVE.err") removefile = os.path.join(options.tmpdir, "REMOVE.lst") fi = open(removefile, 'wb') for i in remove: print >>fi, i fi.close() rc = runcmd("icupkg","-r %s %s 2> %s" % (removefile,outfile,hackerrfile),True) if rc is not 0: if(options.verbose>5): print "## Damage control, trying to parse stderr from icupkg.." 
fi = open(hackerrfile, 'rb') erritems = fi.readlines() fi.close() #Item zone/zh_Hant_TW.res depends on missing item zone/zh_Hant.res pat = re.compile("""^Item ([^ ]+) depends on missing item ([^ ]+).*""") for i in range(len(erritems)): line = erritems[i].strip() m = pat.match(line) if m: toDelete = m.group(1) if(options.verbose > 5): print "<< %s added to delete" % toDelete remove.add(toDelete) else: print "ERROR: could not match errline: %s" % line sys.exit(1) if(options.verbose > 5): print " now %d items to remove" % len(remove) if(oldcount == len(remove)): print " ERROR: could not add any mor eitems to remove. Fail." sys.exit(1) removeList(count+1) # fire it up removeList(1) # now, fixup res_index, one at a time for tree in trees: # skip trees that don't have res_index if not trees[tree].has_key("hasIndex"): continue treebunddir = options.tmpdir if(trees[tree]["treeprefix"]): treebunddir = os.path.join(treebunddir, trees[tree]["treeprefix"]) if not (os.path.isdir(treebunddir)): os.mkdir(treebunddir) treebundres = os.path.join(treebunddir,RES_INDX) treebundtxt = "%s.txt" % (treebundres[0:-4]) runcmd("iculslocs", "-i %s -N %s -T %s -b %s" % (outfile, dataname, tree, treebundtxt)) runcmd("genrb","-d %s -s %s res_index.txt" % (treebunddir, treebunddir)) runcmd("icupkg","-s %s -a %s%s %s" % (options.tmpdir, trees[tree]["treeprefix"], RES_INDX, outfile))
unknown
codeparrot/codeparrot-clean
#!/bin/bash set -euo pipefail curl_with_retry() { local output_path="$1" local url="$2" echo "Downloading $(basename "${output_path}")" curl --fail --location --show-error --silent \ --retry 5 --retry-all-errors --retry-delay 2 --retry-max-time 120 \ -o "${output_path}" "${url}" } # Fetch beats artifacts export ES_VERSION=$(grep 'elasticsearch' build-tools-internal/version.properties | awk '{print $3}') export BEATS_DIR=$(pwd)/distribution/docker/build/artifacts/beats mkdir -p ${BEATS_DIR} curl_with_retry "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz" curl_with_retry "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-arm64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz" curl_with_retry "${BEATS_DIR}/metricbeat-fips-${ES_VERSION}-linux-x86_64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-fips-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz" curl_with_retry "${BEATS_DIR}/metricbeat-fips-${ES_VERSION}-linux-arm64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-fips-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz" curl_with_retry "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz" curl_with_retry "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-arm64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz" curl_with_retry "${BEATS_DIR}/filebeat-fips-${ES_VERSION}-linux-x86_64.tar.gz" 
"https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-fips-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz" curl_with_retry "${BEATS_DIR}/filebeat-fips-${ES_VERSION}-linux-arm64.tar.gz" "https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-fips-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz" # Fetch ML artifacts export ML_IVY_REPO=$(mktemp -d) mkdir -p ${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION} curl_with_retry "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}-deps.zip" "https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT-deps.zip" curl_with_retry "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}-nodeps.zip" "https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT-nodeps.zip" curl_with_retry "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}.zip" "https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT.zip" .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false -Dbuild.ml_cpp.repo=file://${ML_IVY_REPO} \ -Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef ${@:-functionalTests}
unknown
github
https://github.com/elastic/elasticsearch
.buildkite/scripts/release-tests.sh
from __future__ import unicode_literals from django.contrib.auth.views import redirect_to_login from django.core.exceptions import MiddlewareNotUsed from django.http import HttpResponse, Http404 from mezzanine.conf import settings from mezzanine.pages import context_processors, page_processors from mezzanine.pages.models import Page from mezzanine.pages.views import page as page_view from mezzanine.utils.conf import middlewares_or_subclasses_installed from mezzanine.utils.deprecation import (MiddlewareMixin, is_authenticated) from mezzanine.utils.urls import path_to_slug class PageMiddleware(MiddlewareMixin): """ Adds a page to the template context for the current response. If no page matches the URL, and the view function is not the fall-back page view, we try and find the page with the deepest URL that matches within the current URL, as in this situation, the app's urlpattern is considered to sit "under" a given page, for example the blog page will be used when individual blog posts are viewed. We want the page for things like breadcrumb nav, and page processors, but most importantly so the page's ``login_required`` flag can be honoured. If a page is matched, and the fall-back page view is called, we add the page to the ``extra_context`` arg of the page view, which it can then use to choose which template to use. In either case, we add the page to the response's template context, so that the current page is always available. """ def __init__(self, *args, **kwargs): super(PageMiddleware, self).__init__(*args, **kwargs) if "mezzanine.pages" not in settings.INSTALLED_APPS: raise MiddlewareNotUsed @classmethod def installed(cls): """ Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware`` or a subclass has been installed. We cache the result on the ``PageMiddleware._installed`` to only run this once. 
""" try: return cls._installed except AttributeError: name = "mezzanine.pages.middleware.PageMiddleware" installed = middlewares_or_subclasses_installed([name]) setattr(cls, "_installed", installed) return installed def process_view(self, request, view_func, view_args, view_kwargs): """ Per-request mechanics for the current page object. """ # Load the closest matching page by slug, and assign it to the # request object. If none found, skip all further processing. slug = path_to_slug(request.path_info) pages = Page.objects.with_ascendants_for_slug(slug, for_user=request.user, include_login_required=True) if pages: page = pages[0] setattr(request, "page", page) context_processors.page(request) else: return # Handle ``page.login_required``. if page.login_required and not is_authenticated(request.user): return redirect_to_login(request.get_full_path()) # If the view isn't Mezzanine's page view, try to return the result # immediately. In the case of a 404 with an URL slug that matches a # page exactly, swallow the exception and try Mezzanine's page view. # # This allows us to set up pages with URLs that also match non-page # urlpatterns. For example, a page could be created with the URL # /blog/about/, which would match the blog urlpattern, and assuming # there wasn't a blog post with the slug "about", would raise a 404 # and subsequently be rendered by Mezzanine's page view. if view_func != page_view: try: return view_func(request, *view_args, **view_kwargs) except Http404: if page.slug != slug: raise # Run page processors. 
extra_context = {} if request.resolver_match: extra_context = request.resolver_match.kwargs.get("extra_context", {}) model_processors = page_processors.processors[page.content_model] slug_processors = page_processors.processors["slug:%s" % page.slug] for (processor, exact_page) in slug_processors + model_processors: if exact_page and not page.is_current: continue processor_response = processor(request, page) if isinstance(processor_response, HttpResponse): return processor_response elif processor_response: try: for k, v in processor_response.items(): if k not in extra_context: extra_context[k] = v except (TypeError, ValueError): name = "%s.%s" % (processor.__module__, processor.__name__) error = ("The page processor %s returned %s but must " "return HttpResponse or dict." % (name, type(processor_response))) raise ValueError(error) return page_view(request, slug, extra_context=extra_context)
unknown
codeparrot/codeparrot-clean