code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import pytest import re from ansible import context from ansible.cli.adhoc import AdHocCLI, display from ansible.errors import AnsibleOptionsError def test_parse(): """ Test adhoc parse""" with pytest.raises(ValueError, match='A non-empty list for args is required'): adhoc_cli = AdHocCLI([]) adhoc_cli = AdHocCLI(['ansibletest']) with pytest.raises(SystemExit): adhoc_cli.parse() def test_with_command(): """ Test simple adhoc command""" module_name = 'command' adhoc_cli = AdHocCLI(args=['ansible', '-m', module_name, '-vv', 'localhost']) adhoc_cli.parse() assert context.CLIARGS['module_name'] == module_name assert display.verbosity == 2 def test_simple_command(): """ Test valid command and its run""" adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost', '-a', 'echo "hi"']) adhoc_cli.parse() ret = adhoc_cli.run() assert ret == 0 def test_no_argument(): """ Test no argument command""" adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost']) adhoc_cli.parse() with pytest.raises(AnsibleOptionsError) as exec_info: adhoc_cli.run() assert 'No argument passed to command module' == str(exec_info.value) def test_did_you_mean_playbook(): """ Test adhoc with yml file as argument parameter""" adhoc_cli = AdHocCLI(['/bin/ansible', '-m', 'command', 'localhost.yml']) adhoc_cli.parse() with pytest.raises(AnsibleOptionsError) as exec_info: adhoc_cli.run() assert 'No argument passed to command module (did you mean to run ansible-playbook?)' == str(exec_info.value) def test_play_ds_positive(): """ Test _play_ds""" adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-m', 'command']) adhoc_cli.parse() ret = adhoc_cli._play_ds('command', 10, 2) assert ret['name'] == 'Ansible Ad-Hoc' assert ret['tasks'] == [{'action': 
{'module': 'command', 'args': {}}, 'async_val': 10, 'poll': 2}] def test_play_ds_with_include_role(): """ Test include_role command with poll""" adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-m', 'include_role']) adhoc_cli.parse() ret = adhoc_cli._play_ds('include_role', None, 2) assert ret['name'] == 'Ansible Ad-Hoc' assert ret['gather_facts'] == 'no' def test_run_import_playbook(): """ Test import_playbook which is not allowed with ad-hoc command""" import_playbook = 'import_playbook' adhoc_cli = AdHocCLI(args=['/bin/ansible', '-m', import_playbook, 'localhost']) adhoc_cli.parse() with pytest.raises(AnsibleOptionsError) as exec_info: adhoc_cli.run() assert context.CLIARGS['module_name'] == import_playbook assert "'%s' is not a valid action for ad-hoc commands" % import_playbook == str(exec_info.value) def test_run_no_extra_vars(): adhoc_cli = AdHocCLI(args=['/bin/ansible', 'localhost', '-e']) with pytest.raises(SystemExit) as exec_info: adhoc_cli.parse() assert exec_info.value.code == 2 def test_ansible_version(capsys, mocker): adhoc_cli = AdHocCLI(args=['/bin/ansible', '--version']) with pytest.raises(SystemExit): adhoc_cli.run() version = capsys.readouterr() try: version_lines = version.out.splitlines() except AttributeError: # Python 2.6 does return a named tuple, so get the first item version_lines = version[0].splitlines() assert len(version_lines) == 6, 'Incorrect number of lines in "ansible --version" output' assert re.match('ansible [0-9.a-z]+$', version_lines[0]), 'Incorrect ansible version line in "ansible --version" output' assert re.match(' config file = .*$', version_lines[1]), 'Incorrect config file line in "ansible --version" output' assert re.match(' configured module search path = .*$', version_lines[2]), 'Incorrect module search path in "ansible --version" output' assert re.match(' ansible python module location = .*$', version_lines[3]), 'Incorrect python module location in "ansible --version" output' assert re.match(' executable 
location = .*$', version_lines[4]), 'Incorrect executable locaction in "ansible --version" output' assert re.match(' python version = .*$', version_lines[5]), 'Incorrect python version in "ansible --version" output'
unknown
codeparrot/codeparrot-clean
import json from django import template from django.template.defaultfilters import stringfilter from django.utils.safestring import mark_safe from django.utils.text import normalize_newlines from ..models import LogRecord from ..settings import EXTRA_DATA_INDENT, PAGINATOR_RANGE register = template.Library() @register.simple_tag def level_css_class(level): return { LogRecord.LEVEL.NOTSET: 'label-default', LogRecord.LEVEL.DEBUG: 'label-success', LogRecord.LEVEL.INFO: 'label-info', LogRecord.LEVEL.WARNING: 'label-warning', LogRecord.LEVEL.ERROR: 'label-primary', LogRecord.LEVEL.CRITICAL: 'label-danger' }[int(level)] @register.simple_tag def extra_data(record): return json.dumps(record.extra, indent=EXTRA_DATA_INDENT) @register.inclusion_tag('log4django/bootstrap/templatetags/pagination.html', takes_context=True) def pagination(context, page): if PAGINATOR_RANGE > page.paginator.num_pages: range_length = page.paginator.num_pages else: range_length = PAGINATOR_RANGE range_length -= 1 range_min = max(page.number - (range_length / 2), 1) range_max = min(page.number + (range_length / 2), page.paginator.num_pages) range_diff = range_max - range_min if range_diff < range_length: shift = range_length - range_diff if range_min - shift > 0: range_min -= shift else: range_max += shift page_range = range(range_min, range_max + 1) getvars = context['request'].GET.copy() getvars.pop('page', None) return dict( page=page, page_range=page_range, getvars=getvars ) @register.filter @stringfilter def remove_newlines(text): normalized_text = normalize_newlines(text) return mark_safe(normalized_text.replace('\n', ' ')) remove_newlines.is_safe = True
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- from openerp.tests import common KARMA = { 'ask': 5, 'ans': 10, 'com_own': 5, 'com_all': 10, 'com_conv_all': 50, 'upv': 5, 'dwv': 10, 'edit_own': 10, 'edit_all': 20, 'close_own': 10, 'close_all': 20, 'unlink_own': 10, 'unlink_all': 20, 'gen_que_new': 1, 'gen_que_upv': 5, 'gen_que_dwv': -10, 'gen_ans_upv': 10, 'gen_ans_dwv': -20, } class TestForumCommon(common.SavepointCase): @classmethod def setUpClass(cls): super(TestForumCommon, cls).setUpClass() Forum = cls.env['forum.forum'] Post = cls.env['forum.post'] # Test users TestUsersEnv = cls.env['res.users'].with_context({'no_reset_password': True}) group_employee_id = cls.env.ref('base.group_user').id group_portal_id = cls.env.ref('base.group_portal').id group_public_id = cls.env.ref('base.group_public').id cls.user_employee = TestUsersEnv.create({ 'name': 'Armande Employee', 'login': 'Armande', 'alias_name': 'armande', 'email': 'armande.employee@example.com', 'karma': 0, 'groups_id': [(6, 0, [group_employee_id])] }) cls.user_portal = TestUsersEnv.create({ 'name': 'Beatrice Portal', 'login': 'Beatrice', 'alias_name': 'beatrice', 'email': 'beatrice.employee@example.com', 'karma': 0, 'groups_id': [(6, 0, [group_portal_id])] }) cls.user_public = TestUsersEnv.create({ 'name': 'Cedric Public', 'login': 'Cedric', 'alias_name': 'cedric', 'email': 'cedric.employee@example.com', 'karma': 0, 'groups_id': [(6, 0, [group_public_id])] }) # Test forum cls.forum = Forum.create({ 'name': 'TestForum', 'karma_ask': KARMA['ask'], 'karma_answer': KARMA['ans'], 'karma_comment_own': KARMA['com_own'], 'karma_comment_all': KARMA['com_all'], 'karma_answer_accept_own': 9999, 'karma_answer_accept_all': 9999, 'karma_upvote': KARMA['upv'], 'karma_downvote': KARMA['dwv'], 'karma_edit_own': KARMA['edit_own'], 'karma_edit_all': KARMA['edit_all'], 'karma_close_own': KARMA['close_own'], 'karma_close_all': KARMA['close_all'], 'karma_unlink_own': KARMA['unlink_own'], 'karma_unlink_all': KARMA['unlink_all'], 
'karma_comment_convert_all': KARMA['com_conv_all'], 'karma_gen_question_new': KARMA['gen_que_new'], 'karma_gen_question_upvote': KARMA['gen_que_upv'], 'karma_gen_question_downvote': KARMA['gen_que_dwv'], 'karma_gen_answer_upvote': KARMA['gen_ans_upv'], 'karma_gen_answer_downvote': KARMA['gen_ans_dwv'], 'karma_gen_answer_accept': 9999, 'karma_gen_answer_accepted': 9999, }) cls.post = Post.create({ 'name': 'TestQuestion', 'content': 'I am not a bird.', 'forum_id': cls.forum.id, 'tag_ids': [(0, 0, {'name': 'Tag0', 'forum_id': cls.forum.id})] }) cls.answer = Post.create({ 'name': 'TestAnswer', 'content': 'I am an anteater.', 'forum_id': cls.forum.id, 'parent_id': cls.post.id, })
unknown
codeparrot/codeparrot-clean
// SPDX-License-Identifier: GPL-2.0 /* * device_cgroup.c - device cgroup subsystem * * Copyright 2007 IBM Corp */ #include <linux/bpf-cgroup.h> #include <linux/device_cgroup.h> #include <linux/cgroup.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/mutex.h> #ifdef CONFIG_CGROUP_DEVICE static DEFINE_MUTEX(devcgroup_mutex); enum devcg_behavior { DEVCG_DEFAULT_NONE, DEVCG_DEFAULT_ALLOW, DEVCG_DEFAULT_DENY, }; /* * exception list locking rules: * hold devcgroup_mutex for update/read. * hold rcu_read_lock() for read. */ struct dev_exception_item { u32 major, minor; short type; short access; struct list_head list; struct rcu_head rcu; }; struct dev_cgroup { struct cgroup_subsys_state css; struct list_head exceptions; enum devcg_behavior behavior; }; static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) { return s ? container_of(s, struct dev_cgroup, css) : NULL; } static inline struct dev_cgroup *task_devcgroup(struct task_struct *task) { return css_to_devcgroup(task_css(task, devices_cgrp_id)); } /* * called under devcgroup_mutex */ static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig) { struct dev_exception_item *ex, *tmp, *new; lockdep_assert_held(&devcgroup_mutex); list_for_each_entry(ex, orig, list) { new = kmemdup(ex, sizeof(*ex), GFP_KERNEL); if (!new) goto free_and_exit; list_add_tail(&new->list, dest); } return 0; free_and_exit: list_for_each_entry_safe(ex, tmp, dest, list) { list_del(&ex->list); kfree(ex); } return -ENOMEM; } static void dev_exceptions_move(struct list_head *dest, struct list_head *orig) { struct dev_exception_item *ex, *tmp; lockdep_assert_held(&devcgroup_mutex); list_for_each_entry_safe(ex, tmp, orig, list) { list_move_tail(&ex->list, dest); } } /* * called under devcgroup_mutex */ static int dev_exception_add(struct dev_cgroup *dev_cgroup, struct dev_exception_item 
*ex) { struct dev_exception_item *excopy, *walk; lockdep_assert_held(&devcgroup_mutex); excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL); if (!excopy) return -ENOMEM; list_for_each_entry(walk, &dev_cgroup->exceptions, list) { if (walk->type != ex->type) continue; if (walk->major != ex->major) continue; if (walk->minor != ex->minor) continue; walk->access |= ex->access; kfree(excopy); excopy = NULL; } if (excopy != NULL) list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions); return 0; } /* * called under devcgroup_mutex */ static void dev_exception_rm(struct dev_cgroup *dev_cgroup, struct dev_exception_item *ex) { struct dev_exception_item *walk, *tmp; lockdep_assert_held(&devcgroup_mutex); list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) { if (walk->type != ex->type) continue; if (walk->major != ex->major) continue; if (walk->minor != ex->minor) continue; walk->access &= ~ex->access; if (!walk->access) { list_del_rcu(&walk->list); kfree_rcu(walk, rcu); } } } static void __dev_exception_clean(struct dev_cgroup *dev_cgroup) { struct dev_exception_item *ex, *tmp; list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { list_del_rcu(&ex->list); kfree_rcu(ex, rcu); } } /** * dev_exception_clean - frees all entries of the exception list * @dev_cgroup: dev_cgroup with the exception list to be cleaned * * called under devcgroup_mutex */ static void dev_exception_clean(struct dev_cgroup *dev_cgroup) { lockdep_assert_held(&devcgroup_mutex); __dev_exception_clean(dev_cgroup); } static inline bool is_devcg_online(const struct dev_cgroup *devcg) { return (devcg->behavior != DEVCG_DEFAULT_NONE); } /** * devcgroup_online - initializes devcgroup's behavior and exceptions based on * parent's * @css: css getting online * returns 0 in case of success, error code otherwise */ static int devcgroup_online(struct cgroup_subsys_state *css) { struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); struct dev_cgroup *parent_dev_cgroup = 
css_to_devcgroup(css->parent); int ret = 0; mutex_lock(&devcgroup_mutex); if (parent_dev_cgroup == NULL) dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW; else { ret = dev_exceptions_copy(&dev_cgroup->exceptions, &parent_dev_cgroup->exceptions); if (!ret) dev_cgroup->behavior = parent_dev_cgroup->behavior; } mutex_unlock(&devcgroup_mutex); return ret; } static void devcgroup_offline(struct cgroup_subsys_state *css) { struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); mutex_lock(&devcgroup_mutex); dev_cgroup->behavior = DEVCG_DEFAULT_NONE; mutex_unlock(&devcgroup_mutex); } /* * called from kernel/cgroup/cgroup.c with cgroup_lock() held. */ static struct cgroup_subsys_state * devcgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct dev_cgroup *dev_cgroup; dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL); if (!dev_cgroup) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&dev_cgroup->exceptions); dev_cgroup->behavior = DEVCG_DEFAULT_NONE; return &dev_cgroup->css; } static void devcgroup_css_free(struct cgroup_subsys_state *css) { struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); __dev_exception_clean(dev_cgroup); kfree(dev_cgroup); } #define DEVCG_ALLOW 1 #define DEVCG_DENY 2 #define DEVCG_LIST 3 static void seq_putaccess(struct seq_file *m, short access) { if (access & DEVCG_ACC_READ) seq_putc(m, 'r'); if (access & DEVCG_ACC_WRITE) seq_putc(m, 'w'); if (access & DEVCG_ACC_MKNOD) seq_putc(m, 'm'); } static void seq_puttype(struct seq_file *m, short type) { if (type == DEVCG_DEV_ALL) seq_putc(m, 'a'); else if (type == DEVCG_DEV_CHAR) seq_putc(m, 'c'); else if (type == DEVCG_DEV_BLOCK) seq_putc(m, 'b'); else seq_putc(m, 'X'); } static void seq_putversion(struct seq_file *m, unsigned int version) { if (version == ~0) seq_putc(m, '*'); else seq_printf(m, "%u", version); } static int devcgroup_seq_show(struct seq_file *m, void *v) { struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m)); struct dev_exception_item *ex; rcu_read_lock(); /* * To preserve the 
compatibility: * - Only show the "all devices" when the default policy is to allow * - List the exceptions in case the default policy is to deny * This way, the file remains as a "whitelist of devices" */ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { seq_puts(m, "a *:* rwm\n"); } else { list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) { seq_puttype(m, ex->type); seq_putc(m, ' '); seq_putversion(m, ex->major); seq_putc(m, ':'); seq_putversion(m, ex->minor); seq_putc(m, ' '); seq_putaccess(m, ex->access); seq_putc(m, '\n'); } } rcu_read_unlock(); return 0; } /** * match_exception - iterates the exception list trying to find a complete match * @exceptions: list of exceptions * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR) * @major: device file major number, ~0 to match all * @minor: device file minor number, ~0 to match all * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD) * * It is considered a complete match if an exception is found that will * contain the entire range of provided parameters. 
* * Return: true in case it matches an exception completely */ static bool match_exception(struct list_head *exceptions, short type, u32 major, u32 minor, short access) { struct dev_exception_item *ex; list_for_each_entry_rcu(ex, exceptions, list) { if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK)) continue; if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR)) continue; if (ex->major != ~0 && ex->major != major) continue; if (ex->minor != ~0 && ex->minor != minor) continue; /* provided access cannot have more than the exception rule */ if (access & (~ex->access)) continue; return true; } return false; } /** * match_exception_partial - iterates the exception list trying to find a partial match * @exceptions: list of exceptions * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR) * @major: device file major number, ~0 to match all * @minor: device file minor number, ~0 to match all * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD) * * It is considered a partial match if an exception's range is found to * contain *any* of the devices specified by provided parameters. This is * used to make sure no extra access is being granted that is forbidden by * any of the exception list. 
* * Return: true in case the provided range mat matches an exception completely */ static bool match_exception_partial(struct list_head *exceptions, short type, u32 major, u32 minor, short access) { struct dev_exception_item *ex; list_for_each_entry_rcu(ex, exceptions, list, lockdep_is_held(&devcgroup_mutex)) { if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK)) continue; if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR)) continue; /* * We must be sure that both the exception and the provided * range aren't masking all devices */ if (ex->major != ~0 && major != ~0 && ex->major != major) continue; if (ex->minor != ~0 && minor != ~0 && ex->minor != minor) continue; /* * In order to make sure the provided range isn't matching * an exception, all its access bits shouldn't match the * exception's access bits */ if (!(access & ex->access)) continue; return true; } return false; } /** * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions * @dev_cgroup: dev cgroup to be tested against * @refex: new exception * @behavior: behavior of the exception's dev_cgroup * * This is used to make sure a child cgroup won't have more privileges * than its parent */ static bool verify_new_ex(struct dev_cgroup *dev_cgroup, struct dev_exception_item *refex, enum devcg_behavior behavior) { bool match = false; RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&devcgroup_mutex), "device_cgroup:verify_new_ex called without proper synchronization"); if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { if (behavior == DEVCG_DEFAULT_ALLOW) { /* * new exception in the child doesn't matter, only * adding extra restrictions */ return true; } else { /* * new exception in the child will add more devices * that can be accessed, so it can't match any of * parent's exceptions, even slightly */ match = match_exception_partial(&dev_cgroup->exceptions, refex->type, refex->major, refex->minor, refex->access); if (match) return false; return true; } 
} else { /* * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore * the new exception will add access to more devices and must * be contained completely in an parent's exception to be * allowed */ match = match_exception(&dev_cgroup->exceptions, refex->type, refex->major, refex->minor, refex->access); if (match) /* parent has an exception that matches the proposed */ return true; else return false; } return false; } /* * parent_has_perm: * when adding a new allow rule to a device exception list, the rule * must be allowed in the parent device */ static int parent_has_perm(struct dev_cgroup *childcg, struct dev_exception_item *ex) { struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent); if (!parent) return 1; return verify_new_ex(parent, ex, childcg->behavior); } /** * parent_allows_removal - verify if it's ok to remove an exception * @childcg: child cgroup from where the exception will be removed * @ex: exception being removed * * When removing an exception in cgroups with default ALLOW policy, it must * be checked if removing it will give the child cgroup more access than the * parent. * * Return: true if it's ok to remove exception, false otherwise */ static bool parent_allows_removal(struct dev_cgroup *childcg, struct dev_exception_item *ex) { struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent); if (!parent) return true; /* It's always allowed to remove access to devices */ if (childcg->behavior == DEVCG_DEFAULT_DENY) return true; /* * Make sure you're not removing part or a whole exception existing in * the parent cgroup */ return !match_exception_partial(&parent->exceptions, ex->type, ex->major, ex->minor, ex->access); } /** * may_allow_all - checks if it's possible to change the behavior to * allow based on parent's rules. 
* @parent: device cgroup's parent * returns: != 0 in case it's allowed, 0 otherwise */ static inline int may_allow_all(struct dev_cgroup *parent) { if (!parent) return 1; return parent->behavior == DEVCG_DEFAULT_ALLOW; } /** * revalidate_active_exceptions - walks through the active exception list and * revalidates the exceptions based on parent's * behavior and exceptions. The exceptions that * are no longer valid will be removed. * Called with devcgroup_mutex held. * @devcg: cgroup which exceptions will be checked * * This is one of the three key functions for hierarchy implementation. * This function is responsible for re-evaluating all the cgroup's active * exceptions due to a parent's exception change. * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details. */ static void revalidate_active_exceptions(struct dev_cgroup *devcg) { struct dev_exception_item *ex; struct list_head *this, *tmp; list_for_each_safe(this, tmp, &devcg->exceptions) { ex = container_of(this, struct dev_exception_item, list); if (!parent_has_perm(devcg, ex)) dev_exception_rm(devcg, ex); } } /** * propagate_exception - propagates a new exception to the children * @devcg_root: device cgroup that added a new exception * @ex: new exception to be propagated * * returns: 0 in case of success, != 0 in case of error */ static int propagate_exception(struct dev_cgroup *devcg_root, struct dev_exception_item *ex) { struct cgroup_subsys_state *pos; int rc = 0; rcu_read_lock(); css_for_each_descendant_pre(pos, &devcg_root->css) { struct dev_cgroup *devcg = css_to_devcgroup(pos); /* * Because devcgroup_mutex is held, no devcg will become * online or offline during the tree walk (see on/offline * methods), and online ones are safe to access outside RCU * read lock without bumping refcnt. 
*/ if (pos == &devcg_root->css || !is_devcg_online(devcg)) continue; rcu_read_unlock(); /* * in case both root's behavior and devcg is allow, a new * restriction means adding to the exception list */ if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW && devcg->behavior == DEVCG_DEFAULT_ALLOW) { rc = dev_exception_add(devcg, ex); if (rc) return rc; } else { /* * in the other possible cases: * root's behavior: allow, devcg's: deny * root's behavior: deny, devcg's: deny * the exception will be removed */ dev_exception_rm(devcg, ex); } revalidate_active_exceptions(devcg); rcu_read_lock(); } rcu_read_unlock(); return rc; } /* * Modify the exception list using allow/deny rules. * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD * so we can give a container CAP_MKNOD to let it create devices but not * modify the exception list. * It seems likely we'll want to add a CAP_CONTAINER capability to allow * us to also grant CAP_SYS_ADMIN to containers without giving away the * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN * * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting * new access is only allowed if you're in the top-level cgroup, or your * parent cgroup has the access you're asking for. 
*/ static int devcgroup_update_access(struct dev_cgroup *devcgroup, int filetype, char *buffer) { const char *b; char temp[12]; /* 11 + 1 characters needed for a u32 */ int count, rc = 0; struct dev_exception_item ex; struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent); struct dev_cgroup tmp_devcgrp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; memset(&ex, 0, sizeof(ex)); memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp)); b = buffer; switch (*b) { case 'a': switch (filetype) { case DEVCG_ALLOW: if (css_has_online_children(&devcgroup->css)) return -EINVAL; if (!may_allow_all(parent)) return -EPERM; if (!parent) { devcgroup->behavior = DEVCG_DEFAULT_ALLOW; dev_exception_clean(devcgroup); break; } INIT_LIST_HEAD(&tmp_devcgrp.exceptions); rc = dev_exceptions_copy(&tmp_devcgrp.exceptions, &devcgroup->exceptions); if (rc) return rc; dev_exception_clean(devcgroup); rc = dev_exceptions_copy(&devcgroup->exceptions, &parent->exceptions); if (rc) { dev_exceptions_move(&devcgroup->exceptions, &tmp_devcgrp.exceptions); return rc; } devcgroup->behavior = DEVCG_DEFAULT_ALLOW; dev_exception_clean(&tmp_devcgrp); break; case DEVCG_DENY: if (css_has_online_children(&devcgroup->css)) return -EINVAL; dev_exception_clean(devcgroup); devcgroup->behavior = DEVCG_DEFAULT_DENY; break; default: return -EINVAL; } return 0; case 'b': ex.type = DEVCG_DEV_BLOCK; break; case 'c': ex.type = DEVCG_DEV_CHAR; break; default: return -EINVAL; } b++; if (!isspace(*b)) return -EINVAL; b++; if (*b == '*') { ex.major = ~0; b++; } else if (isdigit(*b)) { memset(temp, 0, sizeof(temp)); for (count = 0; count < sizeof(temp) - 1; count++) { temp[count] = *b; b++; if (!isdigit(*b)) break; } rc = kstrtou32(temp, 10, &ex.major); if (rc) return -EINVAL; } else { return -EINVAL; } if (*b != ':') return -EINVAL; b++; /* read minor */ if (*b == '*') { ex.minor = ~0; b++; } else if (isdigit(*b)) { memset(temp, 0, sizeof(temp)); for (count = 0; count < sizeof(temp) - 1; count++) { temp[count] = *b; b++; if 
(!isdigit(*b)) break; } rc = kstrtou32(temp, 10, &ex.minor); if (rc) return -EINVAL; } else { return -EINVAL; } if (!isspace(*b)) return -EINVAL; for (b++, count = 0; count < 3; count++, b++) { switch (*b) { case 'r': ex.access |= DEVCG_ACC_READ; break; case 'w': ex.access |= DEVCG_ACC_WRITE; break; case 'm': ex.access |= DEVCG_ACC_MKNOD; break; case '\n': case '\0': count = 3; break; default: return -EINVAL; } } switch (filetype) { case DEVCG_ALLOW: /* * If the default policy is to allow by default, try to remove * an matching exception instead. And be silent about it: we * don't want to break compatibility */ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { /* Check if the parent allows removing it first */ if (!parent_allows_removal(devcgroup, &ex)) return -EPERM; dev_exception_rm(devcgroup, &ex); break; } if (!parent_has_perm(devcgroup, &ex)) return -EPERM; rc = dev_exception_add(devcgroup, &ex); break; case DEVCG_DENY: /* * If the default policy is to deny by default, try to remove * an matching exception instead. 
And be silent about it: we * don't want to break compatibility */ if (devcgroup->behavior == DEVCG_DEFAULT_DENY) dev_exception_rm(devcgroup, &ex); else rc = dev_exception_add(devcgroup, &ex); if (rc) break; /* we only propagate new restrictions */ rc = propagate_exception(devcgroup, &ex); break; default: rc = -EINVAL; } return rc; } static ssize_t devcgroup_access_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { int retval; mutex_lock(&devcgroup_mutex); retval = devcgroup_update_access(css_to_devcgroup(of_css(of)), of_cft(of)->private, strstrip(buf)); mutex_unlock(&devcgroup_mutex); return retval ?: nbytes; } static struct cftype dev_cgroup_files[] = { { .name = "allow", .write = devcgroup_access_write, .private = DEVCG_ALLOW, }, { .name = "deny", .write = devcgroup_access_write, .private = DEVCG_DENY, }, { .name = "list", .seq_show = devcgroup_seq_show, .private = DEVCG_LIST, }, { } /* terminate */ }; struct cgroup_subsys devices_cgrp_subsys = { .css_alloc = devcgroup_css_alloc, .css_free = devcgroup_css_free, .css_online = devcgroup_online, .css_offline = devcgroup_offline, .legacy_cftypes = dev_cgroup_files, }; /** * devcgroup_legacy_check_permission - checks if an inode operation is permitted * @type: device type * @major: device major number * @minor: device minor number * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD * * returns 0 on success, -EPERM case the operation is not permitted */ static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor, short access) { struct dev_cgroup *dev_cgroup; bool rc; rcu_read_lock(); dev_cgroup = task_devcgroup(current); if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) /* Can't match any of the exceptions, even partially */ rc = !match_exception_partial(&dev_cgroup->exceptions, type, major, minor, access); else /* Need to match completely one exception to be allowed */ rc = match_exception(&dev_cgroup->exceptions, type, major, minor, access); 
rcu_read_unlock(); if (!rc) return -EPERM; return 0; } #endif /* CONFIG_CGROUP_DEVICE */ #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) int devcgroup_check_permission(short type, u32 major, u32 minor, short access) { int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access); if (rc) return rc; #ifdef CONFIG_CGROUP_DEVICE return devcgroup_legacy_check_permission(type, major, minor, access); #else /* CONFIG_CGROUP_DEVICE */ return 0; #endif /* CONFIG_CGROUP_DEVICE */ } EXPORT_SYMBOL(devcgroup_check_permission); #endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
c
github
https://github.com/torvalds/linux
security/device_cgroup.c
import {getNull} from 'shared-runtime';

// Compiler test fixture: exercises reactive-scope alignment when a value is
// produced by an IIFE whose return is a logical (`??`) expression and is then
// mutated AFTER the IIFE completes.
// NOTE(review): the exact syntactic shape here is what the fixture tests —
// do not simplify the IIFE or the `??` away.
function Component(props) {
  // getNull() comes from the shared test runtime; the `??` fallback yields a
  // fresh empty array when it returns null/undefined.
  const items = (() => {
    return getNull() ?? [];
  })();
  // Mutation outside the IIFE — the compiler must keep `items` in a scope
  // that permits this later modification.
  items.push(props.a);
  return items;
}

export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [{a: {}}],
};
typescript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/align-scopes-iife-return-modified-later-logical.ts
"""The tests for the Logger component.""" from collections import namedtuple import logging import unittest from homeassistant.setup import setup_component from homeassistant.components import logger from tests.common import get_test_home_assistant RECORD = namedtuple('record', ('name', 'levelno')) NO_LOGS_CONFIG = {'logger': {'default': 'info'}} TEST_CONFIG = { 'logger': { 'default': 'warning', 'logs': {'test': 'info'} } } class TestUpdater(unittest.TestCase): """Test logger component.""" def setUp(self): """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() self.log_filter = None def tearDown(self): """Stop everything that was started.""" del logging.root.handlers[-1] self.hass.stop() def setup_logger(self, config): """Setup logger and save log filter.""" setup_component(self.hass, logger.DOMAIN, config) self.log_filter = logging.root.handlers[-1].filters[0] def assert_logged(self, name, level): """Assert that a certain record was logged.""" self.assertTrue(self.log_filter.filter(RECORD(name, level))) def assert_not_logged(self, name, level): """Assert that a certain record was not logged.""" self.assertFalse(self.log_filter.filter(RECORD(name, level))) def test_logger_setup(self): """Use logger to create a logging filter.""" self.setup_logger(TEST_CONFIG) self.assertTrue(len(logging.root.handlers) > 0) handler = logging.root.handlers[-1] self.assertEqual(len(handler.filters), 1) log_filter = handler.filters[0].logfilter self.assertEqual(log_filter['default'], logging.WARNING) self.assertEqual(log_filter['logs']['test'], logging.INFO) def test_logger_test_filters(self): """Test resulting filter operation.""" self.setup_logger(TEST_CONFIG) # Blocked default record self.assert_not_logged('asdf', logging.DEBUG) # Allowed default record self.assert_logged('asdf', logging.WARNING) # Blocked named record self.assert_not_logged('test', logging.DEBUG) # Allowed named record self.assert_logged('test', logging.INFO) def 
test_set_filter_empty_config(self): """Test change log level from empty configuration.""" self.setup_logger(NO_LOGS_CONFIG) self.assert_not_logged('test', logging.DEBUG) self.hass.services.call( logger.DOMAIN, 'set_level', {'test': 'debug'}) self.hass.block_till_done() self.assert_logged('test', logging.DEBUG) def test_set_filter(self): """Test change log level of existing filter.""" self.setup_logger(TEST_CONFIG) self.assert_not_logged('asdf', logging.DEBUG) self.assert_logged('dummy', logging.WARNING) self.hass.services.call(logger.DOMAIN, 'set_level', {'asdf': 'debug', 'dummy': 'info'}) self.hass.block_till_done() self.assert_logged('asdf', logging.DEBUG) self.assert_logged('dummy', logging.WARNING)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp.osv import osv from openerp.tools.translate import _ from openerp.exceptions import UserError class analytic_plan_create_model(osv.osv_memory): _name = "analytic.plan.create.model" _description = "analytic.plan.create.model" def activate(self, cr, uid, ids, context=None): plan_obj = self.pool.get('account.analytic.plan.instance') mod_obj = self.pool.get('ir.model.data') anlytic_plan_obj = self.pool.get('account.analytic.plan') if context is None: context = {} if 'active_id' in context and context['active_id']: plan = plan_obj.browse(cr, uid, context['active_id'], context=context) if (not plan.name) or (not plan.code): raise UserError(_('Please put a name and a code before saving the model.')) pids = anlytic_plan_obj.search(cr, uid, [], context=context) if not pids: raise UserError(_('There is no analytic plan defined.')) plan_obj.write(cr, uid, [context['active_id']], {'plan_id':pids[0]}, context=context) model_data_ids = mod_obj.search(cr, uid, [('model', '=', 'ir.ui.view'),('name', '=', 'view_analytic_plan_create_model')], context=context) resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id'] return { 'name': _('Distribution Model Saved'), 'view_type': 'form', 'view_mode': 'tree,form', 'res_model': 'analytic.plan.create.model', 'views': [(resource_id,'form')], 'type': 'ir.actions.act_window', 'target': 'new', } else: return {'type': 'ir.actions.act_window_close'}
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.test.context.filter.annotation; import java.lang.annotation.Annotation; import java.util.Collections; import java.util.HashSet; import java.util.Locale; import java.util.Set; import org.springframework.context.annotation.ComponentScan.Filter; import org.springframework.core.ResolvableType; import org.springframework.core.annotation.MergedAnnotation; import org.springframework.core.annotation.MergedAnnotations; import org.springframework.core.annotation.MergedAnnotations.SearchStrategy; import org.springframework.util.Assert; /** * {@link AnnotationCustomizableTypeExcludeFilter} that can be used to any test annotation * that uses the standard {@code includeFilters}, {@code excludeFilters} and * {@code useDefaultFilters} attributes. 
* * @param <A> the annotation type * @author Phillip Webb * @since 4.0.0 */ public abstract class StandardAnnotationCustomizableTypeExcludeFilter<A extends Annotation> extends AnnotationCustomizableTypeExcludeFilter { private static final Filter[] NO_FILTERS = {}; private static final String[] FILTER_TYPE_ATTRIBUTES; static { FilterType[] filterValues = FilterType.values(); FILTER_TYPE_ATTRIBUTES = new String[filterValues.length]; for (int i = 0; i < filterValues.length; i++) { FILTER_TYPE_ATTRIBUTES[i] = filterValues[i].name().toLowerCase(Locale.ROOT) + "Filters"; } } private final MergedAnnotation<A> annotation; protected StandardAnnotationCustomizableTypeExcludeFilter(Class<?> testClass) { this.annotation = MergedAnnotations.from(testClass, SearchStrategy.INHERITED_ANNOTATIONS) .get(getAnnotationType()); } protected final MergedAnnotation<A> getAnnotation() { return this.annotation; } @Override protected boolean hasAnnotation() { return this.annotation.isPresent(); } @Override protected Filter[] getFilters(FilterType type) { return this.annotation.getValue(FILTER_TYPE_ATTRIBUTES[type.ordinal()], Filter[].class).orElse(NO_FILTERS); } @Override protected boolean isUseDefaultFilters() { return this.annotation.getValue("useDefaultFilters", Boolean.class).orElse(false); } @Override protected final Set<Class<?>> getDefaultIncludes() { Set<Class<?>> defaultIncludes = new HashSet<>(); defaultIncludes.addAll(getKnownIncludes()); defaultIncludes.addAll(TypeIncludes.load(this.annotation.getType(), getClass().getClassLoader()).getIncludes()); return defaultIncludes; } protected Set<Class<?>> getKnownIncludes() { return Collections.emptySet(); } @Override protected Set<Class<?>> getComponentIncludes() { return Collections.emptySet(); } @SuppressWarnings("unchecked") protected Class<A> getAnnotationType() { ResolvableType type = ResolvableType.forClass(StandardAnnotationCustomizableTypeExcludeFilter.class, getClass()); Class<A> generic = (Class<A>) type.resolveGeneric(); 
Assert.state(generic != null, "'generic' must not be null"); return generic; } }
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-test/src/main/java/org/springframework/boot/test/context/filter/annotation/StandardAnnotationCustomizableTypeExcludeFilter.java
# Kubernetes (K8s) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/569/badge)](https://bestpractices.coreinfrastructure.org/projects/569) [![Go Report Card](https://goreportcard.com/badge/github.com/kubernetes/kubernetes)](https://goreportcard.com/report/github.com/kubernetes/kubernetes) ![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/kubernetes/kubernetes?sort=semver) <img src="https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png" width="100"> ---- Kubernetes, also known as K8s, is an open source system for managing [containerized applications] across multiple hosts. It provides basic mechanisms for the deployment, maintenance, and scaling of applications. Kubernetes builds upon a decade and a half of experience at Google running production workloads at scale using a system called [Borg], combined with best-of-breed ideas and practices from the community. Kubernetes is hosted by the Cloud Native Computing Foundation ([CNCF]). If your company wants to help shape the evolution of technologies that are container-packaged, dynamically scheduled, and microservices-oriented, consider joining the CNCF. For details about who's involved and how Kubernetes plays a role, read the CNCF [announcement]. ---- ## To start using K8s See our documentation on [kubernetes.io]. Take a free course on [Scalable Microservices with Kubernetes]. To use Kubernetes code as a library in other applications, see the [list of published components](https://git.k8s.io/kubernetes/staging/README.md). Use of the `k8s.io/kubernetes` module or `k8s.io/kubernetes/...` packages as libraries is not supported. ## To start developing K8s The [community repository] hosts all information about building Kubernetes from source, how to contribute code and documentation, who to contact about what, etc. If you want to build Kubernetes right away there are two options: ##### You have a working [Go environment]. 
``` git clone https://github.com/kubernetes/kubernetes cd kubernetes make ``` ##### You have a working [Docker environment]. ``` git clone https://github.com/kubernetes/kubernetes cd kubernetes make quick-release ``` For the full story, head over to the [developer's documentation]. ## Support If you need support, start with the [troubleshooting guide], and work your way through the process that we've outlined. That said, if you have questions, reach out to us [one way or another][communication]. [announcement]: https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container [Borg]: https://research.google.com/pubs/pub43438.html?authuser=1 [CNCF]: https://www.cncf.io/about [communication]: https://git.k8s.io/community/communication [community repository]: https://git.k8s.io/community [containerized applications]: https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/ [developer's documentation]: https://git.k8s.io/community/contributors/devel#readme [Docker environment]: https://docs.docker.com/engine [Go environment]: https://go.dev/doc/install [kubernetes.io]: https://kubernetes.io [Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615 [troubleshooting guide]: https://kubernetes.io/docs/tasks/debug/ ## Community Meetings The [Calendar](https://www.kubernetes.dev/resources/calendar/) has the list of all the meetings in the Kubernetes community in a single location. ## Adopters The [User Case Studies](https://kubernetes.io/case-studies/) website has real-world use cases of organizations across industries that are deploying/migrating to Kubernetes. ## Governance Kubernetes project is governed by a framework of principles, values, policies and processes to help our community and constituents towards our shared goals. 
The [Kubernetes Community](https://github.com/kubernetes/community/blob/master/governance.md) is the launching point for learning about how we organize ourselves. The [Kubernetes Steering community repo](https://github.com/kubernetes/steering) is used by the Kubernetes Steering Committee, which oversees governance of the Kubernetes project. ## Roadmap The [Kubernetes Enhancements repo](https://github.com/kubernetes/enhancements) provides information about Kubernetes releases, as well as feature tracking and backlogs.
unknown
github
https://github.com/kubernetes/kubernetes
README.md
<?php

/*
 * This file is part of the Symfony package.
 *
 * (c) Fabien Potencier <fabien@symfony.com>
 *
 * For the full copyright and license information, please view the LICENSE
 * file that was distributed with this source code.
 */

namespace Symfony\Bridge\PhpUnit\Tests\Fixtures;

// Intentionally empty test fixture: its only purpose is to exist as a real,
// autoloadable enum so tests can distinguish it from undefined/mocked enums.
enum ExistingEnumReal
{
}
php
github
https://github.com/symfony/symfony
src/Symfony/Bridge/PhpUnit/Tests/Fixtures/ExistingEnumReal.php
{ "openFiles": ["src/app/app.ts"], "title": "Control flow in components - @if", "type": "editor" }
json
github
https://github.com/angular/angular
adev/src/content/tutorials/learn-angular/steps/4-control-flow-if/config.json
# frozen_string_literal: true # :markup: markdown require "active_support/core_ext/hash/keys" require "action_dispatch/middleware/session/abstract_store" require "rack/session/cookie" module ActionDispatch module Session # # Action Dispatch Session CookieStore # # This cookie-based session store is the Rails default. It is dramatically # faster than the alternatives. # # Sessions typically contain at most a user ID and flash message; both fit # within the 4096 bytes cookie size limit. A `CookieOverflow` exception is # raised if you attempt to store more than 4096 bytes of data. # # The cookie jar used for storage is automatically configured to be the best # possible option given your application's configuration. # # Your cookies will be encrypted using your application's `secret_key_base`. # This goes a step further than signed cookies in that encrypted cookies cannot # be altered or read by users. This is the default starting in Rails 4. # # Configure your session store in an initializer: # # Rails.application.config.session_store :cookie_store, key: '_your_app_session' # # In the development and test environments your application's `secret_key_base` # is generated by Rails and stored in a temporary file in # `tmp/local_secret.txt`. In all other environments, it is stored encrypted in # the `config/credentials.yml.enc` file. # # If your application was not updated to Rails 5.2 defaults, the # `secret_key_base` will be found in the old `config/secrets.yml` file. # # Note that changing your `secret_key_base` will invalidate all existing # session. Additionally, you should take care to make sure you are not relying # on the ability to decode signed cookies generated by your app in external # applications or JavaScript before changing it. # # Because CookieStore extends `Rack::Session::Abstract::Persisted`, many of the # options described there can be used to customize the session cookie that is # generated. 
For example: # # Rails.application.config.session_store :cookie_store, expire_after: 14.days # # would set the session cookie to expire automatically 14 days after creation. # Other useful options include `:key`, `:secure`, `:httponly`, and `:same_site`. class CookieStore < AbstractSecureStore class SessionId < ActiveSupport::Delegation::DelegateClass(Rack::Session::SessionId) attr_reader :cookie_value def initialize(session_id, cookie_value = {}) super(session_id) @cookie_value = cookie_value end end DEFAULT_SAME_SITE = proc { |request| request.cookies_same_site_protection } # :nodoc: def initialize(app, options = {}) options[:cookie_only] = true options[:same_site] = DEFAULT_SAME_SITE if !options.key?(:same_site) super end def delete_session(req, session_id, options) new_sid = generate_sid unless options[:drop] # Reset hash and Assign the new session id req.set_header("action_dispatch.request.unsigned_session_cookie", new_sid ? { "session_id" => new_sid.public_id } : {}) new_sid end def load_session(req) stale_session_check! do data = unpacked_cookie_data(req) data = persistent_session_id!(data) [Rack::Session::SessionId.new(data["session_id"]), data] end end private def extract_session_id(req) stale_session_check! do sid = unpacked_cookie_data(req)["session_id"] sid && Rack::Session::SessionId.new(sid) end end def unpacked_cookie_data(req) req.fetch_header("action_dispatch.request.unsigned_session_cookie") do |k| v = stale_session_check! do if data = get_cookie(req) data.stringify_keys! 
end data || {} end req.set_header k, v end end def persistent_session_id!(data, sid = nil) data ||= {} data["session_id"] ||= sid || generate_sid.public_id data end def write_session(req, sid, session_data, options) session_data["session_id"] = sid.public_id SessionId.new(sid, session_data) end def set_cookie(request, session_id, cookie) cookie_jar(request)[@key] = cookie end def get_cookie(req) cookie_jar(req)[@key] end def cookie_jar(request) request.cookie_jar.signed_or_encrypted end end end end
ruby
github
https://github.com/rails/rails
actionpack/lib/action_dispatch/middleware/session/cookie_store.rb
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.beans.factory.aot; import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Map; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.UnsatisfiedDependencyException; import org.springframework.beans.factory.config.DependencyDescriptor; import org.springframework.beans.factory.support.DefaultListableBeanFactory; import org.springframework.beans.factory.support.RegisteredBean; import org.springframework.beans.factory.support.RootBeanDefinition; import org.springframework.core.env.Environment; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.assertj.core.api.Assertions.assertThatIllegalArgumentException; import static org.mockito.Mockito.mock; /** * Tests for {@link AutowiredMethodArgumentsResolver}. 
* * @author Phillip Webb * @author Stephane Nicoll */ class AutowiredMethodArgumentsResolverTests { private final DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory(); @Test void forMethodWhenMethodNameIsEmptyThrowsException() { String message = "'methodName' must not be empty"; assertThatIllegalArgumentException() .isThrownBy(() -> AutowiredMethodArgumentsResolver.forMethod(null)) .withMessage(message); assertThatIllegalArgumentException() .isThrownBy(() -> AutowiredMethodArgumentsResolver.forMethod("")) .withMessage(message); assertThatIllegalArgumentException() .isThrownBy( () -> AutowiredMethodArgumentsResolver.forRequiredMethod(null)) .withMessage(message); assertThatIllegalArgumentException() .isThrownBy(() -> AutowiredMethodArgumentsResolver.forRequiredMethod(" ")) .withMessage(message); } @Test void resolveWhenRegisteredBeanIsNullThrowsException() { assertThatIllegalArgumentException() .isThrownBy(() -> AutowiredMethodArgumentsResolver .forMethod("injectString", String.class).resolve(null)) .withMessage("'registeredBean' must not be null"); } @Test void resolveWhenMethodIsMissingThrowsException() { RegisteredBean registeredBean = registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver.forMethod("missing", InputStream.class); assertThatIllegalArgumentException() .isThrownBy(() -> resolver.resolve(registeredBean)) .withMessage("Method 'missing' with parameter types [java.io.InputStream] declared on %s could not be found.", TestBean.class.getName()); } @Test void resolveRequiredWithSingleDependencyReturnsValue() { this.beanFactory.registerSingleton("test", "testValue"); RegisteredBean registeredBean = registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver .forRequiredMethod("injectString", String.class); AutowiredArguments resolved = resolver.resolve(registeredBean); assertThat(resolved.toArray()).containsExactly("testValue"); 
} @Test void resolveRequiredWhenNoSuchBeanThrowsUnsatisfiedDependencyException() { RegisteredBean registeredBean = registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver .forRequiredMethod("injectString", String.class); assertThatExceptionOfType(UnsatisfiedDependencyException.class) .isThrownBy(() -> resolver.resolve(registeredBean)).satisfies(ex -> { assertThat(ex.getBeanName()).isEqualTo("testBean"); assertThat(ex.getInjectionPoint()).isNotNull(); assertThat(ex.getInjectionPoint().getMember().getName()) .isEqualTo("injectString"); }); } @Test void resolveNonRequiredWhenNoSuchBeanReturnsNull() { RegisteredBean registeredBean = registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver .forMethod("injectString", String.class); assertThat(resolver.resolve(registeredBean)).isNull(); } @Test void resolveRequiredWithMultipleDependenciesReturnsValue() { Environment environment = mock(); this.beanFactory.registerSingleton("test", "testValue"); this.beanFactory.registerSingleton("environment", environment); RegisteredBean registeredBean = registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver .forRequiredMethod("injectStringAndEnvironment", String.class, Environment.class); AutowiredArguments resolved = resolver.resolve(registeredBean); assertThat(resolved.toArray()).containsExactly("testValue", environment); } @Test void resolveAndInvokeWhenInstanceIsNullThrowsException() { RegisteredBean registeredBean = registerTestBean(this.beanFactory); assertThatIllegalArgumentException() .isThrownBy(() -> AutowiredMethodArgumentsResolver .forMethod("injectString", String.class) .resolveAndInvoke(registeredBean, null)) .withMessage("'instance' must not be null"); } @Test void resolveAndInvokeInvokesMethod() { this.beanFactory.registerSingleton("test", "testValue"); RegisteredBean registeredBean = 
registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver .forRequiredMethod("injectString", String.class); TestBean instance = new TestBean(); resolver.resolveAndInvoke(registeredBean, instance); assertThat(instance.getString()).isEqualTo("testValue"); } @Test void resolveWithActionWhenActionIsNullThrowsException() { RegisteredBean registeredBean = registerTestBean(this.beanFactory); assertThatIllegalArgumentException() .isThrownBy(() -> AutowiredMethodArgumentsResolver .forMethod("injectString", String.class) .resolve(registeredBean, null)) .withMessage("'action' must not be null"); } @Test void resolveWithActionCallsAction() { this.beanFactory.registerSingleton("test", "testValue"); RegisteredBean registeredBean = registerTestBean(this.beanFactory); List<Object> result = new ArrayList<>(); AutowiredMethodArgumentsResolver.forMethod("injectString", String.class) .resolve(registeredBean, result::add); assertThat(result).hasSize(1); assertThat(((AutowiredArguments) result.get(0)).toArray()) .containsExactly("testValue"); } @Test void resolveWhenUsingShortcutsInjectsDirectly() { DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory() { @Override protected Map<String, Object> findAutowireCandidates(String beanName, Class<?> requiredType, DependencyDescriptor descriptor) { throw new AssertionError("Should be shortcut"); } }; beanFactory.registerSingleton("test", "testValue"); RegisteredBean registeredBean = registerTestBean(beanFactory); AutowiredMethodArgumentsResolver resolver = AutowiredMethodArgumentsResolver .forRequiredMethod("injectString", String.class); assertThatExceptionOfType(AssertionError.class) .isThrownBy(() -> resolver.resolve(registeredBean)); assertThat(resolver.withShortcut("test").resolve(registeredBean).getObject(0)) .isEqualTo("testValue"); } @Test void resolveRegistersDependantBeans() { this.beanFactory.registerSingleton("test", "testValue"); RegisteredBean registeredBean 
= registerTestBean(this.beanFactory); AutowiredMethodArgumentsResolver.forMethod("injectString", String.class) .resolve(registeredBean); assertThat(this.beanFactory.getDependentBeans("test")) .containsExactly("testBean"); } private RegisteredBean registerTestBean(DefaultListableBeanFactory beanFactory) { beanFactory.registerBeanDefinition("testBean", new RootBeanDefinition(TestBean.class)); return RegisteredBean.of(beanFactory, "testBean"); } @SuppressWarnings("unused") static class TestBean { private String string; void injectString(String string) { this.string = string; } void injectStringAndEnvironment(String string, Environment environment) { } String getString() { return this.string; } } }
java
github
https://github.com/spring-projects/spring-framework
spring-beans/src/test/java/org/springframework/beans/factory/aot/AutowiredMethodArgumentsResolverTests.java
/* * contrib/tablefunc/tablefunc.c * * * tablefunc * * Sample to demonstrate C functions which return setof scalar * and setof composite. * Joe Conway <mail@joeconway.com> * And contributors: * Nabil Sayegh <postgresql@e-trolley.de> * * Copyright (c) 2002-2026, PostgreSQL Global Development Group * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, and without a written agreement * is hereby granted, provided that the above copyright notice and this * paragraph and the following two paragraphs appear in all copies. * * IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS * DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
* */ #include "postgres.h" #include <math.h> #include "access/htup_details.h" #include "catalog/pg_type.h" #include "common/pg_prng.h" #include "executor/spi.h" #include "fmgr.h" #include "funcapi.h" #include "lib/stringinfo.h" #include "miscadmin.h" #include "utils/builtins.h" PG_MODULE_MAGIC_EXT( .name = "tablefunc", .version = PG_VERSION ); static HTAB *load_categories_hash(char *cats_sql, MemoryContext per_query_ctx); static Tuplestorestate *get_crosstab_tuplestore(char *sql, HTAB *crosstab_hash, TupleDesc tupdesc, bool randomAccess); static void validateConnectbyTupleDesc(TupleDesc td, bool show_branch, bool show_serial); static void compatCrosstabTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc); static void compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc); static void get_normal_pair(float8 *x1, float8 *x2); static Tuplestorestate *connectby(char *relname, char *key_fld, char *parent_key_fld, char *orderby_fld, char *branch_delim, char *start_with, int max_depth, bool show_branch, bool show_serial, MemoryContext per_query_ctx, bool randomAccess, AttInMetadata *attinmeta); static void build_tuplestore_recursively(char *key_fld, char *parent_key_fld, char *relname, char *orderby_fld, char *branch_delim, char *start_with, char *branch, int level, int *serial, int max_depth, bool show_branch, bool show_serial, MemoryContext per_query_ctx, AttInMetadata *attinmeta, Tuplestorestate *tupstore); typedef struct { float8 mean; /* mean of the distribution */ float8 stddev; /* stddev of the distribution */ float8 carry_val; /* hold second generated value */ bool use_carry; /* use second generated value */ } normal_rand_fctx; #define xpfree(var_) \ do { \ if (var_ != NULL) \ { \ pfree(var_); \ var_ = NULL; \ } \ } while (0) #define xpstrdup(tgtvar_, srcvar_) \ do { \ if (srcvar_) \ tgtvar_ = pstrdup(srcvar_); \ else \ tgtvar_ = NULL; \ } while (0) #define xstreq(tgtvar_, srcvar_) \ (((tgtvar_ == NULL) && (srcvar_ == NULL)) || \ ((tgtvar_ != 
NULL) && (srcvar_ != NULL) && (strcmp(tgtvar_, srcvar_) == 0))) /* sign, 10 digits, '\0' */ #define INT32_STRLEN 12 /* stored info for a crosstab category */ typedef struct crosstab_cat_desc { char *catname; /* full category name */ uint64 attidx; /* zero based */ } crosstab_cat_desc; #define MAX_CATNAME_LEN NAMEDATALEN #define INIT_CATS 64 #define crosstab_HashTableLookup(HASHTAB, CATNAME, CATDESC) \ do { \ crosstab_HashEnt *hentry; char key[MAX_CATNAME_LEN]; \ \ MemSet(key, 0, MAX_CATNAME_LEN); \ snprintf(key, MAX_CATNAME_LEN - 1, "%s", CATNAME); \ hentry = (crosstab_HashEnt*) hash_search(HASHTAB, \ key, HASH_FIND, NULL); \ if (hentry) \ CATDESC = hentry->catdesc; \ else \ CATDESC = NULL; \ } while(0) #define crosstab_HashTableInsert(HASHTAB, CATDESC) \ do { \ crosstab_HashEnt *hentry; bool found; char key[MAX_CATNAME_LEN]; \ \ MemSet(key, 0, MAX_CATNAME_LEN); \ snprintf(key, MAX_CATNAME_LEN - 1, "%s", CATDESC->catname); \ hentry = (crosstab_HashEnt*) hash_search(HASHTAB, \ key, HASH_ENTER, &found); \ if (found) \ ereport(ERROR, \ (errcode(ERRCODE_DUPLICATE_OBJECT), \ errmsg("duplicate category name"))); \ hentry->catdesc = CATDESC; \ } while(0) /* hash table */ typedef struct crosstab_hashent { char internal_catname[MAX_CATNAME_LEN]; crosstab_cat_desc *catdesc; } crosstab_HashEnt; /* * normal_rand - return requested number of random values * with a Gaussian (Normal) distribution. 
* * inputs are int numvals, float8 mean, and float8 stddev * returns setof float8 */ PG_FUNCTION_INFO_V1(normal_rand); Datum normal_rand(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; uint64 call_cntr; uint64 max_calls; normal_rand_fctx *fctx; float8 mean; float8 stddev; float8 carry_val; bool use_carry; MemoryContext oldcontext; /* stuff done only on the first call of the function */ if (SRF_IS_FIRSTCALL()) { int32 num_tuples; /* create a function context for cross-call persistence */ funcctx = SRF_FIRSTCALL_INIT(); /* * switch to memory context appropriate for multiple function calls */ oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); /* total number of tuples to be returned */ num_tuples = PG_GETARG_INT32(0); if (num_tuples < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("number of rows cannot be negative"))); funcctx->max_calls = num_tuples; /* allocate memory for user context */ fctx = palloc_object(normal_rand_fctx); /* * Use fctx to keep track of upper and lower bounds from call to call. * It will also be used to carry over the spare value we get from the * Box-Muller algorithm so that we only actually calculate a new value * every other call. 
*/ fctx->mean = PG_GETARG_FLOAT8(1); fctx->stddev = PG_GETARG_FLOAT8(2); fctx->carry_val = 0; fctx->use_carry = false; funcctx->user_fctx = fctx; MemoryContextSwitchTo(oldcontext); } /* stuff done on every call of the function */ funcctx = SRF_PERCALL_SETUP(); call_cntr = funcctx->call_cntr; max_calls = funcctx->max_calls; fctx = funcctx->user_fctx; mean = fctx->mean; stddev = fctx->stddev; carry_val = fctx->carry_val; use_carry = fctx->use_carry; if (call_cntr < max_calls) /* do when there is more left to send */ { float8 result; if (use_carry) { /* * reset use_carry and use second value obtained on last pass */ fctx->use_carry = false; result = carry_val; } else { float8 normval_1; float8 normval_2; /* Get the next two normal values */ get_normal_pair(&normval_1, &normval_2); /* use the first */ result = mean + (stddev * normval_1); /* and save the second */ fctx->carry_val = mean + (stddev * normval_2); fctx->use_carry = true; } /* send the result */ SRF_RETURN_NEXT(funcctx, Float8GetDatum(result)); } else /* do when there is no more left */ SRF_RETURN_DONE(funcctx); } /* * get_normal_pair() * Assigns normally distributed (Gaussian) values to a pair of provided * parameters, with mean 0, standard deviation 1. * * This routine implements Algorithm P (Polar method for normal deviates) * from Knuth's _The_Art_of_Computer_Programming_, Volume 2, 3rd ed., pages * 122-126. Knuth cites his source as "The polar method", G. E. P. Box, M. E. * Muller, and G. Marsaglia, _Annals_Math,_Stat._ 29 (1958), 610-611. 
* */ static void get_normal_pair(float8 *x1, float8 *x2) { float8 u1, u2, v1, v2, s; do { u1 = pg_prng_double(&pg_global_prng_state); u2 = pg_prng_double(&pg_global_prng_state); v1 = (2.0 * u1) - 1.0; v2 = (2.0 * u2) - 1.0; s = v1 * v1 + v2 * v2; } while (s >= 1.0); if (s == 0) { *x1 = 0; *x2 = 0; } else { s = sqrt((-2.0 * log(s)) / s); *x1 = v1 * s; *x2 = v2 * s; } } /* * crosstab - create a crosstab of rowids and values columns from a * SQL statement returning one rowid column, one category column, * and one value column. * * e.g. given sql which produces: * * rowid cat value * ------+-------+------- * row1 cat1 val1 * row1 cat2 val2 * row1 cat3 val3 * row1 cat4 val4 * row2 cat1 val5 * row2 cat2 val6 * row2 cat3 val7 * row2 cat4 val8 * * crosstab returns: * <===== values columns =====> * rowid cat1 cat2 cat3 cat4 * ------+-------+-------+-------+------- * row1 val1 val2 val3 val4 * row2 val5 val6 val7 val8 * * NOTES: * 1. SQL result must be ordered by 1,2. * 2. The number of values columns depends on the tuple description * of the function's declared return type. The return type's columns * must match the datatypes of the SQL query's result. The datatype * of the category column can be anything, however. * 3. Missing values (i.e. not enough adjacent rows of same rowid to * fill the number of result values columns) are filled in with nulls. * 4. Extra values (i.e. too many adjacent rows of same rowid to fill * the number of result values columns) are skipped. * 5. Rows with all nulls in the values columns are skipped. 
 */
PG_FUNCTION_INFO_V1(crosstab);
Datum
crosstab(PG_FUNCTION_ARGS)
{
    char       *sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    Tuplestorestate *tupstore;
    TupleDesc   tupdesc;
    uint64      call_cntr;
    uint64      max_calls;
    AttInMetadata *attinmeta;
    SPITupleTable *spi_tuptable;
    TupleDesc   spi_tupdesc;
    bool        firstpass;
    char       *lastrowid;
    int         i;
    int         num_categories;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    int         ret;
    uint64      proc;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;

    /* Connect to SPI manager */
    SPI_connect();

    /* Retrieve the desired rows */
    ret = SPI_execute(sql, true, 0);
    proc = SPI_processed;

    /* If no qualifying tuples, fall out early */
    if (ret != SPI_OK_SELECT || proc == 0)
    {
        SPI_finish();
        rsinfo->isDone = ExprEndResult;
        PG_RETURN_NULL();
    }

    spi_tuptable = SPI_tuptable;
    spi_tupdesc = spi_tuptable->tupdesc;

    /*----------
     * The provided SQL query must always return three columns.
     *
     * 1. rowname
     *  the label or identifier for each row in the final result
     * 2. category
     *  the label or identifier for each column in the final result
     * 3. values
     *  the value for each column in the final result
     *----------
     */
    if (spi_tupdesc->natts != 3)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("invalid crosstab source data query"),
                 errdetail("The query must return 3 columns: row_name, category, and value.")));

    /* get a tuple descriptor for our result type */
    switch (get_call_result_type(fcinfo, NULL, &tupdesc))
    {
        case TYPEFUNC_COMPOSITE:
            /* success */
            break;
        case TYPEFUNC_RECORD:
            /* failed to determine actual type of RECORD */
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("function returning record called in context "
                            "that cannot accept type record")));
            break;
        default:
            /* result type isn't composite */
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("return type must be a row type")));
            break;
    }

    /*
     * Check that return tupdesc is compatible with the data we got from SPI,
     * at least based on number and type of attributes
     */
    compatCrosstabTupleDescs(tupdesc, spi_tupdesc);

    /*
     * switch to long-lived memory context
     */
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* make sure we have a persistent copy of the result tupdesc */
    tupdesc = CreateTupleDescCopy(tupdesc);

    /* initialize our tuplestore in long-lived context */
    tupstore =
        tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
                              false, work_mem);

    MemoryContextSwitchTo(oldcontext);

    /*
     * Generate attribute metadata needed later to produce tuples from raw C
     * strings
     */
    attinmeta = TupleDescGetAttInMetadata(tupdesc);

    /* total number of tuples to be examined */
    max_calls = proc;

    /* the return tuple always must have 1 rowid + num_categories columns */
    num_categories = tupdesc->natts - 1;

    firstpass = true;
    lastrowid = NULL;

    for (call_cntr = 0; call_cntr < max_calls; call_cntr++)
    {
        bool        skip_tuple = false;
        char      **values;

        /* allocate and zero space */
        values = (char **) palloc0((1 + num_categories) * sizeof(char *));

        /*
         * now loop through the sql results and assign each value in sequence
         * to the next category
         */
        for (i = 0; i < num_categories; i++)
        {
            HeapTuple   spi_tuple;
            char       *rowid;

            /* see if we've gone too far already */
            if (call_cntr >= max_calls)
                break;

            /* get the next sql result tuple */
            spi_tuple = spi_tuptable->vals[call_cntr];

            /* get the rowid from the current sql result tuple */
            rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

            /*
             * If this is the first pass through the values for this rowid,
             * set the first column to rowid
             */
            if (i == 0)
            {
                xpstrdup(values[0], rowid);

                /*
                 * Check to see if the rowid is the same as that of the last
                 * tuple sent -- if so, skip this tuple entirely
                 */
                if (!firstpass && xstreq(lastrowid, rowid))
                {
                    xpfree(rowid);
                    skip_tuple = true;
                    break;
                }
            }

            /*
             * If rowid hasn't changed on us, continue building the output
             * tuple.
             */
            if (xstreq(rowid, values[0]))
            {
                /*
                 * Get the next category item value, which is always attribute
                 * number three.
                 *
                 * Be careful to assign the value to the array index based on
                 * which category we are presently processing.
                 */
                values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);

                /*
                 * increment the counter since we consume a row for each
                 * category, but not for last pass because the outer loop will
                 * do that for us
                 */
                if (i < (num_categories - 1))
                    call_cntr++;
                xpfree(rowid);
            }
            else
            {
                /*
                 * We'll fill in NULLs for the missing values, but we need to
                 * decrement the counter since this sql result row doesn't
                 * belong to the current output tuple.
                 */
                call_cntr--;
                xpfree(rowid);
                break;
            }
        }

        if (!skip_tuple)
        {
            HeapTuple   tuple;

            /* build the tuple and store it */
            tuple = BuildTupleFromCStrings(attinmeta, values);
            tuplestore_puttuple(tupstore, tuple);
            heap_freetuple(tuple);
        }

        /* Remember current rowid */
        xpfree(lastrowid);
        xpstrdup(lastrowid, values[0]);
        firstpass = false;

        /* Clean up */
        for (i = 0; i < num_categories + 1; i++)
            if (values[i] != NULL)
                pfree(values[i]);
        pfree(values);
    }

    /* let the caller know we're sending back a tuplestore */
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    /* release SPI related resources (and return to caller's context) */
    SPI_finish();

    return (Datum) 0;
}

/*
 * crosstab_hash - reimplement crosstab as materialized function and
 * properly deal with missing values (i.e. don't pack remaining
 * values to the left)
 *
 * crosstab - create a crosstab of rowids and values columns from a
 * SQL statement returning one rowid column, one category column,
 * and one value column.
 *
 * e.g. given sql which produces:
 *
 *           rowid    cat     value
 *           ------+-------+-------
 *           row1     cat1    val1
 *           row1     cat2    val2
 *           row1     cat4    val4
 *           row2     cat1    val5
 *           row2     cat2    val6
 *           row2     cat3    val7
 *           row2     cat4    val8
 *
 * crosstab returns:
 *                  <===== values columns =====>
 *           rowid    cat1    cat2    cat3    cat4
 *           ------+-------+-------+-------+-------
 *           row1     val1    val2    null    val4
 *           row2     val5    val6    val7    val8
 *
 * NOTES:
 * 1. SQL result must be ordered by 1.
 * 2. The number of values columns depends on the tuple description
 *    of the function's declared return type.
 * 3. Missing values (i.e. missing category) are filled in with nulls.
 * 4. Extra values (i.e. not in category results) are skipped.
 */
PG_FUNCTION_INFO_V1(crosstab_hash);
Datum
crosstab_hash(PG_FUNCTION_ARGS)
{
    char       *sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
    char       *cats_sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    HTAB       *crosstab_hash;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize) ||
        rsinfo->expectedDesc == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* get the requested return tuple description */
    tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

    /*
     * Check to make sure we have a reasonable tuple descriptor
     *
     * Note we will attempt to coerce the values into whatever the return
     * attribute type is and depend on the "in" function to complain if
     * needed.
     */
    if (tupdesc->natts < 2)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("invalid crosstab return type"),
                 errdetail("Return row must have at least two columns.")));

    /* load up the categories hash table */
    crosstab_hash = load_categories_hash(cats_sql, per_query_ctx);

    /* let the caller know we're sending back a tuplestore */
    rsinfo->returnMode = SFRM_Materialize;

    /* now go build it */
    rsinfo->setResult = get_crosstab_tuplestore(sql,
                                                crosstab_hash,
                                                tupdesc,
                                                rsinfo->allowedModes & SFRM_Materialize_Random);

    /*
     * SFRM_Materialize mode expects us to return a NULL Datum. The actual
     * tuples are in our tuplestore and passed back through rsinfo->setResult.
     * rsinfo->setDesc is set to the tuple description that we actually used
     * to build our tuples with, so the caller can verify we did what it was
     * expecting.
     */
    rsinfo->setDesc = tupdesc;
    MemoryContextSwitchTo(oldcontext);

    return (Datum) 0;
}

/*
 * load up the categories hash table
 */
static HTAB *
load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
{
    HTAB       *crosstab_hash;
    HASHCTL     ctl;
    int         ret;
    uint64      proc;
    MemoryContext SPIcontext;

    /* initialize the category hash table */
    ctl.keysize = MAX_CATNAME_LEN;
    ctl.entrysize = sizeof(crosstab_HashEnt);
    ctl.hcxt = per_query_ctx;

    /*
     * use INIT_CATS, defined above as a guess of how many hash table entries
     * to create, initially
     */
    crosstab_hash = hash_create("crosstab hash",
                                INIT_CATS,
                                &ctl,
                                HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);

    /* Connect to SPI manager */
    SPI_connect();

    /* Retrieve the category name rows */
    ret = SPI_execute(cats_sql, true, 0);
    proc = SPI_processed;

    /* Check for qualifying tuples */
    if ((ret == SPI_OK_SELECT) && (proc > 0))
    {
        SPITupleTable *spi_tuptable = SPI_tuptable;
        TupleDesc   spi_tupdesc = spi_tuptable->tupdesc;
        uint64      i;

        /*
         * The provided categories SQL query must always return one column:
         * category - the label or identifier for each column
         */
        if (spi_tupdesc->natts != 1)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("invalid crosstab categories query"),
                     errdetail("The query must return one column.")));

        for (i = 0; i < proc; i++)
        {
            crosstab_cat_desc *catdesc;
            char       *catname;
            HeapTuple   spi_tuple;

            /* get the next sql result tuple */
            spi_tuple = spi_tuptable->vals[i];

            /* get the category from the current sql result tuple */
            catname = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
            if (catname == NULL)
                ereport(ERROR,
                        (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
                         errmsg("crosstab category value must not be null")));

            /* allocate the entry in per-query memory, not SPI memory */
            SPIcontext = MemoryContextSwitchTo(per_query_ctx);

            catdesc = palloc_object(crosstab_cat_desc);
            catdesc->catname = catname;
            catdesc->attidx = i;

            /* Add the proc description
               block to the hashtable */
            crosstab_HashTableInsert(crosstab_hash, catdesc);

            MemoryContextSwitchTo(SPIcontext);
        }
    }

    if (SPI_finish() != SPI_OK_FINISH)
        /* internal error */
        elog(ERROR, "load_categories_hash: SPI_finish() failed");

    return crosstab_hash;
}

/*
 * create and populate the crosstab tuplestore using the provided source query
 */
static Tuplestorestate *
get_crosstab_tuplestore(char *sql,
                        HTAB *crosstab_hash,
                        TupleDesc tupdesc,
                        bool randomAccess)
{
    Tuplestorestate *tupstore;
    int         num_categories = hash_get_num_entries(crosstab_hash);
    AttInMetadata *attinmeta = TupleDescGetAttInMetadata(tupdesc);
    char      **values;
    HeapTuple   tuple;
    int         ret;
    uint64      proc;

    /* initialize our tuplestore (while still in query context!) */
    tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);

    /* Connect to SPI manager */
    SPI_connect();

    /* Now retrieve the crosstab source rows */
    ret = SPI_execute(sql, true, 0);
    proc = SPI_processed;

    /* Check for qualifying tuples */
    if ((ret == SPI_OK_SELECT) && (proc > 0))
    {
        SPITupleTable *spi_tuptable = SPI_tuptable;
        TupleDesc   spi_tupdesc = spi_tuptable->tupdesc;
        int         ncols = spi_tupdesc->natts;
        char       *rowid;
        char       *lastrowid = NULL;
        bool        firstpass = true;
        uint64      i;
        int         j;
        int         result_ncols;

        if (num_categories == 0)
        {
            /* no qualifying category tuples */
            ereport(ERROR,
                    (errcode(ERRCODE_CARDINALITY_VIOLATION),
                     errmsg("crosstab categories query must return at least one row")));
        }

        /*
         * The provided SQL query must always return at least three columns:
         *
         * 1. rowname   the label for each row - column 1 in the final result
         * 2. category  the label for each value-column in the final result
         * 3. value     the values used to populate the value-columns
         *
         * If there are more than three columns, the last two are taken as
         * "category" and "values". The first column is taken as "rowname".
         * Additional columns (2 thru N-2) are assumed the same for the same
         * "rowname", and are copied into the result tuple from the first time
         * we encounter a particular rowname.
         */
        if (ncols < 3)
            ereport(ERROR,
                    (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                     errmsg("invalid crosstab source data query"),
                     errdetail("The query must return at least 3 columns: row_name, category, and value.")));

        result_ncols = (ncols - 2) + num_categories;

        /* Recheck to make sure output tuple descriptor looks reasonable */
        if (tupdesc->natts != result_ncols)
            ereport(ERROR,
                    (errcode(ERRCODE_DATATYPE_MISMATCH),
                     errmsg("invalid crosstab return type"),
                     errdetail("Return row must have %d columns, not %d.",
                               result_ncols, tupdesc->natts)));

        /* allocate space and make sure it's clear */
        values = (char **) palloc0(result_ncols * sizeof(char *));

        for (i = 0; i < proc; i++)
        {
            HeapTuple   spi_tuple;
            crosstab_cat_desc *catdesc;
            char       *catname;

            /* get the next sql result tuple */
            spi_tuple = spi_tuptable->vals[i];

            /* get the rowid from the current sql result tuple */
            rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

            /*
             * if we're on a new output row, grab the column values up to
             * column N-2 now
             */
            if (firstpass || !xstreq(lastrowid, rowid))
            {
                /*
                 * a new row means we need to flush the old one first, unless
                 * we're on the very first row
                 */
                if (!firstpass)
                {
                    /* rowid changed, flush the previous output row */
                    tuple = BuildTupleFromCStrings(attinmeta, values);

                    tuplestore_puttuple(tupstore, tuple);

                    for (j = 0; j < result_ncols; j++)
                        xpfree(values[j]);
                }

                values[0] = rowid;
                for (j = 1; j < ncols - 2; j++)
                    values[j] = SPI_getvalue(spi_tuple, spi_tupdesc, j + 1);

                /* we're no longer on the first pass */
                firstpass = false;
            }

            /* look up the category and fill in the appropriate column */
            catname = SPI_getvalue(spi_tuple, spi_tupdesc, ncols - 1);

            if (catname != NULL)
            {
                crosstab_HashTableLookup(crosstab_hash, catname, catdesc);

                if (catdesc)
                    values[catdesc->attidx + ncols - 2] =
                        SPI_getvalue(spi_tuple, spi_tupdesc, ncols);
            }

            xpfree(lastrowid);
            xpstrdup(lastrowid, rowid);
        }

        /* flush the last output row */
        tuple = BuildTupleFromCStrings(attinmeta, values);

        tuplestore_puttuple(tupstore, tuple);
    }

    if
    (SPI_finish() != SPI_OK_FINISH)
        /* internal error */
        elog(ERROR, "get_crosstab_tuplestore: SPI_finish() failed");

    return tupstore;
}

/*
 * connectby_text - produce a result set from a hierarchical (parent/child)
 * table.
 *
 * e.g. given table foo:
 *
 *          keyid   parent_keyid pos
 *          ------+------------+--
 *          row1    NULL         0
 *          row2    row1         0
 *          row3    row1         0
 *          row4    row2         1
 *          row5    row2         0
 *          row6    row4         0
 *          row7    row3         0
 *          row8    row6         0
 *          row9    row5         0
 *
 *
 * connectby(text relname, text keyid_fld, text parent_keyid_fld
 *            [, text orderby_fld], text start_with, int max_depth
 *            [, text branch_delim])
 * connectby('foo', 'keyid', 'parent_keyid', 'pos', 'row2', 0, '~') returns:
 *
 *      keyid   parent_id  level    branch             serial
 *      ------+-----------+--------+-----------------------
 *      row2    NULL        0        row2                1
 *      row5    row2        1        row2~row5           2
 *      row9    row5        2        row2~row5~row9      3
 *      row4    row2        1        row2~row4           4
 *      row6    row4        2        row2~row4~row6      5
 *      row8    row6        3        row2~row4~row6~row8 6
 *
 */
PG_FUNCTION_INFO_V1(connectby_text);

#define CONNECTBY_NCOLS                    4
#define CONNECTBY_NCOLS_NOBRANCH    3

Datum
connectby_text(PG_FUNCTION_ARGS)
{
    char       *relname = text_to_cstring(PG_GETARG_TEXT_PP(0));
    char       *key_fld = text_to_cstring(PG_GETARG_TEXT_PP(1));
    char       *parent_key_fld = text_to_cstring(PG_GETARG_TEXT_PP(2));
    char       *start_with = text_to_cstring(PG_GETARG_TEXT_PP(3));
    int         max_depth = PG_GETARG_INT32(4);
    char       *branch_delim = NULL;
    bool        show_branch = false;
    bool        show_serial = false;
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    AttInMetadata *attinmeta;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize) ||
        rsinfo->expectedDesc == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    if (fcinfo->nargs == 6)
    {
        /* optional 6th argument supplies the branch delimiter */
        branch_delim = text_to_cstring(PG_GETARG_TEXT_PP(5));
        show_branch = true;
    }
    else
        /* default is no show, tilde for the delimiter */
        branch_delim = pstrdup("~");

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* get the requested return tuple description */
    tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);

    /* does it meet our needs */
    validateConnectbyTupleDesc(tupdesc, show_branch, show_serial);

    /* OK, use it then */
    attinmeta = TupleDescGetAttInMetadata(tupdesc);

    /* OK, go to work */
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = connectby(relname,
                                  key_fld,
                                  parent_key_fld,
                                  NULL,
                                  branch_delim,
                                  start_with,
                                  max_depth,
                                  show_branch,
                                  show_serial,
                                  per_query_ctx,
                                  rsinfo->allowedModes & SFRM_Materialize_Random,
                                  attinmeta);
    rsinfo->setDesc = tupdesc;

    MemoryContextSwitchTo(oldcontext);

    /*
     * SFRM_Materialize mode expects us to return a NULL Datum. The actual
     * tuples are in our tuplestore and passed back through rsinfo->setResult.
     * rsinfo->setDesc is set to the tuple description that we actually used
     * to build our tuples with, so the caller can verify we did what it was
     * expecting.
*/ return (Datum) 0; } PG_FUNCTION_INFO_V1(connectby_text_serial); Datum connectby_text_serial(PG_FUNCTION_ARGS) { char *relname = text_to_cstring(PG_GETARG_TEXT_PP(0)); char *key_fld = text_to_cstring(PG_GETARG_TEXT_PP(1)); char *parent_key_fld = text_to_cstring(PG_GETARG_TEXT_PP(2)); char *orderby_fld = text_to_cstring(PG_GETARG_TEXT_PP(3)); char *start_with = text_to_cstring(PG_GETARG_TEXT_PP(4)); int max_depth = PG_GETARG_INT32(5); char *branch_delim = NULL; bool show_branch = false; bool show_serial = true; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; TupleDesc tupdesc; AttInMetadata *attinmeta; MemoryContext per_query_ctx; MemoryContext oldcontext; /* check to see if caller supports us returning a tuplestore */ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot accept a set"))); if (!(rsinfo->allowedModes & SFRM_Materialize) || rsinfo->expectedDesc == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not allowed in this context"))); if (fcinfo->nargs == 7) { branch_delim = text_to_cstring(PG_GETARG_TEXT_PP(6)); show_branch = true; } else /* default is no show, tilde for the delimiter */ branch_delim = pstrdup("~"); per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); /* get the requested return tuple description */ tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); /* does it meet our needs */ validateConnectbyTupleDesc(tupdesc, show_branch, show_serial); /* OK, use it then */ attinmeta = TupleDescGetAttInMetadata(tupdesc); /* OK, go to work */ rsinfo->returnMode = SFRM_Materialize; rsinfo->setResult = connectby(relname, key_fld, parent_key_fld, orderby_fld, branch_delim, start_with, max_depth, show_branch, show_serial, per_query_ctx, rsinfo->allowedModes & SFRM_Materialize_Random, attinmeta); rsinfo->setDesc = 
        tupdesc;

    MemoryContextSwitchTo(oldcontext);

    /*
     * SFRM_Materialize mode expects us to return a NULL Datum. The actual
     * tuples are in our tuplestore and passed back through rsinfo->setResult.
     * rsinfo->setDesc is set to the tuple description that we actually used
     * to build our tuples with, so the caller can verify we did what it was
     * expecting.
     */
    return (Datum) 0;
}

/*
 * connectby - does the real work for connectby_text()
 */
static Tuplestorestate *
connectby(char *relname,
          char *key_fld,
          char *parent_key_fld,
          char *orderby_fld,
          char *branch_delim,
          char *start_with,
          int max_depth,
          bool show_branch,
          bool show_serial,
          MemoryContext per_query_ctx,
          bool randomAccess,
          AttInMetadata *attinmeta)
{
    Tuplestorestate *tupstore = NULL;
    MemoryContext oldcontext;
    int         serial = 1;

    /* Connect to SPI manager */
    SPI_connect();

    /* switch to longer term context to create the tuple store */
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /* initialize our tuplestore */
    tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);

    MemoryContextSwitchTo(oldcontext);

    /* now go get the whole tree */
    build_tuplestore_recursively(key_fld,
                                 parent_key_fld,
                                 relname,
                                 orderby_fld,
                                 branch_delim,
                                 start_with,
                                 start_with, /* current_branch */
                                 0, /* initial level is 0 */
                                 &serial,   /* initial serial is 1 */
                                 max_depth,
                                 show_branch,
                                 show_serial,
                                 per_query_ctx,
                                 attinmeta,
                                 tupstore);

    SPI_finish();

    return tupstore;
}

static void
build_tuplestore_recursively(char *key_fld,
                             char *parent_key_fld,
                             char *relname,
                             char *orderby_fld,
                             char *branch_delim,
                             char *start_with,
                             char *branch,
                             int level,
                             int *serial,
                             int max_depth,
                             bool show_branch,
                             bool show_serial,
                             MemoryContext per_query_ctx,
                             AttInMetadata *attinmeta,
                             Tuplestorestate *tupstore)
{
    TupleDesc   tupdesc = attinmeta->tupdesc;
    int         ret;
    uint64      proc;
    int         serial_column;
    StringInfoData sql;
    char      **values;
    char       *current_key;
    char       *current_key_parent;
    char        current_level[INT32_STRLEN];
    char        serial_str[INT32_STRLEN];
    char       *current_branch;
    HeapTuple   tuple;

    /* stop the recursion once the requested depth is exceeded */
    if (max_depth > 0 && level > max_depth)
        return;

    initStringInfo(&sql);

    /*
     * Build initial sql statement.
     *
     * NOTE(review): field and relation names are interpolated unquoted;
     * only start_with goes through quote_literal_cstr.  This matches the
     * documented contract that the caller supplies trusted identifiers —
     * confirm against callers before relying on untrusted input here.
     */
    if (!show_serial)
    {
        appendStringInfo(&sql, "SELECT %s, %s FROM %s WHERE %s = %s AND %s IS NOT NULL AND %s <> %s",
                         key_fld,
                         parent_key_fld,
                         relname,
                         parent_key_fld,
                         quote_literal_cstr(start_with),
                         key_fld, key_fld, parent_key_fld);
        serial_column = 0;
    }
    else
    {
        appendStringInfo(&sql, "SELECT %s, %s FROM %s WHERE %s = %s AND %s IS NOT NULL AND %s <> %s ORDER BY %s",
                         key_fld,
                         parent_key_fld,
                         relname,
                         parent_key_fld,
                         quote_literal_cstr(start_with),
                         key_fld, key_fld,
                         parent_key_fld, orderby_fld);
        serial_column = 1;
    }

    if (show_branch)
        values = (char **) palloc((CONNECTBY_NCOLS + serial_column) * sizeof(char *));
    else
        values = (char **) palloc((CONNECTBY_NCOLS_NOBRANCH + serial_column) * sizeof(char *));

    /* First time through, do a little setup */
    if (level == 0)
    {
        /* root value is the one we initially start with */
        values[0] = start_with;

        /* root value has no parent */
        values[1] = NULL;

        /* root level is 0 */
        sprintf(current_level, "%d", level);
        values[2] = current_level;

        /* root branch is just starting root value */
        if (show_branch)
            values[3] = start_with;

        /* root starts the serial with 1 */
        if (show_serial)
        {
            sprintf(serial_str, "%d", (*serial)++);
            if (show_branch)
                values[4] = serial_str;
            else
                values[3] = serial_str;
        }

        /* construct the tuple */
        tuple = BuildTupleFromCStrings(attinmeta, values);

        /* now store it */
        tuplestore_puttuple(tupstore, tuple);

        /* increment level */
        level++;
    }

    /* Retrieve the desired rows */
    ret = SPI_execute(sql.data, true, 0);
    proc = SPI_processed;

    /* Check for qualifying tuples */
    if ((ret == SPI_OK_SELECT) && (proc > 0))
    {
        HeapTuple   spi_tuple;
        SPITupleTable *tuptable = SPI_tuptable;
        TupleDesc   spi_tupdesc = tuptable->tupdesc;
        uint64      i;
        StringInfoData branchstr;
        StringInfoData chk_branchstr;
        StringInfoData chk_current_key;

        /*
         * Check that return tupdesc is compatible with the one we got from
         * the query.
         */
        compatConnectbyTupleDescs(tupdesc, spi_tupdesc);

        initStringInfo(&branchstr);
        initStringInfo(&chk_branchstr);
        initStringInfo(&chk_current_key);

        for (i = 0; i < proc; i++)
        {
            /* initialize branch for this pass */
            appendStringInfoString(&branchstr, branch);
            appendStringInfo(&chk_branchstr, "%s%s%s", branch_delim, branch, branch_delim);

            /* get the next sql result tuple */
            spi_tuple = tuptable->vals[i];

            /* get the current key (might be NULL) */
            current_key = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

            /* get the parent key (might be NULL) */
            current_key_parent = SPI_getvalue(spi_tuple, spi_tupdesc, 2);

            /* get the current level */
            sprintf(current_level, "%d", level);

            /* check to see if this key is also an ancestor */
            if (current_key)
            {
                appendStringInfo(&chk_current_key, "%s%s%s",
                                 branch_delim, current_key, branch_delim);
                if (strstr(chk_branchstr.data, chk_current_key.data))
                    ereport(ERROR,
                            (errcode(ERRCODE_INVALID_RECURSION),
                             errmsg("infinite recursion detected")));
            }

            /* OK, extend the branch */
            if (current_key)
                appendStringInfo(&branchstr, "%s%s", branch_delim, current_key);
            current_branch = branchstr.data;

            /* build a tuple */
            values[0] = current_key;
            values[1] = current_key_parent;
            values[2] = current_level;
            if (show_branch)
                values[3] = current_branch;
            if (show_serial)
            {
                sprintf(serial_str, "%d", (*serial)++);
                if (show_branch)
                    values[4] = serial_str;
                else
                    values[3] = serial_str;
            }

            tuple = BuildTupleFromCStrings(attinmeta, values);

            /* store the tuple for later use */
            tuplestore_puttuple(tupstore, tuple);

            heap_freetuple(tuple);

            /* recurse using current_key as the new start_with */
            if (current_key)
                build_tuplestore_recursively(key_fld,
                                             parent_key_fld,
                                             relname,
                                             orderby_fld,
                                             branch_delim,
                                             current_key,
                                             current_branch,
                                             level + 1,
                                             serial,
                                             max_depth,
                                             show_branch,
                                             show_serial,
                                             per_query_ctx,
                                             attinmeta,
                                             tupstore);

            xpfree(current_key);
            xpfree(current_key_parent);

            /* reset branch for next pass */
            resetStringInfo(&branchstr);
            resetStringInfo(&chk_branchstr);
resetStringInfo(&chk_current_key); } xpfree(branchstr.data); xpfree(chk_branchstr.data); xpfree(chk_current_key.data); } } /* * Check expected (query runtime) tupdesc suitable for Connectby */ static void validateConnectbyTupleDesc(TupleDesc td, bool show_branch, bool show_serial) { int expected_cols; /* are there the correct number of columns */ if (show_branch) expected_cols = CONNECTBY_NCOLS; else expected_cols = CONNECTBY_NCOLS_NOBRANCH; if (show_serial) expected_cols++; if (td->natts != expected_cols) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Return row must have %d columns, not %d.", expected_cols, td->natts))); /* the first two columns will be checked against the input tuples later */ /* check that the type of the third column is INT4 */ if (TupleDescAttr(td, 2)->atttypid != INT4OID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Third return column (depth) must be type %s.", format_type_be(INT4OID)))); /* check that the type of the branch column is TEXT if applicable */ if (show_branch && TupleDescAttr(td, 3)->atttypid != TEXTOID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Fourth return column (branch) must be type %s.", format_type_be(TEXTOID)))); /* check that the type of the serial column is INT4 if applicable */ if (show_branch && show_serial && TupleDescAttr(td, 4)->atttypid != INT4OID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Fifth return column (serial) must be type %s.", format_type_be(INT4OID)))); if (!show_branch && show_serial && TupleDescAttr(td, 3)->atttypid != INT4OID) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Fourth return column (serial) must be type %s.", format_type_be(INT4OID)))); /* OK, the tupdesc is valid for our purposes */ } /* * 
Check if output tupdesc and SQL query's tupdesc are compatible */ static void compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) { Oid ret_atttypid; Oid sql_atttypid; int32 ret_atttypmod; int32 sql_atttypmod; /* * Query result must have at least 2 columns. */ if (sql_tupdesc->natts < 2) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid connectby source data query"), errdetail("The query must return at least two columns."))); /* * These columns must match the result type indicated by the calling * query. */ ret_atttypid = TupleDescAttr(ret_tupdesc, 0)->atttypid; sql_atttypid = TupleDescAttr(sql_tupdesc, 0)->atttypid; ret_atttypmod = TupleDescAttr(ret_tupdesc, 0)->atttypmod; sql_atttypmod = TupleDescAttr(sql_tupdesc, 0)->atttypmod; if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Source key type %s does not match return key type %s.", format_type_with_typemod(sql_atttypid, sql_atttypmod), format_type_with_typemod(ret_atttypid, ret_atttypmod)))); ret_atttypid = TupleDescAttr(ret_tupdesc, 1)->atttypid; sql_atttypid = TupleDescAttr(sql_tupdesc, 1)->atttypid; ret_atttypmod = TupleDescAttr(ret_tupdesc, 1)->atttypmod; sql_atttypmod = TupleDescAttr(sql_tupdesc, 1)->atttypmod; if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid connectby return type"), errdetail("Source parent key type %s does not match return parent key type %s.", format_type_with_typemod(sql_atttypid, sql_atttypmod), format_type_with_typemod(ret_atttypid, ret_atttypmod)))); /* OK, the two tupdescs are compatible for our purposes */ } /* * Check if crosstab output tupdesc agrees with input tupdesc */ static void compatCrosstabTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) { int i; Oid ret_atttypid; Oid 
sql_atttypid; int32 ret_atttypmod; int32 sql_atttypmod; if (ret_tupdesc->natts < 2) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid crosstab return type"), errdetail("Return row must have at least two columns."))); Assert(sql_tupdesc->natts == 3); /* already checked by caller */ /* check the row_name types match */ ret_atttypid = TupleDescAttr(ret_tupdesc, 0)->atttypid; sql_atttypid = TupleDescAttr(sql_tupdesc, 0)->atttypid; ret_atttypmod = TupleDescAttr(ret_tupdesc, 0)->atttypmod; sql_atttypmod = TupleDescAttr(sql_tupdesc, 0)->atttypmod; if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid crosstab return type"), errdetail("Source row_name datatype %s does not match return row_name datatype %s.", format_type_with_typemod(sql_atttypid, sql_atttypmod), format_type_with_typemod(ret_atttypid, ret_atttypmod)))); /* * attribute [1] of sql tuple is the category; no need to check it * attribute [2] of sql tuple should match attributes [1] to [natts - 1] * of the return tuple */ sql_atttypid = TupleDescAttr(sql_tupdesc, 2)->atttypid; sql_atttypmod = TupleDescAttr(sql_tupdesc, 2)->atttypmod; for (i = 1; i < ret_tupdesc->natts; i++) { ret_atttypid = TupleDescAttr(ret_tupdesc, i)->atttypid; ret_atttypmod = TupleDescAttr(ret_tupdesc, i)->atttypmod; if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid crosstab return type"), errdetail("Source value datatype %s does not match return value datatype %s in column %d.", format_type_with_typemod(sql_atttypid, sql_atttypmod), format_type_with_typemod(ret_atttypid, ret_atttypmod), i + 1))); } /* OK, the two tupdescs are compatible for our purposes */ }
c
github
https://github.com/postgres/postgres
contrib/tablefunc/tablefunc.c
#!/usr/bin/env python3

import argparse
import json
import subprocess
import sys


def dump_json(obj, out, pretty):
    """Serialize ``obj`` as JSON to the file-like object ``out``.

    When ``pretty`` is true the output is indented for human reading;
    otherwise it is emitted in compact form.  Keys are always sorted so
    the output is deterministic.
    """
    if pretty:
        json.dump(obj, out, indent=2, sort_keys=True)
    else:
        json.dump(obj, out, separators=(",", ":"), sort_keys=True)


def main():
    """Read account descriptions, derive dev keys, and emit a genesis patch.

    For every input account this shells out to
    ``programs/genesis_util/get_dev_key`` twice (owner/active) and builds the
    ``initial_accounts`` (and optionally ``initial_balances``) append patch.
    """
    parser = argparse.ArgumentParser(description="Generate a patch file that adds init accounts")
    parser.add_argument("-o", "--output", metavar="OUT", default="-", help="output filename (default: stdout)")
    parser.add_argument("-a", "--accounts", metavar="ACCOUNTS", default="-", help="file containing name, balances to create (default: stdin)")
    parser.add_argument("-p", "--pretty", action="store_true", default=False, help="pretty print output")
    parser.add_argument("-s", "--secret", metavar="SECRET", default=None, help="private key generation secret")
    opts = parser.parse_args()

    if opts.secret is None:
        sys.stderr.write("missing required parameter --secret\n")
        sys.stderr.flush()
        sys.exit(1)

    # BUG FIX: "-" is the documented default for --accounts (mirroring the
    # "-" == stdout convention used for --output below), but the old code
    # unconditionally tried to open a file literally named "-", so the
    # default always failed.  Read stdin when "-" is given.
    if opts.accounts == "-":
        accounts = json.load(sys.stdin)
    else:
        with open(opts.accounts, "r") as f:
            accounts = json.load(f)

    initial_accounts = []
    initial_balances = []
    for e in accounts:
        name = e["name"]
        # Derive deterministic owner/active keypairs from the shared secret.
        owner_str = subprocess.check_output(["programs/genesis_util/get_dev_key", opts.secret, "owner-"+name]).decode("utf-8")
        active_str = subprocess.check_output(["programs/genesis_util/get_dev_key", opts.secret, "active-"+name]).decode("utf-8")
        owner = json.loads(owner_str)
        active = json.loads(active_str)
        initial_accounts.append({
            "name": name,
            "owner_key": owner[0]["public_key"],
            "active_key": active[0]["public_key"],
            "is_lifetime_member": True,
        })
        for bal in e.get("balances", []):
            bal = dict(bal)  # copy so the parsed input is not mutated
            bal["owner"] = active[0]["address"]
            initial_balances.append(bal)

    result = {
        "append": {"initial_accounts": initial_accounts},
    }
    if len(initial_balances) > 0:
        result["append"]["initial_balances"] = initial_balances

    if opts.output == "-":
        dump_json(result, sys.stdout, opts.pretty)
        sys.stdout.flush()
    else:
        with open(opts.output, "w") as f:
            dump_json(result, f, opts.pretty)


if __name__ == "__main__":
    main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python from string import strip from sys import stdin targets = {} smilies = {'slowdown': '&#9785;' , 'speedup': '&#9786;'} for line in stdin: line = map(strip, filter(None, line.split(' '))) if 9 == len(line): target, name = line[0:2] factor, dir = line[-2:] name = name.split('-') name, size = '-'.join(name[:-1]), name[-1] target_tests = targets.get(target, {}) name_tests = target_tests.get(name, {}) name_tests[int(size)] = (factor, dir) target_tests[name] = name_tests targets[target] = target_tests print '''\ <html><head> <title>Performance Changes</title> <style type="text/css">/*<![CDATA[*/ body { background: white; color: black; } table { border-collapse: collapse; } th, td { border: 1px solid silver; padding: 0.2em; } td { text-align: center; } th:first-child { text-align: left; } th { background: #eee; } /* those colors also should work for color blinds */ td.slowdown { background: #f93; } td.speedup { background: #6f9; } /*]]>*/</style> </head><body> <h1>Performance Changes</h1>''' targets = targets.items() targets.sort(lambda a, b: cmp(a[0], b[0])) for target, names in targets: sizes = {} for tests in names.values(): for size in tests.keys(): sizes[size] = True sizes = sizes.keys() sizes.sort() names = names.items() names.sort(lambda a, b: cmp(a[0], b[0])) print '<h2><a name="%s">%s</a></h2>' % (target, target) print '<table><thead><tr><th>&nbsp;</th>' for size in sizes: print '<th>%s</th>' % size print '</tr></thead><tbody>' for name, tests in names: print '<tr><th>%s</th>' % name for size in sizes: result = tests.get(size) if result: factor, dir = result print '<td class="%s">%s %s</td>' % ( dir, factor, smilies[dir]) else: print '<td>&nbsp;</td>' print '</tr>' print '</tbody></table>' print '</body></html>'
unknown
codeparrot/codeparrot-clean
# Test suite for the configr package: metadata, CRUD access, persistence
# (save/load round-trip), nested defaults, and the module's doctests.
import doctest
import json
import logging
import os
import unittest
import sys
sys.path.insert(0, "..")  # allow importing the package from the parent folder
import configr


class Tests(unittest.TestCase):
  ''' Test suite. '''

  def tests_metadata(_):
    # The package must expose version metadata.
    _.assertTrue(hasattr(configr, "version"))
    _.assertTrue(hasattr(configr.version, "__version__"))
    _.assertTrue(hasattr(configr.version, "__version_info__"))

  def test_details(_):
    # Best-effort cleanup of backup files left over from previous runs.
    try:
      for file in (f for f in os.listdir() if f.endswith(configr.EXTENSION + ".bak")):
        try: os.unlink(file)
        except: pass
    except: pass
    c = configr.Configr("myapp", data = {"d": 2}, defaults = {"e": 1})
    # NOTE(review): inside this class `c.__name` mangles to `c._Tests__name`;
    # presumably Configr resolves mangled attribute names — confirm.
    _.assertEqual("myapp", c.__name)
    _.assertEqual("myapp", c["__name"])
    try: c["c"]; raise Exception("Should have crashed")  # not existing data via dictionary access case
    except: pass
    try: c.c; raise Exception("Should have crashed")  # not existing data via attribute access case
    except: pass
    _.assertEqual(2, c.d)  # pre-defined data case
    _.assertEqual(1, c["e"])  # default case
    # Create some contents
    c.a = "a"
    c["b"] = "b"
    _.assertEqual("a", c["a"])
    _.assertEqual("b", c.b)
    # Save to file
    value = c.saveSettings(location = os.getcwd(), keys = ["a", "b"], clientCodeLocation = __file__)  # CWD should be "tests" folder
    _.assertIsNotNone(value.path)
    _.assertIsNone(value.error)
    _.assertEqual(value, c.__savedTo)
    _.assertEqual(os.getcwd(), os.path.dirname(c.__savedTo.path))
    _.assertEqual("a", c["a"])
    _.assertEqual("b", c.b)
    # The saved file must contain exactly the selected keys.
    name = c.__savedTo.path
    with open(name, "r") as fd: contents = json.loads(fd.read())
    _.assertEqual({"a": "a", "b": "b"}, contents)
    # Now load and see if all is correct
    c = configr.Configr("myapp")
    value = c.loadSettings(location = os.getcwd(), data = {"c": 33}, clientCodeLocation = __file__)
    _.assertEqual(name, c.__loadedFrom.path)
    _.assertIsNotNone(value.path)
    _.assertIsNone(value.error)
    _.assertEqual(value, c.__loadedFrom)
    _.assertEqual(c.a, "a")
    _.assertEqual(c["b"], "b")
    _.assertEqual(c.c, 33)
    os.unlink(value.path)
    value = c.loadSettings(location = "bla", clientCodeLocation = __file__)  # provoke error
    _.assertIsNone(value.path)
    _.assertIsNotNone(value.error)
    # Now test removal
    del c["b"]
    del c.a
    _.assertEqual(1, len(c.keys()))
    _.assertIn("c", c.keys())
    # Now stringify
    _.assertEqual("Configr(c: 33)", str(c))
    _.assertEqual("Configr(c: 33)", repr(c))
    # Testing map functions: already done in doctest
    # TODO test ignores option for saveSettings

  def testNested(_):
    # Defaults may themselves be Configr instances; lookups fall through
    # the chain until a match is found.
    c = configr.Configr(data = {"a": "a"}, defaults = configr.Configr(data = {"b": "b"}, defaults = configr.Configr(data = {"c": "c"})))
    _.assertEqual("a", c.a)
    _.assertEqual("b", c["b"])
    _.assertEqual("c", c.c)
    _.assertTrue("a" in c)
    _.assertTrue("b" in c)
    _.assertTrue("c" in c)
    _.assertFalse("d" in c)


def load_tests(loader, tests, ignore):
  ''' The function name suffix "_tests" tells the unittest module about a test case. '''
  tests.addTests(doctest.DocTestSuite(configr))
  return tests


if __name__ == "__main__":
  logging.basicConfig(level = logging.DEBUG, stream = sys.stderr, format = "%(asctime)-25s %(levelname)-8s %(name)-12s | %(message)s")
  print(unittest.main())
unknown
codeparrot/codeparrot-clean
import sys if sys.version_info[0] < 3: from ConfigParser import SafeConfigParser, NoOptionError else: from configparser import SafeConfigParser, NoOptionError import re import os import shlex __all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet', 'read_config', 'parse_flags'] _VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}') class FormatError(IOError): """ Exception thrown when there is a problem parsing a configuration file. """ def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class PkgNotFound(IOError): """Exception raised when a package can not be located.""" def __init__(self, msg): self.msg = msg def __str__(self): return self.msg def parse_flags(line): """ Parse a line from a config file containing compile flags. Parameters ---------- line : str A single line containing one or more compile flags. Returns ------- d : dict Dictionary of parsed flags, split into relevant categories. These categories are the keys of `d`: * 'include_dirs' * 'library_dirs' * 'libraries' * 'macros' * 'ignored' """ lexer = shlex.shlex(line) lexer.whitespace_split = True d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], 'macros': [], 'ignored': []} def next_token(t): if t.startswith('-I'): if len(t) > 2: d['include_dirs'].append(t[2:]) else: t = lexer.get_token() d['include_dirs'].append(t) elif t.startswith('-L'): if len(t) > 2: d['library_dirs'].append(t[2:]) else: t = lexer.get_token() d['library_dirs'].append(t) elif t.startswith('-l'): d['libraries'].append(t[2:]) elif t.startswith('-D'): d['macros'].append(t[2:]) else: d['ignored'].append(t) return lexer.get_token() t = lexer.get_token() while t: t = next_token(t) return d def _escape_backslash(val): return val.replace('\\', '\\\\') class LibraryInfo(object): """ Object containing build information about a library. Parameters ---------- name : str The library name. description : str Description of the library. version : str Version string. 
sections : dict The sections of the configuration file for the library. The keys are the section headers, the values the text under each header. vars : class instance A `VariableSet` instance, which contains ``(name, value)`` pairs for variables defined in the configuration file for the library. requires : sequence, optional The required libraries for the library to be installed. Notes ----- All input parameters (except "sections" which is a method) are available as attributes of the same name. """ def __init__(self, name, description, version, sections, vars, requires=None): self.name = name self.description = description if requires: self.requires = requires else: self.requires = [] self.version = version self._sections = sections self.vars = vars def sections(self): """ Return the section headers of the config file. Parameters ---------- None Returns ------- keys : list of str The list of section headers. """ return self._sections.keys() def cflags(self, section="default"): val = self.vars.interpolate(self._sections[section]['cflags']) return _escape_backslash(val) def libs(self, section="default"): val = self.vars.interpolate(self._sections[section]['libs']) return _escape_backslash(val) def __str__(self): m = ['Name: %s' % self.name] m.append('Description: %s' % self.description) if self.requires: m.append('Requires:') else: m.append('Requires: %s' % ",".join(self.requires)) m.append('Version: %s' % self.version) return "\n".join(m) class VariableSet(object): """ Container object for the variables defined in a config file. `VariableSet` can be used as a plain dictionary, with the variable names as keys. Parameters ---------- d : dict Dict of items in the "variables" section of the configuration file. 
""" def __init__(self, d): self._raw_data = dict([(k, v) for k, v in d.items()]) self._re = {} self._re_sub = {} self._init_parse() def _init_parse(self): for k, v in self._raw_data.items(): self._init_parse_var(k, v) def _init_parse_var(self, name, value): self._re[name] = re.compile(r'\$\{%s\}' % name) self._re_sub[name] = value def interpolate(self, value): # Brute force: we keep interpolating until there is no '${var}' anymore # or until interpolated string is equal to input string def _interpolate(value): for k in self._re.keys(): value = self._re[k].sub(self._re_sub[k], value) return value while _VAR.search(value): nvalue = _interpolate(value) if nvalue == value: break value = nvalue return value def variables(self): """ Return the list of variable names. Parameters ---------- None Returns ------- names : list of str The names of all variables in the `VariableSet` instance. """ return self._raw_data.keys() # Emulate a dict to set/get variables values def __getitem__(self, name): return self._raw_data[name] def __setitem__(self, name, value): self._raw_data[name] = value self._init_parse_var(name, value) def parse_meta(config): if not config.has_section('meta'): raise FormatError("No meta section found !") d = {} for name, value in config.items('meta'): d[name] = value for k in ['name', 'description', 'version']: if not d.has_key(k): raise FormatError("Option %s (section [meta]) is mandatory, " "but not found" % k) if not d.has_key('requires'): d['requires'] = [] return d def parse_variables(config): if not config.has_section('variables'): raise FormatError("No variables section found !") d = {} for name, value in config.items("variables"): d[name] = value return VariableSet(d) def parse_sections(config): return meta_d, r def pkg_to_filename(pkg_name): return "%s.ini" % pkg_name def parse_config(filename, dirs=None): if dirs: filenames = [os.path.join(d, filename) for d in dirs] else: filenames = [filename] config = SafeConfigParser() n = 
config.read(filenames) if not len(n) >= 1: raise PkgNotFound("Could not find file(s) %s" % str(filenames)) # Parse meta and variables sections meta = parse_meta(config) vars = {} if config.has_section('variables'): for name, value in config.items("variables"): vars[name] = _escape_backslash(value) # Parse "normal" sections secs = [s for s in config.sections() if not s in ['meta', 'variables']] sections = {} requires = {} for s in secs: d = {} if config.has_option(s, "requires"): requires[s] = config.get(s, 'requires') for name, value in config.items(s): d[name] = value sections[s] = d return meta, vars, sections, requires def _read_config_imp(filenames, dirs=None): def _read_config(f): meta, vars, sections, reqs = parse_config(f, dirs) # recursively add sections and variables of required libraries for rname, rvalue in reqs.items(): nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue)) # Update var dict for variables not in 'top' config file for k, v in nvars.items(): if not vars.has_key(k): vars[k] = v # Update sec dict for oname, ovalue in nsections[rname].items(): if ovalue: sections[rname][oname] += ' %s' % ovalue return meta, vars, sections, reqs meta, vars, sections, reqs = _read_config(filenames) # FIXME: document this. If pkgname is defined in the variables section, and # there is no pkgdir variable defined, pkgdir is automatically defined to # the path of pkgname. This requires the package to be imported to work if not vars.has_key("pkgdir") and vars.has_key("pkgname"): pkgname = vars["pkgname"] if not pkgname in sys.modules: raise ValueError("You should import %s to get information on %s" % (pkgname, meta["name"])) mod = sys.modules[pkgname] vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__)) return LibraryInfo(name=meta["name"], description=meta["description"], version=meta["version"], sections=sections, vars=VariableSet(vars)) # Trivial cache to cache LibraryInfo instances creation. 
To be really # efficient, the cache should be handled in read_config, since a same file can # be parsed many time outside LibraryInfo creation, but I doubt this will be a # problem in practice _CACHE = {} def read_config(pkgname, dirs=None): """ Return library info for a package from its configuration file. Parameters ---------- pkgname : str Name of the package (should match the name of the .ini file, without the extension, e.g. foo for the file foo.ini). dirs : sequence, optional If given, should be a sequence of directories - usually including the NumPy base directory - where to look for npy-pkg-config files. Returns ------- pkginfo : class instance The `LibraryInfo` instance containing the build information. Raises ------ PkgNotFound If the package is not found. See Also -------- misc_util.get_info, misc_util.get_pkg_info Examples -------- >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath') >>> type(npymath_info) <class 'numpy.distutils.npy_pkg_config.LibraryInfo'> >>> print npymath_info Name: npymath Description: Portable, core math library implementing C99 standard Requires: Version: 0.1 #random """ try: return _CACHE[pkgname] except KeyError: v = _read_config_imp(pkg_to_filename(pkgname), dirs) _CACHE[pkgname] = v return v # TODO: # - implements version comparison (modversion + atleast) # pkg-config simple emulator - useful for debugging, and maybe later to query # the system if __name__ == '__main__': import sys from optparse import OptionParser import glob parser = OptionParser() parser.add_option("--cflags", dest="cflags", action="store_true", help="output all preprocessor and compiler flags") parser.add_option("--libs", dest="libs", action="store_true", help="output all linker flags") parser.add_option("--use-section", dest="section", help="use this section instead of default for options") parser.add_option("--version", dest="version", action="store_true", help="output version") parser.add_option("--atleast-version", dest="min_version", 
help="Minimal version") parser.add_option("--list-all", dest="list_all", action="store_true", help="Minimal version") parser.add_option("--define-variable", dest="define_variable", help="Replace variable with the given value") (options, args) = parser.parse_args(sys.argv) if len(args) < 2: raise ValueError("Expect package name on the command line:") if options.list_all: files = glob.glob("*.ini") for f in files: info = read_config(f) print ("%s\t%s - %s" % (info.name, info.name, info.description)) pkg_name = args[1] import os d = os.environ.get('NPY_PKG_CONFIG_PATH') if d: info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d]) else: info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.']) if options.section: section = options.section else: section = "default" if options.define_variable: m = re.search('([\S]+)=([\S]+)', options.define_variable) if not m: raise ValueError("--define-variable option should be of " \ "the form --define-variable=foo=bar") else: name = m.group(1) value = m.group(2) info.vars[name] = value if options.cflags: print (info.cflags(section)) if options.libs: print (info.libs(section)) if options.version: print (info.version) if options.min_version: print (info.version >= options.min_version)
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # /// script # dependencies = [ # "transformers @ git+https://github.com/huggingface/transformers.git", # "torch>=1.5.0", # "torchvision>=0.6.0", # "datasets>=1.8.0", # ] # /// import argparse import logging import math import os from pathlib import Path import datasets import numpy as np import torch from accelerate import Accelerator, DistributedType from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch import nn from torch.utils.data import DataLoader from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from tqdm.auto import tqdm import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, SchedulerType, get_scheduler, ) from transformers.trainer_pt_utils import get_parameter_names from transformers.utils import check_min_version from transformers.utils.versions import require_version """ Pre-training a 🤗 Transformers model for simple masked image modeling (SimMIM) without using HuggingFace Trainer. Any model supported by the AutoModelForMaskedImageModeling API can be used. """ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. 
check_min_version("4.57.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser( description="Finetune a transformers model on a simple Masked Image Modeling task" ) parser.add_argument( "--dataset_name", type=str, default="cifar10", help="Name of a dataset from the datasets package", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--image_column_name", type=str, default=None, help="The column name of the images in the files. If not set, will try to use 'image' or 'img'.", ) parser.add_argument( "--train_dir", type=str, default=None, help="A folder containing the training data.", ) parser.add_argument( "--validation_dir", type=None, default=None, help="A folder containing the validation data.", ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Percent to split off of train for validation.", ) parser.add_argument( "--mask_patch_size", type=int, default=32, help="The size of the square patches to use for masking.", ) parser.add_argument( "--mask_ratio", type=float, default=0.6, help="Percentage of patches to mask.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--max_eval_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ), ) parser.add_argument( "--model_name_or_path", type=str, default=None, help=( "The model checkpoint for weights initialization. 
Can be a local path to a pytorch_model.bin or a " "checkpoint identifier on the hub. " "Don't set if you want to train a model from scratch." ), ) parser.add_argument( "--model_type", type=str, default=None, help="If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES), ) parser.add_argument( "--config_name_or_path", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--config_overrides", type=str, default=None, help=( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ), ) parser.add_argument( "--cache_dir", type=str, default=None, help="Where do you want to store (cache) the pretrained models/datasets downloaded from the hub", ) parser.add_argument( "--model_revision", type=str, default="main", help="The specific model version to use (can be a branch name, tag name or commit id).", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--image_processor_name", type=str, default=None, help="Name or path of preprocessor config.", ) parser.add_argument( "--token", type=str, default=None, help=( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `hf auth login` (stored in `~/.huggingface`)." ), ) parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--image_size", type=int, default=None, help="The size (resolution) of each image. 
If not specified, will use `image_size` of the configuration.", ) parser.add_argument( "--patch_size", type=int, default=None, help="The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.", ) parser.add_argument( "--encoder_stride", type=int, default=None, help={"help": "Stride to use for the encoder."}, ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--seed", type=int, default=None, help="A seed for reproducible training.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="The initial learning rate for [`AdamW`] optimizer.", ) parser.add_argument( "--weight_decay", type=float, default=0.0, help="Weight decay to use.", ) parser.add_argument( "--num_train_epochs", type=float, default=3.0, help="Total number of training epochs to perform (if not an integer, will perform the decimal part percents of the last epoch before stopping training).", ) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler.", ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--output_dir", type=str, default=None, help="Where to store the final model.", ) args = parser.parse_args() # Sanity checks data_files = {} if args.train_dir is not None: data_files["train"] = args.train_dir if args.validation_dir is not None: data_files["val"] = args.validation_dir args.data_files = data_files if data_files else None if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args class MaskGenerator: """ A class to generate boolean masks for the pretraining task. A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1, where 1 indicates "masked". 
""" def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6): self.input_size = input_size self.mask_patch_size = mask_patch_size self.model_patch_size = model_patch_size self.mask_ratio = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size") if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size") self.rand_size = self.input_size // self.mask_patch_size self.scale = self.mask_patch_size // self.model_patch_size self.token_count = self.rand_size**2 self.mask_count = int(np.ceil(self.token_count * self.mask_ratio)) def __call__(self): mask_idx = np.random.permutation(self.token_count)[: self.mask_count] mask = np.zeros(self.token_count, dtype=int) mask[mask_idx] = 1 mask = mask.reshape((self.rand_size, self.rand_size)) mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1) return torch.tensor(mask.flatten()) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) mask = torch.stack([example["mask"] for example in examples]) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs, ) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Initialize our dataset. ds = load_dataset( args.dataset_name, args.dataset_config_name, data_files=args.data_files, cache_dir=args.cache_dir, token=args.token, trust_remote_code=args.trust_remote_code, ) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in ds else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = ds["train"].train_test_split(args.train_val_split) ds["train"] = split["train"] ds["validation"] = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config_kwargs = { "cache_dir": args.cache_dir, "revision": args.model_revision, "token": args.token, "trust_remote_code": args.trust_remote_code, } if args.config_name_or_path: config = AutoConfig.from_pretrained(args.config_name_or_path, **config_kwargs) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.config_overrides is not None: logger.info(f"Overriding config: {args.config_overrides}") config.update_from_string(args.config_overrides) logger.info(f"New config: {config}") # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(config, "decoder_type"): config.decoder_type = "simmim" # adapt config args.image_size = args.image_size if args.image_size is not None else config.image_size args.patch_size = args.patch_size if args.patch_size is not None else config.patch_size args.encoder_stride = args.encoder_stride if args.encoder_stride is not None else config.encoder_stride config.update( { "image_size": args.image_size, "patch_size": args.patch_size, "encoder_stride": args.encoder_stride, } ) # create image processor if args.image_processor_name: image_processor = AutoImageProcessor.from_pretrained(args.image_processor_name, **config_kwargs) elif args.model_name_or_path: image_processor = AutoImageProcessor.from_pretrained(args.model_name_or_path, **config_kwargs) else: IMAGE_PROCESSOR_TYPES = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } image_processor = IMAGE_PROCESSOR_TYPES[args.model_type]() # create model if args.model_name_or_path: model = AutoModelForMaskedImageModeling.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir, revision=args.model_revision, token=args.token, 
trust_remote_code=args.trust_remote_code, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedImageModeling.from_config( config, token=args.token, trust_remote_code=args.trust_remote_code, ) column_names = ds["train"].column_names if args.image_column_name is not None: image_column_name = args.image_column_name elif "image" in column_names: image_column_name = "image" elif "img" in column_names: image_column_name = "img" else: image_column_name = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py transforms = Compose( [ Lambda(lambda img: img.convert("RGB")), RandomResizedCrop(args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) # create mask generator mask_generator = MaskGenerator( input_size=args.image_size, mask_patch_size=args.mask_patch_size, model_patch_size=args.patch_size, mask_ratio=args.mask_ratio, ) def preprocess_images(examples): """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating which patches to mask.""" examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]] examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))] return examples if args.max_train_samples is not None: ds["train"] = ds["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms ds["train"].set_transform(preprocess_images) if args.max_eval_samples is not None: ds["validation"] = ds["validation"].shuffle(seed=args.seed).select(range(args.max_eval_samples)) # Set the validation transforms ds["validation"].set_transform(preprocess_images) # DataLoaders creation: train_dataloader = DataLoader( ds["train"], shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size, ) 
eval_dataloader = DataLoader( ds["validation"], collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size, ) # Optimizer # Split weights in two groups, one with weight decay and the other not. forbidden_name_patterns = [r"bias", r"layernorm", r"rmsnorm", r"(?:^|\.)norm(?:$|\.)", r"_norm(?:$|\.)"] decay_parameters = get_parameter_names(model, [nn.LayerNorm], forbidden_layer_names=forbidden_name_patterns) optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if n in decay_parameters and p.requires_grad], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if n not in decay_parameters and p.requires_grad], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps if overrode_max_train_steps else args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler, ) # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. 
if accelerator.distributed_type == DistributedType.TPU: model.tie_weights() # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("mim_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(ds['train'])}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(int(args.max_train_steps)), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): with accelerator.accumulate(model): 
outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) loss = outputs.loss losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size))) losses = torch.cat(losses) eval_loss = torch.mean(losses) logger.info(f"epoch {epoch}: eval_loss: {eval_loss}") if args.with_tracking: accelerator.log( { "eval_loss": eval_loss, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.output_dir is not None: 
accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) accelerator.wait_for_everyone() accelerator.end_training() if __name__ == "__main__": main()
python
github
https://github.com/huggingface/transformers
examples/pytorch/image-pretraining/run_mim_no_trainer.py
import sys import struct import threading import json import skitai from .. import wsgi_handler from ...backbone.http_response import catch from aquests.athreads import trigger from rs4 import strutil from aquests.protocols.grpc import discover from aquests.protocols.ws import * from aquests.protocols.http import http_util from skitai import version_info, was as the_was try: import xmlrpc.client as xmlrpclib except ImportError: import xmlrpclib try: from urllib.parse import quote_plus except ImportError: from urllib import quote_plus try: from cStringIO import StringIO as BytesIO except ImportError: from io import BytesIO import copy from collections import Iterable from rs4.reraise import reraise from ...backbone.http_response import catch from collections.abc import Iterable ClosingIterator = None try: from werkzeug.wsgi import ClosingIterator except ImportError: pass class WebSocket: collector = None producer = None def __init__ (self, handler, request, message_encoding = None): self.handler = handler self.wasc = handler.wasc self.request = request self.channel = request.channel self.channel.set_terminator (2) self.rfile = BytesIO () self.masks = b"" self.has_masks = True self.buf = b"" self.payload_length = 0 self.opcode = None self.default_op_code = OPCODE_TEXT self._closed = False self.encoder_config = None self.message_encoding = self.setup_encoding (message_encoding) def close (self): if self._closed: return self._closed = True def closed (self): return self._closed def setup_encoding (self, message_encoding): if message_encoding == skitai.WS_MSG_GRPC: i, o = discover.find_type (request.uri [1:]) self.encoder_config = (i [0], 0 [0]) self.default_op_code = OP_BINARY self.message_encode = self.grpc_encode self.message_decode = self.grpc_decode elif message_encoding == skitai.WS_MSG_JSON: self.message_encode = json.dumps self.message_decode = json.loads elif message_encoding == skitai.WS_MSG_XMLRPC: self.message_encode = xmlrpclib.dumps self.message_decode = 
xmlrpclib.loads else: self.message_encode = self.transport self.message_decode = self.transport return message_encoding def transport (self, msg): return msg def grpc_encode (self, msg): f = self.encoder_config [0] () f.ParseFromString (msg) return f def grpc_decode (self, msg): return msg.SerializeToString () def _tobytes (self, b): if sys.version_info[0] < 3: return map(ord, b) else: return b def collect_incoming_data (self, data): #print (">>>>", data) if not data: # closed connection self.close () return if self.masks or (not self.has_masks and self.payload_length): self.rfile.write (data) else: self.buf += data def found_terminator (self): buf, self.buf = self.buf, b"" if self.masks or (not self.has_masks and self.payload_length): # end of message masked_data = bytearray(self.rfile.getvalue ()) if self.masks: masking_key = bytearray(self.masks) data = bytearray ([masked_data[i] ^ masking_key [i%4] for i in range (len (masked_data))]) else: data = masked_data if self.opcode == OPCODE_TEXT: # text data = data.decode('utf-8') self.payload_length = 0 self.opcode = None self.masks = b"" self.has_masks = True self.rfile.seek (0) self.rfile.truncate () self.channel.set_terminator (2) if self.opcode == OPCODE_PING: self.send (data, OPCODE_PONG) else: self.handle_message (data) elif self.payload_length: self.masks = buf self.channel.set_terminator (self.payload_length) elif self.opcode: if len (buf) == 2: fmt = ">H" else: fmt = ">Q" self.payload_length = struct.unpack(fmt, self._tobytes(buf))[0] if self.has_masks: self.channel.set_terminator (4) # mask else: self.channel.set_terminator (self.payload_length) elif self.opcode is None: b1, b2 = self._tobytes(buf) fin = b1 & FIN self.opcode = b1 & OPCODE #print (fin, self.opcode) if self.opcode == OPCODE_CLOSE: self.close () return mask = b2 & MASKED if not mask: self.has_masks = False payload_length = b2 & PAYLOAD_LEN if payload_length == 0: self.opcode = None self.has_masks = True self.channel.set_terminator (2) return 
if payload_length < 126: self.payload_length = payload_length if self.has_masks: self.channel.set_terminator (4) # mask else: self.channel.set_terminator (self.payload_length) elif payload_length == 126: self.channel.set_terminator (2) # short length elif payload_length == 127: self.channel.set_terminator (8) # long length else: raise AssertionError ("Web socket frame decode error") def build_data (self, message, op_code): message = self.message_encode (message) if op_code == -1: if type (message) is str: op_code = OPCODE_TEXT elif type (message) is bytes: op_code = OPCODE_BINARY if op_code == -1: op_code = self.default_op_code return message, op_code def send (self, messages, op_code = -1): if ClosingIterator and isinstance (messages, ClosingIterator): # for werkzeug iterator messages = [ b''.join ([msg for msg in messages]).decode ("utf8") ] elif isinstance (messages, (str, bytes)) or not isinstance (messages, Iterable): messages = [ messages ] for msg in messages: self.sendone (msg, op_code) def sendone (self, message, op_code = -1): if not self.channel: return message, op_code = self.build_data (message, op_code) header = bytearray() if strutil.is_encodable (message): payload = message.encode ("utf8") else: payload = message payload_length = len(payload) # Normal payload if payload_length <= 125: header.append(FIN | op_code) header.append(payload_length) # Extended payload elif payload_length >= 126 and payload_length <= 65535: header.append(FIN | op_code) header.append(PAYLOAD_LEN_EXT16) header.extend(struct.pack(">H", payload_length)) # Huge extended payload elif payload_length < 18446744073709551616: header.append(FIN | op_code) header.append(PAYLOAD_LEN_EXT64) header.extend(struct.pack(">Q", payload_length)) else: raise AssertionError ("Message is too big. 
Consider breaking it into chunks.") m = header + payload self._send (m) def _send (self, msg): if self.channel: if hasattr (self.wasc, 'threads'): trigger.wakeup (lambda p=self.channel, d=msg: (p.push (d),)) else: self.channel.push (msg) def handle_message (self, msg): raise NotImplementedError ("handle_message () not implemented") #--------------------------------------------------------- class Job (wsgi_handler.Job): def exec_app (self): was = the_was._get () was.request = self.request was.env = self.args [0] was.websocket = self.args [0]["websocket"] self.args [0]["skitai.was"] = was content = self.apph (*self.args) if content: if type (content) is not tuple: content = (content,) was.websocket.send (*content) was.request = None was.env = None was.websocket = None #--------------------------------------------------------- class WebSocket1 (WebSocket): # WEBSOCKET_REQDATA def __init__ (self, handler, request, apph, env, param_names, message_encoding = None): WebSocket.__init__ (self, handler, request, message_encoding) self.client_id = request.channel.channel_number self.apph = apph self.env = env self.param_names = param_names self.set_query_string () self.session = self.env.get ("websocket.session") def start_response (self, message, headers = None, exc_info = None): if exc_info: reraise (*exc_info) def set_query_string (self): if not self.param_names: self.querystring = "" self.params = {} return querystring = [] if self.env.get ("QUERY_STRING"): querystring.append (self.env.get ("QUERY_STRING")) querystring.append ("%s=" % self.param_names [0]) self.querystring = "&".join (querystring) self.params = http_util.crack_query (self.querystring) def open (self): self.handle_message (-1, skitai.WS_EVT_OPEN) if "websocket.handler" in self.env: app = self.apph.get_callable () app.register_websocket (self.client_id, self.send) def close (self): if "websocket.handler" in self.env: app = self.apph.get_callable () app.remove_websocket (self.client_id) if not self.closed 
(): self.handle_message (-1, skitai.WS_EVT_CLOSE) WebSocket.close (self) def make_params (self, msg, event): querystring = self.querystring params = self.params if event: self.env ['websocket.event'] = event else: self.env ['websocket.event'] = None querystring = querystring + quote_plus (msg) params [self.param_names [0]] = self.message_decode (msg) return querystring, params def handle_message (self, msg, event = None): if not msg: return if self.session: self.handle_session (msg, event) else: self.handle_thread (msg, event) def handle_session (self, msg, event): if event: if event == skitai.WS_EVT_CLOSE: try: next (self.session) resp = self.session.send (None) except: return else: return next (self.session) resp = self.session.send (msg) resp and self.send (resp) def handle_thread (self, msg, event = None): querystring, params = self.make_params (msg, event) self.env ["QUERY_STRING"] = querystring self.env ["websocket.params"] = params self.env ["websocket.client"] = self.client_id self.execute () def execute (self): args = (self.request, self.apph, (self.env, self.start_response), None, self.wasc.logger) if not self.env ["wsgi.multithread"]: Job (*args) () else: self.wasc.queue.put (Job (*args)) class WebSocket6 (WebSocket1): def __init__ (self, handler, request, apph, env, param_names, message_encoding = None): WebSocket1.__init__ (self, handler, request, apph, env, param_names, message_encoding) self.lock = threading.Lock () def _send (self, msg): with self.lock: WebSocket1._send (self, msg) class WebSocket5 (WebSocket1): # WEBSOCKET_MULTICAST CLIENT def __init__ (self, handler, request, server, env, param_names): self.server = server WebSocket1.__init__ (self, handler, request, server.apph, env, param_names) def handle_message (self, msg, event = None): self.server.handle_client (self.client_id, event) WebSocket1.handle_message (self, msg, event)
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals import os import re import subprocess import time from .common import FileDownloader from ..compat import compat_str from ..utils import ( check_executable, encodeFilename, encodeArgument, get_exe_version, ) def rtmpdump_version(): return get_exe_version( 'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)') class RtmpFD(FileDownloader): def real_download(self, filename, info_dict): def run_rtmpdump(args): start = time.time() resume_percent = None resume_downloaded_data_len = None proc = subprocess.Popen(args, stderr=subprocess.PIPE) cursor_in_new_line = True proc_stderr_closed = False try: while not proc_stderr_closed: # read line from stderr line = '' while True: char = proc.stderr.read(1) if not char: proc_stderr_closed = True break if char in [b'\r', b'\n']: break line += char.decode('ascii', 'replace') if not line: # proc_stderr_closed is True continue mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line) if mobj: downloaded_data_len = int(float(mobj.group(1)) * 1024) percent = float(mobj.group(2)) if not resume_percent: resume_percent = percent resume_downloaded_data_len = downloaded_data_len time_now = time.time() eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent) speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len) data_len = None if percent > 0: data_len = int(downloaded_data_len * 100 / percent) self._hook_progress({ 'status': 'downloading', 'downloaded_bytes': downloaded_data_len, 'total_bytes_estimate': data_len, 'tmpfilename': tmpfilename, 'filename': filename, 'eta': eta, 'elapsed': time_now - start, 'speed': speed, }) cursor_in_new_line = False else: # no percent for live streams mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line) if mobj: downloaded_data_len = int(float(mobj.group(1)) * 1024) time_now = time.time() speed = self.calc_speed(start, time_now, 
downloaded_data_len) self._hook_progress({ 'downloaded_bytes': downloaded_data_len, 'tmpfilename': tmpfilename, 'filename': filename, 'status': 'downloading', 'elapsed': time_now - start, 'speed': speed, }) cursor_in_new_line = False elif self.params.get('verbose', False): if not cursor_in_new_line: self.to_screen('') cursor_in_new_line = True self.to_screen('[rtmpdump] ' + line) finally: proc.wait() if not cursor_in_new_line: self.to_screen('') return proc.returncode url = info_dict['url'] player_url = info_dict.get('player_url') page_url = info_dict.get('page_url') app = info_dict.get('app') play_path = info_dict.get('play_path') tc_url = info_dict.get('tc_url') flash_version = info_dict.get('flash_version') live = info_dict.get('rtmp_live', False) conn = info_dict.get('rtmp_conn') protocol = info_dict.get('rtmp_protocol') real_time = info_dict.get('rtmp_real_time', False) no_resume = info_dict.get('no_resume', False) continue_dl = self.params.get('continuedl', True) self.report_destination(filename) tmpfilename = self.temp_name(filename) test = self.params.get('test', False) # Check for rtmpdump first if not check_executable('rtmpdump', ['-h']): self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.') return False # Download using rtmpdump. rtmpdump returns exit code 2 when # the connection was interrupted and resuming appears to be # possible. This is part of rtmpdump's normal usage, AFAIK. 
basic_args = [ 'rtmpdump', '--verbose', '-r', url, '-o', tmpfilename] if player_url is not None: basic_args += ['--swfVfy', player_url] if page_url is not None: basic_args += ['--pageUrl', page_url] if app is not None: basic_args += ['--app', app] if play_path is not None: basic_args += ['--playpath', play_path] if tc_url is not None: basic_args += ['--tcUrl', tc_url] if test: basic_args += ['--stop', '1'] if flash_version is not None: basic_args += ['--flashVer', flash_version] if live: basic_args += ['--live'] if isinstance(conn, list): for entry in conn: basic_args += ['--conn', entry] elif isinstance(conn, compat_str): basic_args += ['--conn', conn] if protocol is not None: basic_args += ['--protocol', protocol] if real_time: basic_args += ['--realtime'] args = basic_args if not no_resume and continue_dl and not live: args += ['--resume'] if not live and continue_dl: args += ['--skip', '1'] args = [encodeArgument(a) for a in args] self._debug_cmd(args, exe='rtmpdump') RD_SUCCESS = 0 RD_FAILED = 1 RD_INCOMPLETE = 2 RD_NO_CONNECT = 3 started = time.time() try: retval = run_rtmpdump(args) except KeyboardInterrupt: if not info_dict.get('is_live'): raise retval = RD_SUCCESS self.to_screen('\n[rtmpdump] Interrupted by user') if retval == RD_NO_CONNECT: self.report_error('[rtmpdump] Could not connect to RTMP server.') return False while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live: prevsize = os.path.getsize(encodeFilename(tmpfilename)) self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize) time.sleep(5.0) # This seems to be needed args = basic_args + ['--resume'] if retval == RD_FAILED: args += ['--skip', '1'] args = [encodeArgument(a) for a in args] retval = run_rtmpdump(args) cursize = os.path.getsize(encodeFilename(tmpfilename)) if prevsize == cursize and retval == RD_FAILED: break # Some rtmp streams seem abort after ~ 99.8%. 
Don't complain for those if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024: self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.') retval = RD_SUCCESS break if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE): fsize = os.path.getsize(encodeFilename(tmpfilename)) self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize) self.try_rename(tmpfilename, filename) self._hook_progress({ 'downloaded_bytes': fsize, 'total_bytes': fsize, 'filename': filename, 'status': 'finished', 'elapsed': time.time() - started, }) return True else: self.to_stderr('\n') self.report_error('rtmpdump exited with code %d' % retval) return False
unknown
codeparrot/codeparrot-clean
""" NodeMeister ENC - methods to aid in collection of resources """ def getExclusions(node): """ Get excluded groups, classes, params for a node :param node: the node to get exclusions for :type node: Node :returns: tuple: (list of excluded group names, list of excluded class names, list of excluded param names) """ excluded_groups = [] excluded_params = [] excluded_classes = [] for group_exclusion in node.excluded_groups.all(): excluded_groups.append(group_exclusion.group.name) for class_exclusion in node.excluded_classes.all(): excluded_classes.append(class_exclusion.exclusion) for param_exclusion in node.excluded_params.all(): excluded_params.append(param_exclusion.exclusion) return (excluded_groups, excluded_classes, excluded_params) def walkTree(groups, excluded_groups=[], excluded_classes=[], excluded_params=[]): """ """ classes = {} params = {} for groupmember in groups.all(): if groupmember.group.name not in excluded_groups: if groupmember.group.groups.all() is not []: more_groups = groupmember.group.groups.all() (newclasses, newparams) = walkTree(more_groups, excluded_groups, excluded_classes ) classes.update(newclasses) params.update(newparams) for groupclass in groupmember.group.classes.all(): if groupclass.classname not in excluded_classes: if not groupclass.classparams: classes[groupclass.classname] = None else: classes[groupclass.classname] = json.loads(groupclass.classparams) for groupparams in groupmember.group.parameters.all(): if groupparams.key not in excluded_params: if not groupparams.value: params[groupparams.key] = None else: params[groupparams.key] = json.loads(groupparams.value) return (classes, params)
unknown
codeparrot/codeparrot-clean
# encoding: utf-8
"""Utilities to enable code objects to be pickled.

Any process that import this module will be able to pickle code objects.
This includes the func_code attribute of any function. Once unpickled, new
functions can be built using new.function(code, globals()).
Eventually we need to automate all of this so that functions themselves can
be pickled.

Reference: A. Tremols, P Cogolo, "Python Cookbook," p 302-305
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import warnings
warnings.warn("ipykernel.codeutil is deprecated since IPykernel 4.3.1. It has moved to ipyparallel.serialize", DeprecationWarning)

import sys
import types
try:
    import copyreg  # Py 3
except ImportError:
    import copy_reg as copyreg  # Py 2


def code_ctor(*args):
    """Rebuild a code object from the positional args produced by reduce_code."""
    return types.CodeType(*args)


def reduce_code(co):
    """Return a ``(ctor, args)`` pair that lets ``co`` be pickled via copyreg.

    The argument tuple must match ``types.CodeType``'s constructor for the
    *running* interpreter, which has grown extra parameters over time:
    ``posonlyargcount`` (3.8+) and ``qualname``/``exceptiontable`` (3.11+).
    The original implementation only handled ``kwonlyargcount``, so pickles
    could not be reconstructed on Python >= 3.8 — fixed here.
    """
    # Base (Python 2) argument order.
    args = [co.co_argcount, co.co_nlocals, co.co_stacksize, co.co_flags,
            co.co_code, co.co_consts, co.co_names, co.co_varnames,
            co.co_filename, co.co_name, co.co_firstlineno,
            # 3.10+ replaced the lnotab with a line table; co_lnotab is a
            # derived view there and may not round-trip, so prefer the
            # native table when available.
            co.co_linetable if sys.version_info >= (3, 10) else co.co_lnotab,
            co.co_freevars, co.co_cellvars]
    if sys.version_info[0] >= 3:
        args.insert(1, co.co_kwonlyargcount)
    if sys.version_info >= (3, 8):
        # posonlyargcount sits between argcount and kwonlyargcount.
        args.insert(1, co.co_posonlyargcount)
    if sys.version_info >= (3, 11):
        # 3.11 signature: ..., name, qualname, firstlineno, linetable,
        # exceptiontable, freevars, cellvars
        args.insert(12, co.co_qualname)
        args.insert(15, co.co_exceptiontable)
    return code_ctor, tuple(args)

copyreg.pickle(types.CodeType, reduce_code)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class account_common_account_report(osv.osv_memory): _name = 'account.common.account.report' _description = 'Account Common Account Report' _inherit = "account.common.report" _columns = { 'display_account': fields.selection([('all','All'), ('movement','With movements'), ('not_zero','With balance is not equal to 0'), ],'Display Accounts', required=True), } _defaults = { 'display_account': 'movement', } def pre_print_report(self, cr, uid, ids, data, context=None): if context is None: context = {} data['form'].update(self.read(cr, uid, ids, ['display_account'], context=context)[0]) return data #vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
unknown
codeparrot/codeparrot-clean
"""distutils.command.install Implements the Distutils 'install' command.""" from distutils import log # This module should be kept compatible with Python 2.1. __revision__ = "$Id: install.py 62788 2008-05-06 22:41:46Z christian.heimes $" import sys, os, string from types import * from distutils.core import Command from distutils.debug import DEBUG from distutils.sysconfig import get_config_vars from distutils.errors import DistutilsPlatformError from distutils.file_util import write_file from distutils.util import convert_path, subst_vars, change_root from distutils.util import get_platform from distutils.errors import DistutilsOptionError from site import USER_BASE from site import USER_SITE if sys.version < "2.2": WINDOWS_SCHEME = { 'purelib': '$base', 'platlib': '$base', 'headers': '$base/Include/$dist_name', 'scripts': '$base/Scripts', 'data' : '$base', } else: WINDOWS_SCHEME = { 'purelib': '$base/Lib/site-packages', 'platlib': '$base/Lib/site-packages', 'headers': '$base/Include/$dist_name', 'scripts': '$base/Scripts', 'data' : '$base', } INSTALL_SCHEMES = { 'unix_prefix': { 'purelib': '$base/lib/python$py_version_short/site-packages', 'platlib': '$platbase/lib/python$py_version_short/site-packages', 'headers': '$base/include/python$py_version_short/$dist_name', 'scripts': '$base/bin', 'data' : '$base', }, 'unix_home': { 'purelib': '$base/lib/python', 'platlib': '$base/lib/python', 'headers': '$base/include/python/$dist_name', 'scripts': '$base/bin', 'data' : '$base', }, 'unix_user': { 'purelib': '$usersite', 'platlib': '$usersite', 'headers': '$userbase/include/python$py_version_short/$dist_name', 'scripts': '$userbase/bin', 'data' : '$userbase', }, 'nt': WINDOWS_SCHEME, 'nt_user': { 'purelib': '$usersite', 'platlib': '$usersite', 'headers': '$userbase/Python$py_version_nodot/Include/$dist_name', 'scripts': '$userbase/Scripts', 'data' : '$userbase', }, 'mac': { 'purelib': '$base/Lib/site-packages', 'platlib': '$base/Lib/site-packages', 'headers': 
'$base/Include/$dist_name', 'scripts': '$base/Scripts', 'data' : '$base', }, 'mac_user': { 'purelib': '$usersite', 'platlib': '$usersite', 'headers': '$userbase/$py_version_short/include/$dist_name', 'scripts': '$userbase/bin', 'data' : '$userbase', }, 'os2': { 'purelib': '$base/Lib/site-packages', 'platlib': '$base/Lib/site-packages', 'headers': '$base/Include/$dist_name', 'scripts': '$base/Scripts', 'data' : '$base', }, 'os2_home': { 'purelib': '$usersite', 'platlib': '$usersite', 'headers': '$userbase/include/python$py_version_short/$dist_name', 'scripts': '$userbase/bin', 'data' : '$userbase', }, } # The keys to an installation scheme; if any new types of files are to be # installed, be sure to add an entry to every installation scheme above, # and to SCHEME_KEYS here. SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data') class install (Command): description = "install everything from build directory" user_options = [ # Select installation scheme and set base director(y|ies) ('prefix=', None, "installation prefix"), ('exec-prefix=', None, "(Unix only) prefix for platform-specific files"), ('home=', None, "(Unix only) home directory to install under"), ('user', None, "install in user site-package '%s'" % USER_SITE), # Or, just set the base director(y|ies) ('install-base=', None, "base installation directory (instead of --prefix or --home)"), ('install-platbase=', None, "base installation directory for platform-specific files " + "(instead of --exec-prefix or --home)"), ('root=', None, "install everything relative to this alternate root directory"), # Or, explicitly set the installation scheme ('install-purelib=', None, "installation directory for pure Python module distributions"), ('install-platlib=', None, "installation directory for non-pure module distributions"), ('install-lib=', None, "installation directory for all module distributions " + "(overrides --install-purelib and --install-platlib)"), ('install-headers=', None, "installation 
directory for C/C++ headers"), ('install-scripts=', None, "installation directory for Python scripts"), ('install-data=', None, "installation directory for data files"), # Byte-compilation options -- see install_lib.py for details, as # these are duplicated from there (but only install_lib does # anything with them). ('compile', 'c', "compile .py to .pyc [default]"), ('no-compile', None, "don't compile .py files"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), # Miscellaneous control options ('force', 'f', "force installation (overwrite any existing files)"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), # Where to install documentation (eventually!) #('doc-format=', None, "format of documentation to generate"), #('install-man=', None, "directory for Unix man pages"), #('install-html=', None, "directory for HTML documentation"), #('install-info=', None, "directory for GNU info files"), ('record=', None, "filename in which to record list of installed files"), ] boolean_options = ['compile', 'force', 'skip-build', 'user'] negative_opt = {'no-compile' : 'compile'} def initialize_options (self): # High-level options: these select both an installation base # and scheme. self.prefix = None self.exec_prefix = None self.home = None self.user = 0 # These select only the installation base; it's up to the user to # specify the installation scheme (currently, that means supplying # the --install-{platlib,purelib,scripts,data} options). self.install_base = None self.install_platbase = None self.root = None # These options are the actual installation directories; if not # supplied by the user, they are filled in using the installation # scheme implied by prefix/exec-prefix/home and the contents of # that installation scheme. 
self.install_purelib = None # for pure module distributions self.install_platlib = None # non-pure (dists w/ extensions) self.install_headers = None # for C/C++ headers self.install_lib = None # set to either purelib or platlib self.install_scripts = None self.install_data = None self.install_userbase = USER_BASE self.install_usersite = USER_SITE self.compile = None self.optimize = None # These two are for putting non-packagized distributions into their # own directory and creating a .pth file if it makes sense. # 'extra_path' comes from the setup file; 'install_path_file' can # be turned off if it makes no sense to install a .pth file. (But # better to install it uselessly than to guess wrong and not # install it when it's necessary and would be used!) Currently, # 'install_path_file' is always true unless some outsider meddles # with it. self.extra_path = None self.install_path_file = 1 # 'force' forces installation, even if target files are not # out-of-date. 'skip_build' skips running the "build" command, # handy if you know it's not necessary. 'warn_dir' (which is *not* # a user option, it's just there so the bdist_* commands can turn # it off) determines whether we warn about installing to a # directory not in sys.path. self.force = 0 self.skip_build = 0 self.warn_dir = 1 # These are only here as a conduit from the 'build' command to the # 'install_*' commands that do the real work. ('build_base' isn't # actually used anywhere, but it might be useful in future.) They # are not user options, because if the user told the install # command where the build directory is, that wouldn't affect the # build command. self.build_base = None self.build_lib = None # Not defined yet because we don't know anything about # documentation yet. 
#self.install_man = None #self.install_html = None #self.install_info = None self.record = None # -- Option finalizing methods ------------------------------------- # (This is rather more involved than for most commands, # because this is where the policy for installing third- # party Python modules on various platforms given a wide # array of user input is decided. Yes, it's quite complex!) def finalize_options (self): # This method (and its pliant slaves, like 'finalize_unix()', # 'finalize_other()', and 'select_scheme()') is where the default # installation directories for modules, extension modules, and # anything else we care to install from a Python module # distribution. Thus, this code makes a pretty important policy # statement about how third-party stuff is added to a Python # installation! Note that the actual work of installation is done # by the relatively simple 'install_*' commands; they just take # their orders from the installation directory options determined # here. # Check for errors/inconsistencies in the options; first, stuff # that's wrong on any platform. if ((self.prefix or self.exec_prefix or self.home) and (self.install_base or self.install_platbase)): raise DistutilsOptionError, \ ("must supply either prefix/exec-prefix/home or " + "install-base/install-platbase -- not both") if self.home and (self.prefix or self.exec_prefix): raise DistutilsOptionError, \ "must supply either home or prefix/exec-prefix -- not both" if self.user and (self.prefix or self.exec_prefix or self.home or self.install_base or self.install_platbase): raise DistutilsOptionError("can't combine user with with prefix/" "exec_prefix/home or install_(plat)base") # Next, stuff that's wrong (or dubious) only on certain platforms. if os.name != "posix": if self.exec_prefix: self.warn("exec-prefix option ignored on this platform") self.exec_prefix = None # Now the interesting logic -- so interesting that we farm it out # to other methods. 
The goal of these methods is to set the final # values for the install_{lib,scripts,data,...} options, using as # input a heady brew of prefix, exec_prefix, home, install_base, # install_platbase, user-supplied versions of # install_{purelib,platlib,lib,scripts,data,...}, and the # INSTALL_SCHEME dictionary above. Phew! self.dump_dirs("pre-finalize_{unix,other}") if os.name == 'posix': self.finalize_unix() else: self.finalize_other() self.dump_dirs("post-finalize_{unix,other}()") # Expand configuration variables, tilde, etc. in self.install_base # and self.install_platbase -- that way, we can use $base or # $platbase in the other installation directories and not worry # about needing recursive variable expansion (shudder). py_version = (string.split(sys.version))[0] (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix') self.config_vars = {'dist_name': self.distribution.get_name(), 'dist_version': self.distribution.get_version(), 'dist_fullname': self.distribution.get_fullname(), 'py_version': py_version, 'py_version_short': py_version[0:3], 'py_version_nodot': py_version[0] + py_version[2], 'sys_prefix': prefix, 'prefix': prefix, 'sys_exec_prefix': exec_prefix, 'exec_prefix': exec_prefix, 'userbase': self.install_userbase, 'usersite': self.install_usersite, } self.expand_basedirs() self.dump_dirs("post-expand_basedirs()") # Now define config vars for the base directories so we can expand # everything else. self.config_vars['base'] = self.install_base self.config_vars['platbase'] = self.install_platbase if DEBUG: from pprint import pprint print "config vars:" pprint(self.config_vars) # Expand "~" and configuration variables in the installation # directories. self.expand_dirs() self.dump_dirs("post-expand_dirs()") # Create directories in the home dir: if self.user: self.create_home_path() # Pick the actual directory to install all modules to: either # install_purelib or install_platlib, depending on whether this # module distribution is pure or not. 
Of course, if the user # already specified install_lib, use their selection. if self.install_lib is None: if self.distribution.ext_modules: # has extensions: non-pure self.install_lib = self.install_platlib else: self.install_lib = self.install_purelib # Convert directories from Unix /-separated syntax to the local # convention. self.convert_paths('lib', 'purelib', 'platlib', 'scripts', 'data', 'headers', 'userbase', 'usersite') # Well, we're not actually fully completely finalized yet: we still # have to deal with 'extra_path', which is the hack for allowing # non-packagized module distributions (hello, Numerical Python!) to # get their own directories. self.handle_extra_path() self.install_libbase = self.install_lib # needed for .pth file self.install_lib = os.path.join(self.install_lib, self.extra_dirs) # If a new root directory was supplied, make all the installation # dirs relative to it. if self.root is not None: self.change_roots('libbase', 'lib', 'purelib', 'platlib', 'scripts', 'data', 'headers') self.dump_dirs("after prepending root") # Find out the build directories, ie. where to install from. self.set_undefined_options('build', ('build_base', 'build_base'), ('build_lib', 'build_lib')) # Punt on doc directories for now -- after all, we're punting on # documentation completely! 
# finalize_options () def dump_dirs (self, msg): if DEBUG: from distutils.fancy_getopt import longopt_xlate print msg + ":" for opt in self.user_options: opt_name = opt[0] if opt_name[-1] == "=": opt_name = opt_name[0:-1] if opt_name in self.negative_opt: opt_name = string.translate(self.negative_opt[opt_name], longopt_xlate) val = not getattr(self, opt_name) else: opt_name = string.translate(opt_name, longopt_xlate) val = getattr(self, opt_name) print " %s: %s" % (opt_name, val) def finalize_unix (self): if self.install_base is not None or self.install_platbase is not None: if ((self.install_lib is None and self.install_purelib is None and self.install_platlib is None) or self.install_headers is None or self.install_scripts is None or self.install_data is None): raise DistutilsOptionError, \ ("install-base or install-platbase supplied, but " "installation scheme is incomplete") return if self.user: if self.install_userbase is None: raise DistutilsPlatformError( "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase self.select_scheme("unix_user") elif self.home is not None: self.install_base = self.install_platbase = self.home self.select_scheme("unix_home") else: if self.prefix is None: if self.exec_prefix is not None: raise DistutilsOptionError, \ "must not supply exec-prefix without prefix" self.prefix = os.path.normpath(sys.prefix) self.exec_prefix = os.path.normpath(sys.exec_prefix) else: if self.exec_prefix is None: self.exec_prefix = self.prefix self.install_base = self.prefix self.install_platbase = self.exec_prefix self.select_scheme("unix_prefix") # finalize_unix () def finalize_other (self): # Windows and Mac OS for now if self.user: if self.install_userbase is None: raise DistutilsPlatformError( "User base directory is not specified") self.install_base = self.install_platbase = self.install_userbase self.select_scheme(os.name + "_user") elif self.home is not None: self.install_base = 
self.install_platbase = self.home self.select_scheme("unix_home") else: if self.prefix is None: self.prefix = os.path.normpath(sys.prefix) self.install_base = self.install_platbase = self.prefix try: self.select_scheme(os.name) except KeyError: raise DistutilsPlatformError, \ "I don't know how to install stuff on '%s'" % os.name # finalize_other () def select_scheme (self, name): # it's the caller's problem if they supply a bad name! scheme = INSTALL_SCHEMES[name] for key in SCHEME_KEYS: attrname = 'install_' + key if getattr(self, attrname) is None: setattr(self, attrname, scheme[key]) def _expand_attrs (self, attrs): for attr in attrs: val = getattr(self, attr) if val is not None: if os.name == 'posix' or os.name == 'nt': val = os.path.expanduser(val) val = subst_vars(val, self.config_vars) setattr(self, attr, val) def expand_basedirs (self): self._expand_attrs(['install_base', 'install_platbase', 'root']) def expand_dirs (self): self._expand_attrs(['install_purelib', 'install_platlib', 'install_lib', 'install_headers', 'install_scripts', 'install_data',]) def convert_paths (self, *names): for name in names: attr = "install_" + name setattr(self, attr, convert_path(getattr(self, attr))) def handle_extra_path (self): if self.extra_path is None: self.extra_path = self.distribution.extra_path if self.extra_path is not None: if type(self.extra_path) is StringType: self.extra_path = string.split(self.extra_path, ',') if len(self.extra_path) == 1: path_file = extra_dirs = self.extra_path[0] elif len(self.extra_path) == 2: (path_file, extra_dirs) = self.extra_path else: raise DistutilsOptionError, \ ("'extra_path' option must be a list, tuple, or " "comma-separated string with 1 or 2 elements") # convert to local form in case Unix notation used (as it # should be in setup scripts) extra_dirs = convert_path(extra_dirs) else: path_file = None extra_dirs = '' # XXX should we warn if path_file and not extra_dirs? 
(in which # case the path file would be harmless but pointless) self.path_file = path_file self.extra_dirs = extra_dirs # handle_extra_path () def change_roots (self, *names): for name in names: attr = "install_" + name setattr(self, attr, change_root(self.root, getattr(self, attr))) def create_home_path(self): """Create directories under ~ """ if not self.user: return home = convert_path(os.path.expanduser("~")) for name, path in self.config_vars.iteritems(): if path.startswith(home) and not os.path.isdir(path): self.debug_print("os.makedirs('%s', 0700)" % path) os.makedirs(path, 0700) # -- Command execution methods ------------------------------------- def run (self): # Obviously have to build before we can install if not self.skip_build: self.run_command('build') # If we built for any other platform, we can't install. build_plat = self.distribution.get_command_obj('build').plat_name # check warn_dir - it is a clue that the 'install' is happening # internally, and not to sys.path, so we don't check the platform # matches what we are running. if self.warn_dir and build_plat != get_platform(): raise DistutilsPlatformError("Can't install when " "cross-compiling") # Run all sub-commands (at least those that need to be run) for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) if self.path_file: self.create_path_file() # write list of installed files, if requested. 
if self.record: outputs = self.get_outputs() if self.root: # strip any package prefix root_len = len(self.root) for counter in xrange(len(outputs)): outputs[counter] = outputs[counter][root_len:] self.execute(write_file, (self.record, outputs), "writing list of installed files to '%s'" % self.record) sys_path = map(os.path.normpath, sys.path) sys_path = map(os.path.normcase, sys_path) install_lib = os.path.normcase(os.path.normpath(self.install_lib)) if (self.warn_dir and not (self.path_file and self.install_path_file) and install_lib not in sys_path): log.debug(("modules installed to '%s', which is not in " "Python's module search path (sys.path) -- " "you'll have to change the search path yourself"), self.install_lib) # run () def create_path_file (self): filename = os.path.join(self.install_libbase, self.path_file + ".pth") if self.install_path_file: self.execute(write_file, (filename, [self.extra_dirs]), "creating %s" % filename) else: self.warn("path file '%s' not created" % filename) # -- Reporting methods --------------------------------------------- def get_outputs (self): # Assemble the outputs of all the sub-commands. 
outputs = [] for cmd_name in self.get_sub_commands(): cmd = self.get_finalized_command(cmd_name) # Add the contents of cmd.get_outputs(), ensuring # that outputs doesn't contain duplicate entries for filename in cmd.get_outputs(): if filename not in outputs: outputs.append(filename) if self.path_file and self.install_path_file: outputs.append(os.path.join(self.install_libbase, self.path_file + ".pth")) return outputs def get_inputs (self): # XXX gee, this looks familiar ;-( inputs = [] for cmd_name in self.get_sub_commands(): cmd = self.get_finalized_command(cmd_name) inputs.extend(cmd.get_inputs()) return inputs # -- Predicates for sub-command list ------------------------------- def has_lib (self): """Return true if the current distribution has any Python modules to install.""" return (self.distribution.has_pure_modules() or self.distribution.has_ext_modules()) def has_headers (self): return self.distribution.has_headers() def has_scripts (self): return self.distribution.has_scripts() def has_data (self): return self.distribution.has_data_files() # 'sub_commands': a list of commands this command might have to run to # get its work done. See cmd.py for more info. sub_commands = [('install_lib', has_lib), ('install_headers', has_headers), ('install_scripts', has_scripts), ('install_data', has_data), ('install_egg_info', lambda self:True), ] # class install
unknown
codeparrot/codeparrot-clean
/* * Copyright 2012-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.boot.testcontainers.lifecycle; import org.testcontainers.lifecycle.Startable; import org.springframework.beans.BeansException; import org.springframework.beans.factory.NoSuchBeanDefinitionException; import org.springframework.beans.factory.config.BeanDefinition; import org.springframework.beans.factory.config.BeanFactoryPostProcessor; import org.springframework.beans.factory.config.ConfigurableListableBeanFactory; import org.springframework.beans.factory.support.AbstractBeanDefinition; import org.springframework.core.Ordered; import org.springframework.core.annotation.Order; /** * {@link BeanFactoryPostProcessor} to prevent {@link AutoCloseable} destruction calls so * that {@link TestcontainersLifecycleBeanPostProcessor} can be smarter about which * containers to close. 
* * @author Phillip Webb * @author Stephane Nicoll * @see TestcontainersLifecycleApplicationContextInitializer */ @Order(Ordered.LOWEST_PRECEDENCE) class TestcontainersLifecycleBeanFactoryPostProcessor implements BeanFactoryPostProcessor { @Override public void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) throws BeansException { for (String beanName : beanFactory.getBeanNamesForType(Startable.class, false, false)) { try { BeanDefinition beanDefinition = beanFactory.getBeanDefinition(beanName); String destroyMethodName = beanDefinition.getDestroyMethodName(); if (destroyMethodName == null || AbstractBeanDefinition.INFER_METHOD.equals(destroyMethodName)) { beanDefinition.setDestroyMethodName(""); } } catch (NoSuchBeanDefinitionException ex) { // Ignore } } } }
java
github
https://github.com/spring-projects/spring-boot
core/spring-boot-testcontainers/src/main/java/org/springframework/boot/testcontainers/lifecycle/TestcontainersLifecycleBeanFactoryPostProcessor.java
/* * Copyright 2014-2024 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ package ktorbuild.internal import org.gradle.accessors.dm.LibrariesForLibs import org.gradle.api.Project import org.gradle.kotlin.dsl.the /** * Accessor to make version catalog available in build-logic. * See: https://github.com/gradle/gradle/issues/15383#issuecomment-779893192 */ internal val Project.libs: LibrariesForLibs get() = rootProject.the<LibrariesForLibs>()
kotlin
github
https://github.com/ktorio/ktor
build-logic/src/main/kotlin/ktorbuild/internal/VersionCatalogs.kt
//// [tests/cases/compiler/collisionArgumentsClassMethod.ts] //// //// [collisionArgumentsClassMethod.ts] class c1 { public foo(i: number, ...arguments) { //arguments is error var arguments: any[]; // no error } public foo1(arguments: number, ...rest) { //arguments is error var arguments = 10; // no error } public fooNoError(arguments: number) { // no error var arguments = 10; // no error } public f4(i: number, ...arguments); // no codegen no error public f4(i: string, ...arguments); // no codegen no error public f4(i: any, ...arguments) { // error var arguments: any[]; // no error } public f41(arguments: number, ...rest); // no codegen no error public f41(arguments: string, ...rest); // no codegen no error public f41(arguments: any, ...rest) { // error var arguments: any; // no error } public f4NoError(arguments: number); // no error public f4NoError(arguments: string); // no error public f4NoError(arguments: any) { // no error var arguments: any; // no error } } declare class c2 { public foo(i: number, ...arguments); // No error - no code gen public foo1(arguments: number, ...rest); // No error - no code gen public fooNoError(arguments: number); // No error - no code gen public f4(i: number, ...arguments); // no codegen no error public f4(i: string, ...arguments); // no codegen no error public f41(arguments: number, ...rest); // no codegen no error public f41(arguments: string, ...rest); // no codegen no error public f4NoError(arguments: number); // no error public f4NoError(arguments: string); // no error } class c3 { public foo(...restParameters) { var arguments = 10; // no error } public fooNoError() { var arguments = 10; // no error } } //// [collisionArgumentsClassMethod.js] "use strict"; var c1 = /** @class */ (function () { function c1() { } c1.prototype.foo = function (i) { var arguments = []; for (var _i = 1; _i < arguments.length; _i++) { arguments[_i - 1] = arguments[_i]; } var arguments; // no error }; c1.prototype.foo1 = function (arguments) { var 
rest = []; for (var _i = 1; _i < arguments.length; _i++) { rest[_i - 1] = arguments[_i]; } var arguments = 10; // no error }; c1.prototype.fooNoError = function (arguments) { var arguments = 10; // no error }; c1.prototype.f4 = function (i) { var arguments = []; for (var _i = 1; _i < arguments.length; _i++) { arguments[_i - 1] = arguments[_i]; } var arguments; // no error }; c1.prototype.f41 = function (arguments) { var rest = []; for (var _i = 1; _i < arguments.length; _i++) { rest[_i - 1] = arguments[_i]; } var arguments; // no error }; c1.prototype.f4NoError = function (arguments) { var arguments; // no error }; return c1; }()); var c3 = /** @class */ (function () { function c3() { } c3.prototype.foo = function () { var restParameters = []; for (var _i = 0; _i < arguments.length; _i++) { restParameters[_i] = arguments[_i]; } var arguments = 10; // no error }; c3.prototype.fooNoError = function () { var arguments = 10; // no error }; return c3; }());
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/collisionArgumentsClassMethod(target=es5).js
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for SpaceToBatch and BatchToSpace ops.""" import numpy as np from tensorflow.compiler.tests import xla_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.platform import test def space_to_batch_direct(input_array, block_shape, paddings): """Direct Python implementation of space-to-batch conversion. This is used for tests only. Args: input_array: N-D array block_shape: 1-D array of shape [num_block_dims]. paddings: 2-D array of shape [num_block_dims, 2]. Returns: Converted tensor. 
""" input_array = np.array(input_array) block_shape = np.array(block_shape) num_block_dims = len(block_shape) paddings = np.array(paddings).reshape((len(block_shape), 2)) padded = np.pad(input_array, pad_width=([[0, 0]] + list(paddings) + [[0, 0]] * (input_array.ndim - 1 - num_block_dims)), mode="constant") reshaped_padded_shape = [input_array.shape[0]] output_shape = [input_array.shape[0] * np.prod(block_shape)] for block_dim, block_shape_value in enumerate(block_shape): reduced_size = padded.shape[block_dim + 1] // block_shape_value reshaped_padded_shape.append(reduced_size) output_shape.append(reduced_size) reshaped_padded_shape.append(block_shape_value) reshaped_padded_shape.extend(input_array.shape[num_block_dims + 1:]) output_shape.extend(input_array.shape[num_block_dims + 1:]) reshaped_padded = padded.reshape(reshaped_padded_shape) permuted_reshaped_padded = np.transpose(reshaped_padded, ( list(np.arange(num_block_dims) * 2 + 2) + [0] + list(np.arange(num_block_dims) * 2 + 1) + list( np.arange(input_array.ndim - num_block_dims - 1) + 1 + num_block_dims * 2))) return permuted_reshaped_padded.reshape(output_shape) class SpaceToBatchTest(xla_test.XLATestCase): """Tests input-output pairs for the SpaceToBatch and BatchToSpace ops.""" def _testPad(self, inputs, paddings, block_size, outputs): with self.session() as sess, self.test_scope(): for dtype in self.float_types: # outputs = space_to_batch(inputs) placeholder = array_ops.placeholder(dtype) x_tf = gen_array_ops.space_to_batch( placeholder, paddings, block_size=block_size) self.assertAllEqual(sess.run(x_tf, {placeholder: inputs}), outputs) # inputs = batch_to_space(outputs) x_tf = gen_array_ops.batch_to_space( placeholder, paddings, block_size=block_size) self.assertAllEqual(sess.run(x_tf, {placeholder: outputs}), inputs) def _testOne(self, inputs, block_size, outputs): paddings = np.zeros((2, 2), dtype=np.int32) self._testPad(inputs, paddings, block_size, outputs) # [1, 2, 2, 1] <-> [4, 1, 1, 1] def 
testSmallInput2x2(self): x_np = [[[[1], [2]], [[3], [4]]]] block_size = 2 x_out = [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] self._testOne(x_np, block_size, x_out) # [1, 2, 2, 1] <-> [1, 3, 3, 1] (padding) <-> [9, 1, 1, 1] def testSmallInput2x2Pad1x0(self): x_np = [[[[1], [2]], [[3], [4]]]] paddings = np.array([[1, 0], [1, 0]], dtype=np.int32) block_size = 3 x_out = [[[[0]]], [[[0]]], [[[0]]], [[[0]]], [[[1]]], [[[2]]], [[[0]]], [[[3]]], [[[4]]]] self._testPad(x_np, paddings, block_size, x_out) # Test with depth larger than 1. # [1, 2, 2, 3] <-> [4, 1, 1, 3] def testDepthInput2x2(self): x_np = [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] block_size = 2 x_out = [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] self._testOne(x_np, block_size, x_out) # Test for larger input dimensions. # [1, 4, 4, 1] <-> [4, 2, 2, 1] def testLargerInput2x2(self): x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]]] block_size = 2 x_out = [[[[1], [3]], [[9], [11]]], [[[2], [4]], [[10], [12]]], [[[5], [7]], [[13], [15]]], [[[6], [8]], [[14], [16]]]] self._testOne(x_np, block_size, x_out) # Test with batch larger than 1. # [2, 2, 4, 1] <-> [8, 1, 2, 1] def testBatchInput2x2(self): x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]]], [[[9], [10], [11], [12]], [[13], [14], [15], [16]]]] block_size = 2 x_out = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] self._testOne(x_np, block_size, x_out) # Tests for larger input spatial dimensions AND batch larger than 1, to ensure # that elements are correctly laid out spatially and properly interleaved # along the batch dimension. 
# [2, 4, 4, 1] <-> [8, 2, 2, 1] def testLargerInputBatch2x2(self): x_np = [[[[1], [2], [3], [4]], [[5], [6], [7], [8]], [[9], [10], [11], [12]], [[13], [14], [15], [16]]], [[[17], [18], [19], [20]], [[21], [22], [23], [24]], [[25], [26], [27], [28]], [[29], [30], [31], [32]]]] x_out = [[[[1], [3]], [[9], [11]]], [[[17], [19]], [[25], [27]]], [[[2], [4]], [[10], [12]]], [[[18], [20]], [[26], [28]]], [[[5], [7]], [[13], [15]]], [[[21], [23]], [[29], [31]]], [[[6], [8]], [[14], [16]]], [[[22], [24]], [[30], [32]]]] block_size = 2 self._testOne(x_np, block_size, x_out) class SpaceToBatchNDErrorHandlingTest(xla_test.XLATestCase): def testInvalidBlockShape(self): with self.assertRaisesRegex(ValueError, "block_shape must be positive"): with self.session() as sess, self.test_scope(): tf_in = constant_op.constant( -3.5e+35, shape=[10, 20, 20], dtype=dtypes.float32) block_shape = constant_op.constant(-10, shape=[2], dtype=dtypes.int64) paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32) sess.run(array_ops.space_to_batch_nd(tf_in, block_shape, paddings)) def testOutputSizeOutOfBounds(self): with self.assertRaisesRegex(ValueError, "Negative.* dimension size caused by overflow"): with self.session() as sess, self.test_scope(): tf_in = constant_op.constant( -3.5e+35, shape=[10, 19, 22], dtype=dtypes.float32) block_shape = constant_op.constant( 1879048192, shape=[2], dtype=dtypes.int64) paddings = constant_op.constant(0, shape=[2, 2], dtype=dtypes.int32) sess.run(array_ops.space_to_batch_nd(tf_in, block_shape, paddings)) class SpaceToBatchNDTest(xla_test.XLATestCase): """Tests input-output pairs for the SpaceToBatchND and BatchToSpaceND ops.""" def _testPad(self, inputs, block_shape, paddings, outputs): block_shape = np.array(block_shape) paddings = np.array(paddings).reshape((len(block_shape), 2)) with self.session() as sess, self.test_scope(): for dtype in self.float_types: # TODO(b/68813416): Skip bfloat16's as the input type for direct is # float32 and 
results in a mismatch, while making testDirect provide the # correctly typed input results in 'no fill-function for data-type' # error. if dtype == dtypes.bfloat16.as_numpy_dtype: continue if dtype == np.float16: actual_inputs = np.array(inputs).astype(dtype) actual_paddings = np.array(paddings).astype(dtype) expected_outputs = np.array(outputs).astype(dtype) else: actual_inputs = inputs actual_paddings = paddings expected_outputs = outputs placeholder = array_ops.placeholder(dtype) # outputs = space_to_batch(inputs) x_tf = array_ops.space_to_batch_nd(placeholder, block_shape, actual_paddings) self.assertAllEqual( sess.run(x_tf, {placeholder: actual_inputs}), expected_outputs) # inputs = batch_to_space(outputs) placeholder = array_ops.placeholder(dtype) x_tf = array_ops.batch_to_space_nd(placeholder, block_shape, actual_paddings) self.assertAllEqual( sess.run(x_tf, {placeholder: expected_outputs}), actual_inputs) def _testDirect(self, input_shape, block_shape, paddings): inputs = np.arange(np.prod(input_shape), dtype=np.float32) inputs = inputs.reshape(input_shape) self._testPad(inputs, block_shape, paddings, space_to_batch_direct(inputs, block_shape, paddings)) def testZeroBlockDimsZeroRemainingDims(self): self._testPad( inputs=[1, 2], block_shape=[], paddings=[], outputs=[1, 2],) def testZeroBlockDimsOneRemainingDim(self): self._testPad( inputs=[[1, 2], [3, 4]], block_shape=[], paddings=[], outputs=[[1, 2], [3, 4]]) # Same thing, but with a no-op block dim. self._testPad( inputs=[[1, 2], [3, 4]], block_shape=[1], paddings=[[0, 0]], outputs=[[1, 2], [3, 4]]) def testZeroBlockDimsTwoRemainingDims(self): self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[], paddings=[], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # Same thing, but with a no-op block dim. self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[1], paddings=[[0, 0]], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # Same thing, but with two no-op block dims. 
self._testPad( inputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], block_shape=[1, 1], paddings=[[0, 0], [0, 0]], outputs=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) def testOneBlockDimZeroRemainingDims(self): self._testPad( inputs=[[1, 2, 3], [4, 5, 6]], block_shape=[2], paddings=[1, 0], outputs=[[0, 2], [0, 5], [1, 3], [4, 6]]) def testOneBlockDimOneRemainingDim(self): self._testPad( inputs=[[[1, 11], [2, 21], [3, 31]], [[4, 41], [5, 51], [6, 61]]], block_shape=[2], paddings=[1, 0], outputs=[[[0, 0], [2, 21]], [[0, 0], [5, 51]], [[1, 11], [3, 31]], [[4, 41], [6, 61]]]) def testDirect0(self): # Test with zero-size remaining dimension. self._testDirect( input_shape=[3, 1, 2, 0], block_shape=[3], paddings=[[0, 2]]) def testDirect1(self): # Test with zero-size blocked dimension. self._testDirect( input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[0, 0]]) def testDirect2(self): # Test with padding up from zero size. self._testDirect( input_shape=[3, 0, 2, 5], block_shape=[3], paddings=[[1, 2]]) def testDirect3(self): self._testDirect( input_shape=[3, 3, 4, 5, 2], block_shape=[3, 4, 2], paddings=[[1, 2], [0, 0], [3, 0]]) def testDirect4(self): self._testDirect( input_shape=[3, 3, 4, 5, 2], block_shape=[3, 4, 2, 2], paddings=[[1, 2], [0, 0], [3, 0], [0, 0]]) def testDirect5(self): self._testDirect( input_shape=[3, 2, 2, 3, 4, 5, 2, 5], block_shape=[1, 1, 3, 4, 2, 2], paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0]]) def testDirect6(self): self._testDirect( input_shape=[3, 2, 2, 3, 4, 5, 2, 5], block_shape=[1, 1, 3, 4, 2, 2, 1], paddings=[[0, 0], [0, 0], [1, 2], [0, 0], [3, 0], [0, 0], [0, 0]]) if __name__ == "__main__": test.main()
python
github
https://github.com/tensorflow/tensorflow
tensorflow/compiler/tests/spacetobatch_op_test.py
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import socket
import subprocess

from charms import layer
from charms.reactive import when
from charmhelpers.core import hookenv
from charms.layer import nginx

from subprocess import Popen
from subprocess import PIPE
from subprocess import STDOUT


@when('certificates.available')
def request_server_certificates(tls):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        hookenv.unit_private_ip(),
        socket.gethostname(),
    ]
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)


@when('nginx.available', 'apiserver.available',
      'certificates.server.cert.available')
def install_load_balancer(apiserver, tls):
    ''' Create the default vhost template for load balancing.

    Configures the nginx 'apilb' site with the certificate and key issued by
    the tls layer, opens the configured port, and marks the unit active.
    Does nothing until both the server certificate and key exist on disk.
    '''
    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    server_cert_path = layer_options.get('server_certificate_path')
    cert_exists = server_cert_path and os.path.isfile(server_cert_path)
    server_key_path = layer_options.get('server_key_path')
    key_exists = server_key_path and os.path.isfile(server_key_path)
    # Do both the the key and certificate exist?
    if cert_exists and key_exists:
        # At this point the cert and key exist, and they are owned by root.
        chown = ['chown', 'www-data:www-data', server_cert_path]
        # Change the owner to www-data so the nginx process can read the cert.
        subprocess.call(chown)
        chown = ['chown', 'www-data:www-data', server_key_path]
        # Change the owner to www-data so the nginx process can read the key.
        subprocess.call(chown)

        hookenv.open_port(hookenv.config('port'))
        services = apiserver.services()
        nginx.configure_site(
            'apilb',
            'apilb.conf',
            server_name='_',
            services=services,
            port=hookenv.config('port'),
            server_certificate=server_cert_path,
            server_key=server_key_path,
        )
        hookenv.status_set('active', 'Loadbalancer ready.')


@when('nginx.available')
def set_nginx_version():
    ''' Surface the currently deployed version of nginx to Juju '''
    cmd = 'nginx -v'
    # nginx prints its version banner to stderr, so merge stderr into stdout.
    p = Popen(cmd, shell=True,
              stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    raw = p.stdout.read()
    # Reap the child so we do not leave a zombie process behind.
    p.wait()
    # The version comes back as:
    # nginx version: nginx/1.10.0 (Ubuntu)
    version = raw.split(b'/')[-1].split(b' ')[0]
    # raw is bytes; application_version_set expects text, so decode before
    # handing it to Juju (passing bytes fails on Python 3).
    hookenv.application_version_set(version.rstrip().decode('utf-8'))


@when('website.available')
def provide_application_details(website):
    ''' re-use the nginx layer website relation to relay the hostname/port
    to any consuming kubernetes-workers, or other units that require the
    kubernetes API '''
    website.configure(port=hookenv.config('port'))


@when('loadbalancer.available')
def provide_loadbalancing(loadbalancer):
    '''Send the public address and port to the public-address interface, so
    the subordinates can get the public address of this loadbalancer.'''
    loadbalancer.set_address_port(hookenv.unit_get('public-address'),
                                  hookenv.config('port'))
unknown
codeparrot/codeparrot-clean
import os
import tarfile
import zipfile

from charmhelpers.core import (
    host,
    hookenv,
)


class ArchiveError(Exception):
    """Raised when no extraction handler exists for an archive."""
    pass


def get_archive_handler(archive_name):
    """Return the extraction function for ``archive_name``, or ``None``.

    If the path exists on disk, the file contents are sniffed; otherwise the
    decision falls back to the file extension.
    """
    if os.path.isfile(archive_name):
        if tarfile.is_tarfile(archive_name):
            return extract_tarfile
        elif zipfile.is_zipfile(archive_name):
            return extract_zipfile
    else:
        # look at the file name
        # NOTE: '.tar.bz2' previously lacked its leading dot, which also
        # matched unrelated names such as 'footar.bz2'.
        for ext in ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz2', '.tbz'):
            if archive_name.endswith(ext):
                return extract_tarfile
        for ext in ('.zip', '.jar'):
            if archive_name.endswith(ext):
                return extract_zipfile


def archive_dest_default(archive_name):
    """Return the default extraction directory under the charm's archives/."""
    archive_file = os.path.basename(archive_name)
    return os.path.join(hookenv.charm_dir(), "archives", archive_file)


def extract(archive_name, destpath=None):
    """Unpack ``archive_name`` into ``destpath`` and return the path used.

    Creates ``destpath`` (default: :func:`archive_dest_default`) if needed.

    :raises ArchiveError: if the archive type is not recognized.
    """
    handler = get_archive_handler(archive_name)
    if handler:
        if not destpath:
            destpath = archive_dest_default(archive_name)
        if not os.path.isdir(destpath):
            host.mkdir(destpath)
        handler(archive_name, destpath)
        return destpath
    else:
        # include the filename so the failure is diagnosable from logs
        raise ArchiveError("No handler for archive: %s" % archive_name)


def extract_tarfile(archive_name, destpath):
    "Unpack a tar archive, optionally compressed"
    # context manager ensures the archive handle is closed even on error
    with tarfile.open(archive_name) as archive:
        archive.extractall(destpath)


def extract_zipfile(archive_name, destpath):
    "Unpack a zip file"
    with zipfile.ZipFile(archive_name) as archive:
        archive.extractall(destpath)
unknown
codeparrot/codeparrot-clean
from __future__ import division, absolute_import, print_function import timeit from functools import reduce import numpy as np from numpy import float_ import numpy.core.fromnumeric as fromnumeric from numpy.testing import build_err_msg # Fixme: this does not look right. np.seterr(all='ignore') pi = np.pi class ModuleTester(object): def __init__(self, module): self.module = module self.allequal = module.allequal self.arange = module.arange self.array = module.array self.concatenate = module.concatenate self.count = module.count self.equal = module.equal self.filled = module.filled self.getmask = module.getmask self.getmaskarray = module.getmaskarray self.id = id self.inner = module.inner self.make_mask = module.make_mask self.masked = module.masked self.masked_array = module.masked_array self.masked_values = module.masked_values self.mask_or = module.mask_or self.nomask = module.nomask self.ones = module.ones self.outer = module.outer self.repeat = module.repeat self.resize = module.resize self.sort = module.sort self.take = module.take self.transpose = module.transpose self.zeros = module.zeros self.MaskType = module.MaskType try: self.umath = module.umath except AttributeError: self.umath = module.core.umath self.testnames = [] def assert_array_compare(self, comparison, x, y, err_msg='', header='', fill_value=True): """ Assert that a comparison of two masked arrays is satisfied elementwise. 
""" xf = self.filled(x) yf = self.filled(y) m = self.mask_or(self.getmask(x), self.getmask(y)) x = self.filled(self.masked_array(xf, mask=m), fill_value) y = self.filled(self.masked_array(yf, mask=m), fill_value) if (x.dtype.char != "O"): x = x.astype(float_) if isinstance(x, np.ndarray) and x.size > 1: x[np.isnan(x)] = 0 elif np.isnan(x): x = 0 if (y.dtype.char != "O"): y = y.astype(float_) if isinstance(y, np.ndarray) and y.size > 1: y[np.isnan(y)] = 0 elif np.isnan(y): y = 0 try: cond = (x.shape == () or y.shape == ()) or x.shape == y.shape if not cond: msg = build_err_msg([x, y], err_msg + '\n(shapes %s, %s mismatch)' % (x.shape, y.shape), header=header, names=('x', 'y')) assert cond, msg val = comparison(x, y) if m is not self.nomask and fill_value: val = self.masked_array(val, mask=m) if isinstance(val, bool): cond = val reduced = [0] else: reduced = val.ravel() cond = reduced.all() reduced = reduced.tolist() if not cond: match = 100-100.0*reduced.count(1)/len(reduced) msg = build_err_msg([x, y], err_msg + '\n(mismatch %s%%)' % (match,), header=header, names=('x', 'y')) assert cond, msg except ValueError: msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y')) raise ValueError(msg) def assert_array_equal(self, x, y, err_msg=''): """ Checks the elementwise equality of two masked arrays. 
""" self.assert_array_compare(self.equal, x, y, err_msg=err_msg, header='Arrays are not equal') def test_0(self): """ Tests creation """ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] xm = self.masked_array(x, mask=m) xm[0] def test_1(self): """ Tests creation """ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] xm = self.masked_array(x, mask=m1) ym = self.masked_array(y, mask=m2) xf = np.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) assert((xm-ym).filled(0).any()) s = x.shape assert(xm.size == reduce(lambda x, y:x*y, s)) assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) for s in [(4, 3), (6, 2)]: x.shape = s y.shape = s xm.shape = s ym.shape = s xf.shape = s assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) def test_2(self): """ Tests conversions and indexing. """ x1 = np.array([1, 2, 4, 3]) x2 = self.array(x1, mask=[1, 0, 0, 0]) x3 = self.array(x1, mask=[0, 1, 0, 1]) x4 = self.array(x1) # test conversion to strings, no errors str(x2) repr(x2) # tests of indexing assert type(x2[1]) is type(x1[1]) assert x1[1] == x2[1] x1[2] = 9 x2[2] = 9 self.assert_array_equal(x1, x2) x1[1:3] = 99 x2[1:3] = 99 x2[1] = self.masked x2[1:3] = self.masked x2[:] = x1 x2[1] = self.masked x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0]) x1 = np.arange(5)*1.0 x2 = self.masked_values(x1, 3.0) x1 = self.array([1, 'hello', 2, 3], object) x2 = np.array([1, 'hello', 2, 3], object) # check that no error occurs. 
x1[1] x2[1] assert x1[1:1].shape == (0,) # Tests copy-size n = [0, 0, 1, 0, 0] m = self.make_mask(n) m2 = self.make_mask(m) assert(m is m2) m3 = self.make_mask(m, copy=1) assert(m is not m3) def test_3(self): """ Tests resize/repeat """ x4 = self.arange(4) x4[2] = self.masked y4 = self.resize(x4, (8,)) assert self.allequal(self.concatenate([x4, x4]), y4) assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) y5 = self.repeat(x4, (2, 2, 2, 2), axis=0) self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) y6 = self.repeat(x4, 2, axis=0) assert self.allequal(y5, y6) y7 = x4.repeat((2, 2, 2, 2), axis=0) assert self.allequal(y5, y7) y8 = x4.repeat(2, 0) assert self.allequal(y5, y8) def test_4(self): """ Test of take, transpose, inner, outer products. """ x = self.arange(24) y = np.arange(24) x[5:6] = self.masked x = x.reshape(2, 3, 4) y = y.reshape(2, 3, 4) assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1))) assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1)) assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)), self.inner(x, y)) assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)), self.outer(x, y)) y = self.array(['abc', 1, 'def', 2, 3], object) y[2] = self.masked t = self.take(y, [0, 3, 4]) assert t[0] == 'abc' assert t[1] == 2 assert t[2] == 3 def test_5(self): """ Tests inplace w/ scalar """ x = self.arange(10) y = self.arange(10) xm = self.arange(10) xm[2] = self.masked x += 1 assert self.allequal(x, y+1) xm += 1 assert self.allequal(xm, y+1) x = self.arange(10) xm = self.arange(10) xm[2] = self.masked x -= 1 assert self.allequal(x, y-1) xm -= 1 assert self.allequal(xm, y-1) x = self.arange(10)*1.0 xm = self.arange(10)*1.0 xm[2] = self.masked x *= 2.0 assert self.allequal(x, y*2) xm *= 2.0 assert self.allequal(xm, y*2) x = self.arange(10)*2 xm = self.arange(10)*2 xm[2] = self.masked x /= 2 assert self.allequal(x, y) xm /= 2 assert self.allequal(xm, y) x = self.arange(10)*1.0 
xm = self.arange(10)*1.0 xm[2] = self.masked x /= 2.0 assert self.allequal(x, y/2.0) xm /= self.arange(10) self.assert_array_equal(xm, self.ones((10,))) x = self.arange(10).astype(float_) xm = self.arange(10) xm[2] = self.masked x += 1. assert self.allequal(x, y + 1.) def test_6(self): """ Tests inplace w/ array """ x = self.arange(10, dtype=float_) y = self.arange(10) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x += a xm += a assert self.allequal(x, y+a) assert self.allequal(xm, y+a) assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x -= a xm -= a assert self.allequal(x, y-a) assert self.allequal(xm, y-a) assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x *= a xm *= a assert self.allequal(x, y*a) assert self.allequal(xm, y*a) assert self.allequal(xm.mask, self.mask_or(m, a.mask)) x = self.arange(10, dtype=float_) xm = self.arange(10, dtype=float_) xm[2] = self.masked m = xm.mask a = self.arange(10, dtype=float_) a[-1] = self.masked x /= a xm /= a def test_7(self): "Tests ufunc" d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6), self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),) for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', # 'sin', 'cos', 'tan', # 'arcsin', 'arccos', 'arctan', # 'sinh', 'cosh', 'tanh', # 'arcsinh', # 'arccosh', # 'arctanh', # 'absolute', 'fabs', 'negative', # # 'nonzero', 'around', # 'floor', 'ceil', # # 'sometrue', 'alltrue', # 'logical_not', # 'add', 'subtract', 'multiply', # 'divide', 'true_divide', 'floor_divide', # 'remainder', 'fmod', 'hypot', 'arctan2', # 'equal', 'not_equal', 'less_equal', 'greater_equal', # 'less', 'greater', # 
'logical_and', 'logical_or', 'logical_xor', ]: try: uf = getattr(self.umath, f) except AttributeError: uf = getattr(fromnumeric, f) mf = getattr(self.module, f) args = d[:uf.nin] ur = uf(*args) mr = mf(*args) self.assert_array_equal(ur.filled(0), mr.filled(0), f) self.assert_array_equal(ur._mask, mr._mask) def test_99(self): # test average ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) self.assert_array_equal(2.0, self.average(ott, axis=0)) self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.])) result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1) self.assert_array_equal(2.0, result) assert(wts == 4.0) ott[:] = self.masked assert(self.average(ott, axis=0) is self.masked) ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) ott = ott.reshape(2, 2) ott[:, 1] = self.masked self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0]) assert(self.average(ott, axis=1)[0] is self.masked) self.assert_array_equal([2., 0.], self.average(ott, axis=0)) result, wts = self.average(ott, axis=0, returned=1) self.assert_array_equal(wts, [1., 0.]) w1 = [0, 1, 1, 1, 1, 0] w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = self.arange(6) self.assert_array_equal(self.average(x, axis=0), 2.5) self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5) y = self.array([self.arange(6), 2.0*self.arange(6)]) self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.) self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.) self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) self.assert_array_equal(self.average(y, None, weights=w2), 20./6.) 
self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.]) self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0]) m1 = self.zeros(6) m2 = [0, 0, 1, 1, 0, 0] m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] m4 = self.ones(6) m5 = [0, 1, 1, 1, 1, 1] self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5) self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5) self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0) self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0) z = self.masked_array(y, m3) self.assert_array_equal(self.average(z, None), 20./6.) self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0]) self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0]) def test_A(self): x = self.arange(24) x[5:6] = self.masked x = x.reshape(2, 3, 4) if __name__ == '__main__': setup_base = ("from __main__ import ModuleTester \n" "import numpy\n" "tester = ModuleTester(module)\n") setup_cur = "import numpy.ma.core as module\n" + setup_base (nrepeat, nloop) = (10, 10) if 1: for i in range(1, 8): func = 'tester.test_%i()' % i cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) cur = np.sort(cur) print("#%i" % i + 50*'.') print(eval("ModuleTester.test_%i.__doc__" % i)) print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import random
import time

# Note: pyauto_functional must come before pyauto.
import pyauto_functional
import pyauto
import webrtc_test_base


class WebrtcApprtcCallTest(webrtc_test_base.WebrtcTestBase):
  """Tests calling apprtc.appspot.com and setting up a call.

  Prerequisites: This test case must run on a machine with a webcam, either
  fake or real, and with some kind of audio device. The machine must have
  access to the public Internet.

  This should be considered an integration test: test failures could mean
  that the AppRTC reference is broken, that WebRTC is broken, or both.
  """

  def tearDown(self):
    pyauto.PyUITest.tearDown(self)
    # Fail the test if the browser logged any crash or critical error.
    self.assertEquals('', self.CheckErrorsAndCrashes(),
                      'Chrome crashed or hit a critical error during test.')

  def testApprtcLoopbackCall(self):
    # Loopback mode needs a single tab only: accept the one media infobar
    # and wait for the call to come up.
    self.NavigateToURL('http://apprtc.appspot.com/?debug=loopback')
    self.WaitForInfobarCount(1, tab_index=0)
    self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=0)
    self._WaitForCallEstablishment(tab_index=0)

  def testApprtcTabToTabCall(self):
    # Randomize the call session id. If we would use the same id we would risk
    # getting problems with hung calls and lingering state in AppRTC.
    call_id = 'pyauto%d' % random.randint(0, 65536)
    room_url = 'http://apprtc.appspot.com/?r=%s' % call_id
    self.NavigateToURL(room_url)
    self.AppendTab(pyauto.GURL(room_url))

    for tab in (0, 1):
      self.WaitForInfobarCount(1, tab_index=tab)

    self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=0)
    # TODO(phoglund): workaround for
    # https://code.google.com/p/webrtc/issues/detail?id=1742
    time.sleep(1)
    self.PerformActionOnInfobar('accept', infobar_index=0, tab_index=1)

    for tab in (0, 1):
      self._WaitForCallEstablishment(tab_index=tab)

  def _WaitForCallEstablishment(self, tab_index):
    # AppRTC will set opacity to 1 for remote video when the call is up.
    def _RemoteVideoOpacity():
      return self.GetDOMValue('remoteVideo.style.opacity',
                              tab_index=tab_index)

    established = self.WaitUntil(function=_RemoteVideoOpacity,
                                 expect_retval='1')
    self.assertTrue(established,
                    msg=('Timed out while waiting for '
                         'remoteVideo.style.opacity to return 1.'))


if __name__ == '__main__':
  pyauto_functional.Main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields


class TestMassMailing(osv.TransientModel):
    """Wizard that sends a mass-mailing to a hand-typed list of addresses
    so the campaign can be proof-read before the real send."""
    _name = 'mail.mass_mailing.test'
    _description = 'Sample Mail Wizard'

    _columns = {
        'email_to': fields.char('Recipients', required=True,
                                help='Comma-separated list of email addresses.'),
        'mass_mailing_id': fields.many2one('mail.mass_mailing', 'Mailing', required=True),
    }

    _defaults = {
        'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
    }

    def send_mail_test(self, cr, uid, ids, context=None):
        """Send one test copy of the mailing to each address in ``email_to``
        and flag the mailing as tested.

        Each outgoing message gets the mailing body with the unsubscribe
        footer appended, mirroring what recipients of the real campaign see.
        """
        Mail = self.pool['mail.mail']
        for wizard in self.browse(cr, uid, ids, context=context):
            mailing = wizard.mass_mailing_id
            test_emails = tools.email_split(wizard.email_to)
            mail_ids = []
            for test_mail in test_emails:
                # Body is created empty and filled in below, once the mail
                # record exists and its unsubscribe URL can be generated.
                mail_values = {
                    'email_from': mailing.email_from,
                    'reply_to': mailing.reply_to,
                    'email_to': test_mail,
                    'subject': mailing.name,
                    'body_html': '',
                    'notification': True,
                    'mailing_id': mailing.id,
                }
                mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
                unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
                body = tools.append_content_to_html(mailing.body_html, unsubscribe_url, plaintext=False, container_tag='p')
                # BUGFIX: write the composed body (with unsubscribe footer),
                # not the raw mailing.body_html which discarded it.
                Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
                mail_ids.append(mail_mail_obj.id)
            Mail.send(cr, uid, mail_ids, context=context)
            self.pool['mail.mass_mailing'].write(cr, uid, [mailing.id], {'state': 'test'}, context=context)
        return True
unknown
codeparrot/codeparrot-clean
# Expired Certificates and Configuration for Testing This has a valid certificate authority in [ca](./ca) and an invalid server certificate in [server](./server). This can all be regenerated with: ``` make clean make all ```
unknown
github
https://github.com/psf/requests
tests/certs/expired/README.md
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import re import operator as py_operator from distutils.version import LooseVersion, StrictVersion from ansible import errors def failed(*a, **kw): ''' Test if task result yields failed ''' item = a[0] if type(item) != dict: raise errors.AnsibleFilterError("|failed expects a dictionary") rc = item.get('rc',0) failed = item.get('failed',False) if rc != 0 or failed: return True else: return False def success(*a, **kw): ''' Test if task result yields success ''' return not failed(*a, **kw) def changed(*a, **kw): ''' Test if task result yields changed ''' item = a[0] if type(item) != dict: raise errors.AnsibleFilterError("|changed expects a dictionary") if not 'changed' in item: changed = False if ('results' in item # some modules return a 'results' key and type(item['results']) == list and type(item['results'][0]) == dict): for result in item['results']: changed = changed or result.get('changed', False) else: changed = item.get('changed', False) return changed def skipped(*a, **kw): ''' Test if task result yields skipped ''' item = a[0] if type(item) != dict: raise errors.AnsibleFilterError("|skipped expects a dictionary") skipped = item.get('skipped', False) return 
skipped def regex(value='', pattern='', ignorecase=False, multiline=False, match_type='search'): ''' Expose `re` as a boolean filter using the `search` method by default. This is likely only useful for `search` and `match` which already have their own filters. ''' flags = 0 if ignorecase: flags |= re.I if multiline: flags |= re.M _re = re.compile(pattern, flags=flags) _bool = __builtins__.get('bool') return _bool(getattr(_re, match_type, 'search')(value)) def match(value, pattern='', ignorecase=False, multiline=False): ''' Perform a `re.match` returning a boolean ''' return regex(value, pattern, ignorecase, multiline, 'match') def search(value, pattern='', ignorecase=False, multiline=False): ''' Perform a `re.search` returning a boolean ''' return regex(value, pattern, ignorecase, multiline, 'search') def version_compare(value, version, operator='eq', strict=False): ''' Perform a version comparison on a value ''' op_map = { '==': 'eq', '=': 'eq', 'eq': 'eq', '<': 'lt', 'lt': 'lt', '<=': 'le', 'le': 'le', '>': 'gt', 'gt': 'gt', '>=': 'ge', 'ge': 'ge', '!=': 'ne', '<>': 'ne', 'ne': 'ne' } if strict: Version = StrictVersion else: Version = LooseVersion if operator in op_map: operator = op_map[operator] else: raise errors.AnsibleFilterError('Invalid operator type') try: method = getattr(py_operator, operator) return method(Version(str(value)), Version(str(version))) except Exception as e: raise errors.AnsibleFilterError('Version comparison: %s' % e) class TestModule(object): ''' Ansible core jinja2 tests ''' def tests(self): return { # failure testing 'failed' : failed, 'succeeded' : success, # changed testing 'changed' : changed, # skip testing 'skipped' : skipped, # regex 'match': match, 'search': search, 'regex': regex, # version comparison 'version_compare': version_compare, }
unknown
codeparrot/codeparrot-clean
import re

from util import hook, http, formatting

search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000"


@hook.command
def snopes(inp):
    """snopes <topic> -- Searches snopes for an urban legend about <topic>."""
    search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
    result_urls = search_page.xpath("//a[@target='_self']/@href")

    if not result_urls:
        return "no matching pages found"

    snopes_page = http.get_html(result_urls[0])
    snopes_text = snopes_page.text_content()

    # BUGFIX: the original called .group(0) directly on re.search(), which
    # raises AttributeError when the page does not match the expected layout.
    claim_match = re.search(r"Claim: .*", snopes_text)
    if claim_match is None:
        return "could not parse snopes page"
    claim = claim_match.group(0).strip()

    status_match = re.search(r"Status: .*", snopes_text)
    if status_match is not None:
        status = status_match.group(0).strip()
    else:
        # new-style statuses: look for a bare verdict word instead
        verdict = re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED", snopes_text)
        if verdict is None:
            return "could not parse snopes page"
        status = "Status: %s." % verdict.group(0).title()

    claim = re.sub(r"[\s\xa0]+", " ", claim)   # compress whitespace
    status = re.sub(r"[\s\xa0]+", " ", status)

    return formatting.output('Snopes', ['{} {} {}'.format(claim, status, result_urls[0])])
unknown
codeparrot/codeparrot-clean
""" Devices controlled my the ISY are represented as "nodes" on the ISY device and with Node Objects in the API There are three types of Node Object: * IsyNode - Node Object Represent lights, switches, motion sensors * IsyScene - Scene Object Represents Scenes contains Nodes that comprise a "Scene" * IsyNodeFolder - Can hold Scene's or Nodes a organizational obj for Scene's and Nodes Only IsyNode Objects maintain "state" What states are maintined depend on the physical node device itself but they can include - on, off of dim level - temperature - wattage Nodes can have "members" or subnodes IsyScene Objects can take commands but do not maintin a queryable state A Scene is predefined state for one or more nodes scenes can only be comprised of nodes which are call "members" only nodes can be members of a scene IsyNodeFolders are just for organizing Nodes, Scenes and Folders can be members of a Folder """ __author__ = 'Peter Shipley <peter.shipley@gmail.com>' __copyright__ = "Copyright (C) 2015 Peter Shipley" __license__ = "BSD" import hashlib from ISY.IsyUtilClass import IsySubClass, val2bool from ISY.IsyExceptionClass import * # from IsyClass import * # from IsyNodeClass import * # from IsyProgramClass import * # from IsyVarClass import * __all__ = ['IsyNode', 'IsyNodeFolder', 'IsyScene'] # library_using_super class _IsyNodeBase(IsySubClass): #_objtype = (0, "unknown") _objtype = "unknown" def on(self, val=255) : """ Send On command to a node args: optional value for on level """ self._on(val, "DON") def faston(self, val=255) : """ Send Fast On command to a node args: optional value for on level """ self._on(val, "DFON") def _on(self, val, cmd) : if not str(val).isdigit : raise IsyTypeError("On Command : Bad Value : node=%s val=%s" % self._mydict["address"], str(val)) if "property" in self._mydict : if "ST" in self._mydict["property"] : self._mydict["property"]["ST"]["value"] = val self._mydict["property"]["ST"]["formatted"] = "{:.0%}".format(val/255) 
# ---------------------------------------------------------------------------
# NOTE(review): recovered from a whitespace-mangled dump.  The methods below
# (up to node_id_to_int) belong to a node base class whose `class` header —
# and the `def` line of the very first statement — lie outside this excerpt.
# They are reproduced here, reformatted, at top level.
# ---------------------------------------------------------------------------

# Tail of a send-command method whose `def` line is not visible here.
self.isy._node_send(self._mydict["address"], "cmd", cmd, val)

def off(self) :
    """ Send Off command to a node

        args: None
    """
    self._off("DOF")

def fastoff(self) :
    """ Send Fast Off command to a node

        args: None
    """
    self._off("DFOF")

def _off(self, cmd="DOF") :
    # Send the off command, then mirror the new state into the local
    # property cache so subsequent reads reflect it without a round trip.
    self.isy._node_send(self._mydict["address"], "cmd", cmd)
    if "property" in self._mydict :
        # self._mydict["property"]["time"] = 0
        if "ST" in self._mydict["property"] :
            self._mydict["property"]["ST"]["value"] = 0
            self._mydict["property"]["ST"]["formatted"] = "Off"

def beep(self) :
    # Ask the device to emit an audible beep (ISY "BEEP" command).
    self.isy._node_send(self._mydict["address"], "cmd", "BEEP")

def get_spoken(self):
    """ get notes property 'spoken' """
    return self._get_prop("spoken")
spoken = property(get_spoken)

def get_path(self):
    # Folder path of this node within the ISY tree.
    return self.isy._node_get_path(self._mydict['address'], self._objtype)
path = property(get_path)

def members_list(self) :
    # Base implementation: plain nodes have no members.
    pass

def member_iter(self, flag=0):
    # NOTE(review): the `flag` argument is accepted but ignored here.
    return self.members_list()

def member_list(self):
    if 'members' in self._mydict :
        # print("mydict['members'] : ", type(self._mydict['members']) )
        # NOTE(review): type(...) == 'dict' compares a type object with the
        # *string* 'dict', so this branch can never be taken and the slice
        # copy below always runs.  Probably meant isinstance(..., dict).
        if type(self._mydict['members']) == 'dict' :
            return self._mydict['members'].keys()
        # if type(self._mydict['members']) == 'list' :
        return self._mydict['members'][:]
    return [ ]

def is_dimable(self) :
    # Device-type category "1" is treated as dimmable — presumably the
    # Insteon dimmer category; confirm against ISY device-type tables.
    if 'type' in self._mydict :
        a = self._mydict["type"].split('.')
        if a[0] == "1" :
            return True
    return False
dimable = property(is_dimable)

def get_callback(self) :
    return self.isy.callback_get(self._mydict["address"])

def set_callback(self, func, *args) :
    # Assigning None removes any registered callback for this node.
    if func is None :
        return self.isy.callback_del(self._mydict["address"])
    else :
        return self.isy.callback_set(self._mydict["address"], func, args)
callback = property(get_callback, set_callback)

def is_member(self, obj) :
    # Accepts either an address string or a node-like object.
    if "members" in self._mydict :
        if isinstance(obj, str) :
            return obj in self._mydict["members"]
        elif isinstance(obj, _IsyNodeBase) :
            return obj._get_prop("address") in self._mydict["members"]
    return False

def member_add(self, node, flag=0) :
    # NOTE(review): the result `r` is never returned and `flag` is unused —
    # compare with _rename below, which does return the SOAP result.
    r = self.isy.soapcomm("SetParent",
            node=node._get_prop("address"),
            nodeType=node.nodeType(),
            parent=self._mydict["address"],
            parentType=self.nodeType())

def _rename(self, cmd, newname) :
    # Shared helper behind RenameNode / RenameGroup / RenameFolder.
    if self.debug & 0x01 :
        print("rename : ", self.__class__.__name__, " : ", newname)
    #if not isinstance(newname, str) or len(newname) == 0 :
    #    print "newname : ", newname
    #    raise IsyTypeError("rename : name value not str")
    r = self.isy.soapcomm(cmd,
            id=self._mydict["address"],
            name=newname )
    return r

# check if scene _contains_ node
def __contains__(self, other):
    return self.is_member(other)

# check if obj _contains_ attrib
# def __contains__(self, other):
#     if isinstance(other, str) :
#         return other in self._getlist
#     else :
#         return False

# class MemberDicte(dict):
#
#     def __getitem__(self, key):
#         val = dict.__getitem__(self, key)
#         print 'GET', key
#         return val
#
#     def __setitem__(self, key, val):
#         print 'SET', key, val
#         dict.__setitem__(self, key, val)
#
#     def __delitem__(self, key):
#         print 'DEL', key
#         dict.__delitem__(self, key)
#
#     def __repr__(self):
#         dictrepr = dict.__repr__(self)
#         return '%s(%s)' % (type(self).__name__, dictrepr)
#
#     def get(self, key, default_val):
#         print 'GET', key, default_val
#         dict.get(self, key, default_val)
#
#     def update(self, *args, **kwargs):
#         print 'update', args, kwargs
#         for k, v in dict(*args, **kwargs).iteritems():
#             self[k] = v

#
# converts a node Id to an int
# eg: "9 4A 5F 2" => 00001001010010100101111100000010 => 155868930
#
def node_id_to_int(h) :
    # Pack the four hex fields of an Insteon-style address into one 32-bit int.
    a = h.split(' ')
    return ( int(a[0], 16) << 24 ) | ( int(a[1], 16) << 16 ) | \
        ( int(a[2], 16) << 8 ) | int(a[3], 16)

# def rate
# def onlevel

class IsyNode(_IsyNodeBase):
    """ Node Class for ISY

        Attributes :
            status / ST
            ramprate / RR
            onlevel / OL

        Readonly Attributes :
            address
            formatted
            enabled
            pnode
            type
            name
            ELK_ID
            flag

        functions:
            get_rr:
            set_rr:

        Bugs: Results are undefined for Node class objects that
            represent a deleted node
    """
    _getlist = ['address', 'enabled', 'formatted', 'ELK_ID',
        'parent', 'parent-type', 'name', 'pnode', 'flag',
        'wattage', 'isLoad', 'location', 'description', 'spoken',
        'OL', 'RR', 'ST', 'type']
    _setlist = ['RR', 'OL', 'status', 'ramprate', 'onlevel', 'enable']
    _propalias = {'status': 'ST', 'value': 'ST', 'val': 'ST',
        'id': 'address', 'addr': 'address',
        'ramprate': 'RR', 'onlevel': 'OL',
        "node-flag": "flag"}
    #_boollist = [ "enabled" ]

    def __init__(self, isy, ndict) :
        # self._objtype = (1, "node")
        self._objtype = "node"
        self._nodeprops = None
        super(self.__class__, self).__init__(isy, ndict)
        # if not self.isy.eventupdates :
        #    #update only nodes
        #    if "node-flag" in self._mydict :
        #        self.update()
        # NOTE(review): hashlib.sha256() requires bytes; a str address raises
        # TypeError on Python 3, and the result is a hash *object*, not an
        # int, which later breaks __hash__ — confirm intended behavior.
        self._hash = hashlib.sha256(self._mydict["address"])
        if self.debug & 0x01 :
            print("Init Node : \"" + self._mydict["address"] + \
                "\" : \"" + self._mydict["name"] + "\"")
        # self.isy._printdict(self.__dict__)

    # Special case from BaseClass due to ST/RR/OL props
    def _get_prop(self, prop):
        # print "IN get_prop ", prop
        # "formatted" is served from the ST property's display string.
        if prop == "formatted" :
            prop = "ST"
            value = "formatted"
        else :
            value = "value"
        if prop in self._propalias :
            prop = self._propalias[prop]
        if not prop in self._getlist :
            # if prop in ['parent', 'parent-type'] :
            #    return None
            raise IsyPropertyError("no property Attribute {!s}".format(prop))
        # Note-backed attributes come from the node's "notes" record,
        # fetched lazily on first access.
        if prop in ['isLoad', 'location', 'description', 'spoken'] :
            if self._nodeprops is None :
                self._nodenotes = self.isy.node_get_notes(self._mydict["address"])
            if self._nodenotes is None :
                return None
            if prop in self._nodenotes :
                return self._nodenotes[prop]
            else :
                # return None
                return ""
        if prop in ['ST', 'OL', 'RR'] :
            # Scene's do not have property values
            if prop in self._mydict["property"] :
                # print self._mydict["property"]
                # print "prop value", prop, value
                return self._mydict["property"][prop][value]
            else :
                return None
            # if self._mydict["property"]["time"] == 0 :
            #    self.update()
            # elif self.isy.cachetime :
            #    if time.gmtime() < (self.cachetime + self._mydict["property"]["time"]) :
            #        self.update()
        else :
            # if prop in self._mydict :
            #    if prop in self._boollist :
            #        return(val2bool(self._mydict[prop]))
            #    else :
            #        return self._mydict[prop]
            # else :
            #    return None
            # Fall back to the generic base-class lookup.
            return super(self.__class__, self)._get_prop(prop)

    def _set_prop(self, prop, new_value):
        """ generic property set """
        # print "IN set_prop ", prop, new_value
        if self.debug & 0x04 :
            print("_set_prop ", prop, " : ", new_value)
        if prop in self._propalias :
            prop = self._propalias[prop]
        if not prop in self._setlist :
            # Setting the status alias delegates to on().
            if prop == "ST" :
                self.on(new_value)
                return
            else :
                raise IsyPropertyError("_set_prop : " \
                    "Invalid property Attribute " + prop)
        if prop == 'enable' :
            self._mydict[prop] = bool(new_value)
            self.isy.node_enable(self._mydict["address"], bool(new_value))
        elif prop in ['OL', 'RR'] :
            # NOTE(review): `.isdigit` is missing its call parens, so this
            # check is always truthy and the guard never fires; also the
            # %-format below binds only the first value, so constructing
            # this IsyTypeError would itself raise TypeError.
            if not str(new_value).isdigit :
                raise IsyTypeError("Set Property : Bad Value : node=%s prop=%s val=%s" % self._mydict["address"], prop, str(new_value))
            self.isy._node_send(self._mydict["address"], "set", prop, str(new_value))
            # self._mydict["property"]["time"] = 0
            if prop in self._mydict["property"] :
                # if isinstance(new_value, (int, float)) : # already checked with isdigit
                self._mydict["property"][prop]["value"] = new_value
        # we need to tie this to some action
        elif prop in self._mydict :
            # self._mydict[prop] = new_value
            pass
        else :
            #print "_set_prop AttributeError"
            raise AttributeError("no Attribute " + prop)

    def _gettype(self):
        """ Type of Node (readonly) """
        return "node"

    # enable node
    def get_enable(self):
        """ get enable/disable status a node """
        return self._get_prop("enable")

    def set_enable(self, new_bool):
        """ Set enable status a node

            args: enable bool
        """
        return self._set_prop("enable", new_bool)
    enable = property(get_enable, set_enable, None, "enable/disable a node")

    def get_wattage(self):
        """ get wattage """
        return self._get_prop("wattage")

    def set_wattage(self, watts):
        """ set wattage property """
        return self.isy.node_set_powerinfo(self._mydict["address"], wattage=watts)
    wattage = property(get_wattage, set_wattage)

    # ramprate property
    # obj method for getting/setting a Node's value
    # sets how fast a light fades on.
    def get_rr(self):
        """ Get/Set RampRate property of Node """
        return self._get_prop("RR")

    def set_rr(self, new_value):
        """ Get/Set RampRate property of Node """
        return self._set_prop("RR", new_value)
    ramprate = property(get_rr, set_rr)

    # On Level property
    # obj method for getting/setting a Node's value
    # where in most cases light is how bright the light is
    # when turned on
    def get_ol(self):
        """ Get/Set On Level property of Node """
        return self._get_prop("OL")

    def set_ol(self, new_value):
        """ Get/Set On Level property of Node """
        return self._set_prop("OL", new_value)
    onlevel = property(get_ol, set_ol)

    # def get_fm(self):
    #    """ property On Level Value of Node """
    #    return self._get_prop("formatted")
    # formatted = property(get_fm)

    # status property
    # obj method for getting/setting a Node's value
    # where in most cases light is how bright the light is
    def get_status(self):
        """ Get/Set Status property of Node """
        return self._get_prop("ST")

    def set_status(self, new_value):
        """ Get/Set Status property of Node """
        return self.on(new_value)
    status = property(get_status, set_status)

    def dim(self) :
        """ decrease brightness of a device by ~3% """
        self.isy._node_send(self._mydict["address"], "cmd", "DIM")

    def brighten(self) :
        """ increase brightness of a device by ~3% """
        self.isy._node_send(self._mydict["address"], "cmd", "BRT")

    #
    # readonly to node attribute
    #
    def rename(self, newname) :
        return self._rename("RenameNode", newname)

    #
    #
    #
    def update(self) :
        """ force object to manualy update it's propertys """
        # Re-fetch this node's property set from the ISY REST interface and
        # refresh the local cache keyed by property id.
        xurl = "/rest/nodes/" + self._mydict["address"]
        if self.debug & 0x01 :
            print("_updatenode pre _getXML")
        _nodestat = self.isy._getXMLetree(xurl)
        # del self._mydict["property"]["ST"]
        for prop in _nodestat.iter('property'):
            tprop = dict()
            for k, v in list(prop.items()) :
                tprop[k] = v
            if "id" in tprop :
                self._mydict["property"][tprop["id"]] = tprop
        # self._mydict["property"]["time"] = time.gmtime()

    # experimental
# NOTE(review): the next three dunder methods belong to the node class
# defined earlier in this (mangled) file; reproduced at top level because
# the enclosing class header is not adjacent in this dump.

def __bool__(self) :
    # Truthiness follows the current status ("ST") value:
    # non-zero (on / dimmed) -> True, zero (off) -> False.
    #print "__nonzero__ call", self._mydict["property"]["ST"]["value"], \
    #    " :: ", int(self._mydict["property"]["ST"]["value"])
    return(bool(self._mydict["property"]["ST"]["value"]) > 0)

# use the node address as the hash value
def __hash__(self) :
    # NOTE(review): _hash is created with hashlib.sha256(...), so this
    # returns a hash *object*; __hash__ must return an int — confirm
    # against __init__ (e.g. int.from_bytes(digest) would be needed).
    return(self._hash)

# def __str__(self):
#     print "__str__ call"
#     return("my str : " + self._mydict["name"])

def __float__(self):
    # Status scaled to 0.0-1.0 (ISY levels range over 0-255).
    # print "__float__ call"
    return float(int(self._mydict["property"]["ST"]["value"]) / float(255))


class IsyScene(_IsyNodeBase):
    """ Node Group Class for ISY

        writeonly attributes :
            status

        readonly attributes :
            address
            name
            flag
            deviceGroup
            parent
            parent-type
            ELK_ID
    """
    _getlist = ['address', 'name', "ELK_ID", "deviceGroup", 'flag',
            'parent', 'parent-type']
    _setlist = []
    _propalias = {'id': 'address', 'addr': 'address',
            "group-flag": "flag"}

    def __init__(self, *args):
        #self._objtype = (2, "scene")
        self._objtype = "scene"
        super(self.__class__, self).__init__(*args)

    # status property
    # obj method for getting/setting a Scene's value
    # where in most cases light is how bright the light is
    def set_status(self, new_value):
        """ set status value of Scene """
        return self._set_prop("ST", new_value)
    # Write-only: reading a scene's status is not supported.
    status = property(None, set_status)

    def _getmembers(self) :
        """ List members of a scene or group """
        if "members" in self._mydict :
            return self._mydict["members"].keys()
        else :
            return None
    members = property(_getmembers)

    def member_list(self) :
        return self._getmembers()

    def is_member(self, obj) :
        # Accepts either an address string or a node-like object.
        if "members" in self._mydict :
            if isinstance(obj, str) :
                return obj in self._mydict["members"]
            elif isinstance(obj, _IsyNodeBase) :
                return obj._get_prop("address") in self._mydict["members"]
        return False

    def rename(self, newname) :
        """ rename node/scene/folder """
        return self._rename("RenameGroup", newname)

    def member_del(self, node) :
        r = self.isy.scene_del_node( self._mydict["address"], node)
        # r = self.isy.soapcomm("RemoveFromGroup",
        #        node=node._get_prop("address"),
        #        group=self._mydict["address"])
        return r

    # NOTE(review): the two helpers below have docstrings that look swapped
    # relative to their names (controler/Responder vs responder/Controller);
    # confirm which flag value (16 vs 32) maps to which membership role.
    def member_add_controler(self, node, flag=16) :
        """ Add Node to scene/group as Responder """
        return self.member_add(node, flag)

    def member_add_responder(self, node, flag=32) :
        """ Add Node to scene/group Controller """
        return self.member_add(node, flag)

    def member_add(self, node, flag=16) :
        """ Add Node to scene/group """
        # NOTE(review): the `flag` parameter is ignored — flag=0x10 (16) is
        # always passed, so member_add_responder's flag=32 has no effect.
        r = self.isy.scene_add_node( self._mydict["address"], node, flag=0x10)
        # r = self.isy.soapcomm("MoveNode",
        #        node=node._get_prop("address"),
        #        group=self._mydict["address"],
        #        flag=16)
        return r

    def member_iter(self, flag=0):
        """ iter though members

            Folders iter though their contents (nodes/scenes/folders)
            Scene iter though their members (nodes)
            Nodes iter though sub-nodes (nodes)
        """
        # When a non-zero flag is given, yield only members whose stored
        # membership flags intersect it.
        if "members" in self._mydict :
            for k in list(self._mydict["members"].keys()) :
                if flag and not(flag & self._mydict["members"][k]) :
                    continue
                else :
                    yield k

    def __iter__(self):
        return self.member_iter()

    # check if scene _contains_ node
    def __contains__(self, other):
        return self.is_member(other)


class IsyNodeFolder(_IsyNodeBase):
    """ Node Folder Class for ISY

        readonly attributes :
            address
            name
            flag
    """
    _getlist = ['address', 'name', 'flag']
    _setlist = []
    _propalias = {'id': 'address', 'addr': 'address',
            "folder-flag": "flag"}

    def __init__(self, *args):
        #self._objtype = (3, "folder")
        self._objtype = "folder"
        super(self.__class__, self).__init__(*args)

    def member_add(self, node, flag=0) :
        """ add Node/Scene or Folder to Folder Obj

            Args:
                node = address, name or Node/Scene/Folder Obj

            sets Parent for node/scene/folder to current Obj Folder

            calls SOAP SetParent()
        """
        r = self.isy.soapcomm("SetParent",
                node=node._get_prop("address"),
                nodeType=node.nodeType(),
                parent=self._mydict["address"],
                parentType=self.nodeType())
        return r

    def member_del(self, node) :
        """ del Node/Scene or Folder to Folder Obj

            Args:
                node = address, name or Node/Scene/Folder Obj

            del node/scene/folder to current Obj Folder
            (and moves to base folder)

            calls SOAP SetParent()
        """
        # Omitting parent/parentType reparents the node to the root folder.
        r = self.isy.soapcomm("SetParent",
                node=node._get_prop("address"),
                nodeType=node.nodeType())
        return r

    def rename(self, newname) :
        """ renames current Obj Folder

            args :
                name = new folder name

            calls SOAP RenameFolder()
        """
        return self._rename("RenameFolder", newname)

    def __iter__(self):
        return self.member_iter()

    # NOTE(review): stub — returns None, so `x in folder` is always False;
    # compare with IsyScene.__contains__, which delegates to is_member().
    def __contains__(self, other):
        pass


#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
    import __main__
    print(__main__.__file__)
    print("syntax ok")
    exit(0)
unknown
codeparrot/codeparrot-clean
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.IOException;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.BeforeAll;
import org.slf4j.event.Level;

/**
 * This class tests the FileStatus API.
 */
public class TestListFiles {
  static {
    GenericTestUtils.setLogLevel(FileSystem.LOG, Level.TRACE);
  }

  // Fixed seed so the random file contents are reproducible across runs.
  static final long seed = 0xDEADBEEFL;

  final protected static Configuration conf = new Configuration();
  protected static FileSystem fs;
  protected static Path TEST_DIR;
  final private static int FILE_LEN = 10;
  private static Path FILE1;
  private static Path DIR1;
  private static Path FILE2;
  private static Path FILE3;

  static {
    setTestPaths(new Path(GenericTestUtils.getTempPath("testlistfiles"), "main_"));
  }

  protected static Path getTestDir() {
    return TEST_DIR;
  }

  /**
   * Sets the root testing directory and reinitializes any additional test paths
   * that are under the root.  This method is intended to be called from a
   * subclass's @BeforeClass method if there is a need to override the testing
   * directory.
   *
   * @param testDir Path root testing directory
   */
  protected static void setTestPaths(Path testDir) {
    TEST_DIR = testDir;
    FILE1 = new Path(TEST_DIR, "file1");
    DIR1 = new Path(TEST_DIR, "dir1");
    FILE2 = new Path(DIR1, "file2");
    FILE3 = new Path(DIR1, "file3");
  }

  // Start each run against the local filesystem with a clean test directory.
  @BeforeAll
  public static void testSetUp() throws Exception {
    fs = FileSystem.getLocal(conf);
    fs.delete(TEST_DIR, true);
  }

  // Fills `name` with `fileSize` pseudo-random bytes (seeded, deterministic).
  private static void writeFile(FileSystem fileSys, Path name, int fileSize)
      throws IOException {
    // Create and write a file that contains three blocks of data
    FSDataOutputStream stm = fileSys.create(name);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
  }

  /** Test when input path is a file */
  @Test
  public void testFile() throws IOException {
    fs.mkdirs(TEST_DIR);
    writeFile(fs, FILE1, FILE_LEN);

    // Recursive listing of a plain file yields exactly that file.
    RemoteIterator<LocatedFileStatus> itor = fs.listFiles(
        FILE1, true);
    LocatedFileStatus stat = itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fs.makeQualified(FILE1), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);

    // Non-recursive listing of the same file behaves identically.
    itor = fs.listFiles(FILE1, false);
    stat = itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fs.makeQualified(FILE1), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);

    fs.delete(FILE1, true);
  }

  /** Test when input path is a directory */
  @Test
  public void testDirectory() throws IOException {
    fs.mkdirs(DIR1);

    // test empty directory
    RemoteIterator<LocatedFileStatus> itor = fs.listFiles(
        DIR1, true);
    assertFalse(itor.hasNext());
    itor = fs.listFiles(DIR1, false);
    assertFalse(itor.hasNext());

    // testing directory with 1 file
    writeFile(fs, FILE2, FILE_LEN);

    itor = fs.listFiles(DIR1, true);
    LocatedFileStatus stat = itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fs.makeQualified(FILE2), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);

    itor = fs.listFiles(DIR1, false);
    stat = itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN, stat.getLen());
    assertEquals(fs.makeQualified(FILE2), stat.getPath());
    assertEquals(1, stat.getBlockLocations().length);

    // test more complicated directory: file1 under TEST_DIR,
    // file2 and file3 under TEST_DIR/dir1.
    writeFile(fs, FILE1, FILE_LEN);
    writeFile(fs, FILE3, FILE_LEN);

    // Iteration order is not guaranteed, so collect expected paths in a set
    // and remove each one as it is seen.
    Set<Path> filesToFind = new HashSet<>();
    filesToFind.add(fs.makeQualified(FILE1));
    filesToFind.add(fs.makeQualified(FILE2));
    filesToFind.add(fs.makeQualified(FILE3));

    itor = fs.listFiles(TEST_DIR, true);
    stat = itor.next();
    assertTrue(stat.isFile());
    assertTrue(filesToFind.remove(stat.getPath()),
        "Path " + stat.getPath() + " unexpected");

    stat = itor.next();
    assertTrue(stat.isFile());
    assertTrue(filesToFind.remove(stat.getPath()),
        "Path " + stat.getPath() + " unexpected");

    stat = itor.next();
    assertTrue(stat.isFile());
    assertTrue(filesToFind.remove(stat.getPath()),
        "Path " + stat.getPath() + " unexpected");
    assertFalse(itor.hasNext());
    assertTrue(filesToFind.isEmpty());

    // Non-recursive listing of TEST_DIR sees only the file directly under it.
    itor = fs.listFiles(TEST_DIR, false);
    stat = itor.next();
    assertTrue(stat.isFile());
    assertEquals(fs.makeQualified(FILE1), stat.getPath());
    assertFalse(itor.hasNext());

    fs.delete(TEST_DIR, true);
  }
}
java
github
https://github.com/apache/hadoop
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestListFiles.java
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..utils import is_torch_available, logging if is_torch_available(): import torch from torch import nn from contextlib import contextmanager from ..core_model_loading import ConversionOps from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module logger = logging.get_logger(__name__) FP4_VALUES = [ +0.0, +0.5, +1.0, +1.5, +2.0, +3.0, +4.0, +6.0, -0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0, ] @contextmanager def on_device(dev): if is_torch_available(): import torch if isinstance(dev, torch.Tensor): dev = dev.device elif isinstance(dev, str): dev = torch.device(dev) dev_type = getattr(dev, "type", None) if dev_type == "cuda": with torch.cuda.device(dev): yield return if dev_type == "xpu" and hasattr(torch, "xpu"): with torch.xpu.device(dev): yield return # other: CPU yield class Mxfp4Quantize(ConversionOps): def __init__(self, hf_quantizer): self.hf_quantizer = hf_quantizer def convert( self, input_dict: dict[str, torch.Tensor], model: torch.nn.Module | None = None, missing_keys: list[str] | None = None, full_layer_name: str | None = None, **kwargs, ) -> dict[str, torch.Tensor]: _, value = tuple(input_dict.items())[0] value = value[0] if isinstance(value, list) else value module, _ = get_module_from_name(model, full_layer_name) with torch.device(value.device): if isinstance(module, Mxfp4GptOssExperts): triton_weight_tensor, weight_scale = 
quantize_to_mxfp4(value.transpose(-1, -2), triton_kernels_hub) PrecisionConfig, FlexCtx, InFlexData = ( triton_kernels_hub.matmul_ogs.PrecisionConfig, triton_kernels_hub.matmul_ogs.FlexCtx, triton_kernels_hub.matmul_ogs.InFlexData, ) triton_weight_tensor, weight_scale = swizzle_mxfp4( triton_weight_tensor, weight_scale, triton_kernels_hub ) proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj" if proj in module._parameters: # Remove the nn.Parameter registration so we can attach the Triton tensor del module._parameters[proj] setattr(module, proj, triton_weight_tensor) setattr( module, f"{proj}_precision_config", PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())), ) missing_keys.discard(f"{full_layer_name}") module._is_hf_initialized = True return {} class Mxfp4Dequantize(ConversionOps): def __init__(self, hf_quantizer): self.hf_quantizer = hf_quantizer def convert( self, input_dict: dict[str, torch.Tensor], model: torch.nn.Module | None = None, full_layer_name: str | None = None, missing_keys=None, **kwargs, ) -> dict[str, torch.Tensor]: if "_blocks" in input_dict.keys(): if isinstance(input_dict["_blocks"], list): blocks = input_dict["_blocks"][0] else: blocks = input_dict["_blocks"] if "_scales" in input_dict.keys(): if isinstance(input_dict["_scales"], list): scales = input_dict["_scales"][0] else: scales = input_dict["_scales"] # Here we are dequantizing the weights dequantized = dequantize_convertops(blocks, scales) return {full_layer_name: dequantized} class Mxfp4Deserialize(ConversionOps): def __init__(self, hf_quantizer): self.hf_quantizer = hf_quantizer def convert( self, input_dict: dict[str, torch.Tensor], model: torch.nn.Module | None = None, full_layer_name: str | None = None, missing_keys: list[str] | None = None, **kwargs, ) -> dict[str, torch.Tensor]: param_data = {} if "_blocks" in input_dict.keys(): if isinstance(input_dict["_blocks"], list): param_data["_blocks"] = input_dict["_blocks"][0] 
else: param_data["_blocks"] = input_dict["_blocks"] if "_scales" in input_dict.keys(): if isinstance(input_dict["_scales"], list): param_data["_scales"] = input_dict["_scales"][0] else: param_data["_scales"] = input_dict["_scales"] # Eagerly set tensors on the module and perform swizzle module, _ = get_module_from_name(model, full_layer_name) proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj" swizzle_mxfp4_convertops( param_data["_blocks"], param_data["_scales"], module, proj, param_data["_blocks"].device, triton_kernels_hub, ) missing_keys.discard(f"{full_layer_name}") module._is_hf_initialized = True # We return an empty mapping since the module was updated in-place. This prevents # the loader from trying to materialize the original meta-parameter names again. # We don't use set_param_for_module since it expects mainly a torch.nn.Parameter or a safetensors pointer return {} # Copied from GPT_OSS repo and vllm def quantize_to_mxfp4(w, triton_kernels_hub): downcast_to_mxfp_torch = triton_kernels_hub.numerics_details.mxfp.downcast_to_mxfp_torch w, w_scale = downcast_to_mxfp_torch(w.to(torch.bfloat16), torch.uint8, axis=1) return w, w_scale def swizzle_mxfp4(w, w_scale, triton_kernels_hub): """ Changes the layout of the tensors depending on the hardware """ FP4, convert_layout, wrap_torch_tensor = ( triton_kernels_hub.tensor.FP4, triton_kernels_hub.tensor.convert_layout, triton_kernels_hub.tensor.wrap_torch_tensor, ) layout = triton_kernels_hub.tensor_details.layout StridedLayout = triton_kernels_hub.tensor_details.layout.StridedLayout value_layout, value_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1) w = convert_layout(wrap_torch_tensor(w, dtype=FP4), value_layout, **value_layout_opts) w_scale = convert_layout(wrap_torch_tensor(w_scale), StridedLayout) return w, w_scale # Mostly copied from GPT_OSS repo # TODO: Add absolute link when the repo is public def _convert_moe_packed_tensors( blocks, scales, *, dtype: torch.dtype 
= torch.bfloat16, rows_per_chunk: int = 32768 * 1024, # TODO these values are not here by mistake ;) ) -> torch.Tensor: """ Convert the mxfp4 weights again, dequantizing and makes them compatible with the forward pass of GPT_OSS. """ import math blocks = blocks.to(torch.uint8) scales = scales.to(torch.int32) - 127 # TODO that's because 128=2**7 assert blocks.shape[:-1] == scales.shape, f"{blocks.shape[:-1]=} does not match {scales.shape=}" lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device) *prefix_shape, G, B = blocks.shape rows_total = math.prod(prefix_shape) * G blocks = blocks.reshape(rows_total, B) scales = scales.reshape(rows_total, 1) out = torch.empty(rows_total, B * 2, dtype=dtype, device=blocks.device) for r0 in range(0, rows_total, rows_per_chunk): r1 = min(r0 + rows_per_chunk, rows_total) blk = blocks[r0:r1] exp = scales[r0:r1] sub = out[r0:r1] # This vector is only used to index into `lut`, but is hugeee in GPU memory so we delete it immediately idx_lo = (blk & 0x0F).to(torch.int) sub[:, 0::2] = lut[idx_lo] del idx_lo # This vector is only used to index into `lut`, but is hugeee in GPU memory so we delete it immediately idx_hi = (blk >> 4).to(torch.int) sub[:, 1::2] = lut[idx_hi] del idx_hi # Perform op torch.ldexp(sub, exp, out=sub) del blk, exp, sub out = out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2) return out.transpose(1, 2).contiguous() def convert_moe_packed_tensors( blocks, scales, *, dtype: torch.dtype = torch.bfloat16, rows_per_chunk: int = 32768 * 1024, # TODO these values are not here by mistake ;) ) -> torch.Tensor: """ Convert the mxfp4 weights again, dequantizing and makes them compatible with the forward pass of GPT_OSS. 
""" # Since the intermediate ops requite A LOT of memory, in very constrained device_map="auto" settings # it may OOM, hence this wrapper and move back to cpu if needed # torch statistics are not accurate enough to estimate if we will have enough memory due to fragmentation and # in-place operation on non-contiguous tensors (may sometimes require more temporary copies) try: return _convert_moe_packed_tensors(blocks, scales, dtype=dtype, rows_per_chunk=rows_per_chunk) # In the case of OOM due to very tight device_map, we convert and return on cpu - it will then be put back on correct # devide with the accelerate dispatch (doing it right away may still lead to OOM, but more memory is available later) except torch.OutOfMemoryError: blocks = blocks.to("cpu") scales = scales.to("cpu") return _convert_moe_packed_tensors(blocks, scales, dtype=dtype, rows_per_chunk=rows_per_chunk) class Mxfp4GptOssExperts(nn.Module): def __init__(self, config): super().__init__() self.num_experts = config.num_local_experts self.intermediate_size = config.intermediate_size self.hidden_size = config.hidden_size self.gate_up_proj = nn.Parameter( torch.zeros(self.num_experts, 2 * self.intermediate_size, self.hidden_size // 32, 16, dtype=torch.uint8), requires_grad=False, ) self.gate_up_proj_bias = nn.Parameter( torch.zeros(self.num_experts, 2 * self.intermediate_size, dtype=torch.float32), requires_grad=False ) self.down_proj = nn.Parameter( torch.zeros((self.num_experts, self.hidden_size, self.intermediate_size // 32, 16), dtype=torch.uint8), requires_grad=False, ) self.down_proj_bias = nn.Parameter( torch.zeros(self.num_experts, self.hidden_size, dtype=torch.float32), requires_grad=False ) self.alpha = 1.702 self.limit = getattr(config, "swiglu_limit", 7.0) self.gate_up_proj_precision_config = None self.down_proj_precision_config = None self.limit = getattr(config, "swiglu_limit", 7.0) def forward(self, hidden_states: torch.Tensor, routing_data, gather_idx, scatter_idx) -> torch.Tensor: 
FnSpecs, FusedActivation, matmul_ogs = ( triton_kernels_hub.matmul_ogs.FnSpecs, triton_kernels_hub.matmul_ogs.FusedActivation, triton_kernels_hub.matmul_ogs.matmul_ogs, ) swiglu_fn = triton_kernels_hub.swiglu.swiglu_fn with on_device(hidden_states.device): act = FusedActivation(FnSpecs("swiglu", swiglu_fn, ("alpha", "limit")), (self.alpha, self.limit), 2) intermediate_cache1 = matmul_ogs( hidden_states, self.gate_up_proj, self.gate_up_proj_bias.to(torch.float32), routing_data, gather_indx=gather_idx, precision_config=self.gate_up_proj_precision_config, gammas=None, fused_activation=act, ) intermediate_cache3 = matmul_ogs( intermediate_cache1, self.down_proj, self.down_proj_bias.to(torch.float32), routing_data, scatter_indx=scatter_idx, precision_config=self.down_proj_precision_config, gammas=routing_data.gate_scal, ) return intermediate_cache3 # Adapted from GPT_OSS repo # TODO: Add absolute link when the repo is public def routing_torch_dist( logits, n_expts_act, ): import os GatherIndx, RoutingData, ScatterIndx, compute_expt_data_torch = ( triton_kernels_hub.routing.GatherIndx, triton_kernels_hub.routing.RoutingData, triton_kernels_hub.routing.ScatterIndx, triton_kernels_hub.routing.compute_expt_data_torch, ) with on_device(logits.device): world_size = torch.distributed.get_world_size() rank = int(os.environ.get("LOCAL_RANK", "0")) replace_value = -1 n_tokens = logits.shape[0] n_expts_tot = logits.shape[1] n_local_experts = n_expts_tot // world_size local_expert_start = rank * n_local_experts local_expert_end = (rank + 1) * n_local_experts n_gates_pad = n_tokens * n_expts_act def topk(vals, k): tk_indx = torch.argsort(-vals, dim=1, stable=True)[:, :k] tk_indx = tk_indx.long() tk_val = torch.take_along_dim(vals, tk_indx, dim=1) return tk_val, tk_indx.int() expt_scal, expt_indx = topk(logits, n_expts_act) expt_scal = torch.softmax(expt_scal, dim=-1) expt_indx, sort_indices = torch.sort(expt_indx, dim=1) expt_scal = torch.gather(expt_scal, 1, sort_indices) # Flatten 
and mask for local experts expt_scal = expt_scal.reshape(-1) hist = torch.histc(expt_indx, bins=n_expts_tot, max=n_expts_tot - 1)[local_expert_start:local_expert_end] expt_indx = expt_indx.view(-1).to(torch.int32) # we use a large value to replace the indices that are not in the local expert range var = 1000 expt_indx = torch.where(expt_indx < local_expert_start, var, expt_indx) topk_indx = torch.argsort(expt_indx, stable=True).to(torch.int32) gate_indx = torch.argsort(topk_indx).to(torch.int32) expt_indx = torch.where(expt_indx < local_expert_end, expt_indx, replace_value) expt_indx = torch.where(local_expert_start <= expt_indx, expt_indx, replace_value) gate_indx = torch.where(expt_indx == replace_value, replace_value, gate_indx) gate_scal = expt_scal[topk_indx] topk_indx = torch.where(gate_indx[topk_indx] == replace_value, replace_value, topk_indx) # # Routing metadata for local expert computation gather_indx = GatherIndx(src_indx=topk_indx.int(), dst_indx=gate_indx.int()) scatter_indx = ScatterIndx(src_indx=gate_indx.int(), dst_indx=topk_indx.int()) expt_data = compute_expt_data_torch(hist, n_local_experts, n_gates_pad) hit_experts = n_expts_act return RoutingData(gate_scal, hist, n_local_experts, hit_experts, expt_data), gather_indx, scatter_indx def mlp_forward(self, hidden_states): import torch.distributed as dist if dist.is_available() and dist.is_initialized() and hasattr(self, "_is_hooked"): routing = routing_torch_dist else: routing = triton_kernels_hub.routing.routing batch_size = hidden_states.shape[0] hidden_states = hidden_states.reshape(-1, self.router.hidden_dim) router_logits = nn.functional.linear(hidden_states, self.router.weight, self.router.bias) with on_device(router_logits.device): routing_data, gather_idx, scatter_idx = routing(router_logits, self.router.top_k) routed_out = self.experts(hidden_states, routing_data, gather_idx, scatter_idx=scatter_idx) routed_out = routed_out.reshape(batch_size, -1, self.router.hidden_dim) return routed_out, 
router_logits def dequantize(module, param_name, param_value, target_device, dq_param_name, **kwargs): from ..integrations.tensor_parallel import shard_and_distribute_module model = kwargs.get("model") empty_param = kwargs.get("empty_param") casting_dtype = kwargs.get("casting_dtype") to_contiguous = kwargs.get("to_contiguous") rank = kwargs.get("rank") device_mesh = kwargs.get("device_mesh") for proj in ["gate_up_proj", "down_proj"]: if proj in param_name: if device_mesh is not None: param_value = shard_and_distribute_module( model, param_value, empty_param, dq_param_name, casting_dtype, to_contiguous, rank, device_mesh, ) blocks_attr = f"{proj}_blocks" scales_attr = f"{proj}_scales" setattr(module, param_name.rsplit(".", 1)[1], param_value) if hasattr(module, blocks_attr) and hasattr(module, scales_attr): dequantized = convert_moe_packed_tensors(getattr(module, blocks_attr), getattr(module, scales_attr)) setattr(module, proj, torch.nn.Parameter(dequantized.to(target_device))) delattr(module, blocks_attr) delattr(module, scales_attr) def dequantize_convertops(blocks, scales): dequantized = convert_moe_packed_tensors(blocks, scales) return torch.nn.Parameter(dequantized) def load_and_swizzle_mxfp4(module, param_name, param_value, target_device, triton_kernels_hub, **kwargs): """ This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`. 
""" PrecisionConfig, FlexCtx, InFlexData = ( triton_kernels_hub.matmul_ogs.PrecisionConfig, triton_kernels_hub.matmul_ogs.FlexCtx, triton_kernels_hub.matmul_ogs.InFlexData, ) from ..integrations.tensor_parallel import shard_and_distribute_module model = kwargs.get("model") empty_param = kwargs.get("empty_param") casting_dtype = kwargs.get("casting_dtype") to_contiguous = kwargs.get("to_contiguous") rank = kwargs.get("rank") device_mesh = kwargs.get("device_mesh") if "blocks" in param_name: proj = param_name.split(".")[-1].split("_blocks")[0] if "scales" in param_name: proj = param_name.split(".")[-1].split("_scales")[0] if device_mesh is not None: shard_and_distribute_module( model, param_value, empty_param, param_name, casting_dtype, to_contiguous, rank, device_mesh ) else: setattr(module, param_name.rsplit(".", 1)[1], torch.nn.Parameter(param_value, requires_grad=False)) blocks_attr = f"{proj}_blocks" scales_attr = f"{proj}_scales" blocks = getattr(module, blocks_attr) # at this point values were loaded from ckpt scales = getattr(module, scales_attr) # Check if both blocks and scales both not on meta device if blocks.device.type != "meta" and scales.device.type != "meta": local_experts = blocks.size(0) if proj == "gate_up_proj": blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1) else: blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2) if getattr(target_device, "type", target_device) == "cpu": target_device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" blocks = blocks.to(target_device).contiguous() scales = scales.to(target_device).contiguous() with on_device(target_device): triton_weight_tensor, weight_scale = swizzle_mxfp4( blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub ) # need to overwrite the shapes for the kernels if proj == "gate_up_proj": triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2]) 
else: triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size]) # triton_weight_tensor is what needs to be passed in oai kernels. It stores the data, the shapes and any more objects. It is like a subtensor setattr(module, proj, triton_weight_tensor) setattr( module, f"{proj}_precision_config", PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())), ) # delete blocks and scales delattr(module, scales_attr) delattr(module, blocks_attr) del blocks def swizzle_mxfp4_convertops(blocks, scales, module, proj, target_device, triton_kernels_hub): """ This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`. """ PrecisionConfig, FlexCtx, InFlexData = ( triton_kernels_hub.matmul_ogs.PrecisionConfig, triton_kernels_hub.matmul_ogs.FlexCtx, triton_kernels_hub.matmul_ogs.InFlexData, ) local_experts = blocks.size(0) if getattr(target_device, "type", target_device) == "cpu": target_device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda" blocks = blocks.to(target_device).contiguous() scales = scales.to(target_device).contiguous() if proj == "gate_up_proj": blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1) else: blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2) if getattr(target_device, "type", target_device) == "cpu": target_device = "cuda" with on_device(target_device): triton_weight_tensor, weight_scale = swizzle_mxfp4( blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub ) # need to overwrite the shapes for the kernels if proj == "gate_up_proj": triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2]) else: triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size]) # triton_weight_tensor is what needs to be passed in oai kernels. 
It stores the data, the shapes and any more objects. It's like a subtensor # Since the Experts module registers gate_up_proj and down_proj as nn.Parameters, we need to remove them so we can attach the Triton tensor if proj in module._parameters: # Remove the nn.Parameter registration so we can attach the Triton tensor del module._parameters[proj] setattr(module, proj, triton_weight_tensor) setattr( module, f"{proj}_precision_config", PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())), ) def replace_with_mxfp4_linear(model, quantization_config=None, modules_to_not_convert: list[str] | None = None): """ Public method that replaces the expert layers of the given model with mxfp4 quantized layers. Args: model (`torch.nn.Module`): The model to convert, can be any `torch.nn.Module` instance. quantization_config (`Mxfp4Config`, defaults to `None`): The quantization config object that contains the quantization parameters. modules_to_not_convert (`list`, *optional*, defaults to `None`): A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be converted. 
""" if quantization_config.dequantize: return model from .hub_kernels import get_kernel global triton_kernels_hub triton_kernels_hub = get_kernel("kernels-community/gpt-oss-triton-kernels") has_been_replaced = False for module_name, module in model.named_modules(): if not should_convert_module(module_name, modules_to_not_convert): continue if module.__class__.__name__ == "GptOssExperts" and not quantization_config.dequantize: with torch.device("meta"): model.set_submodule(module_name, Mxfp4GptOssExperts(model.config)) has_been_replaced = True if module.__class__.__name__ == "GptOssMLP" and not quantization_config.dequantize: from types import MethodType module.forward = MethodType(mlp_forward, module) if not has_been_replaced: logger.warning( "You are loading your model using mixed-precision FP4 quantization but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model
python
github
https://github.com/huggingface/transformers
src/transformers/integrations/mxfp4.py
[ { "year": 1990, "month": 1, "day": 20, "hour": 14, "minute": 12, "second": 1 }, { "year": 1996, "month": 3, "day": 20, "hour": 2, "minute": 55, "second": 12 }, { "year": 2000, "month": 3, "day": 23, "hour": 1, "minute": 34, "second": 35 }, { "year": 2010, "month": 5, "day": 10, "hour": 9, "minute": 31, "second": 12 }, { "year": 2020, "month": 6, "day": 13, "hour": 23, "minute": 1, "second": 12 }, { "year": 1881, "month": 6, "day": 10, "hour": 3, "minute": 27, "second": 59 }, { "year": 1720, "month": 2, "day": 1, "hour": 13, "minute": 24, "second": 13 }, { "year": 1630, "month": 3, "day": 4, "hour": 3, "minute": 1, "second": 1 }, { "year": 1540, "month": 3, "day": 4, "hour": 3, "minute": 2, "second": 12 }, { "year": 1024, "month": 5, "day": 10, "hour": 19, "minute": 31, "second": 2 } ]
json
github
https://github.com/nodejs/node
deps/crates/vendor/icu_calendar/benches/fixtures/datetimes.json
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) # Copyright 2025 Eddie James %YAML 1.2 --- $id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2500-scu-ic.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Aspeed AST25XX, AST26XX, AST27XX SCU Interrupt Controller maintainers: - Eddie James <eajames@linux.ibm.com> properties: compatible: enum: - aspeed,ast2500-scu-ic - aspeed,ast2600-scu-ic0 - aspeed,ast2600-scu-ic1 - aspeed,ast2700-scu-ic0 - aspeed,ast2700-scu-ic1 - aspeed,ast2700-scu-ic2 - aspeed,ast2700-scu-ic3 reg: maxItems: 1 '#interrupt-cells': const: 1 interrupts: maxItems: 1 interrupt-controller: true required: - compatible - reg - '#interrupt-cells' - interrupts - interrupt-controller additionalProperties: false examples: - | interrupt-controller@18 { compatible = "aspeed,ast2500-scu-ic"; reg = <0x18 0x4>; #interrupt-cells = <1>; interrupts = <21>; interrupt-controller; };
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2500-scu-ic.yaml
#!/usr/bin/python # # (c) 2013, Nimbis Services # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['deprecated'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ec2_ami_search short_description: Retrieve AWS AMI information for a given operating system. deprecated: "Use M(ec2_ami_find) instead." version_added: "1.6" description: - Look up the most recent AMI on AWS for a given operating system. - Returns C(ami), C(aki), C(ari), C(serial), C(tag) - If there is no AKI or ARI associated with an image, these will be C(null). - Only supports images from cloud-images.ubuntu.com - 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})' options: distro: description: Linux distribution (e.g., C(ubuntu)) required: true choices: ["ubuntu"] release: description: short name of the release (e.g., C(precise)) required: true stream: description: Type of release. 
required: false default: "server" choices: ["server", "desktop"] store: description: Back-end store for instance required: false default: "ebs" choices: ["ebs", "ebs-io1", "ebs-ssd", "instance-store"] arch: description: CPU architecture required: false default: "amd64" choices: ["i386", "amd64"] region: description: EC2 region required: false default: us-east-1 choices: ["ap-northeast-1", "ap-southeast-1", "ap-northeast-2", "ap-southeast-2", "ca-central-1", "eu-central-1", "eu-west-1", "eu-west-2", "sa-east-1", "us-east-1", "us-east-2", "us-west-1", "us-west-2", "us-gov-west-1"] virt: description: virutalization type required: false default: paravirtual choices: ["paravirtual", "hvm"] author: "Ansible Core Team (deprecated)" ''' EXAMPLES = ''' - name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance hosts: 127.0.0.1 connection: local tasks: - name: Get the Ubuntu precise AMI ec2_ami_search: distro: ubuntu release: precise region: us-west-1 store: instance-store register: ubuntu_image - name: Start the EC2 instance ec2: image: "{{ ubuntu_image.ami }}" instance_type: m1.small key_name: mykey ''' import csv from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url SUPPORTED_DISTROS = ['ubuntu'] AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', 'ap-northeast-2', 'ap-southeast-2', 'ap-south-1', 'ca-central-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'sa-east-1', 'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', "us-gov-west-1"] def get_url(module, url): """ Get url and return response """ r, info = fetch_url(module, url) if info['status'] != 200: # Backwards compat info['status_code'] = info['status'] module.fail_json(**info) return r def ubuntu(module): """ Get the ami for ubuntu """ release = module.params['release'] stream = module.params['stream'] store = module.params['store'] arch = module.params['arch'] region = module.params['region'] virt = module.params['virt'] url = get_ubuntu_url(release, stream) req = 
get_url(module, url) reader = csv.reader(req, delimiter='\t') try: ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream, store, arch, region, virt) module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag, serial=serial) except KeyError: module.fail_json(msg="No matching AMI found") def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt): """ Look up the Ubuntu AMI that matches query given a table of AMIs table: an iterable that returns a row of (release, stream, tag, serial, region, ami, aki, ari, virt) release: ubuntu release name stream: 'server' or 'desktop' store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store' arch: 'i386' or 'amd64' region: EC2 region virt: 'paravirtual' or 'hvm' Returns (ami, aki, ari, tag, serial)""" expected = (release, stream, store, arch, region, virt) for row in table: (actual_release, actual_stream, tag, serial, actual_store, actual_arch, actual_region, ami, aki, ari, actual_virt) = row actual = (actual_release, actual_stream, actual_store, actual_arch, actual_region, actual_virt) if actual == expected: # aki and ari are sometimes blank if aki == '': aki = None if ari == '': ari = None return (ami, aki, ari, tag, serial) raise KeyError() def get_ubuntu_url(release, stream): url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt" return url % (release, stream) def main(): arg_spec = dict( distro=dict(required=True, choices=SUPPORTED_DISTROS), release=dict(required=True), stream=dict(required=False, default='server', choices=['desktop', 'server']), store=dict(required=False, default='ebs', choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']), arch=dict(required=False, default='amd64', choices=['i386', 'amd64']), region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), virt=dict(required=False, default='paravirtual', choices=['paravirtual', 'hvm']), ) module = AnsibleModule(argument_spec=arg_spec) distro = module.params['distro'] if distro == 'ubuntu': 
ubuntu(module) else: module.fail_json(msg="Unsupported distro: %s" % distro) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains the data classes of the Google Calendar Data API""" __author__ = 'j.s@google.com (Jeff Scudder)' import atom.core import atom.data import gdata.acl.data import gdata.data import gdata.geo.data import gdata.opensearch.data GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005/}%s' class AccessLevelProperty(atom.core.XmlElement): """Describes how much a given user may do with an event or calendar""" _qname = GCAL_TEMPLATE % 'accesslevel' value = 'value' class AllowGSync2Property(atom.core.XmlElement): """Whether the user is permitted to run Google Apps Sync""" _qname = GCAL_TEMPLATE % 'allowGSync2' value = 'value' class AllowGSyncProperty(atom.core.XmlElement): """Whether the user is permitted to run Google Apps Sync""" _qname = GCAL_TEMPLATE % 'allowGSync' value = 'value' class AnyoneCanAddSelfProperty(atom.core.XmlElement): """Whether anyone can add self as attendee""" _qname = GCAL_TEMPLATE % 'anyoneCanAddSelf' value = 'value' class CalendarAclRole(gdata.acl.data.AclRole): """Describes the Calendar roles of an entry in the Calendar access control list""" _qname = gdata.acl.data.GACL_TEMPLATE % 'role' class CalendarCommentEntry(gdata.data.GDEntry): """Describes an entry in a feed of a Calendar event's comments""" class CalendarCommentFeed(gdata.data.GDFeed): """Describes feed of a Calendar event's comments""" entry = [CalendarCommentEntry] class 
CalendarComments(gdata.data.Comments): """Describes a container of a feed link for Calendar comment entries""" _qname = gdata.data.GD_TEMPLATE % 'comments' class CalendarExtendedProperty(gdata.data.ExtendedProperty): """Defines a value for the realm attribute that is used only in the calendar API""" _qname = gdata.data.GD_TEMPLATE % 'extendedProperty' class CalendarWhere(gdata.data.Where): """Extends the base Where class with Calendar extensions""" _qname = gdata.data.GD_TEMPLATE % 'where' class ColorProperty(atom.core.XmlElement): """Describes the color of a calendar""" _qname = GCAL_TEMPLATE % 'color' value = 'value' class GuestsCanInviteOthersProperty(atom.core.XmlElement): """Whether guests can invite others to the event""" _qname = GCAL_TEMPLATE % 'guestsCanInviteOthers' value = 'value' class GuestsCanModifyProperty(atom.core.XmlElement): """Whether guests can modify event""" _qname = GCAL_TEMPLATE % 'guestsCanModify' value = 'value' class GuestsCanSeeGuestsProperty(atom.core.XmlElement): """Whether guests can see other attendees""" _qname = GCAL_TEMPLATE % 'guestsCanSeeGuests' value = 'value' class HiddenProperty(atom.core.XmlElement): """Describes whether a calendar is hidden""" _qname = GCAL_TEMPLATE % 'hidden' value = 'value' class IcalUIDProperty(atom.core.XmlElement): """Describes the UID in the ical export of the event""" _qname = GCAL_TEMPLATE % 'uid' value = 'value' class OverrideNameProperty(atom.core.XmlElement): """Describes the override name property of a calendar""" _qname = GCAL_TEMPLATE % 'overridename' value = 'value' class PrivateCopyProperty(atom.core.XmlElement): """Indicates whether this is a private copy of the event, changes to which should not be sent to other calendars""" _qname = GCAL_TEMPLATE % 'privateCopy' value = 'value' class QuickAddProperty(atom.core.XmlElement): """Describes whether gd:content is for quick-add processing""" _qname = GCAL_TEMPLATE % 'quickadd' value = 'value' class ResourceProperty(atom.core.XmlElement): 
"""Describes whether gd:who is a resource such as a conference room""" _qname = GCAL_TEMPLATE % 'resource' value = 'value' id = 'id' class EventWho(gdata.data.Who): """Extends the base Who class with Calendar extensions""" _qname = gdata.data.GD_TEMPLATE % 'who' resource = ResourceProperty class SelectedProperty(atom.core.XmlElement): """Describes whether a calendar is selected""" _qname = GCAL_TEMPLATE % 'selected' value = 'value' class SendAclNotificationsProperty(atom.core.XmlElement): """Describes whether to send ACL notifications to grantees""" _qname = GCAL_TEMPLATE % 'sendAclNotifications' value = 'value' class CalendarAclEntry(gdata.data.GDEntry): """Describes an entry in a feed of a Calendar access control list (ACL)""" send_acl_notifications = SendAclNotificationsProperty class CalendarAclFeed(gdata.data.GDFeed): """Describes a Calendar access contorl list (ACL) feed""" entry = [CalendarAclEntry] class SendEventNotificationsProperty(atom.core.XmlElement): """Describes whether to send event notifications to other participants of the event""" _qname = GCAL_TEMPLATE % 'sendEventNotifications' value = 'value' class SequenceNumberProperty(atom.core.XmlElement): """Describes sequence number of an event""" _qname = GCAL_TEMPLATE % 'sequence' value = 'value' class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry): """Describes an entry used by a Calendar recurrence exception entry link""" uid = IcalUIDProperty sequence = SequenceNumberProperty class CalendarRecurrenceException(gdata.data.RecurrenceException): """Describes an exception to a recurring Calendar event""" _qname = gdata.data.GD_TEMPLATE % 'recurrenceException' class SettingsProperty(atom.core.XmlElement): """User preference name-value pair""" _qname = GCAL_TEMPLATE % 'settingsProperty' name = 'name' value = 'value' class SettingsEntry(gdata.data.GDEntry): """Describes a Calendar Settings property entry""" settings_property = SettingsProperty class CalendarSettingsFeed(gdata.data.GDFeed): 
"""Personal settings for Calendar application""" entry = [SettingsEntry] class SuppressReplyNotificationsProperty(atom.core.XmlElement): """Lists notification methods to be suppressed for this reply""" _qname = GCAL_TEMPLATE % 'suppressReplyNotifications' methods = 'methods' class SyncEventProperty(atom.core.XmlElement): """Describes whether this is a sync scenario where the Ical UID and Sequence number are honored during inserts and updates""" _qname = GCAL_TEMPLATE % 'syncEvent' value = 'value' class CalendarEventEntry(gdata.data.BatchEntry): """Describes a Calendar event entry""" quickadd = QuickAddProperty send_event_notifications = SendEventNotificationsProperty sync_event = SyncEventProperty anyone_can_add_self = AnyoneCanAddSelfProperty extended_property = [CalendarExtendedProperty] sequence = SequenceNumberProperty guests_can_invite_others = GuestsCanInviteOthersProperty guests_can_modify = GuestsCanModifyProperty guests_can_see_guests = GuestsCanSeeGuestsProperty georss_where = gdata.geo.data.GeoRssWhere private_copy = PrivateCopyProperty suppress_reply_notifications = SuppressReplyNotificationsProperty uid = IcalUIDProperty class TimeZoneProperty(atom.core.XmlElement): """Describes the time zone of a calendar""" _qname = GCAL_TEMPLATE % 'timezone' value = 'value' class TimesCleanedProperty(atom.core.XmlElement): """Describes how many times calendar was cleaned via Manage Calendars""" _qname = GCAL_TEMPLATE % 'timesCleaned' value = 'value' class CalendarEntry(gdata.data.GDEntry): """Describes a Calendar entry in the feed of a user's calendars""" timezone = TimeZoneProperty overridename = OverrideNameProperty hidden = HiddenProperty selected = SelectedProperty times_cleaned = TimesCleanedProperty color = ColorProperty where = [CalendarWhere] accesslevel = AccessLevelProperty class CalendarEventFeed(gdata.data.BatchFeed): """Describes a Calendar event feed""" allow_g_sync2 = AllowGSync2Property timezone = TimeZoneProperty entry = [CalendarEventEntry] 
times_cleaned = TimesCleanedProperty allow_g_sync = AllowGSyncProperty class CalendarFeed(gdata.data.GDFeed): """Describes a feed of Calendars""" entry = [CalendarEntry] class WebContentGadgetPref(atom.core.XmlElement): """Describes a single web content gadget preference""" _qname = GCAL_TEMPLATE % 'webContentGadgetPref' name = 'name' value = 'value' class WebContent(atom.core.XmlElement): """Describes a "web content" extension""" _qname = GCAL_TEMPLATE % 'webContent' height = 'height' width = 'width' web_content_gadget_pref = [WebContentGadgetPref] url = 'url' display = 'display'
unknown
codeparrot/codeparrot-clean
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the CockroachDB Software License // included in the /LICENSE file. package sql import ( "context" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/eval" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" ) // tableInserter handles writing kvs and forming table rows for inserts. type tableInserter struct { tableWriterBase ri row.Inserter } // init initializes the tableInserter with a Txn. func (ti *tableInserter) init(_ context.Context, txn *kv.Txn, evalCtx *eval.Context) error { return ti.tableWriterBase.init(txn, ti.tableDesc(), evalCtx) } // row performs an insert. // // The passed Datums is not used after `row` returns. // // The PartialIndexUpdateHelper is used to determine which partial indexes // to avoid updating when performing row modification. This is necessary // because not all rows are indexed by partial indexes. // // The VectorIndexUpdateHelper is used to determine which partitions to update // in each vector index and supply the quantized vectors to add to the // partitions. This is necessary because these values are not part of the table, // and are materialized only for the purpose of updating vector indexes. // // The traceKV parameter determines whether the individual K/V operations // should be logged to the context. We use a separate argument here instead // of a Value field on the context because Value access in context.Context // is rather expensive. 
func (ti *tableInserter) row( ctx context.Context, values tree.Datums, pm row.PartialIndexUpdateHelper, vh row.VectorIndexUpdateHelper, oth row.OriginTimestampCPutHelper, traceKV bool, ) error { ti.currentBatchSize++ return ti.ri.InsertRow(ctx, &ti.putter, values, pm, vh, oth, row.CPutOp, traceKV) } // tableDesc returns the TableDescriptor for the table that the tableInserter // will modify. func (ti *tableInserter) tableDesc() catalog.TableDescriptor { return ti.ri.Helper.TableDesc }
go
github
https://github.com/cockroachdb/cockroach
pkg/sql/tablewriter_insert.go
#if defined __AVX512__ || defined __AVX512F__ #include <immintrin.h> void test() { __m512 a, b, c, d, e; __m512i ai, bi, ci, di, ei, fi; __m128 *mem; __m128i *memi; __mmask16 m; a = _mm512_4fnmadd_ps(a, b, c, d, e, mem); ai = _mm512_4dpwssd_epi32(ai, bi, ci, di, ei, memi); ai = _mm512_popcnt_epi64(ai); } #else #error "AVX512-KNM is not supported" #endif int main() { return 0; }
cpp
github
https://github.com/opencv/opencv
cmake/checks/cpu_avx512knm.cpp
# set up logging before importing any other components if __name__ == '__main__': from config import initialize_logging # noqa initialize_logging('jmxfetch') # std import glob import logging import os import signal import sys import time # 3rd party import yaml # datadog from config import ( DEFAULT_CHECK_FREQUENCY, get_confd_path, get_config, get_logging_config, PathNotFound, ) from util import yLoader from utils.jmx import JMX_FETCH_JAR_NAME, JMXFiles from utils.platform import Platform from utils.subprocess_output import subprocess log = logging.getLogger('jmxfetch') JAVA_LOGGING_LEVEL = { logging.CRITICAL: "FATAL", logging.DEBUG: "DEBUG", logging.ERROR: "ERROR", logging.FATAL: "FATAL", logging.INFO: "INFO", logging.WARN: "WARN", logging.WARNING: "WARN", } _JVM_DEFAULT_MAX_MEMORY_ALLOCATION = " -Xmx200m" _JVM_DEFAULT_INITIAL_MEMORY_ALLOCATION = " -Xms50m" JMXFETCH_MAIN_CLASS = "org.datadog.jmxfetch.App" JMX_CHECKS = [ 'activemq', 'activemq_58', 'cassandra', 'jmx', 'solr', 'tomcat', ] JMX_COLLECT_COMMAND = 'collect' JMX_LIST_COMMANDS = { 'list_everything': 'List every attributes available that has a type supported by JMXFetch', 'list_collected_attributes': 'List attributes that will actually be collected by your current instances configuration', 'list_matching_attributes': 'List attributes that match at least one of your instances configuration', 'list_not_matching_attributes': "List attributes that don't match any of your instances configuration", 'list_limited_attributes': "List attributes that do match one of your instances configuration but that are not being collected because it would exceed the number of metrics that can be collected", JMX_COLLECT_COMMAND: "Start the collection of metrics based on your current configuration and display them in the console"} LINK_TO_DOC = "See http://docs.datadoghq.com/integrations/java/ for more information" class InvalidJMXConfiguration(Exception): pass class JMXFetch(object): """ Start JMXFetch if any JMX check is 
configured """ def __init__(self, confd_path, agentConfig): self.confd_path = confd_path self.agentConfig = agentConfig self.logging_config = get_logging_config() self.check_frequency = DEFAULT_CHECK_FREQUENCY self.jmx_process = None self.jmx_checks = None def terminate(self): self.jmx_process.terminate() def _handle_sigterm(self, signum, frame): # Terminate jmx process on SIGTERM signal log.debug("Caught sigterm. Stopping subprocess.") self.jmx_process.terminate() def register_signal_handlers(self): """ Enable SIGTERM and SIGINT handlers """ try: # Gracefully exit on sigterm signal.signal(signal.SIGTERM, self._handle_sigterm) # Handle Keyboard Interrupt signal.signal(signal.SIGINT, self._handle_sigterm) except ValueError: log.exception("Unable to register signal handlers.") def configure(self, checks_list=None, clean_status_file=True): """ Instantiate JMXFetch parameters, clean potential previous run leftovers. """ if clean_status_file: JMXFiles.clean_status_file() self.jmx_checks, self.invalid_checks, self.java_bin_path, self.java_options, \ self.tools_jar_path, self.custom_jar_paths = \ self.get_configuration(self.confd_path, checks_list=checks_list) def should_run(self): """ Should JMXFetch run ? """ return self.jmx_checks is not None and self.jmx_checks != [] def run(self, command=None, checks_list=None, reporter=None, redirect_std_streams=False): """ Run JMXFetch redirect_std_streams: if left to False, the stdout and stderr of JMXFetch are streamed directly to the environment's stdout and stderr and cannot be retrieved via python's sys.stdout and sys.stderr. Set to True to redirect these streams to python's sys.stdout and sys.stderr. 
""" if checks_list or self.jmx_checks is None: # (Re)set/(re)configure JMXFetch parameters when `checks_list` is specified or # no configuration was found self.configure(checks_list) try: command = command or JMX_COLLECT_COMMAND if len(self.invalid_checks) > 0: try: JMXFiles.write_status_file(self.invalid_checks) except Exception: log.exception("Error while writing JMX status file") if len(self.jmx_checks) > 0: return self._start(self.java_bin_path, self.java_options, self.jmx_checks, command, reporter, self.tools_jar_path, self.custom_jar_paths, redirect_std_streams) else: # We're exiting purposefully, so exit with zero (supervisor's expected # code). HACK: Sleep a little bit so supervisor thinks we've started cleanly # and thus can exit cleanly. time.sleep(4) log.info("No valid JMX integration was found. Exiting ...") except Exception: log.exception("Error while initiating JMXFetch") raise @classmethod def get_configuration(cls, confd_path, checks_list=None): """ Return a tuple (jmx_checks, invalid_checks, java_bin_path, java_options, tools_jar_path) jmx_checks: list of yaml files that are jmx checks (they have the is_jmx flag enabled or they are in JMX_CHECKS) and that have at least one instance configured invalid_checks: dictionary whose keys are check names that are JMX checks but they have a bad configuration. Values of the dictionary are exceptions generated when checking the configuration java_bin_path: is the path to the java executable. It was previously set in the "instance" part of the yaml file of the jmx check. So we need to parse yaml files to get it. 
We assume that this value is alwayws the same for every jmx check so we can return the first value returned java_options: is string contains options that will be passed to java_bin_path We assume that this value is alwayws the same for every jmx check so we can return the first value returned tools_jar_path: Path to tools.jar, which is only part of the JDK and that is required to connect to a local JMX instance using the attach api. """ jmx_checks = [] java_bin_path = None java_options = None tools_jar_path = None custom_jar_paths = [] invalid_checks = {} for conf in glob.glob(os.path.join(confd_path, '*.yaml')): filename = os.path.basename(conf) check_name = filename.split('.')[0] if os.path.exists(conf): f = open(conf) try: check_config = yaml.load(f.read(), Loader=yLoader) assert check_config is not None f.close() except Exception: f.close() log.error("Unable to parse yaml config in %s" % conf) continue try: is_jmx, check_java_bin_path, check_java_options, check_tools_jar_path, check_custom_jar_paths = \ cls._is_jmx_check(check_config, check_name, checks_list) if is_jmx: jmx_checks.append(filename) if java_bin_path is None and check_java_bin_path is not None: java_bin_path = check_java_bin_path if java_options is None and check_java_options is not None: java_options = check_java_options if tools_jar_path is None and check_tools_jar_path is not None: tools_jar_path = check_tools_jar_path if check_custom_jar_paths: custom_jar_paths.extend(check_custom_jar_paths) except InvalidJMXConfiguration, e: log.error("%s check does not have a valid JMX configuration: %s" % (check_name, e)) # Make sure check_name is a string - Fix issues with Windows check_name = check_name.encode('ascii', 'ignore') invalid_checks[check_name] = str(e) return (jmx_checks, invalid_checks, java_bin_path, java_options, tools_jar_path, custom_jar_paths) def _start(self, path_to_java, java_run_opts, jmx_checks, command, reporter, tools_jar_path, custom_jar_paths, redirect_std_streams): statsd_port 
= self.agentConfig.get('dogstatsd_port', "8125") if reporter is None: reporter = "statsd:%s" % str(statsd_port) log.info("Starting jmxfetch:") try: path_to_java = path_to_java or "java" java_run_opts = java_run_opts or "" path_to_jmxfetch = self._get_path_to_jmxfetch() path_to_status_file = JMXFiles.get_status_file_path() classpath = path_to_jmxfetch if tools_jar_path is not None: classpath = r"%s:%s" % (tools_jar_path, classpath) if custom_jar_paths: classpath = r"%s:%s" % (':'.join(custom_jar_paths), classpath) subprocess_args = [ path_to_java, # Path to the java bin '-classpath', classpath, JMXFETCH_MAIN_CLASS, '--check_period', str(self.check_frequency * 1000), # Period of the main loop of jmxfetch in ms '--conf_directory', r"%s" % self.confd_path, # Path of the conf.d directory that will be read by jmxfetch, '--log_level', JAVA_LOGGING_LEVEL.get(self.logging_config.get("log_level"), "INFO"), # Log Level: Mapping from Python log level to log4j log levels '--log_location', r"%s" % self.logging_config.get('jmxfetch_log_file'), # Path of the log file '--reporter', reporter, # Reporter to use '--status_location', r"%s" % path_to_status_file, # Path to the status file to write command, # Name of the command ] if Platform.is_windows(): # Signal handlers are not supported on Windows: # use a file to trigger JMXFetch exit instead path_to_exit_file = JMXFiles.get_python_exit_file_path() subprocess_args.insert(len(subprocess_args) - 1, '--exit_file_location') subprocess_args.insert(len(subprocess_args) - 1, path_to_exit_file) subprocess_args.insert(4, '--check') for check in jmx_checks: subprocess_args.insert(5, check) # Specify a maximum memory allocation pool for the JVM if "Xmx" not in java_run_opts and "XX:MaxHeapSize" not in java_run_opts: java_run_opts += _JVM_DEFAULT_MAX_MEMORY_ALLOCATION # Specify the initial memory allocation pool for the JVM if "Xms" not in java_run_opts and "XX:InitialHeapSize" not in java_run_opts: java_run_opts += 
_JVM_DEFAULT_INITIAL_MEMORY_ALLOCATION for opt in java_run_opts.split(): subprocess_args.insert(1, opt) log.info("Running %s" % " ".join(subprocess_args)) # Launch JMXfetch subprocess jmx_process = subprocess.Popen( subprocess_args, close_fds=not redirect_std_streams, # set to True instead of False when the streams are redirected for WIN compatibility stdout=subprocess.PIPE if redirect_std_streams else None, stderr=subprocess.PIPE if redirect_std_streams else None ) self.jmx_process = jmx_process # Register SIGINT and SIGTERM signal handlers self.register_signal_handlers() if redirect_std_streams: # Wait for JMXFetch to return, and write out the stdout and stderr of JMXFetch to sys.stdout and sys.stderr out, err = jmx_process.communicate() sys.stdout.write(out) sys.stderr.write(err) else: # Wait for JMXFetch to return jmx_process.wait() return jmx_process.returncode except OSError: java_path_msg = "Couldn't launch JMXTerm. Is Java in your PATH ?" log.exception(java_path_msg) invalid_checks = {} for check in jmx_checks: check_name = check.split('.')[0] check_name = check_name.encode('ascii', 'ignore') invalid_checks[check_name] = java_path_msg JMXFiles.write_status_file(invalid_checks) raise except Exception: log.exception("Couldn't launch JMXFetch") raise @staticmethod def _is_jmx_check(check_config, check_name, checks_list): init_config = check_config.get('init_config', {}) or {} java_bin_path = None java_options = None is_jmx = False is_attach_api = False tools_jar_path = init_config.get("tools_jar_path") custom_jar_paths = init_config.get("custom_jar_paths") if init_config is None: init_config = {} if checks_list: if check_name in checks_list: is_jmx = True elif init_config.get('is_jmx') or check_name in JMX_CHECKS: is_jmx = True if is_jmx: instances = check_config.get('instances', []) if type(instances) != list or len(instances) == 0: raise InvalidJMXConfiguration("You need to have at least one instance " "defined in the YAML file for this check") for inst in 
instances: if type(inst) != dict: raise InvalidJMXConfiguration("Each instance should be" " a dictionary. %s" % LINK_TO_DOC) host = inst.get('host', None) port = inst.get('port', None) conf = inst.get('conf', init_config.get('conf', None)) tools_jar_path = inst.get('tools_jar_path') # Support for attach api using a process name regex proc_regex = inst.get('process_name_regex') # Support for a custom jmx URL jmx_url = inst.get('jmx_url') name = inst.get('name') if proc_regex is not None: is_attach_api = True elif jmx_url is not None: if name is None: raise InvalidJMXConfiguration("A name must be specified when using a jmx_url") else: if host is None: raise InvalidJMXConfiguration("A host must be specified") if port is None or type(port) != int: raise InvalidJMXConfiguration("A numeric port must be specified") if conf is None: log.warning("%s doesn't have a 'conf' section. Only basic JVM metrics" " will be collected. %s" % (inst, LINK_TO_DOC)) else: if type(conf) != list or len(conf) == 0: raise InvalidJMXConfiguration("'conf' section should be a list" " of configurations %s" % LINK_TO_DOC) for config in conf: include = config.get('include', None) if include is None: raise InvalidJMXConfiguration("Each configuration must have an" " 'include' section. 
%s" % LINK_TO_DOC) if type(include) != dict: raise InvalidJMXConfiguration("'include' section must" " be a dictionary %s" % LINK_TO_DOC) if java_bin_path is None: if init_config and init_config.get('java_bin_path'): # We get the java bin path from the yaml file # for backward compatibility purposes java_bin_path = init_config.get('java_bin_path') else: for instance in instances: if instance and instance.get('java_bin_path'): java_bin_path = instance.get('java_bin_path') if java_options is None: if init_config and init_config.get('java_options'): java_options = init_config.get('java_options') else: for instance in instances: if instance and instance.get('java_options'): java_options = instance.get('java_options') if is_attach_api: if tools_jar_path is None: for instance in instances: if instance and instance.get("tools_jar_path"): tools_jar_path = instance.get("tools_jar_path") if tools_jar_path is None: raise InvalidJMXConfiguration("You must specify the path to tools.jar" " in your JDK.") elif not os.path.isfile(tools_jar_path): raise InvalidJMXConfiguration("Unable to find tools.jar at %s" % tools_jar_path) else: tools_jar_path = None if custom_jar_paths: if isinstance(custom_jar_paths, basestring): custom_jar_paths = [custom_jar_paths] for custom_jar_path in custom_jar_paths: if not os.path.isfile(custom_jar_path): raise InvalidJMXConfiguration("Unable to find custom jar at %s" % custom_jar_path) return is_jmx, java_bin_path, java_options, tools_jar_path, custom_jar_paths def _get_path_to_jmxfetch(self): if not Platform.is_windows(): return os.path.realpath(os.path.join(os.path.abspath(__file__), "..", "checks", "libs", JMX_FETCH_JAR_NAME)) return os.path.realpath(os.path.join(os.path.abspath(__file__), "..", "..", "jmxfetch", JMX_FETCH_JAR_NAME)) def init(config_path=None): agentConfig = get_config(parse_args=False, cfg_path=config_path) try: confd_path = get_confd_path() except PathNotFound, e: log.error("No conf.d folder found at '%s' or in the directory 
where" "the Agent is currently deployed.\n" % e.args[0]) return confd_path, agentConfig def main(config_path=None): """ JMXFetch main entry point """ confd_path, agentConfig = init(config_path) jmx = JMXFetch(confd_path, agentConfig) return jmx.run() if __name__ == '__main__': sys.exit(main())
unknown
codeparrot/codeparrot-clean
import __future__ class Node(object): def __init__(self, data = None, next_node = None, prev_node = None): self.data = data self.next_node = next_node self.prev_node = prev_node def print_node(self): if (self.data): print("self", self.data, ", next:", self.next_node.data, ", prev:", self.prev_node.data) else: print("Empty Node") class linked_list(object): def __init__(self, head = None, tail = None): self.head = head self.tail = tail def insertleft(self, data): new_head = Node(data, self.head) if self.head: self.head.prev_node = new_head else: self.tail = new_head self.head = new_head def popleft(self): node = self.head if node: nxt = node.next_node # Additional list remains. Next node is now the new head. if nxt: nxt.prev_node = None self.head = nxt # empty list. Reset tail and head to None else: self.head = None self.tail = None return node.data else: return None def search(self, data): current = self.head found = False while current and not found: if current.data == data: found = True else: current = current.next_node if current: return current else: return None def print_node(self, data): node = self.search(data) if node: node.print_node() else: print ("Node does not Exist") def remove(self, data): node = self.search(data) # Node not found. if not node: return prev = node.prev_node nxt = node.next_node # Node is a middle link with a previous and next if prev and nxt: prev.next_node = nxt nxt.prev_node = prev # Node is tail node. No next node. elif prev and not nxt: prev.next_node = None self.tail = node.prev_node # Node is head node. No previous node. elif nxt and not prev: nxt.prev_node = None self.head = node.next_node # Node is both head and tail, leaving empty list. else: self.head = None self.tail = None return def print_list(self): current = self.head while current: print(current.data) current = current.next_node if __name__ == "__main__": l = linked_list() for i in xrange(10): l.insertleft(i) l.remove(4) for i in xrange(9): print(l.popleft())
unknown
codeparrot/codeparrot-clean
// LANGUAGE: +ContextParameters // MODULE: context // FILE: contextParameter.kt class Ctx1 { fun foo() = 10 } class Ctx2 { fun boo(x: Int) = 10 + x } context(ctx2: Ctx2) fun bar(x: Int) = ctx2.boo(x) context(ctx1: Ctx1, ctx2: Ctx2) fun check(x: Int) { <caret_context>ctx1.foo() } fun main() { context(Ctx1(), Ctx2()) { check(10) } } // MODULE: main // MODULE_KIND: CodeFragment // CONTEXT_MODULE: context // FILE: fragment.kt // CODE_FRAGMENT_KIND: EXPRESSION bar(x)
kotlin
github
https://github.com/JetBrains/kotlin
analysis/analysis-api/testData/components/compilerFacility/compilation/codeFragments/capturing/contextParameter.kt
from django.template.base import TemplateSyntaxError from django.template.loader import get_template from django.test import SimpleTestCase from django.utils import six from .utils import render, setup class WidthRatioTagTests(SimpleTestCase): @setup({'widthratio01': '{% widthratio a b 0 %}'}) def test_widthratio01(self): output = render('widthratio01', {'a': 50, 'b': 100}) self.assertEqual(output, '0') @setup({'widthratio02': '{% widthratio a b 100 %}'}) def test_widthratio02(self): output = render('widthratio02', {'a': 0, 'b': 0}) self.assertEqual(output, '0') @setup({'widthratio03': '{% widthratio a b 100 %}'}) def test_widthratio03(self): output = render('widthratio03', {'a': 0, 'b': 100}) self.assertEqual(output, '0') @setup({'widthratio04': '{% widthratio a b 100 %}'}) def test_widthratio04(self): output = render('widthratio04', {'a': 50, 'b': 100}) self.assertEqual(output, '50') @setup({'widthratio05': '{% widthratio a b 100 %}'}) def test_widthratio05(self): output = render('widthratio05', {'a': 100, 'b': 100}) self.assertEqual(output, '100') @setup({'widthratio06': '{% widthratio a b 100 %}'}) def test_widthratio06(self): """ 62.5 should round to 63 on Python 2 and 62 on Python 3 See http://docs.python.org/py3k/whatsnew/3.0.html """ output = render('widthratio06', {'a': 50, 'b': 80}) self.assertEqual(output, '62' if six.PY3 else '63') @setup({'widthratio07': '{% widthratio a b 100 %}'}) def test_widthratio07(self): """ 71.4 should round to 71 """ output = render('widthratio07', {'a': 50, 'b': 70}) self.assertEqual(output, '71') # Raise exception if we don't have 3 args, last one an integer @setup({'widthratio08': '{% widthratio %}'}) def test_widthratio08(self): with self.assertRaises(TemplateSyntaxError): get_template('widthratio08') @setup({'widthratio09': '{% widthratio a b %}'}) def test_widthratio09(self): with self.assertRaises(TemplateSyntaxError): render('widthratio09', {'a': 50, 'b': 100}) @setup({'widthratio10': '{% widthratio a b 100.0 %}'}) def 
test_widthratio10(self): output = render('widthratio10', {'a': 50, 'b': 100}) self.assertEqual(output, '50') @setup({'widthratio11': '{% widthratio a b c %}'}) def test_widthratio11(self): """ #10043: widthratio should allow max_width to be a variable """ output = render('widthratio11', {'a': 50, 'c': 100, 'b': 100}) self.assertEqual(output, '50') # #18739: widthratio should handle None args consistently with # non-numerics @setup({'widthratio12a': '{% widthratio a b c %}'}) def test_widthratio12a(self): output = render('widthratio12a', {'a': 'a', 'c': 100, 'b': 100}) self.assertEqual(output, '') @setup({'widthratio12b': '{% widthratio a b c %}'}) def test_widthratio12b(self): output = render('widthratio12b', {'a': None, 'c': 100, 'b': 100}) self.assertEqual(output, '') @setup({'widthratio13a': '{% widthratio a b c %}'}) def test_widthratio13a(self): output = render('widthratio13a', {'a': 0, 'c': 100, 'b': 'b'}) self.assertEqual(output, '') @setup({'widthratio13b': '{% widthratio a b c %}'}) def test_widthratio13b(self): output = render('widthratio13b', {'a': 0, 'c': 100, 'b': None}) self.assertEqual(output, '') @setup({'widthratio14a': '{% widthratio a b c %}'}) def test_widthratio14a(self): with self.assertRaises(TemplateSyntaxError): render('widthratio14a', {'a': 0, 'c': 'c', 'b': 100}) @setup({'widthratio14b': '{% widthratio a b c %}'}) def test_widthratio14b(self): with self.assertRaises(TemplateSyntaxError): render('widthratio14b', {'a': 0, 'c': None, 'b': 100}) @setup({'widthratio15': '{% load custom %}{% widthratio a|noop:"x y" b 0 %}'}) def test_widthratio15(self): """ Test whitespace in filter argument """ output = render('widthratio15', {'a': 50, 'b': 100}) self.assertEqual(output, '0') # Widthratio with variable assignment @setup({'widthratio16': '{% widthratio a b 100 as variable %}-{{ variable }}-'}) def test_widthratio16(self): output = render('widthratio16', {'a': 50, 'b': 100}) self.assertEqual(output, '-50-') @setup({'widthratio17': '{% widthratio 
a b 100 as variable %}-{{ variable }}-'}) def test_widthratio17(self): output = render('widthratio17', {'a': 100, 'b': 100}) self.assertEqual(output, '-100-') @setup({'widthratio18': '{% widthratio a b 100 as %}'}) def test_widthratio18(self): with self.assertRaises(TemplateSyntaxError): get_template('widthratio18') @setup({'widthratio19': '{% widthratio a b 100 not_as variable %}'}) def test_widthratio19(self): with self.assertRaises(TemplateSyntaxError): get_template('widthratio19') @setup({'widthratio20': '{% widthratio a b 100 %}'}) def test_widthratio20(self): output = render('widthratio20', {'a': float('inf'), 'b': float('inf')}) self.assertEqual(output, '') @setup({'widthratio21': '{% widthratio a b 100 %}'}) def test_widthratio21(self): output = render('widthratio21', {'a': float('inf'), 'b': 2}) self.assertEqual(output, '')
unknown
codeparrot/codeparrot-clean
/* contrib/pageinspect/pageinspect--1.2--1.3.sql */ -- complain if script is sourced in psql, rather than via ALTER EXTENSION \echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.3'" to load this file. \quit -- -- brin_page_type() -- CREATE FUNCTION brin_page_type(IN page bytea) RETURNS text AS 'MODULE_PATHNAME', 'brin_page_type' LANGUAGE C STRICT; -- -- brin_metapage_info() -- CREATE FUNCTION brin_metapage_info(IN page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint) AS 'MODULE_PATHNAME', 'brin_metapage_info' LANGUAGE C STRICT; -- -- brin_revmap_data() -- CREATE FUNCTION brin_revmap_data(IN page bytea, OUT pages tid) RETURNS SETOF tid AS 'MODULE_PATHNAME', 'brin_revmap_data' LANGUAGE C STRICT; -- -- brin_page_items() -- CREATE FUNCTION brin_page_items(IN page bytea, IN index_oid regclass, OUT itemoffset int, OUT blknum int, OUT attnum int, OUT allnulls bool, OUT hasnulls bool, OUT placeholder bool, OUT value text) RETURNS SETOF record AS 'MODULE_PATHNAME', 'brin_page_items' LANGUAGE C STRICT; -- -- gin_metapage_info() -- CREATE FUNCTION gin_metapage_info(IN page bytea, OUT pending_head bigint, OUT pending_tail bigint, OUT tail_free_size int4, OUT n_pending_pages bigint, OUT n_pending_tuples bigint, OUT n_total_pages bigint, OUT n_entry_pages bigint, OUT n_data_pages bigint, OUT n_entries bigint, OUT version int4) AS 'MODULE_PATHNAME', 'gin_metapage_info' LANGUAGE C STRICT; -- -- gin_page_opaque_info() -- CREATE FUNCTION gin_page_opaque_info(IN page bytea, OUT rightlink bigint, OUT maxoff int4, OUT flags text[]) AS 'MODULE_PATHNAME', 'gin_page_opaque_info' LANGUAGE C STRICT; -- -- gin_leafpage_items() -- CREATE FUNCTION gin_leafpage_items(IN page bytea, OUT first_tid tid, OUT nbytes int2, OUT tids tid[]) RETURNS SETOF record AS 'MODULE_PATHNAME', 'gin_leafpage_items' LANGUAGE C STRICT;
sql
github
https://github.com/postgres/postgres
contrib/pageinspect/pageinspect--1.2--1.3.sql
# sympy/galgebra/vector.py """ vector.py is a helper class for the MV class that defines the basis vectors and metric and calulates derivatives of the basis vectors for the MV class. """ import itertools import copy from sympy import Symbol, S, Matrix, trigsimp, diff, expand from sympy.core.compatibility import range from sympy.galgebra.printing import GA_Printer from sympy.galgebra.stringarrays import str_array from sympy.galgebra.ncutil import linear_derivation, bilinear_product from sympy.galgebra.debug import oprint def flatten(lst): return list(itertools.chain(*lst)) def TrigSimp(x): return trigsimp(x, recursive=True) class Vector(object): """ Vector class. Setup is done by defining a set of basis vectors in static function 'Bases'. The linear combination of scalar (commutative) sympy quatities and the basis vectors form the vector space. If the number of basis vectors is 'n' the metric tensor is formed as an n by n sympy matrix of scalar symbols and represents the dot products of pairs of basis vectors. """ is_orthogonal = False @staticmethod def setup(base, n=None, metric=None, coords=None, curv=(None, None), debug=False): """ Generate basis of vector space as tuple of vectors and associated metric tensor as Matrix. See str_array(base,n) for usage of base and n and str_array(metric) for usage of metric. To overide elements in the default metric use the character '#' in the metric string. For example if one wishes the diagonal elements of the metric tensor to be zero enter metric = '0 #,# 0'. If the basis vectors are e1 and e2 then the default metric - Vector.metric = ((dot(e1,e1),dot(e1,e2)),dot(e2,e1),dot(e2,e2)) becomes - Vector.metric = ((0,dot(e1,e2)),(dot(e2,e1),0)). The function dot returns a Symbol and is symmetric. 
The functions 'Bases' calculates the global quantities: - Vector.basis tuple of basis vectors Vector.base_to_index dictionary to convert base to base inded Vector.metric metric tensor represented as a matrix of symbols and numbers """ Vector.is_orthogonal = False Vector.coords = coords Vector.subscripts = [] base_name_lst = base.split(' ') # Define basis vectors if '*' in base: base_lst = base.split('*') base = base_lst[0] Vector.subscripts = base_lst[1].split('|') base_name_lst = [] for subscript in Vector.subscripts: base_name_lst.append(base + '_' + subscript) else: if len(base_name_lst) > 1: Vector.subscripts = [] for base_name in base_name_lst: tmp = base_name.split('_') Vector.subscripts.append(tmp[-1]) elif len(base_name_lst) == 1 and Vector.coords is not None: base_name_lst = [] for coord in Vector.coords: Vector.subscripts.append(str(coord)) base_name_lst.append(base + '_' + str(coord)) else: raise TypeError("'%s' does not define basis vectors" % base) basis = [] base_to_index = {} index = 0 for base_name in base_name_lst: basis_vec = Vector(base_name) basis.append(basis_vec) base_to_index[basis_vec.obj] = index index += 1 Vector.base_to_index = base_to_index Vector.basis = tuple(basis) # define metric tensor default_metric = [] for bv1 in Vector.basis: row = [] for bv2 in Vector.basis: row.append(Vector.basic_dot(bv1, bv2)) default_metric.append(row) Vector.metric = Matrix(default_metric) if metric is not None: if metric[0] == '[' and metric[-1] == ']': Vector.is_orthogonal = True metric_str_lst = metric[1:-1].split(',') Vector.metric = [] for g_ii in metric_str_lst: Vector.metric.append(S(g_ii)) Vector.metric = Matrix(Vector.metric) else: metric_str_lst = flatten(str_array(metric)) for index in range(len(metric_str_lst)): if metric_str_lst[index] != '#': Vector.metric[index] = S(metric_str_lst[index]) Vector.metric_dict = {} # Used to calculate dot product N = range(len(Vector.basis)) if Vector.is_orthogonal: for ii in N: 
Vector.metric_dict[Vector.basis[ii].obj] = Vector.metric[ii] else: for irow in N: for icol in N: Vector.metric_dict[(Vector.basis[irow].obj, Vector.basis[icol].obj)] = Vector.metric[irow, icol] # calculate tangent vectors and metric for curvilinear basis if curv != (None, None): X = S.Zero for (coef, base) in zip(curv[0], Vector.basis): X += coef * base.obj Vector.tangents = [] for (coord, norm) in zip(Vector.coords, curv[1]): tau = diff(X, coord) tau = trigsimp(tau) tau /= norm tau = expand(tau) Vtau = Vector() Vtau.obj = tau Vector.tangents.append(Vtau) metric = [] for tv1 in Vector.tangents: row = [] for tv2 in Vector.tangents: row.append(tv1 * tv2) metric.append(row) metric = Matrix(metric) metric = metric.applyfunc(TrigSimp) Vector.metric_dict = {} if metric.is_diagonal: Vector.is_orthogonal = True tmp_metric = [] for ii in N: tmp_metric.append(metric[ii, ii]) Vector.metric_dict[Vector.basis[ii].obj] = metric[ii, ii] Vector.metric = Matrix(tmp_metric) else: Vector.is_orthogonal = False Vector.metric = metric for irow in N: for icol in N: Vector.metric_dict[(Vector.basis[irow].obj, Vector.basis[icol].obj)] = Vector.metric[irow, icol] Vector.norm = curv[1] if debug: oprint('Tangent Vectors', Vector.tangents, 'Metric', Vector.metric, 'Metric Dictionary', Vector.metric_dict, 'Normalization', Vector.norm, dict_mode=True) # calculate derivatives of tangent vectors Vector.dtau_dict = None dtau_dict = {} for x in Vector.coords: for (tau, base) in zip(Vector.tangents, Vector.basis): dtau = tau.diff(x).applyfunc(TrigSimp) result = S.Zero for (t, b) in zip(Vector.tangents, Vector.basis): t_dtau = TrigSimp(t * dtau) result += t_dtau * b.obj dtau_dict[(base.obj, x)] = result Vector.dtau_dict = dtau_dict if debug: oprint('Basis Derivatives', Vector.dtau_dict, dict_mode=True) return tuple(Vector.basis) def __init__(self, basis_str=None): if isinstance(basis_str, Vector): self.obj = basis_str else: if basis_str is None or basis_str == '0': self.obj = S(0) else: self.obj = 
Symbol(basis_str, commutative=False) """ def diff(self, x): (coefs, bases) = linear_expand(self.obj) result = S.Zero for (coef, base) in zip(coefs, bases): result += diff(coef, x) * base return result """ def diff(self, x): Dself = Vector() if isinstance(Vector.dtau_dict, dict): Dself.obj = linear_derivation(self.obj, Vector.Diff, x) else: Dself.obj = diff(self.obj, x) return Dself @staticmethod def basic_dot(v1, v2): """ Dot product of two basis vectors returns a Symbol """ i1 = list(Vector.basis).index(v1) # Python 2.5 i2 = list(Vector.basis).index(v2) # Python 2.5 if i1 < i2: dot_str = '(' + str(Vector.basis[i1]) + '.' + str(Vector.basis[i2]) + ')' else: dot_str = '(' + str(Vector.basis[i2]) + '.' + str(Vector.basis[i1]) + ')' return Symbol(dot_str) @staticmethod def dot(b1, b2): if Vector.is_orthogonal: if b1 != b2: return S.Zero else: return Vector.metric_dict[b1] else: return Vector.metric_dict[(b1, b2)] @staticmethod def Diff(b, x): return Vector.dtau_dict[(b, x)] ######################## Operator Definitions####################### def __str__(self): return GA_Printer().doprint(self) def __mul__(self, v): if not isinstance(v, Vector): self_x_v = Vector() self_x_v.obj = self.obj * v return self_x_v else: result = expand(self.obj * v.obj) result = bilinear_product(result, Vector.dot) return result def __rmul__(self, s): s_x_self = Vector() s_x_self.obj = s * self.obj return s_x_self def __add__(self, v): self_p_v = Vector() self_p_v.obj = self.obj + v.obj return self_p_v def __add_ab__(self, v): self.obj += v.obj return def __sub__(self, v): self_m_v = Vector() self_m_v.obj = self.obj - v.obj return self_m_v def __sub_ab__(self, v): self.obj -= v.obj return def __pos__(self): return self def __neg__(self): n_self = copy.deepcopy(self) n_self.obj = -self.obj return n_self def applyfunc(self, fct): fct_self = Vector() fct_self.obj = fct(self.obj) return fct_self
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # # script.py # colorific # import sys import optparse from colorific import config from colorific.palette import ( extract_colors, print_colors, save_palette_as_image, color_stream_mt, color_stream_st) class Application(object): def __init__(self): self.parser = self.create_option_parser() def create_option_parser(self): usage = '\n'.join([ "%prog [options]", "", "Reads a stream of image filenames from stdin, and outputs a ", "single line for each containing hex color values."]) parser = optparse.OptionParser(usage) parser.add_option( '-p', '--parallel', action='store', dest='n_processes', type='int', default=config.N_PROCESSES) parser.add_option( '--min-saturation', action='store', dest='min_saturation', default=config.MIN_SATURATION, type='float', help="Only keep colors which meet this saturation " "[%.02f]" % config.MIN_SATURATION) parser.add_option( '--max-colors', action='store', dest='max_colors', type='int', default=config.MAX_COLORS, help="The maximum number of colors to output per palette " "[%d]" % config.MAX_COLORS) parser.add_option( '--min-distance', action='store', dest='min_distance', type='float', default=config.MIN_DISTANCE, help="The minimum distance colors must have to stay separate " "[%.02f]" % config.MIN_DISTANCE) parser.add_option( '--min-prominence', action='store', dest='min_prominence', type='float', default=config.MIN_PROMINENCE, help="The minimum proportion of pixels needed to keep a color " "[%.02f]" % config.MIN_PROMINENCE) parser.add_option( '--n-quantized', action='store', dest='n_quantized', type='int', default=config.N_QUANTIZED, help="Speed up by reducing the number in the quantizing step " "[%d]" % config.N_QUANTIZED) parser.add_option( '-o', action='store_true', dest='save_palette', default=False, help="Output the palette as an image file") return parser def run(self): argv = sys.argv[1:] (options, args) = self.parser.parse_args(argv) if args: # image filenames were provided as arguments for filename in 
args: try: palette = extract_colors( filename, min_saturation=options.min_saturation, min_prominence=options.min_prominence, min_distance=options.min_distance, max_colors=options.max_colors, n_quantized=options.n_quantized) except Exception as e: # TODO: it's too broad exception. print >> sys.stderr, filename, e continue print_colors(filename, palette) if options.save_palette: save_palette_as_image(filename, palette) sys.exit(1) if options.n_processes > 1: # XXX add all the knobs we can tune color_stream_mt(n=options.n_processes) else: color_stream_st( min_saturation=options.min_saturation, min_prominence=options.min_prominence, min_distance=options.min_distance, max_colors=options.max_colors, n_quantized=options.n_quantized, save_palette=options.save_palette) def main(): application = Application() application.run() if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
// Copyright 2024 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package kubernetes import ( "context" "fmt" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" ) // New creates Client from config. // Caller is responsible to call Close() to clean up client. func New(cfg clientv3.Config) (*Client, error) { c, err := clientv3.New(cfg) if err != nil { return nil, err } kc := &Client{ Client: c, } kc.Kubernetes = kc return kc, nil } type Client struct { *clientv3.Client Kubernetes Interface } var _ Interface = (*Client)(nil) func (k Client) Get(ctx context.Context, key string, opts GetOptions) (resp GetResponse, err error) { rangeResp, err := k.KV.Get(ctx, key, clientv3.WithRev(opts.Revision), clientv3.WithLimit(1)) if err != nil { return resp, err } resp.Revision = rangeResp.Header.Revision if len(rangeResp.Kvs) == 1 { resp.KV = rangeResp.Kvs[0] } return resp, nil } func (k Client) List(ctx context.Context, prefix string, opts ListOptions) (resp ListResponse, err error) { rangeStart := prefix if opts.Continue != "" { rangeStart = opts.Continue } rangeEnd := clientv3.GetPrefixRangeEnd(prefix) rangeResp, err := k.KV.Get(ctx, rangeStart, clientv3.WithRange(rangeEnd), clientv3.WithLimit(opts.Limit), clientv3.WithRev(opts.Revision)) if err != nil { return resp, err } resp.Kvs = rangeResp.Kvs resp.Count = rangeResp.Count resp.Revision = rangeResp.Header.Revision return resp, nil } func (k Client) 
Count(ctx context.Context, prefix string, _ CountOptions) (int64, error) { resp, err := k.KV.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithCountOnly()) if err != nil { return 0, err } return resp.Count, nil } func (k Client) OptimisticPut(ctx context.Context, key string, value []byte, expectedRevision int64, opts PutOptions) (resp PutResponse, err error) { txn := k.KV.Txn(ctx).If( clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision), ).Then( clientv3.OpPut(key, string(value), clientv3.WithLease(opts.LeaseID)), ) if opts.GetOnFailure { txn = txn.Else(clientv3.OpGet(key)) } txnResp, err := txn.Commit() if err != nil { return resp, err } resp.Succeeded = txnResp.Succeeded resp.Revision = txnResp.Header.Revision if opts.GetOnFailure && !txnResp.Succeeded { if len(txnResp.Responses) == 0 { return resp, fmt.Errorf("invalid OptimisticPut response: %v", txnResp.Responses) } resp.KV = kvFromTxnResponse(txnResp.Responses[0]) } return resp, nil } func (k Client) OptimisticDelete(ctx context.Context, key string, expectedRevision int64, opts DeleteOptions) (resp DeleteResponse, err error) { txn := k.KV.Txn(ctx).If( clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision), ).Then( clientv3.OpDelete(key), ) if opts.GetOnFailure { txn = txn.Else(clientv3.OpGet(key)) } txnResp, err := txn.Commit() if err != nil { return resp, err } resp.Succeeded = txnResp.Succeeded resp.Revision = txnResp.Header.Revision if opts.GetOnFailure && !txnResp.Succeeded { resp.KV = kvFromTxnResponse(txnResp.Responses[0]) } return resp, nil } func kvFromTxnResponse(resp *pb.ResponseOp) *mvccpb.KeyValue { getResponse := resp.GetResponseRange() if len(getResponse.Kvs) == 1 { return getResponse.Kvs[0] } return nil }
go
github
https://github.com/etcd-io/etcd
client/v3/kubernetes/client.go
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.beans.testfixture.beans; /** * @author Costin Leau */ public class DummyBean { private Object value; private String name; private int age; private TestBean spouse; public DummyBean(Object value) { this.value = value; } public DummyBean(String name, int age) { this.name = name; this.age = age; } public DummyBean(int ageRef, String nameRef) { this.name = nameRef; this.age = ageRef; } public DummyBean(String name, TestBean spouse) { this.name = name; this.spouse = spouse; } public DummyBean(String name, Object value, int age) { this.name = name; this.value = value; this.age = age; } public Object getValue() { return value; } public String getName() { return name; } public int getAge() { return age; } public TestBean getSpouse() { return spouse; } }
java
github
https://github.com/spring-projects/spring-framework
spring-beans/src/testFixtures/java/org/springframework/beans/testfixture/beans/DummyBean.java
# -*- coding: utf-8 -*- from ccxt.bitfinex import bitfinex import hashlib from ccxt.base.errors import ExchangeError from ccxt.base.errors import NotSupported from ccxt.base.errors import InsufficientFunds class bitfinex2 (bitfinex): def describe(self): return self.deep_extend(super(bitfinex2, self).describe(), { 'id': 'bitfinex2', 'name': 'Bitfinex v2', 'countries': 'VG', 'version': 'v2', 'hasCORS': True, # old metainfo interface 'hasFetchOrder': True, 'hasFetchTickers': True, 'hasFetchOHLCV': True, 'hasWithdraw': True, 'hasDeposit': False, 'hasFetchOpenOrders': False, 'hasFetchClosedOrders': False, # new metainfo interface 'has': { 'fetchOHLCV': True, 'fetchTickers': True, 'fetchOrder': True, 'fetchOpenOrders': False, 'fetchClosedOrders': False, 'withdraw': True, 'deposit': False, }, 'timeframes': { '1m': '1m', '5m': '5m', '15m': '15m', '30m': '30m', '1h': '1h', '3h': '3h', '6h': '6h', '12h': '12h', '1d': '1D', '1w': '7D', '2w': '14D', '1M': '1M', }, 'rateLimit': 1500, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/27766244-e328a50c-5ed2-11e7-947b-041416579bb3.jpg', 'api': 'https://api.bitfinex.com', 'www': 'https://www.bitfinex.com', 'doc': [ 'https://bitfinex.readme.io/v2/docs', 'https://github.com/bitfinexcom/bitfinex-api-node', ], 'fees': 'https://www.bitfinex.com/fees', }, 'api': { 'public': { 'get': [ 'platform/status', 'tickers', 'ticker/{symbol}', 'trades/{symbol}/hist', 'book/{symbol}/{precision}', 'book/{symbol}/P0', 'book/{symbol}/P1', 'book/{symbol}/P2', 'book/{symbol}/P3', 'book/{symbol}/R0', 'symbols_details', 'stats1/{key}:{size}:{symbol}/{side}/{section}', 'stats1/{key}:{size}:{symbol}/long/last', 'stats1/{key}:{size}:{symbol}/long/hist', 'stats1/{key}:{size}:{symbol}/short/last', 'stats1/{key}:{size}:{symbol}/short/hist', 'candles/trade:{timeframe}:{symbol}/{section}', 'candles/trade:{timeframe}:{symbol}/last', 'candles/trade:{timeframe}:{symbol}/hist', ], 'post': [ 'calc/trade/avg', ], }, 'private': { 'post': [ 
'auth/r/wallets', 'auth/r/orders/{symbol}', 'auth/r/orders/{symbol}/new', 'auth/r/orders/{symbol}/hist', 'auth/r/order/{symbol}:{id}/trades', 'auth/r/trades/{symbol}/hist', 'auth/r/positions', 'auth/r/funding/offers/{symbol}', 'auth/r/funding/offers/{symbol}/hist', 'auth/r/funding/loans/{symbol}', 'auth/r/funding/loans/{symbol}/hist', 'auth/r/funding/credits/{symbol}', 'auth/r/funding/credits/{symbol}/hist', 'auth/r/funding/trades/{symbol}/hist', 'auth/r/info/margin/{key}', 'auth/r/info/funding/{key}', 'auth/r/movements/{currency}/hist', 'auth/r/stats/perf:{timeframe}/hist', 'auth/r/alerts', 'auth/w/alert/set', 'auth/w/alert/{type}:{symbol}:{price}/del', 'auth/calc/order/avail', ], }, }, 'markets': { 'AVT/BTC': {'id': 'tAVTBTC', 'symbol': 'AVT/BTC', 'base': 'AVT', 'quote': 'BTC'}, 'AVT/ETH': {'id': 'tAVTETH', 'symbol': 'AVT/ETH', 'base': 'AVT', 'quote': 'ETH'}, 'AVT/USD': {'id': 'tAVTUSD', 'symbol': 'AVT/USD', 'base': 'AVT', 'quote': 'USD'}, 'CST_BCC/BTC': {'id': 'tBCCBTC', 'symbol': 'CST_BCC/BTC', 'base': 'CST_BCC', 'quote': 'BTC'}, 'CST_BCC/USD': {'id': 'tBCCUSD', 'symbol': 'CST_BCC/USD', 'base': 'CST_BCC', 'quote': 'USD'}, 'BCH/BTC': {'id': 'tBCHBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC'}, 'BCH/ETH': {'id': 'tBCHETH', 'symbol': 'BCH/ETH', 'base': 'BCH', 'quote': 'ETH'}, 'BCH/USD': {'id': 'tBCHUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD'}, 'CST_BCU/BTC': {'id': 'tBCUBTC', 'symbol': 'CST_BCU/BTC', 'base': 'CST_BCU', 'quote': 'BTC'}, 'CST_BCU/USD': {'id': 'tBCUUSD', 'symbol': 'CST_BCU/USD', 'base': 'CST_BCU', 'quote': 'USD'}, 'BT1/BTC': {'id': 'tBT1BTC', 'symbol': 'BT1/BTC', 'base': 'BT1', 'quote': 'BTC'}, 'BT1/USD': {'id': 'tBT1USD', 'symbol': 'BT1/USD', 'base': 'BT1', 'quote': 'USD'}, 'BT2/BTC': {'id': 'tBT2BTC', 'symbol': 'BT2/BTC', 'base': 'BT2', 'quote': 'BTC'}, 'BT2/USD': {'id': 'tBT2USD', 'symbol': 'BT2/USD', 'base': 'BT2', 'quote': 'USD'}, 'BTC/USD': {'id': 'tBTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD'}, 'BTC/EUR': 
{'id': 'tBTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'}, 'BTG/BTC': {'id': 'tBTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC'}, 'BTG/USD': {'id': 'tBTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD'}, 'DASH/BTC': {'id': 'tDSHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC'}, 'DASH/USD': {'id': 'tDSHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD'}, 'DAT/BTC': {'id': 'tDATBTC', 'symbol': 'DAT/BTC', 'base': 'DAT', 'quote': 'BTC'}, 'DAT/ETH': {'id': 'tDATETH', 'symbol': 'DAT/ETH', 'base': 'DAT', 'quote': 'ETH'}, 'DAT/USD': {'id': 'tDATUSD', 'symbol': 'DAT/USD', 'base': 'DAT', 'quote': 'USD'}, 'EDO/BTC': {'id': 'tEDOBTC', 'symbol': 'EDO/BTC', 'base': 'EDO', 'quote': 'BTC'}, 'EDO/ETH': {'id': 'tEDOETH', 'symbol': 'EDO/ETH', 'base': 'EDO', 'quote': 'ETH'}, 'EDO/USD': {'id': 'tEDOUSD', 'symbol': 'EDO/USD', 'base': 'EDO', 'quote': 'USD'}, 'EOS/BTC': {'id': 'tEOSBTC', 'symbol': 'EOS/BTC', 'base': 'EOS', 'quote': 'BTC'}, 'EOS/ETH': {'id': 'tEOSETH', 'symbol': 'EOS/ETH', 'base': 'EOS', 'quote': 'ETH'}, 'EOS/USD': {'id': 'tEOSUSD', 'symbol': 'EOS/USD', 'base': 'EOS', 'quote': 'USD'}, 'ETC/BTC': {'id': 'tETCBTC', 'symbol': 'ETC/BTC', 'base': 'ETC', 'quote': 'BTC'}, 'ETC/USD': {'id': 'tETCUSD', 'symbol': 'ETC/USD', 'base': 'ETC', 'quote': 'USD'}, 'ETH/BTC': {'id': 'tETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC'}, 'ETH/USD': {'id': 'tETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD'}, 'ETP/BTC': {'id': 'tETPBTC', 'symbol': 'ETP/BTC', 'base': 'ETP', 'quote': 'BTC'}, 'ETP/ETH': {'id': 'tETPETH', 'symbol': 'ETP/ETH', 'base': 'ETP', 'quote': 'ETH'}, 'ETP/USD': {'id': 'tETPUSD', 'symbol': 'ETP/USD', 'base': 'ETP', 'quote': 'USD'}, 'IOTA/BTC': {'id': 'tIOTBTC', 'symbol': 'IOTA/BTC', 'base': 'IOTA', 'quote': 'BTC'}, 'IOTA/ETH': {'id': 'tIOTETH', 'symbol': 'IOTA/ETH', 'base': 'IOTA', 'quote': 'ETH'}, 'IOTA/USD': {'id': 'tIOTUSD', 'symbol': 'IOTA/USD', 'base': 'IOTA', 'quote': 'USD'}, 'LTC/BTC': {'id': 
'tLTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC'}, 'LTC/USD': {'id': 'tLTCUSD', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD'}, 'NEO/BTC': {'id': 'tNEOBTC', 'symbol': 'NEO/BTC', 'base': 'NEO', 'quote': 'BTC'}, 'NEO/ETH': {'id': 'tNEOETH', 'symbol': 'NEO/ETH', 'base': 'NEO', 'quote': 'ETH'}, 'NEO/USD': {'id': 'tNEOUSD', 'symbol': 'NEO/USD', 'base': 'NEO', 'quote': 'USD'}, 'OMG/BTC': {'id': 'tOMGBTC', 'symbol': 'OMG/BTC', 'base': 'OMG', 'quote': 'BTC'}, 'OMG/ETH': {'id': 'tOMGETH', 'symbol': 'OMG/ETH', 'base': 'OMG', 'quote': 'ETH'}, 'OMG/USD': {'id': 'tOMGUSD', 'symbol': 'OMG/USD', 'base': 'OMG', 'quote': 'USD'}, 'QTUM/BTC': {'id': 'tQTMBTC', 'symbol': 'QTUM/BTC', 'base': 'QTUM', 'quote': 'BTC'}, 'QTUM/ETH': {'id': 'tQTMETH', 'symbol': 'QTUM/ETH', 'base': 'QTUM', 'quote': 'ETH'}, 'QTUM/USD': {'id': 'tQTMUSD', 'symbol': 'QTUM/USD', 'base': 'QTUM', 'quote': 'USD'}, 'RRT/BTC': {'id': 'tRRTBTC', 'symbol': 'RRT/BTC', 'base': 'RRT', 'quote': 'BTC'}, 'RRT/USD': {'id': 'tRRTUSD', 'symbol': 'RRT/USD', 'base': 'RRT', 'quote': 'USD'}, 'SAN/BTC': {'id': 'tSANBTC', 'symbol': 'SAN/BTC', 'base': 'SAN', 'quote': 'BTC'}, 'SAN/ETH': {'id': 'tSANETH', 'symbol': 'SAN/ETH', 'base': 'SAN', 'quote': 'ETH'}, 'SAN/USD': {'id': 'tSANUSD', 'symbol': 'SAN/USD', 'base': 'SAN', 'quote': 'USD'}, 'XMR/BTC': {'id': 'tXMRBTC', 'symbol': 'XMR/BTC', 'base': 'XMR', 'quote': 'BTC'}, 'XMR/USD': {'id': 'tXMRUSD', 'symbol': 'XMR/USD', 'base': 'XMR', 'quote': 'USD'}, 'XRP/BTC': {'id': 'tXRPBTC', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC'}, 'XRP/USD': {'id': 'tXRPUSD', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD'}, 'ZEC/BTC': {'id': 'tZECBTC', 'symbol': 'ZEC/BTC', 'base': 'ZEC', 'quote': 'BTC'}, 'ZEC/USD': {'id': 'tZECUSD', 'symbol': 'ZEC/USD', 'base': 'ZEC', 'quote': 'USD'}, }, 'fees': { 'trading': { 'maker': 0.1 / 100, 'taker': 0.2 / 100, }, 'funding': { 'withdraw': { 'BTC': 0.0005, 'BCH': 0.0005, 'ETH': 0.01, 'EOS': 0.1, 'LTC': 0.001, 'OMG': 0.1, 'IOT': 0.0, 'NEO': 0.0, 
'ETC': 0.01, 'XRP': 0.02, 'ETP': 0.01, 'ZEC': 0.001, 'BTG': 0.0, 'DASH': 0.01, 'XMR': 0.04, 'QTM': 0.01, 'EDO': 0.5, 'DAT': 1.0, 'AVT': 0.5, 'SAN': 0.1, 'USDT': 5.0, }, }, }, }) def common_currency_code(self, currency): # issue #4 Bitfinex names Dash as DSH, instead of DASH if currency == 'DSH': return 'DASH' if currency == 'QTM': return 'QTUM' # issue #796 if currency == 'IOT': return 'IOTA' return currency def fetch_balance(self, params={}): response = self.privatePostAuthRWallets() balanceType = self.safe_string(params, 'type', 'exchange') result = {'info': response} for b in range(0, len(response)): balance = response[b] accountType, currency, total, interest, available = balance if accountType == balanceType: if currency[0] == 't': currency = currency[1:] uppercase = currency.upper() uppercase = self.common_currency_code(uppercase) account = self.account() account['free'] = available account['total'] = total if account['free']: account['used'] = account['total'] - account['free'] result[uppercase] = account return self.parse_balance(result) def fetch_order_book(self, symbol, params={}): orderbook = self.publicGetBookSymbolPrecision(self.extend({ 'symbol': self.market_id(symbol), 'precision': 'R0', }, params)) timestamp = self.milliseconds() result = { 'bids': [], 'asks': [], 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), } for i in range(0, len(orderbook)): order = orderbook[i] price = order[1] amount = order[2] side = 'bids' if (amount > 0) else 'asks' amount = abs(amount) result[side].append([price, amount]) result['bids'] = self.sort_by(result['bids'], 0, True) result['asks'] = self.sort_by(result['asks'], 0) return result def parse_ticker(self, ticker, market=None): timestamp = self.milliseconds() symbol = None if market: symbol = market['symbol'] length = len(ticker) return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': ticker[length - 2], 'low': ticker[length - 1], 'bid': ticker[length - 10], 'ask': 
ticker[length - 8], 'vwap': None, 'open': None, 'close': None, 'first': None, 'last': ticker[length - 4], 'change': ticker[length - 6], 'percentage': ticker[length - 5], 'average': None, 'baseVolume': ticker[length - 3], 'quoteVolume': None, 'info': ticker, } def fetch_tickers(self, symbols=None, params={}): tickers = self.publicGetTickers(self.extend({ 'symbols': ','.join(self.ids), }, params)) result = {} for i in range(0, len(tickers)): ticker = tickers[i] id = ticker[0] market = self.markets_by_id[id] symbol = market['symbol'] result[symbol] = self.parse_ticker(ticker, market) return result def fetch_ticker(self, symbol, params={}): market = self.markets[symbol] ticker = self.publicGetTickerSymbol(self.extend({ 'symbol': market['id'], }, params)) return self.parse_ticker(ticker, market) def parse_trade(self, trade, market): id, timestamp, amount, price = trade side = 'sell' if (amount < 0) else 'buy' if amount < 0: amount = -amount return { 'id': str(id), 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': market['symbol'], 'type': None, 'side': side, 'price': price, 'amount': amount, } def fetch_trades(self, symbol, since=None, limit=None, params={}): market = self.market(symbol) request = { 'symbol': market['id'], } if since: request['start'] = since if limit: request['limit'] = limit response = self.publicGetTradesSymbolHist(self.extend(request, params)) return self.parse_trades(response, market, since, limit) def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): market = self.market(symbol) request = { 'symbol': market['id'], 'timeframe': self.timeframes[timeframe], } if limit: request['limit'] = limit if since: request['start'] = since request = self.extend(request, params) response = self.publicGetCandlesTradeTimeframeSymbolHist(request) return self.parse_ohlcvs(response, market, timeframe, since, limit) def create_order(self, symbol, type, side, amount, price=None, params={}): raise 
NotSupported(self.id + ' createOrder not implemented yet') def cancel_order(self, id, symbol=None, params={}): raise NotSupported(self.id + ' cancelOrder not implemented yet') def fetch_order(self, id, symbol=None, params={}): raise NotSupported(self.id + ' fetchOrder not implemented yet') def withdraw(self, currency, amount, address, params={}): raise NotSupported(self.id + ' withdraw not implemented yet') def nonce(self): return self.milliseconds() def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): request = self.version + '/' + self.implode_params(path, params) query = self.omit(params, self.extract_params(path)) url = self.urls['api'] + '/' + request if api == 'public': if query: url += '?' + self.urlencode(query) else: self.check_required_credentials() nonce = str(self.nonce()) body = self.json(query) auth = '/api' + '/' + request + nonce + body signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha384) headers = { 'bfx-nonce': nonce, 'bfx-apikey': self.apiKey, 'bfx-signature': signature, 'Content-Type': 'application/json', } return {'url': url, 'method': method, 'body': body, 'headers': headers} def request(self, path, api='public', method='GET', params={}, headers=None, body=None): response = self.fetch2(path, api, method, params, headers, body) if response: if 'message' in response: if response['message'].find('not enough exchange balance') >= 0: raise InsufficientFunds(self.id + ' ' + self.json(response)) raise ExchangeError(self.id + ' ' + self.json(response)) return response elif response == '': raise ExchangeError(self.id + ' returned empty response') return response
unknown
codeparrot/codeparrot-clean
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_DATA_STANDALONE_H_ #define TENSORFLOW_CORE_DATA_STANDALONE_H_ #include <functional> #include <memory> #include <optional> #include <string> #include <vector> #include "xla/tsl/platform/status.h" #include "xla/tsl/platform/statusor.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/data/tfdataz_metrics.h" #include "tensorflow/core/data/unbounded_thread_pool.h" #include "tensorflow/core/framework/cancellation.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function_handle_cache.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/model.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace data { namespace standalone { // The purpose of the API in this file is to facilitate standalone execution of // a tf.data input pipeline graph. // // The API exposes two abstractions -- a `Dataset` and an `Iterator` -- which // encapsulate TensorFlow runtime. 
// // The `Dataset` abstraction represents an input pipeline as a collection // of data sources and a logical plan of transformations that operate over the // data. // // The `Iterator` abstraction represents an execution of an input pipeline that // can be used to enumerate its elements. // // Example usage: // // // Create a `Dataset` by running the `graph_def` graph. // tensorflow::data:standalone::Dataset::Params params; // std::unique_ptr<tensorflow::data::standalone::Dataset> dataset; // Status s = tensorflow::data::standalone::Dataset::FromGraph( // params, graph_def, &dataset); // if (!s.ok()) { /* error handling */ } // // std::unique_ptr<tensorflow::data::standalone::Iterator> iterator; // s = dataset->MakeIterator(&iterator); // if (!s.ok()) { /* error handling */ } // // bool end_of_input = false; // while (!end_of_input) { // std::vector<tensorflow::Tensor> outputs; // s = iterator->GetNext(&outputs, &end_of_input); // if (!s.ok()) { /* error handling */ } // if (!end_of_input) { /* output handling */ } // } class Dataset; // Represents an execution of an input pipeline that can be used to enumerate // its elements. class Iterator { public: virtual ~Iterator(); // Returns the next element of the input pipeline (if there is one) and an // indication of whether the end of the input pipeline has been reached. absl::Status GetNext(std::vector<Tensor>* outputs, bool* end_of_input); // Saves a checkpoint of the iterator. Returns Tensors that can be called with // `Restore()`. absl::StatusOr<std::vector<Tensor>> Save(); // Restores the iterator from a checkpoint. `saved_iterator` is the serialized // iterator saved by calling `Save()`. absl::Status Restore(const std::vector<Tensor>& saved_iterator); // Returns the dataset model for performance analysis. 
std::shared_ptr<model::Model> model() const; private: friend class Dataset; Iterator(IteratorBase* iterator, IteratorContext* ctx, SerializationContext* serialization_ctx); std::unique_ptr<IteratorBase> iterator_; std::unique_ptr<IteratorContext> ctx_; std::unique_ptr<SerializationContext> serialization_ctx_; std::shared_ptr<TfDatazMetricsCollector> tf_dataz_metrics_collector_; }; // Represents an input pipeline as a collection of data sources and a logical // plan of transformations that operate over the data. class Dataset { public: // Metadata options for `Dataset` creation. struct MetadataOptions { std::string data_service_address; }; // Parameters for `Dataset` creation (e.g. TensorFlow runtime configuration). struct Params { SessionOptions session_options; MetadataOptions metadata_options; }; // Creates a new `Dataset` instance by running the given dataset graph. static absl::Status FromGraph(Params params, const GraphDef& graph_def, std::unique_ptr<Dataset>* result); ~Dataset(); // Creates an iterator for this dataset. absl::Status MakeIterator(std::unique_ptr<Iterator>* result); // Creates an iterator, optionally with a split provider. absl::Status MakeIterator( std::vector<std::unique_ptr<SplitProvider>> split_providers, std::unique_ptr<Iterator>* result); // Creates split providers for this dataset. absl::Status MakeSplitProviders( std::vector<std::unique_ptr<SplitProvider>>* result); // Returns a pointer to the underlying dataset. 
const DatasetBase* Get() const; private: Dataset(DatasetBase* finalized_dataset, DatasetBase* original_dataset, DeviceMgr* device_mgr, ProcessFunctionLibraryRuntime* pflr, FunctionLibraryDefinition* flib_def, thread::ThreadPool* pool, std::function<void(std::function<void()>)> runner); DatasetBase* finalized_dataset_; // owned DatasetBase* original_dataset_; // owned std::unique_ptr<DeviceMgr> device_mgr_; std::unique_ptr<FunctionLibraryDefinition> flib_def_; std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_; std::unique_ptr<thread::ThreadPool> interop_threadpool_; std::unique_ptr<FunctionHandleCache> function_handle_cache_; std::function<void(std::function<void()>)> runner_; ResourceMgr resource_mgr_; CancellationManager cancellation_manager_; UnboundedThreadPool unbounded_thread_pool_; }; } // namespace standalone } // namespace data } // namespace tensorflow #endif // TENSORFLOW_CORE_DATA_STANDALONE_H_
c
github
https://github.com/tensorflow/tensorflow
tensorflow/core/data/standalone.h
# This file is part of SEALEVEL - a tool to estimates future sea-level rise # constrained by past obervations and long-term sea-level commitment # Copyright (C) 2016 Matthias Mengel working at PIK Potsdam # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # LICENSE.txt for more details. import os import numpy as np import dimarray as da project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) inputdatadir = os.path.join(project_dir, "data/input/") ######## IPCC mean sl contributions ######## # see Chapter 13, Fifth IPCC Report of WG1, Table 13.5 ipccdata = np.loadtxt( inputdatadir + "ipcc_ar5/slr_contributions_ch13.csv", skiprows=1, usecols=( 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)) ipcc_columnames = [ "RCP3PD_med", "RCP3PD_low", "RCP3PD_high", "RCP45_med", "RCP45_low", "RCP45_high", "RCP60_med", "RCP60_low", "RCP60_high", "RCP85_med", "RCP85_low", "RCP85_high"] ipcc_rownames = [ "thermexp", "gic", "gis_smb", "ant_smb", "gis_sid", "ant_sid", "LandWaterStorage", "mean_slr_2081_2100", "Greenland_Sum", "Antarctica_Sum", "Ice-sheet_rapid_dyn", "rate_slr_2081_2100", "mean_slr_2046_2065", "mean_slr_2100"] def get_ipcc_range(rcp, contribution): lowind = ipcc_columnames.index(rcp + "_low") medind = ipcc_columnames.index(rcp + "_med") highind = ipcc_columnames.index(rcp + "_high") rowind = ipcc_rownames.index(contribution) contrib = ipccdata[rowind, :] return np.array([contrib[lowind], contrib[medind], contrib[highind]]) * 1e3 # in mm ipcc_contrib_estimates = {} for contrib in ["thermexp", "gic", "gis_smb", "ant_smb", "gis_sid", "ant_sid"]: ipcc_contrib_estimates[contrib] 
= {} for rcp in ["RCP3PD", "RCP45", "RCP85"]: ipcc_contrib_estimates[contrib][rcp] = get_ipcc_range(rcp, contrib) ipcc_contrib_estimates["gis"] = {} for rcp in ["RCP3PD", "RCP45", "RCP85"]: ipcc_contrib_estimates["gis"][rcp] = ( ipcc_contrib_estimates["gis_sid"][rcp] + ipcc_contrib_estimates["gis_smb"][rcp]) ######## IPCC global mean temperature estimate ######## ## get IPCC AR5 global mean temperature pathways for each RCP scenario ## they can be downloaded from ## http://www.ipcc.ch/report/ar5/wg1/docs/ar5_wg1_annexI_all.zip ## define 1951-1980 to preindustrial (1850-1860) ## global temperature increase based on hadCrut v4.0 data ## see sealevel/get_gmt_data.py for calculation preind_to_1951_1980 = 0.2640 tas_data = {} for scen in ['rcp26','rcp45','rcp60','rcp85']: try: tas = np.loadtxt(os.path.join(inputdatadir,'ipcc_ar5', 'WGIAR5_FD_AnnexI_series_tas_modelmean_'+scen+'_world_annual.txt')) except IOError: raise IOError, ("IPCC global mean temperature data missing, " "please run sealevel/download_input_data.py") tasd = da.DimArray(tas[:,1],dims="time",axes=tas[:,0]) ## create anomaly to hadcrutv4 1850-1860 mean ## which was used throughout the study as "relative to preindustrial" tas_data[scen] = tasd - tasd[1951:1980].mean() + preind_to_1951_1980
unknown
codeparrot/codeparrot-clean
import json from django.http import HttpResponse from django.shortcuts import get_object_or_404, render from mobility.decorators import mobile_template from product_details import product_details from kitsune.products.models import Product, Topic from kitsune.sumo.utils import get_browser from kitsune.wiki.decorators import check_simple_wiki_locale from kitsune.wiki.facets import topics_for, documents_for @check_simple_wiki_locale @mobile_template('products/{mobile/}products.html') def product_list(request, template): """The product picker page.""" products = Product.objects.filter(visible=True) return render(request, template, { 'products': products}) @check_simple_wiki_locale @mobile_template('products/{mobile/}product.html') def product_landing(request, template, slug): """The product landing page.""" product = get_object_or_404(Product, slug=slug) if request.is_ajax(): # Return a list of topics/subtopics for the product topic_list = list() for t in Topic.objects.filter(product=product, visible=True): topic_list.append({'id': t.id, 'title': t.title}) return HttpResponse(json.dumps({'topics': topic_list}), content_type='application/json') if slug == 'firefox': latest_version = product_details.json_data['firefox_versions']['LATEST_FIREFOX_VERSION'] else: versions = product.versions.filter(default=True) if versions: latest_version = versions[0].min_version else: latest_version = 0 return render(request, template, { 'product': product, 'products': Product.objects.filter(visible=True), 'topics': topics_for(product=product, parent=None), 'search_params': {'product': slug}, 'latest_version': latest_version }) @check_simple_wiki_locale @mobile_template('products/{mobile/}documents.html') def document_listing(request, template, product_slug, topic_slug, subtopic_slug=None): """The document listing page for a product + topic.""" product = get_object_or_404(Product, slug=product_slug) topic = get_object_or_404(Topic, slug=topic_slug, product=product, parent__isnull=True) 
doc_kw = {'locale': request.LANGUAGE_CODE, 'products': [product]} if subtopic_slug is not None: subtopic = get_object_or_404(Topic, slug=subtopic_slug, product=product, parent=topic) doc_kw['topics'] = [subtopic] else: subtopic = None doc_kw['topics'] = [topic] documents, fallback_documents = documents_for(**doc_kw) user_agent = request.META.get('HTTP_USER_AGENT', '') browser = get_browser(user_agent) show_fx_download = (product.slug == 'thunderbird' and browser != 'Firefox') return render(request, template, { 'product': product, 'topic': topic, 'subtopic': subtopic, 'topics': topics_for(product=product, parent=None), 'subtopics': topics_for(product=product, parent=topic), 'documents': documents, 'fallback_documents': fallback_documents, 'search_params': {'product': product_slug}, 'show_fx_download': show_fx_download})
unknown
codeparrot/codeparrot-clean
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## """ The ``virtual`` module contains utility classes for virtual dependencies. """ from itertools import product as iproduct from six import iteritems from pprint import pformat import spack.util.spack_yaml as syaml from yaml.error import MarkedYAMLError import spack import spack.error class ProviderIndex(object): """This is a dict of dicts used for finding providers of particular virtual dependencies. The dict of dicts looks like: { vpkg name : { full vpkg spec : set(packages providing spec) } } Callers can use this to first find which packages provide a vpkg, then find a matching full spec. 
e.g., in this scenario: { 'mpi' : { mpi@:1.1 : set([mpich]), mpi@:2.3 : set([mpich2@1.9:]) } } Calling providers_for(spec) will find specs that provide a matching implementation of MPI. """ def __init__(self, specs=None, restrict=False): """Create a new ProviderIndex. Optional arguments: specs List (or sequence) of specs. If provided, will call `update` on this ProviderIndex with each spec in the list. restrict "restricts" values to the verbatim input specs; do not pre-apply package's constraints. TODO: rename this. It is intended to keep things as broad as possible without overly restricting results, so it is not the best name. """ if specs is None: specs = [] self.restrict = restrict self.providers = {} for spec in specs: if not isinstance(spec, spack.spec.Spec): spec = spack.spec.Spec(spec) if spec.virtual: continue self.update(spec) def update(self, spec): if not isinstance(spec, spack.spec.Spec): spec = spack.spec.Spec(spec) if not spec.name: # Empty specs do not have a package return assert(not spec.virtual) pkg_provided = spec.package_class.provided for provided_spec, provider_specs in iteritems(pkg_provided): for provider_spec in provider_specs: # TODO: fix this comment. # We want satisfaction other than flags provider_spec.compiler_flags = spec.compiler_flags.copy() if spec.satisfies(provider_spec, deps=False): provided_name = provided_spec.name provider_map = self.providers.setdefault(provided_name, {}) if provided_spec not in provider_map: provider_map[provided_spec] = set() if self.restrict: provider_set = provider_map[provided_spec] # If this package existed in the index before, # need to take the old versions out, as they're # now more constrained. old = set( [s for s in provider_set if s.name == spec.name]) provider_set.difference_update(old) # Now add the new version. provider_set.add(spec) else: # Before putting the spec in the map, constrain # it so that it provides what was asked for. 
constrained = spec.copy() constrained.constrain(provider_spec) provider_map[provided_spec].add(constrained) def providers_for(self, *vpkg_specs): """Gives specs of all packages that provide virtual packages with the supplied specs.""" providers = set() for vspec in vpkg_specs: # Allow string names to be passed as input, as well as specs if type(vspec) == str: vspec = spack.spec.Spec(vspec) # Add all the providers that satisfy the vpkg spec. if vspec.name in self.providers: for p_spec, spec_set in self.providers[vspec.name].items(): if p_spec.satisfies(vspec, deps=False): providers.update(spec_set) # Return providers in order. Defensively copy. return sorted(s.copy() for s in providers) # TODO: this is pretty darned nasty, and inefficient, but there # are not that many vdeps in most specs. def _cross_provider_maps(self, lmap, rmap): result = {} for lspec, rspec in iproduct(lmap, rmap): try: constrained = lspec.constrained(rspec) except spack.spec.UnsatisfiableSpecError: continue # lp and rp are left and right provider specs. for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]): if lp_spec.name == rp_spec.name: try: const = lp_spec.constrained(rp_spec, deps=False) result.setdefault(constrained, set()).add(const) except spack.spec.UnsatisfiableSpecError: continue return result def __contains__(self, name): """Whether a particular vpkg name is in the index.""" return name in self.providers def satisfies(self, other): """Check that providers of virtual specs are compatible.""" common = set(self.providers) & set(other.providers) if not common: return True # This ensures that some provider in other COULD satisfy the # vpkg constraints on self. 
result = {} for name in common: crossed = self._cross_provider_maps(self.providers[name], other.providers[name]) if crossed: result[name] = crossed return all(c in result for c in common) def to_yaml(self, stream=None): provider_list = self._transform( lambda vpkg, pset: [ vpkg.to_node_dict(), [p.to_node_dict() for p in pset]], list) syaml.dump({'provider_index': {'providers': provider_list}}, stream=stream) @staticmethod def from_yaml(stream): try: yfile = syaml.load(stream) except MarkedYAMLError as e: raise spack.spec.SpackYAMLError( "error parsing YAML ProviderIndex cache:", str(e)) if not isinstance(yfile, dict): raise ProviderIndexError("YAML ProviderIndex was not a dict.") if 'provider_index' not in yfile: raise ProviderIndexError( "YAML ProviderIndex does not start with 'provider_index'") index = ProviderIndex() providers = yfile['provider_index']['providers'] index.providers = _transform( providers, lambda vpkg, plist: ( spack.spec.Spec.from_node_dict(vpkg), set(spack.spec.Spec.from_node_dict(p) for p in plist))) return index def merge(self, other): """Merge `other` ProviderIndex into this one.""" other = other.copy() # defensive copy. 
for pkg in other.providers: if pkg not in self.providers: self.providers[pkg] = other.providers[pkg] continue spdict, opdict = self.providers[pkg], other.providers[pkg] for provided_spec in opdict: if provided_spec not in spdict: spdict[provided_spec] = opdict[provided_spec] continue spdict[provided_spec] = \ spdict[provided_spec].union(opdict[provided_spec]) def remove_provider(self, pkg_name): """Remove a provider from the ProviderIndex.""" empty_pkg_dict = [] for pkg, pkg_dict in self.providers.items(): empty_pset = [] for provided, pset in pkg_dict.items(): same_name = set(p for p in pset if p.fullname == pkg_name) pset.difference_update(same_name) if not pset: empty_pset.append(provided) for provided in empty_pset: del pkg_dict[provided] if not pkg_dict: empty_pkg_dict.append(pkg) for pkg in empty_pkg_dict: del self.providers[pkg] def copy(self): """Deep copy of this ProviderIndex.""" clone = ProviderIndex() clone.providers = self._transform( lambda vpkg, pset: (vpkg, set((p.copy() for p in pset)))) return clone def __eq__(self, other): return self.providers == other.providers def _transform(self, transform_fun, out_mapping_type=dict): return _transform(self.providers, transform_fun, out_mapping_type) def __str__(self): return pformat( _transform(self.providers, lambda k, v: (k, list(v)))) def _transform(providers, transform_fun, out_mapping_type=dict): """Syntactic sugar for transforming a providers dict. transform_fun takes a (vpkg, pset) mapping and runs it on each pair in nested dicts. """ def mapiter(mappings): if isinstance(mappings, dict): return iteritems(mappings) else: return iter(mappings) return dict( (name, out_mapping_type([ transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)])) for name, mappings in providers.items()) class ProviderIndexError(spack.error.SpackError): """Raised when there is a problem with a ProviderIndex."""
unknown
codeparrot/codeparrot-clean
## Input ```javascript // @loggerTestOnly @validateNoJSXInTryStatements @outputMode:"lint" import {identity} from 'shared-runtime'; function Component(props) { let el; try { let value; try { value = identity(props.foo); } catch { el = <div value={value} />; } } catch { return null; } return el; } ``` ## Code ```javascript // @loggerTestOnly @validateNoJSXInTryStatements @outputMode:"lint" import { identity } from "shared-runtime"; function Component(props) { let el; try { let value; try { value = identity(props.foo); } catch { el = <div value={value} />; } } catch { return null; } return el; } ``` ## Logs ``` {"kind":"CompileError","detail":{"options":{"category":"ErrorBoundaries","reason":"Avoid constructing JSX within try/catch","description":"React does not immediately render components when JSX is rendered, so any errors from this component will not be caught by the try/catch. To catch errors in rendering a given component, wrap that component in an error boundary. (https://react.dev/reference/react/Component#catching-rendering-errors-with-an-error-boundary)","details":[{"kind":"error","loc":{"start":{"line":11,"column":11,"index":241},"end":{"line":11,"column":32,"index":262},"filename":"invalid-jsx-in-catch-in-outer-try-with-catch.ts"},"message":"Avoid constructing JSX within try/catch"}]}},"fnLoc":null} {"kind":"CompileSuccess","fnLoc":{"start":{"line":4,"column":0,"index":110},"end":{"line":17,"column":1,"index":317},"filename":"invalid-jsx-in-catch-in-outer-try-with-catch.ts"},"fnName":"Component","memoSlots":4,"memoBlocks":2,"memoValues":2,"prunedMemoBlocks":0,"prunedMemoValues":0} ``` ### Eval output (kind: exception) Fixture not implemented
unknown
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/invalid-jsx-in-catch-in-outer-try-with-catch.expect.md
# -*- coding: utf-8 -*- # Copyright (C) 2015 enen92 # # This program is free software; you can redistribute it and/or modify it under the terms # of the GNU General Public License as published by the Free Software Foundation; # either version 2 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; # without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with this program; # if not, see <http://www.gnu.org/licenses/>. import xbmc,xbmcgui import os import urllib import time import thelogodb import logowindow from addoncommon.common_variables import * from addoncommon.tvldutils import * class Downloader: def __init__(self,dictionary,replace,failed_log=[]): self.dictionary = dictionary self.logo_folder = settings.getSetting('logo-folder') self.total_downloaded = 0 self.log = '[B]DOWNLOADED LOGOS:[/B]\n\n' if replace: for channel in self.dictionary: channel_url = channel['channel_logo'] channel_name = channel['channel_name'] channel_selected = channel['selected_channel'] localfile = os.path.join(self.logo_folder,channel_selected.replace('/','_')+'.png') self.download(localfile,channel_url,channel_name) try:res = resize(localfile) except:pass else: for channel in self.dictionary: channel_url = channel['channel_logo'] channel_name = channel['channel_name'] localfile = os.path.join(self.logo_folder,channel_name.replace('/','_')+'.png') self.download(localfile,channel_url,channel_name) try:res = resize(localfile) except:pass #Append failed log if failed_log: self.log=self.log + '\n\n[B]FAILED/MISSING CHANNEL LOGOS:[/B]\n\n' for failed in failed_log: self.log=self.log + failed + '\n' if settings.getSetting('hide_log') != 'true': yes_no = xbmcgui.Dialog().yesno('TVLogo Downloader', '%s logos downloaded. 
Do you want to check the entire log?' % (str(self.total_downloaded))) if yes_no: window = logowindow.dialog_log('DialogTextViewer.xml',self.log) window.doModal() def download(self,path,url,name): try: if os.path.isfile(path) is True: while os.path.exists(path): os.remove(path); break except: pass dp = xbmcgui.DialogProgress() dp.create('TVLogo Downloader') dp.update(0,name) xbmc.sleep(500) start_time = time.time() try: urllib.urlretrieve(url, path, lambda nb, bs, fs: self.dialogdown(name,nb, bs, fs, dp, start_time)) dp.close() try: self.log=self.log+'[B]'+name+'[/B] : '+path+'\n' except: self.log=self.log+'[B]'+removeNonAscii(name)+'[/B] : '+removeNonAscii(path)+'\n' self.total_downloaded += 1 return True except: try: while os.path.exists(path): os.remove(path); break except: pass dp.close() return False def dialogdown(self,name,numblocks, blocksize, filesize, dp, start_time): try: percent = min(numblocks * blocksize * 100 / filesize, 100) currently_downloaded = float(numblocks) * blocksize / (1024 * 1024) kbps_speed = numblocks * blocksize / (time.time() - start_time) if kbps_speed > 0: eta = (filesize - numblocks * blocksize) / kbps_speed else: eta = 0 kbps_speed = kbps_speed / 1024 total = float(filesize) / (1024 * 1024) mbs = '%.02f MB %s %.02f MB' % (currently_downloaded,'downloaded', total) e = ' (%.0f Kb/s) ' % kbps_speed tempo = 'Time left:' + ' %02d:%02d' % divmod(eta, 60) dp.update(percent,name +' - '+ mbs + e,tempo) except: percent = 100 dp.update(percent) if dp.iscanceled(): dp.close() raise StopDownloading('Stopped Downloading') class StopDownloading(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) def download_entire_package(packageid): channels = thelogodb.Channels().by_package(packageid) channels_ = [] for channel in channels: if channel['strLogoWide']: channels_.append(channel) channels = channels_ del channels_ if channels: channels_to_download = [] for channel in channels: 
channels_to_download.append(channel_to_downloaddict(channel)) if channels_to_download: Downloader(channels_to_download,False) else: mensagemok('TVLogo Downloader','No logos available') return else: mensagemok('TVLogo Downloader','No logos available') return
unknown
codeparrot/codeparrot-clean
import json from django.contrib import messages from django.core.urlresolvers import reverse from django.http import Http404, HttpResponse, HttpResponseRedirect from django.shortcuts import render from django.utils.decorators import method_decorator from django.utils.translation import ugettext_noop, ugettext as _ from django.views.generic import TemplateView, View from tastypie.http import HttpBadRequest from corehq.apps.crud.views import BaseCRUDFormView from corehq.apps.domain.decorators import login_and_domain_required from corehq.apps.domain.views import DomainViewMixin from corehq.apps.hqwebapp.views import BaseSectionPageView from corehq.apps.indicators.admin.crud import IndicatorCRUDFormRequestManager from corehq.apps.indicators.admin.forms import BulkCopyIndicatorsForm from corehq.apps.indicators.dispatcher import require_edit_indicators from corehq.apps.indicators.forms import ImportIndicatorsFromJsonFileForm from corehq.apps.indicators.models import ( IndicatorDefinition, DynamicIndicatorDefinition, ) from corehq.apps.indicators.utils import get_indicator_domains, get_namespaces from dimagi.utils.decorators.memoized import memoized from dimagi.utils.modules import to_function @require_edit_indicators @login_and_domain_required def default_admin(request, domain, template="reports/base_template.html", **kwargs): if request.domain not in get_indicator_domains(): raise Http404 from corehq.apps.indicators.admin import BaseIndicatorAdminInterface context = dict( domain=domain, project=domain, report=dict( title="Select an Indicator Definition Type", show=True, slug=None, is_async=True, section_name=BaseIndicatorAdminInterface.section_name, ) ) return render(request, template, context) class IndicatorAdminCRUDFormView(BaseCRUDFormView): base_loc = "corehq.apps.indicators.admin.forms" template_name = "indicators/forms/crud.add_indicator.html" form_request_manager = IndicatorCRUDFormRequestManager @method_decorator(require_edit_indicators) def dispatch(self, 
request, *args, **kwargs): return super(IndicatorAdminCRUDFormView, self).dispatch(request, *args, **kwargs) class BulkCopyIndicatorsView(TemplateView): indicator_loc = "corehq.apps.indicators.models" template_name = "indicators/forms/copy_to_domain.html" @method_decorator(require_edit_indicators) def dispatch(self, request, domain, indicator_type=None, *args, **kwargs): self.domain = domain try: self.indicator_class = to_function("%s.%s" % (self.indicator_loc, indicator_type)) except AttributeError: return HttpBadRequest("%s.%s does not exist" % (self.indicator_loc, indicator_type)) status = {} if request.method == 'POST': form = BulkCopyIndicatorsForm(data=request.POST, domain=self.domain, couch_user=request.couch_user, indicator_class=self.indicator_class) if form.is_valid(): status = form.copy_indicators() else: form = BulkCopyIndicatorsForm(domain=self.domain, couch_user=request.couch_user, indicator_class=self.indicator_class) return render(request, self.template_name, { "form": form, "status": status, "domain": self.domain, "indicator_type": self.indicator_class.__name__, "indicator_name": self.indicator_class.get_nice_name(), }) class BulkExportIndicatorsView(View): urlname = 'indicators_bulk_export' @method_decorator(require_edit_indicators) def dispatch(self, request, *args, **kwargs): return super(BulkExportIndicatorsView, self).dispatch(request, *args, **kwargs) def get(self, request, domain, *args, **kwargs): namespaces = get_namespaces(domain) db = IndicatorDefinition.get_db() def _clean_indicator(doc): del doc['_id'] del doc['_rev'] del doc['domain'] return doc data = {} for view_type in [ 'indicator_definitions', 'dynamic_indicator_definitions', ]: data[view_type] = [] for namespace in namespaces: key = ["type", namespace, domain] result = db.view( 'indicators/%s' % view_type, reduce=False, startkey=key, endkey=key+[{}], include_docs=True, ).all() data[view_type].extend([_clean_indicator(d['doc']) for d in result]) response = 
HttpResponse(json.dumps(data), mimetype='application/json') response['Content-Disposition'] = 'attachment; filename=%(domain)s-indicators.json' % { 'domain': domain, } return response class BulkImportIndicatorsView(BaseSectionPageView, DomainViewMixin): urlname = 'indicators_upload_bulk' section_name = ugettext_noop("Administer Indicators") page_title = ugettext_noop("Bulk Import Indicators") template_name = 'indicators/bulk_import.html' @method_decorator(login_and_domain_required) @method_decorator(require_edit_indicators) def dispatch(self, request, *args, **kwargs): request.preview_bootstrap3 = True return super(BulkImportIndicatorsView, self).dispatch(request, *args, **kwargs) @property @memoized def import_form(self): if self.request.method == 'POST': return ImportIndicatorsFromJsonFileForm(self.request.POST) return ImportIndicatorsFromJsonFileForm() @property def page_context(self): return { 'import_form': self.import_form, 'domain': self.domain, } @property def section_url(self): return reverse('default_indicator_admin', args=[self.domain]) @property def page_url(self): return reverse(self.urlname, args=[self.domain]) def post(self, request, *args, **kwargs): upload = request.FILES.get('json_file') if upload and self.import_form.is_valid(): data = json.loads(upload.read()) for (view_type, indicator_class) in [ (u'indicator_definitions', IndicatorDefinition), (u'dynamic_indicator_definitions', DynamicIndicatorDefinition), ]: for doc in data[view_type]: copied = indicator_class.copy_to_domain( self.domain, doc, override=self.import_form.cleaned_data['override_existing'] ) messages.success( request, _("Imported indicators!") ) return HttpResponseRedirect(self.page_url) messages.error( request, _("Failed up import any indicators. Check your file.") ) return self.get(request, *args, **kwargs)
unknown
codeparrot/codeparrot-clean
import sys, os import contextlib # find_library(name) returns the pathname of a library, or None. if os.name == "nt": def _get_build_version(): """Return the version of MSVC that was used to build Python. For Python 2.3 and up, the version number is included in sys.version. For earlier versions, assume the compiler is MSVC 6. """ # This function was copied from Lib/distutils/msvccompiler.py prefix = "MSC v." i = sys.version.find(prefix) if i == -1: return 6 i = i + len(prefix) s, rest = sys.version[i:].split(" ", 1) majorVersion = int(s[:-2]) - 6 minorVersion = int(s[2:3]) / 10.0 # I don't think paths are affected by minor version in version 6 if majorVersion == 6: minorVersion = 0 if majorVersion >= 6: return majorVersion + minorVersion # else we don't know what version of the compiler this is return None def find_msvcrt(): """Return the name of the VC runtime dll""" version = _get_build_version() if version is None: # better be safe than sorry return None if version <= 6: clibname = 'msvcrt' else: clibname = 'msvcr%d' % (version * 10) # If python was built with in debug mode import imp if imp.get_suffixes()[0][0] == '_d.pyd': clibname += 'd' return clibname+'.dll' def find_library(name): if name in ('c', 'm'): return find_msvcrt() # See MSDN for the REAL search order. for directory in os.environ['PATH'].split(os.pathsep): fname = os.path.join(directory, name) if os.path.isfile(fname): return fname if fname.lower().endswith(".dll"): continue fname = fname + ".dll" if os.path.isfile(fname): return fname return None if os.name == "ce": # search path according to MSDN: # - absolute path specified by filename # - The .exe launch directory # - the Windows directory # - ROM dll files (where are they?) 
# - OEM specified search path: HKLM\Loader\SystemPath def find_library(name): return name if os.name == "posix" and sys.platform == "darwin": from ctypes.macholib.dyld import dyld_find as _dyld_find def find_library(name): possible = ['lib%s.dylib' % name, '%s.dylib' % name, '%s.framework/%s' % (name, name)] for name in possible: try: return _dyld_find(name) except ValueError: continue return None elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump import re, tempfile, errno def _findLib_gcc(name): expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \ '$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name try: f = os.popen(cmd) try: trace = f.read() finally: rv = f.close() finally: try: os.unlink(ccout) except OSError as e: if e.errno != errno.ENOENT: raise if rv == 10: raise OSError('gcc or cc command not found') res = re.search(expr, trace) if not res: return None return res.group(0) if sys.platform == "sunos5": # use /usr/ccs/bin/dump on solaris def _get_soname(f): if not f: return None cmd = "/usr/ccs/bin/dump -Lpv 2>/dev/null " + f with contextlib.closing(os.popen(cmd)) as f: data = f.read() res = re.search(r'\[.*\]\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) else: def _get_soname(f): # assuming GNU binutils / ELF if not f: return None cmd = 'if ! 
type objdump >/dev/null 2>&1; then exit 10; fi;' \ "objdump -p -j .dynamic 2>/dev/null " + f f = os.popen(cmd) dump = f.read() rv = f.close() if rv == 10: raise OSError('objdump command not found') with contextlib.closing(os.popen(cmd)) as f: data = f.read() res = re.search(r'\sSONAME\s+([^\s]+)', data) if not res: return None return res.group(1) if (sys.platform.startswith("freebsd") or sys.platform.startswith("openbsd") or sys.platform.startswith("dragonfly")): def _num_version(libname): # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ] parts = libname.split(".") nums = [] try: while parts: nums.insert(0, int(parts.pop())) except ValueError: pass return nums or [ sys.maxsize ] def find_library(name): ename = re.escape(name) expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename) with contextlib.closing(os.popen('/sbin/ldconfig -r 2>/dev/null')) as f: data = f.read() res = re.findall(expr, data) if not res: return _get_soname(_findLib_gcc(name)) res.sort(key=_num_version) return res[-1] else: def _findLib_ldconfig(name): # XXX assuming GLIBC's ldconfig (with option -p) expr = r'/[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) with contextlib.closing(os.popen('/sbin/ldconfig -p 2>/dev/null')) as f: data = f.read() res = re.search(expr, data) if not res: # Hm, this works only for libs needed by the python executable. 
cmd = 'ldd %s 2>/dev/null' % sys.executable with contextlib.closing(os.popen(cmd)) as f: data = f.read() res = re.search(expr, data) if not res: return None return res.group(0) def _findSoname_ldconfig(name): import struct if struct.calcsize('l') == 4: machine = os.uname()[4] + '-32' else: machine = os.uname()[4] + '-64' mach_map = { 'x86_64-64': 'libc6,x86-64', 'ppc64-64': 'libc6,64bit', 'sparc64-64': 'libc6,64bit', 's390x-64': 'libc6,64bit', 'ia64-64': 'libc6,IA-64', } abi_type = mach_map.get(machine, 'libc6') # XXX assuming GLIBC's ldconfig (with option -p) expr = r'(\S+)\s+\((%s(?:, OS ABI:[^\)]*)?)\)[^/]*(/[^\(\)\s]*lib%s\.[^\(\)\s]*)' \ % (abi_type, re.escape(name)) with contextlib.closing(os.popen('LC_ALL=C LANG=C /sbin/ldconfig -p 2>/dev/null')) as f: data = f.read() res = re.search(expr, data) if not res: return None return res.group(1) def find_library(name): return _findSoname_ldconfig(name) or _get_soname(_findLib_gcc(name)) ################################################################ # test code def test(): from ctypes import cdll if os.name == "nt": print(cdll.msvcrt) print(cdll.load("msvcrt")) print(find_library("msvcrt")) if os.name == "posix": # find and load_version print(find_library("m")) print(find_library("c")) print(find_library("bz2")) # getattr ## print cdll.m ## print cdll.bz2 # load if sys.platform == "darwin": print(cdll.LoadLibrary("libm.dylib")) print(cdll.LoadLibrary("libcrypto.dylib")) print(cdll.LoadLibrary("libSystem.dylib")) print(cdll.LoadLibrary("System.framework/System")) else: print(cdll.LoadLibrary("libm.so")) print(cdll.LoadLibrary("libcrypt.so")) print(find_library("crypt")) if __name__ == "__main__": test()
unknown
codeparrot/codeparrot-clean
""" Caches tiles to Amazon S3. Requires boto (2.0+): http://pypi.python.org/pypi/boto Example configuration: "cache": { "name": "S3", "bucket": "<bucket name>", "access": "<access key>", "secret": "<secret key>" } S3 cache parameters: bucket Required bucket name for S3. If it doesn't exist, it will be created. access Optional access key ID for your S3 account. secret Optional secret access key for your S3 account. use_locks Optional boolean flag for whether to use the locking feature on S3. True by default. A good reason to set this to false would be the additional price and time required for each lock set in S3. path Optional path under bucket to use as the cache dir. ex. 'cache' will put tiles under <bucket>/cache/ reduced_redundancy If set to true, use S3's Reduced Redundancy Storage feature. Storage is cheaper but has lower redundancy on Amazon's servers. Defaults to false. Access and secret keys are under "Security Credentials" at your AWS account page: http://aws.amazon.com/account/ When access or secret are not provided, the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY will be used http://docs.pythonboto.org/en/latest/s3_tut.html#creating-a-connection """ from time import time as _time, sleep as _sleep from mimetypes import guess_type from time import strptime, time from calendar import timegm try: from boto.s3.bucket import Bucket as S3Bucket from boto.s3.connection import S3Connection except ImportError: # at least we can build the documentation pass def tile_key(layer, coord, format, path = ''): """ Return a tile key string. 
""" path = path.strip('/') name = layer.name() tile = '%(zoom)d/%(column)d/%(row)d' % coord.__dict__ ext = format.lower() return str('%(path)s/%(name)s/%(tile)s.%(ext)s' % locals()) class Cache: """ """ def __init__(self, bucket, access=None, secret=None, use_locks=True, path='', reduced_redundancy=False): self.bucket = S3Bucket(S3Connection(access, secret), bucket) self.use_locks = bool(use_locks) self.path = path self.reduced_redundancy = reduced_redundancy def lock(self, layer, coord, format): """ Acquire a cache lock for this tile. Returns nothing, but blocks until the lock has been acquired. Does nothing and returns immediately if `use_locks` is false. """ if not self.use_locks: return key_name = tile_key(layer, coord, format, self.path) due = _time() + layer.stale_lock_timeout while _time() < due: if not self.bucket.get_key(key_name+'-lock'): break _sleep(.2) key = self.bucket.new_key(key_name+'-lock') key.set_contents_from_string('locked.', {'Content-Type': 'text/plain'}, reduced_redundancy=self.reduced_redundancy) def unlock(self, layer, coord, format): """ Release a cache lock for this tile. """ key_name = tile_key(layer, coord, format, self.path) self.bucket.delete_key(key_name+'-lock') def remove(self, layer, coord, format): """ Remove a cached tile. """ key_name = tile_key(layer, coord, format, self.path) self.bucket.delete_key(key_name) def read(self, layer, coord, format): """ Read a cached tile. """ key_name = tile_key(layer, coord, format, self.path) key = self.bucket.get_key(key_name) if key is None: return None if layer.cache_lifespan: t = timegm(strptime(key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')) if (time() - t) > layer.cache_lifespan: return None return key.get_contents_as_string() def save(self, body, layer, coord, format): """ Save a cached tile. 
""" key_name = tile_key(layer, coord, format, self.path) key = self.bucket.new_key(key_name) content_type, encoding = guess_type('example.'+format) headers = content_type and {'Content-Type': content_type} or {} key.set_contents_from_string(body, headers, policy='public-read', reduced_redundancy=self.reduced_redundancy)
unknown
codeparrot/codeparrot-clean
from sympy.physics.pring import wavefunction, energy from sympy.core.compatibility import range from sympy import pi, integrate, sqrt, exp, simplify, I from sympy.abc import m, x, r from sympy.physics.quantum.constants import hbar def test_wavefunction(): Psi = { 0: (1/sqrt(2 * pi)), 1: (1/sqrt(2 * pi)) * exp(I * x), 2: (1/sqrt(2 * pi)) * exp(2 * I * x), 3: (1/sqrt(2 * pi)) * exp(3 * I * x) } for n in Psi: assert simplify(wavefunction(n, x) - Psi[n]) == 0 def test_norm(n=1): # Maximum "n" which is tested: for i in range(n + 1): assert integrate( wavefunction(i, x) * wavefunction(-i, x), (x, 0, 2 * pi)) == 1 def test_orthogonality(n=1): # Maximum "n" which is tested: for i in range(n + 1): for j in range(i+1, n+1): assert integrate( wavefunction(i, x) * wavefunction(j, x), (x, 0, 2 * pi)) == 0 def test_energy(n=1): # Maximum "n" which is tested: for i in range(n+1): assert simplify( energy(i, m, r) - ((i**2 * hbar**2) / (2 * m * r**2))) == 0
unknown
codeparrot/codeparrot-clean
// generated with @7nohe/openapi-react-query-codegen@2.0.0 import { type Options } from "@hey-api/client-axios"; import { useSuspenseQuery, UseSuspenseQueryOptions } from "@tanstack/react-query"; import { AxiosError } from "axios"; import { createTokenAllAdmins, loginAllAdmins } from "../requests/services.gen"; import { CreateTokenAllAdminsError } from "../requests/types.gen"; import * as Common from "./common"; export const useCreateTokenAllAdminsSuspense = < TData = NonNullable<Common.CreateTokenAllAdminsDefaultResponse>, TError = AxiosError<CreateTokenAllAdminsError>, TQueryKey extends Array<unknown> = unknown[], >( clientOptions: Options<unknown, true> = {}, queryKey?: TQueryKey, options?: Omit<UseSuspenseQueryOptions<TData, TError>, "queryKey" | "queryFn">, ) => useSuspenseQuery<TData, TError>({ queryKey: Common.UseCreateTokenAllAdminsKeyFn(clientOptions, queryKey), queryFn: () => createTokenAllAdmins({ ...clientOptions }).then((response) => response.data as TData) as TData, ...options, }); export const useLoginAllAdminsSuspense = < TData = NonNullable<Common.LoginAllAdminsDefaultResponse>, TError = AxiosError<LoginAllAdminsError>, TQueryKey extends Array<unknown> = unknown[], >( clientOptions: Options<unknown, true> = {}, queryKey?: TQueryKey, options?: Omit<UseSuspenseQueryOptions<TData, TError>, "queryKey" | "queryFn">, ) => useSuspenseQuery<TData, TError>({ queryKey: Common.UseLoginAllAdminsKeyFn(clientOptions, queryKey), queryFn: () => loginAllAdmins({ ...clientOptions }).then((response) => response.data as TData) as TData, ...options, });
typescript
github
https://github.com/apache/airflow
airflow-core/src/airflow/api_fastapi/auth/managers/simple/ui/openapi-gen/queries/suspense.ts
from django.forms import CharField, Form, TextInput from django.utils.safestring import mark_safe from .base import WidgetTest class TextInputTest(WidgetTest): widget = TextInput() def test_render(self): self.check_html( self.widget, "email", "", html='<input type="text" name="email">' ) def test_render_none(self): self.check_html( self.widget, "email", None, html='<input type="text" name="email">' ) def test_render_value(self): self.check_html( self.widget, "email", "test@example.com", html=('<input type="text" name="email" value="test@example.com">'), ) def test_render_boolean(self): """ Boolean values are rendered to their string forms ("True" and "False"). """ self.check_html( self.widget, "get_spam", False, html=('<input type="text" name="get_spam" value="False">'), ) self.check_html( self.widget, "get_spam", True, html=('<input type="text" name="get_spam" value="True">'), ) def test_render_quoted(self): self.check_html( self.widget, "email", 'some "quoted" & ampersanded value', html=( '<input type="text" name="email" ' 'value="some &quot;quoted&quot; &amp; ampersanded value">' ), ) def test_render_custom_attrs(self): self.check_html( self.widget, "email", "test@example.com", attrs={"class": "fun"}, html=( '<input type="text" name="email" value="test@example.com" class="fun">' ), ) def test_render_unicode(self): self.check_html( self.widget, "email", "ŠĐĆŽćžšđ", attrs={"class": "fun"}, html=( '<input type="text" name="email" ' 'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" class="fun">' ), ) def test_constructor_attrs(self): widget = TextInput(attrs={"class": "fun", "type": "email"}) self.check_html( widget, "email", "", html='<input type="email" class="fun" name="email">' ) self.check_html( widget, "email", "foo@example.com", html=( '<input type="email" class="fun" value="foo@example.com" name="email">' ), ) def test_attrs_precedence(self): """ `attrs` passed to render() get precedence over those passed to the constructor """ widget = 
TextInput(attrs={"class": "pretty"}) self.check_html( widget, "email", "", attrs={"class": "special"}, html='<input type="text" class="special" name="email">', ) def test_attrs_safestring(self): widget = TextInput(attrs={"onBlur": mark_safe("function('foo')")}) self.check_html( widget, "email", "", html='<input onBlur="function(\'foo\')" type="text" name="email">', ) def test_use_required_attribute(self): # Text inputs can safely trigger the browser validation. self.assertIs(self.widget.use_required_attribute(None), True) self.assertIs(self.widget.use_required_attribute(""), True) self.assertIs(self.widget.use_required_attribute("resume.txt"), True) def test_fieldset(self): class TestForm(Form): template_name = "forms_tests/use_fieldset.html" field = CharField(widget=self.widget) form = TestForm() self.assertIs(self.widget.use_fieldset, False) self.assertHTMLEqual( '<div><label for="id_field">Field:</label>' '<input type="text" name="field" required id="id_field"></div>', form.render(), )
python
github
https://github.com/django/django
tests/forms_tests/widget_tests/test_textinput.py
# Copyright (c) 2010 Witchspace <witchspace81@gmail.com> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Connect to Litecoin server via JSON-RPC. """ from litecoinrpc.proxy import JSONRPCException, AuthServiceProxy from litecoinrpc.exceptions import _wrap_exception, WalletPassphraseIncorrect, WalletAlreadyUnlocked from litecoinrpc.data import (ServerInfo, AccountInfo, AddressInfo, TransactionInfo, AddressValidation, WorkItem, MiningInfo) class LitecoinConnection(object): """ A LitecoinConnection object defines a connection to a litecoin server. It is a thin wrapper around a JSON-RPC API connection. Up-to-date for SVN revision 198. Arguments to constructor: - *user* -- Authenticate as user. - *password* -- Authentication password. - *host* -- Litecoin JSON-RPC host. - *port* -- Litecoin JSON-RPC port. """ def __init__(self, user, password, host='localhost', port=8332, use_https=False): """ Create a new litecoin server connection. 
""" url = 'http{s}://{user}:{password}@{host}:{port}/'.format( s='s' if use_https else '', user=user, password=password, host=host, port=port) self.url = url try: self.proxy = AuthServiceProxy(url) except JSONRPCException as e: raise _wrap_exception(e.error) def stop(self): """ Stop litecoin server. """ try: self.proxy.stop() except JSONRPCException as e: raise _wrap_exception(e.error) def getblock(self, hash): """ Returns information about the given block hash. """ try: return self.proxy.getblock(hash) except JSONRPCException as e: raise _wrap_exception(e.error) def getblockcount(self): """ Returns the number of blocks in the longest block chain. """ try: return self.proxy.getblockcount() except JSONRPCException as e: raise _wrap_exception(e.error) def getblockhash(self, index): """ Returns hash of block in best-block-chain at index. :param index: index ob the block """ try: return self.proxy.getblockhash(index) except JSONRPCException as e: raise _wrap_exception(e.error) def getblocknumber(self): """ Returns the block number of the latest block in the longest block chain. Deprecated. Use getblockcount instead. """ return self.getblockcount() def getconnectioncount(self): """ Returns the number of connections to other nodes. """ try: return self.proxy.getconnectioncount() except JSONRPCException as e: raise _wrap_exception(e.error) def getdifficulty(self): """ Returns the proof-of-work difficulty as a multiple of the minimum difficulty. """ try: return self.proxy.getdifficulty() except JSONRPCException as e: raise _wrap_exception(e.error) def getgenerate(self): """ Returns :const:`True` or :const:`False`, depending on whether generation is enabled. """ try: return self.proxy.getgenerate() except JSONRPCException as e: raise _wrap_exception(e.error) def setgenerate(self, generate, genproclimit=None): """ Enable or disable generation (mining) of coins. Arguments: - *generate* -- is :const:`True` or :const:`False` to turn generation on or off. 
- *genproclimit* -- Number of processors that are used for generation, -1 is unlimited. """ try: if genproclimit is None: return self.proxy.setgenerate(generate) else: return self.proxy.setgenerate(generate, genproclimit) except JSONRPCException as e: raise _wrap_exception(e.error) def gethashespersec(self): """ Returns a recent hashes per second performance measurement while generating. """ try: return self.proxy.gethashespersec() except JSONRPCException as e: raise _wrap_exception(e.error) def getinfo(self): """ Returns an :class:`~litecoinrpc.data.ServerInfo` object containing various state info. """ try: return ServerInfo(**self.proxy.getinfo()) except JSONRPCException as e: raise _wrap_exception(e.error) def getmininginfo(self): """ Returns an :class:`~litecoinrpc.data.MiningInfo` object containing various mining state info. """ try: return MiningInfo(**self.proxy.getmininginfo()) except JSONRPCException as e: raise _wrap_exception(e.error) def getnewaddress(self, account=None): """ Returns a new litecoin address for receiving payments. Arguments: - *account* -- If account is specified (recommended), it is added to the address book so that payments received with the address will be credited to it. """ try: if account is None: return self.proxy.getnewaddress() else: return self.proxy.getnewaddress(account) except JSONRPCException as e: raise _wrap_exception(e.error) def getaccountaddress(self, account): """ Returns the current litecoin address for receiving payments to an account. Arguments: - *account* -- Account for which the address should be returned. """ try: return self.proxy.getaccountaddress(account) except JSONRPCException as e: raise _wrap_exception(e.error) def setaccount(self, litecoinaddress, account): """ Sets the account associated with the given address. Arguments: - *litecoinaddress* -- Litecoin address to associate. - *account* -- Account to associate the address to. 
""" try: return self.proxy.setaccount(litecoinaddress, account) except JSONRPCException as e: raise _wrap_exception(e.error) def getaccount(self, litecoinaddress): """ Returns the account associated with the given address. Arguments: - *litecoinaddress* -- Litecoin address to get account for. """ try: return self.proxy.getaccount(litecoinaddress) except JSONRPCException as e: raise _wrap_exception(e.error) def getaddressesbyaccount(self, account): """ Returns the list of addresses for the given account. Arguments: - *account* -- Account to get list of addresses for. """ try: return self.proxy.getaddressesbyaccount(account) except JSONRPCException as e: raise _wrap_exception(e.error) def sendtoaddress(self, litecoinaddress, amount, comment=None, comment_to=None): """ Sends *amount* from the server's available balance to *litecoinaddress*. Arguments: - *litecoinaddress* -- Litecoin address to send to. - *amount* -- Amount to send (float, rounded to the nearest 0.01). - *minconf* -- Minimum number of confirmations required for transferred balance. - *comment* -- Comment for transaction. - *comment_to* -- Comment for to-address. """ try: if comment is None: return self.proxy.sendtoaddress(litecoinaddress, amount) elif comment_to is None: return self.proxy.sendtoaddress(litecoinaddress, amount, comment) else: return self.proxy.sendtoaddress(litecoinaddress, amount, comment, comment_to) except JSONRPCException as e: raise _wrap_exception(e.error) def getreceivedbyaddress(self, litecoinaddress, minconf=1): """ Returns the total amount received by a litecoin address in transactions with at least a certain number of confirmations. Arguments: - *litecoinaddress* -- Address to query for total amount. - *minconf* -- Number of confirmations to require, defaults to 1. 
""" try: return self.proxy.getreceivedbyaddress(litecoinaddress, minconf) except JSONRPCException as e: raise _wrap_exception(e.error) def getreceivedbyaccount(self, account, minconf=1): """ Returns the total amount received by addresses with an account in transactions with at least a certain number of confirmations. Arguments: - *account* -- Account to query for total amount. - *minconf* -- Number of confirmations to require, defaults to 1. """ try: return self.proxy.getreceivedbyaccount(account, minconf) except JSONRPCException as e: raise _wrap_exception(e.error) def gettransaction(self, txid): """ Get detailed information about transaction Arguments: - *txid* -- Transactiond id for which the info should be returned """ try: return TransactionInfo(**self.proxy.gettransaction(txid)) except JSONRPCException as e: raise _wrap_exception(e.error) def getrawtransaction(self, txid, verbose=True): """ Get transaction raw info Arguments: - *txid* -- Transactiond id for which the info should be returned. - *verbose* -- If False, return only the "hex" of the transaction. """ try: if verbose: return TransactionInfo(**self.proxy.getrawtransaction(txid, 1)) return self.proxy.getrawtransaction(txid, 0) except JSONRPCException as e: raise _wrap_exception(e.error) def createrawtransaction(self, inputs, outputs): """ Creates a raw transaction spending given inputs (a list of dictionaries, each containing a transaction id and an output number), sending to given address(es). Returns hex-encoded raw transaction. Example usage: >>> conn.createrawtransaction( [{"txid": "a9d4599e15b53f3eb531608ddb31f48c695c3d0b3538a6bda871e8b34f2f430c", "vout": 0}], {"mkZBYBiq6DNoQEKakpMJegyDbw2YiNQnHT":50}) Arguments: - *inputs* -- A list of {"txid": txid, "vout": n} dictionaries. - *outputs* -- A dictionary mapping (public) addresses to the amount they are to be paid. 
""" try: return self.proxy.createrawtransaction(inputs, outputs) except JSONRPCException as e: raise _wrap_exception(e.error) def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None): """ Sign inputs for raw transaction (serialized, hex-encoded). Returns a dictionary with the keys: "hex": raw transaction with signature(s) (hex-encoded string) "complete": 1 if transaction has a complete set of signature(s), 0 if not Arguments: - *hexstring* -- A hex string of the transaction to sign. - *previous_transactions* -- A (possibly empty) list of dictionaries of the form: {"txid": txid, "vout": n, "scriptPubKey": hex, "redeemScript": hex}, representing previous transaction outputs that this transaction depends on but may not yet be in the block chain. - *private_keys* -- A (possibly empty) list of base58-encoded private keys that, if given, will be the only keys used to sign the transaction. """ try: return dict(self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys)) except JSONRPCException as e: raise _wrap_exception(e.error) def sendrawtransaction(self, hexstring): """ Sends the provided raw trasnaction. Returns the transaction id of the sent transaction. Arguments: - *hexstring* -- A hex string of the transaction, as returned by createrawtransaction. """ try: return self.proxy.sendrawtransaction(hexstring) except JSONRPCException as e: raise _wrap_exception(e.error) def decoderawtransaction(self, hexstring): """ Produces a human-readable JSON object for a raw transaction. Arguments: - *hexstring* -- A hex string of the transaction to be decoded. 
""" try: return dict(self.proxy.decoderawtransaction(hexstring)) except JSONRPCException as e: raise _wrap_exception(e.error) def listsinceblock(self, block_hash): try: res = self.proxy.listsinceblock(block_hash) res['transactions'] = [TransactionInfo(**x) for x in res['transactions']] return res except JSONRPCException as e: raise _wrap_exception(e.error) def listreceivedbyaddress(self, minconf=1, includeempty=False): """ Returns a list of addresses. Each address is represented with a :class:`~litecoinrpc.data.AddressInfo` object. Arguments: - *minconf* -- Minimum number of confirmations before payments are included. - *includeempty* -- Whether to include addresses that haven't received any payments. """ try: return [AddressInfo(**x) for x in self.proxy.listreceivedbyaddress(minconf, includeempty)] except JSONRPCException as e: raise _wrap_exception(e.error) def listaccounts(self, minconf=1, as_dict=False): """ Returns a list of account names. Arguments: - *minconf* -- Minimum number of confirmations before payments are included. - *as_dict* -- Returns a dictionary of account names, with their balance as values. """ try: if as_dict: return dict(self.proxy.listaccounts(minconf)) else: return self.proxy.listaccounts(minconf).keys() except JSONRPCException as e: raise _wrap_exception(e.error) def listreceivedbyaccount(self, minconf=1, includeempty=False): """ Returns a list of accounts. Each account is represented with a :class:`~litecoinrpc.data.AccountInfo` object. Arguments: - *minconf* -- Minimum number of confirmations before payments are included. - *includeempty* -- Whether to include addresses that haven't received any payments. """ try: return [AccountInfo(**x) for x in self.proxy.listreceivedbyaccount(minconf, includeempty)] except JSONRPCException as e: raise _wrap_exception(e.error) def listtransactions(self, account=None, count=10, from_=0, address=None): """ Returns a list of the last transactions for an account. 
Each transaction is represented with a :class:`~litecoinrpc.data.TransactionInfo` object. Arguments: - *account* -- Account to list transactions from. Return transactions from all accounts if None. - *count* -- Number of transactions to return. - *from_* -- Skip the first <from_> transactions. - *address* -- Receive address to consider """ accounts = [account] if account is not None else self.listaccounts(as_dict=True).iterkeys() try: return [TransactionInfo(**tx) for acc in accounts for tx in self.proxy.listtransactions(acc, count, from_) if address is None or tx["address"] == address] except JSONRPCException as e: raise _wrap_exception(e.error) def backupwallet(self, destination): """ Safely copies ``wallet.dat`` to *destination*, which can be a directory or a path with filename. Arguments: - *destination* -- directory or path with filename to backup wallet to. """ try: return self.proxy.backupwallet(destination) except JSONRPCException as e: raise _wrap_exception(e.error) def validateaddress(self, validateaddress): """ Validate a litecoin address and return information for it. The information is represented by a :class:`~litecoinrpc.data.AddressValidation` object. Arguments: -- Address to validate. - *validateaddress* """ try: return AddressValidation(**self.proxy.validateaddress(validateaddress)) except JSONRPCException as e: raise _wrap_exception(e.error) def getbalance(self, account=None, minconf=None): """ Get the current balance, either for an account or the total server balance. Arguments: - *account* -- If this parameter is specified, returns the balance in the account. - *minconf* -- Minimum number of confirmations required for transferred balance. 
""" args = [] if account is not None: args.append(account) if minconf is not None: args.append(minconf) try: return self.proxy.getbalance(*args) except JSONRPCException as e: raise _wrap_exception(e.error) def move(self, fromaccount, toaccount, amount, minconf=1, comment=None): """ Move from one account in your wallet to another. Arguments: - *fromaccount* -- Source account name. - *toaccount* -- Destination account name. - *amount* -- Amount to transfer. - *minconf* -- Minimum number of confirmations required for transferred balance. - *comment* -- Comment to add to transaction log. """ try: if comment is None: return self.proxy.move(fromaccount, toaccount, amount, minconf) else: return self.proxy.move(fromaccount, toaccount, amount, minconf, comment) except JSONRPCException as e: raise _wrap_exception(e.error) def sendfrom(self, fromaccount, tolitecoinaddress, amount, minconf=1, comment=None, comment_to=None): """ Sends amount from account's balance to litecoinaddress. This method will fail if there is less than amount litecoins with minconf confirmations in the account's balance (unless account is the empty-string-named default account; it behaves like the sendtoaddress method). Returns transaction ID on success. Arguments: - *fromaccount* -- Account to send from. - *tolitecoinaddress* -- Litecoin address to send to. - *amount* -- Amount to send (float, rounded to the nearest 0.01). - *minconf* -- Minimum number of confirmations required for transferred balance. - *comment* -- Comment for transaction. - *comment_to* -- Comment for to-address. 
""" try: if comment is None: return self.proxy.sendfrom(fromaccount, tolitecoinaddress, amount, minconf) elif comment_to is None: return self.proxy.sendfrom(fromaccount, tolitecoinaddress, amount, minconf, comment) else: return self.proxy.sendfrom(fromaccount, tolitecoinaddress, amount, minconf, comment, comment_to) except JSONRPCException as e: raise _wrap_exception(e.error) def sendmany(self, fromaccount, todict, minconf=1, comment=None): """ Sends specified amounts from account's balance to litecoinaddresses. This method will fail if there is less than total amount litecoins with minconf confirmations in the account's balance (unless account is the empty-string-named default account; Returns transaction ID on success. Arguments: - *fromaccount* -- Account to send from. - *todict* -- Dictionary with Litecoin addresses as keys and amounts as values. - *minconf* -- Minimum number of confirmations required for transferred balance. - *comment* -- Comment for transaction. """ try: if comment is None: return self.proxy.sendmany(fromaccount, todict, minconf) else: return self.proxy.sendmany(fromaccount, todict, minconf, comment) except JSONRPCException as e: raise _wrap_exception(e.error) def verifymessage(self, litecoinaddress, signature, message): """ Verifies a signature given the litecoinaddress used to sign, the signature itself, and the message that was signed. Returns :const:`True` if the signature is valid, and :const:`False` if it is invalid. Arguments: - *litecoinaddress* -- the litecoinaddress used to sign the message - *signature* -- the signature to be verified - *message* -- the message that was originally signed """ try: return self.proxy.verifymessage(litecoinaddress, signature, message) except JSONRPCException as e: raise _wrap_exception(e.error) def getwork(self, data=None): """ Get work for remote mining, or submit result. If data is specified, the server tries to solve the block using the provided data and returns :const:`True` if it was successful. 
If not, the function returns formatted hash data (:class:`~litecoinrpc.data.WorkItem`) to work on. Arguments: - *data* -- Result from remote mining. """ try: if data is None: # Only if no data provided, it returns a WorkItem return WorkItem(**self.proxy.getwork()) else: return self.proxy.getwork(data) except JSONRPCException as e: raise _wrap_exception(e.error) def listunspent(self, minconf=1, maxconf=999999): """ Returns a list of unspent transaction inputs in the wallet. Arguments: - *minconf* -- Minimum number of confirmations required to be listed. - *maxconf* -- Maximal number of confirmations allowed to be listed. """ try: return [TransactionInfo(**tx) for tx in self.proxy.listunspent(minconf, maxconf)] except JSONRPCException as e: raise _wrap_exception(e.error) def keypoolrefill(self): "Fills the keypool, requires wallet passphrase to be set." try: self.proxy.keypoolrefill() except JSONRPCException as e: raise _wrap_exception(e.error) def walletpassphrase(self, passphrase, timeout, dont_raise=False): """ Stores the wallet decryption key in memory for <timeout> seconds. - *passphrase* -- The wallet passphrase. - *timeout* -- Time in seconds to keep the wallet unlocked (by keeping the passphrase in memory). - *dont_raise* -- instead of raising `~litecoinrpc.exceptions.WalletPassphraseIncorrect` return False. """ try: self.proxy.walletpassphrase(passphrase, timeout) return True except JSONRPCException as e: json_exception = _wrap_exception(e.error) if dont_raise: if isinstance(json_exception, WalletPassphraseIncorrect): return False elif isinstance(json_exception, WalletAlreadyUnlocked): return True raise json_exception def walletlock(self): """ Removes the wallet encryption key from memory, locking the wallet. After calling this method, you will need to call walletpassphrase again before being able to call any methods which require the wallet to be unlocked. 
""" try: return self.proxy.walletlock() except JSONRPCException as e: raise _wrap_exception(e.error) def walletpassphrasechange(self, oldpassphrase, newpassphrase, dont_raise=False): """ Changes the wallet passphrase from <oldpassphrase> to <newpassphrase>. Arguments: - *dont_raise* -- instead of raising `~litecoinrpc.exceptions.WalletPassphraseIncorrect` return False. """ try: self.proxy.walletpassphrasechange(oldpassphrase, newpassphrase) return True except JSONRPCException as e: json_exception = _wrap_exception(e.error) if dont_raise and isinstance(json_exception, WalletPassphraseIncorrect): return False raise json_exception
unknown
codeparrot/codeparrot-clean
from keras.layers import Dense, Dropout, LSTM, Activation from keras.layers import merge, Input, Embedding from keras.models import Model from keras.regularizers import l2 from keras.constraints import maxnorm from keras.utils import plot_model from keras.optimizers import RMSprop, Nadam, Adagrad, Adam, Adamax from keras.initializers import glorot_uniform, glorot_normal # Model 1 - Baseline -- 56.68% Val Acc -- 60.07% Test-dev Acc def text_model(embedding_matrix, num_tokens, embedding_dim, text_input, dropout_rate, regularization_rate): print("Creating text model...") model = Embedding(num_tokens, embedding_dim, weights=[embedding_matrix], trainable=False)(text_input) model = Activation('tanh')(model) model = LSTM(units=1024, return_sequences=True, U_regularizer=l2(0.1), kernel_initializer=glorot_normal())(model) model = Dropout(dropout_rate)(model) model = LSTM(units=1024, U_regularizer=l2(0.1), kernel_initializer=glorot_normal())(model) model = Dropout(dropout_rate)(model) model = Dense(1024, activation='tanh', W_constraint=maxnorm(3), kernel_initializer=glorot_normal(), kernel_regularizer=l2(regularization_rate))(model) return model def img_model(img_input, regularization_rate): print("Creating image model...") model = Dense(1024, activation='tanh', W_constraint=maxnorm(3), kernel_initializer=glorot_normal(), kernel_regularizer=l2(regularization_rate))(img_input) return model def baseline(embedding_matrix, num_tokens, embedding_dim, dropout_rate, regularization_rate, num_classes): img_input = Input(shape=(2048,)) text_input = Input(shape=(None,)) vgg_model = img_model(img_input, regularization_rate) lstm_model = text_model(embedding_matrix, num_tokens, embedding_dim, text_input, dropout_rate, regularization_rate) print("Merging final model...") fc_model = merge([vgg_model, lstm_model], mode='mul') fc_model = Dropout(dropout_rate)(fc_model) fc_model = Dense(1000, activation='tanh', W_constraint=maxnorm(3), kernel_initializer=glorot_normal(), 
kernel_regularizer=l2(regularization_rate))(fc_model) fc_model = Dropout(dropout_rate)(fc_model) fc_model = Dense(num_classes, activation='softmax', W_constraint=maxnorm(3), kernel_initializer=glorot_normal(), kernel_regularizer=l2(regularization_rate))(fc_model) model = Model(inputs=[img_input, text_input], outputs=fc_model) opt = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) print (model.summary()) plot_model(model, to_file='model_plots/model_baseline.png') return model
unknown
codeparrot/codeparrot-clean
/*! * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import { VStack } from "@chakra-ui/react"; import { useSearchParams, useParams } from "react-router-dom"; import { FilterBar, type FilterValue } from "src/components/FilterBar"; import { SearchParamsKeys, type SearchParamsKeysType } from "src/constants/searchParams"; import { useFiltersHandler, type FilterableSearchParamsKeys } from "src/utils"; const { ASSET_EVENT_DATE_RANGE: ASSET_EVENT_DATE_RANGE_PARAM, DAG_ID_PATTERN: DAG_ID_PATTERN_PARAM, DAG_VERSION: DAG_VERSION_PARAM, DURATION_GTE: DURATION_GTE_PARAM, DURATION_LTE: DURATION_LTE_PARAM, LOGICAL_DATE_RANGE: LOGICAL_DATE_RANGE_PARAM, MAP_INDEX: MAP_INDEX_PARAM, NAME_PATTERN: NAME_PATTERN_PARAM, OPERATOR_NAME_PATTERN: OPERATOR_NAME_PATTERN_PARAM, POOL_NAME_PATTERN: POOL_NAME_PATTERN_PARAM, QUEUE_NAME_PATTERN: QUEUE_NAME_PATTERN_PARAM, RUN_ID_PATTERN: RUN_ID_PATTERN_PARAM, TASK_STATE: STATE_PARAM, TRY_NUMBER: TRY_NUMBER_PARAM, }: SearchParamsKeysType = SearchParamsKeys; export const TaskInstancesFilter = () => { const { dagId, runId } = useParams(); const paramKeys: Array<FilterableSearchParamsKeys> = [ NAME_PATTERN_PARAM as FilterableSearchParamsKeys, LOGICAL_DATE_RANGE_PARAM as FilterableSearchParamsKeys, ASSET_EVENT_DATE_RANGE_PARAM as 
FilterableSearchParamsKeys, DURATION_GTE_PARAM as FilterableSearchParamsKeys, DURATION_LTE_PARAM as FilterableSearchParamsKeys, TRY_NUMBER_PARAM as FilterableSearchParamsKeys, MAP_INDEX_PARAM as FilterableSearchParamsKeys, DAG_VERSION_PARAM as FilterableSearchParamsKeys, OPERATOR_NAME_PATTERN_PARAM as FilterableSearchParamsKeys, POOL_NAME_PATTERN_PARAM as FilterableSearchParamsKeys, QUEUE_NAME_PATTERN_PARAM as FilterableSearchParamsKeys, STATE_PARAM as FilterableSearchParamsKeys, ]; if (runId === undefined) { paramKeys.unshift(RUN_ID_PATTERN_PARAM as FilterableSearchParamsKeys); } if (dagId === undefined) { paramKeys.unshift(DAG_ID_PATTERN_PARAM as FilterableSearchParamsKeys); } const [searchParams] = useSearchParams(); const { filterConfigs, handleFiltersChange } = useFiltersHandler(paramKeys); const initialValues: Record<string, FilterValue> = {}; filterConfigs.forEach((config) => { const value = searchParams.get(config.key); if (value !== null && value !== "") { if (config.type === "number") { const parsedValue = Number(value); initialValues[config.key] = isNaN(parsedValue) ? value : parsedValue; } else { initialValues[config.key] = value; } } }); return ( <VStack align="start" justifyContent="space-between"> <VStack alignItems="flex-start" gap={1}> <FilterBar configs={filterConfigs} initialValues={initialValues} onFiltersChange={handleFiltersChange} /> </VStack> </VStack> ); };
typescript
github
https://github.com/apache/airflow
airflow-core/src/airflow/ui/src/pages/TaskInstances/TaskInstancesFilter.tsx
"""Provides device automations for Alarm control panel.""" from typing import List, Optional import voluptuous as vol from homeassistant.const import ( ATTR_CODE, ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, CONF_CODE, CONF_DEVICE_ID, CONF_DOMAIN, CONF_ENTITY_ID, CONF_TYPE, SERVICE_ALARM_ARM_AWAY, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_NIGHT, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER, ) from homeassistant.core import Context, HomeAssistant from homeassistant.helpers import entity_registry import homeassistant.helpers.config_validation as cv from . import ATTR_CODE_ARM_REQUIRED, DOMAIN from .const import ( SUPPORT_ALARM_ARM_AWAY, SUPPORT_ALARM_ARM_HOME, SUPPORT_ALARM_ARM_NIGHT, SUPPORT_ALARM_TRIGGER, ) ACTION_TYPES = {"arm_away", "arm_home", "arm_night", "disarm", "trigger"} ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend( { vol.Required(CONF_TYPE): vol.In(ACTION_TYPES), vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN), vol.Optional(CONF_CODE): cv.string, } ) async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]: """List device actions for Alarm control panel devices.""" registry = await entity_registry.async_get_registry(hass) actions = [] # Get all the integrations entities for this device for entry in entity_registry.async_entries_for_device(registry, device_id): if entry.domain != DOMAIN: continue state = hass.states.get(entry.entity_id) # We need a state or else we can't populate the HVAC and preset modes. 
if state is None: continue supported_features = state.attributes[ATTR_SUPPORTED_FEATURES] # Add actions for each entity that belongs to this integration if supported_features & SUPPORT_ALARM_ARM_AWAY: actions.append( { CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "arm_away", } ) if supported_features & SUPPORT_ALARM_ARM_HOME: actions.append( { CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "arm_home", } ) if supported_features & SUPPORT_ALARM_ARM_NIGHT: actions.append( { CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "arm_night", } ) actions.append( { CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "disarm", } ) if supported_features & SUPPORT_ALARM_TRIGGER: actions.append( { CONF_DEVICE_ID: device_id, CONF_DOMAIN: DOMAIN, CONF_ENTITY_ID: entry.entity_id, CONF_TYPE: "trigger", } ) return actions async def async_call_action_from_config( hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context] ) -> None: """Execute a device action.""" config = ACTION_SCHEMA(config) service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]} if CONF_CODE in config: service_data[ATTR_CODE] = config[CONF_CODE] if config[CONF_TYPE] == "arm_away": service = SERVICE_ALARM_ARM_AWAY elif config[CONF_TYPE] == "arm_home": service = SERVICE_ALARM_ARM_HOME elif config[CONF_TYPE] == "arm_night": service = SERVICE_ALARM_ARM_NIGHT elif config[CONF_TYPE] == "disarm": service = SERVICE_ALARM_DISARM elif config[CONF_TYPE] == "trigger": service = SERVICE_ALARM_TRIGGER await hass.services.async_call( DOMAIN, service, service_data, blocking=True, context=context ) async def async_get_action_capabilities(hass, config): """List action capabilities.""" state = hass.states.get(config[CONF_ENTITY_ID]) code_required = state.attributes.get(ATTR_CODE_ARM_REQUIRED) if state else False if config[CONF_TYPE] == "trigger" 
or ( config[CONF_TYPE] != "disarm" and not code_required ): return {} return {"extra_fields": vol.Schema({vol.Optional(CONF_CODE): str})}
unknown
codeparrot/codeparrot-clean
//===--- EscapeUtils.swift ------------------------------------------------===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This file provides utilities for transitively visiting all uses of a value. // The most common use case is to check if a value "escapes" to some destination // (e.g. an instruction) or if it "escapes" the current function at all. // // The APIs on `Value` and/or `ProjectedValue` are // * `isEscaping(using:)` // * `isEscapingWhenWalkingDown(using:)` // * `visit(using:)` // * `visitByWalkingDown(using:)` // // where a `EscapeVisitor` can be passed to the `using` argument to configure // the visit. // // The term "escaping" means that the "bit pattern" of the value is visible // at the destination. For example, in stack promotion we check if a reference to // an allocated object can escape it's function, i.e. if the bit pattern of the // reference can be visible outside it's function. // But it's also possible to check the "escapeness" of trivial values, e.g. an // `Int`. An `Int` escapes if its bit pattern is visible at the destination. // Though, by default trivial values are ignored. This can be configured with // `EscapeVisitor.followTrivialTypes`. // // By default, there is no distinction between addresses and value-type values. // Even if the value in question has an address type, it's considered escaping // if the stored value is escaping. // This can be configured with `EscapeVisitor.followLoads`. 
// // The visit algorithm works by starting a walk at the value and alternately // walking in two directions: // * Starting at root definitions, like allocations: walks down from defs to uses // ("Where does the value go to?") // * Starting at stores, walks up from uses to defs // ("Were does the value come from?") // // The value "escapes" if the walk reaches a point where the further flow of the value // cannot be tracked anymore. // Example: // \code // %1 = alloc_ref $X // 1. initial value: walk down to the `store` // %2 = alloc_stack $X // 3. walk down to %3 // store %1 to %2 // 2. walk up to `%2` // %3 = load %2 // 4. continue walking down to the `return` // return %3 // 5. The value is escaping! // \endcode // // The traversal stops at points where the current path doesn't match the original projection. // For example, let's assume this function is called on a projected value with path `s0.c1`. // \code // %value : $Struct<X> // current path == s0.c1, the initial value // %ref = struct_extract %value, #field0 // current path == c1 // %addr = ref_element_addr %ref, #field2 // mismatch: `c1` != `c2` -> ignored // \endcode // //===----------------------------------------------------------------------===// import SIL extension ProjectedValue { /// Returns true if the projected value escapes. /// /// The provided `visitor` can be used to override the handling a certain defs and uses during /// the walk. See `EscapeVisitor` for details. 
/// func isEscaping( using visitor: some EscapeVisitor = DefaultVisitor(), initialWalkingDirection: EscapeUtilityTypes.WalkingDirection = .up, complexityBudget: Int = Int.max, _ context: some Context ) -> Bool { var walker = EscapeWalker(visitor: visitor, complexityBudget: complexityBudget, context) let result: WalkResult switch initialWalkingDirection { case .up: result = walker.walkUp(addressOrValue: value, path: path.escapePath) case .down: result = walker.walkDown(addressOrValue: value, path: path.escapePath) } return result == .abortWalk } /// Returns the result of the visitor if the projected value does not escape. /// /// This function is similar to `isEscaping() -> Bool`, but instead of returning a Bool, /// it returns the `result` of the `visitor`, if the projected value does not escape. /// Returns nil, if the projected value escapes. /// func visit<V: EscapeVisitorWithResult>( using visitor: V, initialWalkingDirection: EscapeUtilityTypes.WalkingDirection = .up, complexityBudget: Int = Int.max, _ context: some Context ) -> V.Result? { var walker = EscapeWalker(visitor: visitor, complexityBudget: complexityBudget, context) let result: WalkResult switch initialWalkingDirection { case .up: result = walker.walkUp(addressOrValue: value, path: path.escapePath) case .down: result = walker.walkDown(addressOrValue: value, path: path.escapePath) } if result == .abortWalk { walker.visitor.cleanupOnAbort() return nil } return walker.visitor.result } } extension Value { /// The un-projected version of `ProjectedValue.isEscaping()`. func isEscaping( using visitor: some EscapeVisitor = DefaultVisitor(), initialWalkingDirection: EscapeUtilityTypes.WalkingDirection = .up, _ context: some Context ) -> Bool { return self.at(SmallProjectionPath()).isEscaping(using: visitor, initialWalkingDirection: initialWalkingDirection, context) } /// The un-projected version of `ProjectedValue.visit()`. 
func visit<V: EscapeVisitorWithResult>( using visitor: V, initialWalkingDirection: EscapeUtilityTypes.WalkingDirection = .up, _ context: some Context ) -> V.Result? { return self.at(SmallProjectionPath()).visit(using: visitor, initialWalkingDirection: initialWalkingDirection, context) } } /// This protocol is used to customize `ProjectedValue.isEscaping` (and similar functions) /// by implementing `visitUse` and `visitDef` which are called for all uses and definitions /// encountered during a walk. protocol EscapeVisitor { typealias UseResult = EscapeUtilityTypes.UseVisitResult typealias DefResult = EscapeUtilityTypes.DefVisitResult typealias EscapePath = EscapeUtilityTypes.EscapePath /// Called during the DefUse walk for each use mutating func visitUse(operand: Operand, path: EscapePath) -> UseResult /// Called during the UseDef walk for each definition mutating func visitDef(def: Value, path: EscapePath) -> DefResult /// If true, the traversals follow values with trivial types. var followTrivialTypes: Bool { get } /// If true, the traversal follows loaded values. var followLoads: Bool { get } } extension EscapeVisitor { mutating func visitUse(operand: Operand, path: EscapePath) -> UseResult { return .continueWalk } mutating func visitDef(def: Value, path: EscapePath) -> DefResult { return .continueWalkUp } var followTrivialTypes: Bool { false } var followLoads: Bool { true } } /// A visitor which returns a `result`. protocol EscapeVisitorWithResult : EscapeVisitor { associatedtype Result var result: Result { get } mutating func cleanupOnAbort() } extension EscapeVisitorWithResult { mutating func cleanupOnAbort() {} } // FIXME: This ought to be marked private, but that triggers a compiler bug // in debug builds (rdar://117413192) struct DefaultVisitor : EscapeVisitor {} struct EscapeUtilityTypes { enum WalkingDirection { case up case down } /// The EscapePath is updated and maintained during the up-walk and down-walk. 
/// /// It's passed to the EscapeVisitor's `visitUse` and `visitDef`. struct EscapePath: SmallProjectionWalkingPath { /// During the walk, a projection path indicates where the initial value is /// contained in an aggregate. /// Example for a walk-down: /// \code /// %1 = alloc_ref // 1. initial value, path = empty /// %2 = struct $S (%1) // 2. path = s0 /// %3 = tuple (%other, %1) // 3. path = t1.s0 /// %4 = tuple_extract %3, 1 // 4. path = s0 /// %5 = struct_extract %4, #field // 5. path = empty /// \endcode /// let projectionPath: SmallProjectionPath /// This flag indicates if stored values should be included in the walk. /// If the initial value is stored to some memory allocation, we usually don't /// care if other values are stored to that location as well. Example: /// \code /// %1 = alloc_ref $X // 1. initial value, walk down to the `store` /// %2 = alloc_stack $X // 3. walk down to the second `store` /// store %1 to %2 // 2. walk up to %2 /// store %other to %2 // 4. ignore (followStores == false): %other doesn't impact the "escapeness" of %1 /// \endcode /// /// But once the up-walk sees a load, it has to follow stores from that point on. /// Example: /// \code /// bb0(%function_arg): // 7. escaping! %1 escapes through %function_arg /// %1 = alloc_ref $X // 1. initial value, walk down to the second `store` /// %addr = alloc_stack %X // 5. walk down to the first `store` /// store %function_arg to %addr // 6. walk up to %function_arg (followStores == true) /// %2 = load %addr // 4. walk up to %addr, followStores = true /// %3 = ref_element_addr %2, #f // 3. walk up to %2 /// store %1 to %3 // 2. walk up to %3 /// \endcode /// let followStores: Bool /// Set to true if an address is stored. /// This unusual situation can happen if an address is converted to a raw pointer and that pointer /// is stored to a memory location. /// In this case the walkers need to follow load instructions even if the visitor and current projection /// path don't say so. 
let addressIsStored: Bool /// Not nil, if the exact type of the current value is know. /// /// This is used for destructor analysis. /// Example: /// \code /// %1 = alloc_ref $Derived // 1. initial value, knownType = $Derived /// %2 = upcast %1 to $Base // 2. knownType = $Derived /// destroy_value %2 : $Base // 3. We know that the destructor of $Derived is called here /// \endcode let knownType: Type? func with(projectionPath: SmallProjectionPath) -> Self { return Self(projectionPath: projectionPath, followStores: self.followStores, addressIsStored: self.addressIsStored, knownType: self.knownType) } func with(followStores: Bool) -> Self { return Self(projectionPath: self.projectionPath, followStores: followStores, addressIsStored: self.addressIsStored, knownType: self.knownType) } func with(addressStored: Bool) -> Self { return Self(projectionPath: self.projectionPath, followStores: self.followStores, addressIsStored: addressStored, knownType: self.knownType) } func with(knownType: Type?) -> Self { return Self(projectionPath: self.projectionPath, followStores: self.followStores, addressIsStored: self.addressIsStored, knownType: knownType) } func merge(with other: EscapePath) -> EscapePath { let mergedPath = self.projectionPath.merge(with: other.projectionPath) let mergedFollowStores = self.followStores || other.followStores let mergedAddrStored = self.addressIsStored || other.addressIsStored let mergedKnownType: Type? if let ty = self.knownType { if let otherTy = other.knownType, ty != otherTy { mergedKnownType = nil } else { mergedKnownType = ty } } else { mergedKnownType = other.knownType } return EscapePath(projectionPath: mergedPath, followStores: mergedFollowStores, addressIsStored: mergedAddrStored, knownType: mergedKnownType) } } enum DefVisitResult { case ignore case continueWalkUp case walkDown case abort } enum UseVisitResult { case ignore case continueWalk case abort } } /// EscapeWalker is both a DefUse walker and UseDef walker. 
It implements both, the up-, and down-walk. fileprivate struct EscapeWalker<V: EscapeVisitor> : ValueDefUseWalker, AddressDefUseWalker, ValueUseDefWalker, AddressUseDefWalker { typealias Path = EscapeUtilityTypes.EscapePath init(visitor: V, complexityBudget: Int = Int.max, _ context: some Context) { self.calleeAnalysis = context.calleeAnalysis self.visitor = visitor self.complexityBudget = complexityBudget } //===--------------------------------------------------------------------===// // Walking down //===--------------------------------------------------------------------===// mutating func walkDown(addressOrValue: Value, path: Path) -> WalkResult { if addressOrValue.type.isAddress { return walkDownUses(ofAddress: addressOrValue, path: path) } else { return walkDownUses(ofValue: addressOrValue, path: path) } } mutating func cachedWalkDown(addressOrValue: Value, path: Path) -> WalkResult { if let path = walkDownCache.needWalk(for: addressOrValue, path: path) { return walkDown(addressOrValue: addressOrValue, path: path) } else { return .continueWalk } } mutating func walkDown(value: Operand, path: Path) -> WalkResult { if complexityBudgetExceeded(value.value) { return .abortWalk } if hasRelevantType(value.value, at: path.projectionPath) { switch visitor.visitUse(operand: value, path: path) { case .continueWalk: return walkDownDefault(value: value, path: path) case .ignore: return .continueWalk case .abort: return .abortWalk } } return .continueWalk } /// ``ValueDefUseWalker`` conformance: called when the value def-use walk can't continue, /// i.e. when the result of the use is not a value. 
mutating func leafUse(value operand: Operand, path: Path) -> WalkResult { let instruction = operand.instruction switch instruction { case let rta as RefTailAddrInst: if let path = pop(.tailElements, from: path, yielding: rta) { return walkDownUses(ofAddress: rta, path: path.with(knownType: nil)) } case let rea as RefElementAddrInst: if let path = pop(.classField, index: rea.fieldIndex, from: path, yielding: rea) { return walkDownUses(ofAddress: rea, path: path.with(knownType: nil)) } case let pb as ProjectBoxInst: if let path = pop(.classField, index: pb.fieldIndex, from: path, yielding: pb) { return walkDownUses(ofAddress: pb, path: path.with(knownType: nil)) } case is StoreInst, is StoreWeakInst, is StoreUnownedInst: let store = instruction as! StoringInstruction assert(operand == store.sourceOperand) if !followLoads(at: path) { return walkUp(address: store.destination, path: path.with(addressStored: true)) } return walkUp(address: store.destination, path: path) case is DestroyValueInst, is ReleaseValueInst, is StrongReleaseInst: if handleDestroy(of: operand.value, path: path) == .abortWalk { return .abortWalk } case is ReturnInstruction: return isEscaping case is ApplyInst, is TryApplyInst, is BeginApplyInst: return walkDownCallee(argOp: operand, apply: instruction as! FullApplySite, path: path) case let pai as PartialApplyInst: // Check whether the partially applied argument can escape in the body. if walkDownCallee(argOp: operand, apply: pai, path: path.with(knownType: nil)) == .abortWalk { return .abortWalk } // Additionally we need to follow the partial_apply value for two reasons: // 1. the closure (with the captured values) itself can escape // and the use "transitively" escapes // 2. 
something can escape in a destructor when the context is destroyed return walkDownUses(ofValue: pai, path: path.with(knownType: nil)) case let pta as PointerToAddressInst: return walkDownUses(ofAddress: pta, path: path.with(knownType: nil)) case let cv as ConvertFunctionInst: return walkDownUses(ofValue: cv, path: path.with(knownType: nil)) case let bi as BuiltinInst: switch bi.id { case .DestroyArray: // If it's not the array base pointer operand -> bail. Though, that shouldn't happen // because the other operands (metatype, count) shouldn't be visited anyway. if operand.index != 1 { return isEscaping } // Class references, which are directly located in the array elements cannot escape, // because those are passed as `self` to their deinits - and `self` cannot escape in a deinit. if !path.projectionPath.mayHaveClassProjection { return .continueWalk } return isEscaping case .AtomicLoad: // Treat atomic loads as regular loads and just walk down their uses. if !followLoads(at: path) { return .continueWalk } // Even when analyzing atomics, a loaded trivial value can be ignored. if hasRelevantType(bi, at: path.projectionPath) { return .continueWalk } return walkDownUses(ofValue: bi, path: path.with(knownType: nil)) case .AtomicStore, .AtomicRMW: // If we shouldn't follow the store, then we can keep walking. if !path.followStores { return .continueWalk } // Be conservative and just say the store is escaping. return isEscaping case .CmpXChg: // If we have to follow loads or stores of a cmpxchg, then just bail. if followLoads(at: path) || path.followStores { return isEscaping } return .continueWalk case .Fence: // Fences do not affect escape analysis. 
return .continueWalk default: return isEscaping } case is StrongRetainInst, is RetainValueInst, is DebugValueInst, is ValueMetatypeInst, is InitExistentialMetatypeInst, is OpenExistentialMetatypeInst, is ExistentialMetatypeInst, is DeallocRefInst, is FixLifetimeInst, is ClassifyBridgeObjectInst, is BridgeObjectToWordInst, is EndBorrowInst, is StrongRetainInst, is RetainValueInst, is ClassMethodInst, is SuperMethodInst, is ObjCMethodInst, is ObjCSuperMethodInst, is WitnessMethodInst, is DeallocStackRefInst: return .continueWalk case is DeallocStackInst: // dealloc_stack %f : $@noescape @callee_guaranteed () -> () // type is a value assert(operand.value.definingInstruction is PartialApplyInst) return .continueWalk default: return isEscaping } return .continueWalk } mutating func walkDown(address: Operand, path: Path) -> WalkResult { if complexityBudgetExceeded(address.value) { return .abortWalk } if hasRelevantType(address.value, at: path.projectionPath) { switch visitor.visitUse(operand: address, path: path) { case .continueWalk: return walkDownDefault(address: address, path: path) case .ignore: return .continueWalk case .abort: return .abortWalk } } return .continueWalk } /// ``AddressDefUseWalker`` conformance: called when the address def-use walk can't continue, /// i.e. when the result of the use is not an address. mutating func leafUse(address operand: Operand, path: Path) -> WalkResult { let instruction = operand.instruction switch instruction { case is StoreInst, is StoreWeakInst, is StoreUnownedInst: let store = instruction as! StoringInstruction assert(operand == store.destinationOperand) if let si = store as? 
StoreInst, si.storeOwnership == .assign { if handleDestroy(of: operand.value, path: path.with(knownType: nil)) == .abortWalk { return .abortWalk } } if path.followStores { return walkUp(value: store.source, path: path) } case let storeBorrow as StoreBorrowInst: assert(operand == storeBorrow.destinationOperand) return walkDownUses(ofAddress: storeBorrow, path: path) case let copyAddr as CopyAddrInst: if !followLoads(at: path) { return .continueWalk } if operand == copyAddr.sourceOperand { return walkUp(address: copyAddr.destination, path: path) } else { if !copyAddr.isInitializationOfDestination { if handleDestroy(of: operand.value, path: path.with(knownType: nil)) == .abortWalk { return .abortWalk } } if path.followStores { assert(operand == copyAddr.destinationOperand) return walkUp(value: copyAddr.source, path: path) } } case is DestroyAddrInst: if handleDestroy(of: operand.value, path: path) == .abortWalk { return .abortWalk } case is ReturnInstruction: return isEscaping case is ApplyInst, is TryApplyInst, is BeginApplyInst: return walkDownCallee(argOp: operand, apply: instruction as! FullApplySite, path: path) case let pai as PartialApplyInst: if walkDownCallee(argOp: operand, apply: pai, path: path.with(knownType: nil)) == .abortWalk { return .abortWalk } // We need to follow the partial_apply value for two reasons: // 1. the closure (with the captured values) itself can escape // 2. something can escape in a destructor when the context is destroyed if followLoads(at: path) || pai.capturesAddress(of: operand) { return walkDownUses(ofValue: pai, path: path.with(knownType: nil)) } case is LoadInst, is LoadWeakInst, is LoadUnownedInst, is LoadBorrowInst: if !followLoads(at: path) { return .continueWalk } let svi = instruction as! SingleValueInstruction // Even when analyzing addresses, a loaded trivial value can be ignored. 
if svi.hasTrivialNonPointerType { return .continueWalk } return walkDownUses(ofValue: svi, path: path.with(knownType: nil)) case let atp as AddressToPointerInst: return walkDownUses(ofValue: atp, path: path.with(knownType: nil)) case is DeallocStackInst, is InjectEnumAddrInst, is FixLifetimeInst, is EndBorrowInst, is EndAccessInst, is IsUniqueInst, is DebugValueInst: return .continueWalk case let uac as UncheckedAddrCastInst: if uac.type != uac.fromAddress.type { // It's dangerous to continue walking over an `unchecked_addr_cast` which casts between two different types. // We can only do this if the result is known to be the end of the walk, i.e. the cast result is not used // in a relevant way. for uacUse in uac.uses { // Following instructions turned out to appear in code coming from the stdlib. switch uacUse.instruction { case is IsUniqueInst: break case is LoadInst, is LoadBorrowInst, is ApplyInst, is TryApplyInst: if followLoads(at: path) { return .abortWalk } default: return .abortWalk } } } return walkDownUses(ofAddress: uac, path: path) default: return isEscaping } return .continueWalk } /// Check whether the value escapes through the deinitializer private func handleDestroy(of value: Value, path: Path) -> WalkResult { // Even if this is a destroy_value of a struct/tuple/enum, the called destructor(s) only take a // single class reference as parameter. let p = path.projectionPath.popAllValueFields() if p.isEmpty { // The object to destroy (= the argument of the destructor) cannot escape itself. return .continueWalk } if !visitor.followLoads && p.matches(pattern: SmallProjectionPath(.anyValueFields).push(.anyClassField)) { // Any address of a class property of the object to destroy cannot escape the destructor. // (Whereas a value stored in such a property could escape.) 
return .continueWalk } if path.followStores { return isEscaping } if let exactTy = path.knownType { guard let destructor = calleeAnalysis.getDestructor(ofExactType: exactTy) else { return isEscaping } if destructor.effects.escapeEffects.canEscape(argumentIndex: 0, path: pathForArgumentEscapeChecking(p)) { return isEscaping } } else { // We don't know the exact type, so get all possible called destructure from // the callee analysis. guard let destructors = calleeAnalysis.getDestructors(of: value.type) else { return isEscaping } for destructor in destructors { if destructor.effects.escapeEffects.canEscape(argumentIndex: 0, path: pathForArgumentEscapeChecking(p)) { return isEscaping } } } return .continueWalk } /// Handle an apply (full or partial) during the walk-down. private mutating func walkDownCallee(argOp: Operand, apply: ApplySite, path: Path) -> WalkResult { guard let calleeArgIdx = apply.calleeArgumentIndex(of: argOp) else { // The callee or a type dependent operand of the apply does not let escape anything. return .continueWalk } // Indirect arguments cannot escape the function, but loaded values from such can. if argOp.value.type.isAddress && !followLoads(at: path) { if let beginApply = apply as? BeginApplyInst { // begin_apply can yield an address value. if !indirectResultEscapes(of: beginApply, path: path) { return .continueWalk } } else if !apply.isAddressable(operand: argOp) { // The result does not depend on the argument's address. return .continueWalk } } if argOp.value.type.isNoEscapeFunction { // Per definition a `partial_apply [on_stack]` cannot escape the callee. // Potential escapes of its captured values are already handled when visiting the `partial_apply`. return .continueWalk } // Argument effects do not consider any potential stores to the argument (or it's content). // Therefore, if we need to track stores, the argument effects do not correctly describe what we need. 
// For example, argument 0 in the following function is marked as not-escaping, although there // is a store to the argument: // // sil [escapes !%0.**] @callee(@inout X, @owned X) -> () { // bb0(%0 : $*X, %1 : $X): // store %1 to %0 : $*X // } if path.followStores { return isEscaping } guard let callees = calleeAnalysis.getCallees(callee: apply.callee) else { // The callees are not know, e.g. if the callee is a closure, class method, etc. return isEscaping } for callee in callees { let effects = callee.effects if !effects.escapeEffects.canEscape(argumentIndex: calleeArgIdx, path: pathForArgumentEscapeChecking(path.projectionPath)) { continue } if walkDownArgument(calleeArgIdx: calleeArgIdx, argPath: path, apply: apply, effects: effects) == .abortWalk { return .abortWalk } } return .continueWalk } private mutating func indirectResultEscapes(of beginApply: BeginApplyInst, path: Path) -> Bool { for result in beginApply.yieldedValues where result.type.isAddress { if walkDownUses(ofAddress: result, path: path) == .abortWalk { return true } } return false } /// Handle `.escaping` effects for an apply argument during the walk-down. private mutating func walkDownArgument(calleeArgIdx: Int, argPath: Path, apply: ApplySite, effects: FunctionEffects) -> WalkResult { var matched = false for effect in effects.escapeEffects.arguments { switch effect.kind { case .escapingToArgument(let toArgIdx, let toPath): // Note: exclusive argument -> argument effects cannot appear, so we don't need to handle them here. if effect.matches(calleeArgIdx, argPath.projectionPath) { guard let argOp = apply.operand(forCalleeArgumentIndex: toArgIdx) else { return isEscaping } // Continue at the destination of an arg-to-arg escape. 
let arg = argOp.value let p = Path(projectionPath: toPath, followStores: false, addressIsStored: argPath.addressIsStored, knownType: nil) if walkUp(addressOrValue: arg, path: p) == .abortWalk { return .abortWalk } matched = true } case .escapingToReturn(let toPath, let exclusive): if effect.matches(calleeArgIdx, argPath.projectionPath) { guard let fas = apply as? FullApplySite, let result = fas.singleDirectResult, result.type.isObject else { return isEscaping } let p = Path(projectionPath: toPath, followStores: false, addressIsStored: argPath.addressIsStored, knownType: exclusive ? argPath.knownType : nil) if walkDownUses(ofValue: result, path: p) == .abortWalk { return .abortWalk } matched = true } case .notEscaping: break } } if !matched { return isEscaping } return .continueWalk } //===--------------------------------------------------------------------===// // Walking up //===--------------------------------------------------------------------===// mutating func walkUp(addressOrValue: Value, path: Path) -> WalkResult { if addressOrValue.type.isAddress { return walkUp(address: addressOrValue, path: path) } else { return walkUp(value: addressOrValue, path: path) } } mutating func walkUp(value: Value, path: Path) -> WalkResult { if complexityBudgetExceeded(value) { return .abortWalk } if hasRelevantType(value, at: path.projectionPath) { switch visitor.visitDef(def: value, path: path) { case .continueWalkUp: return walkUpDefault(value: value, path: path) case .walkDown: return cachedWalkDown(addressOrValue: value, path: path.with(knownType: nil)) case .ignore: return .continueWalk case .abort: return .abortWalk } } return .continueWalk } /// ``ValueUseDefWalker`` conformance: called when the value use-def walk can't continue, /// i.e. when the operand (if any) of the instruction of a definition is not a value. 
mutating func rootDef(value def: Value, path: Path) -> WalkResult { switch def { case is AllocRefInst, is AllocRefDynamicInst: return cachedWalkDown(addressOrValue: def, path: path.with(knownType: def.type)) case is AllocBoxInst: return cachedWalkDown(addressOrValue: def, path: path.with(knownType: nil)) case let arg as Argument: guard let termResult = TerminatorResult(arg) else { return isEscaping } switch termResult.terminator { case let ta as TryApplyInst: if termResult.successor != ta.normalBlock { return isEscaping } return walkUpApplyResult(apply: ta, path: path.with(knownType: nil)) default: return isEscaping } case let ap as ApplyInst: return walkUpApplyResult(apply: ap, path: path.with(knownType: nil)) case is LoadInst, is LoadWeakInst, is LoadUnownedInst, is LoadBorrowInst: if !followLoads(at: path) { // When walking up we shouldn't end up at a load where followLoads is false, // because going from a (non-followLoads) address to a load always involves a class indirection. // There is one exception: loading a raw pointer, e.g. // %l = load %a : $Builtin.RawPointer // %a = pointer_to_address %l // the up-walk starts at %a return isEscaping } return walkUp(address: (def as! 
UnaryInstruction).operand.value, path: path.with(followStores: true).with(knownType: nil)) case let atp as AddressToPointerInst: return walkUp(address: atp.address, path: path.with(knownType: nil)) default: return isEscaping } } mutating func walkUp(address: Value, path: Path) -> WalkResult { if complexityBudgetExceeded(address) { return .abortWalk } if hasRelevantType(address, at: path.projectionPath) { switch visitor.visitDef(def: address, path: path) { case .continueWalkUp: return walkUpDefault(address: address, path: path) case .walkDown: return cachedWalkDown(addressOrValue: address, path: path) case .ignore: return .continueWalk case .abort: return .abortWalk } } return .continueWalk } /// ``AddressUseDefWalker`` conformance: called when the address use-def walk can't continue, /// i.e. when the operand (if any) of the instruction of a definition is not an address. mutating func rootDef(address def: Value, path: Path) -> WalkResult { switch def { case is AllocStackInst: return cachedWalkDown(addressOrValue: def, path: path.with(knownType: nil)) case let arg as FunctionArgument: if !followLoads(at: path) && arg.convention.isExclusiveIndirect && !path.followStores { return cachedWalkDown(addressOrValue: def, path: path.with(knownType: nil)) } else { return isEscaping } case is PointerToAddressInst: return walkUp(value: (def as! 
SingleValueInstruction).operands[0].value, path: path.with(knownType: nil)) case let rta as RefTailAddrInst: return walkUp(value: rta.instance, path: path.push(.tailElements, index: 0).with(knownType: nil)) case let rea as RefElementAddrInst: return walkUp(value: rea.instance, path: path.push(.classField, index: rea.fieldIndex).with(knownType: nil)) case let pb as ProjectBoxInst: return walkUp(value: pb.box, path: path.push(.classField, index: pb.fieldIndex).with(knownType: nil)) case let storeBorrow as StoreBorrowInst: return walkUp(address: storeBorrow.destination, path: path) default: return isEscaping } } /// Walks up from the return to the source argument if there is an "exclusive" /// escaping effect on an argument. private mutating func walkUpApplyResult(apply: FullApplySite, path: Path) -> WalkResult { guard let callees = calleeAnalysis.getCallees(callee: apply.callee) else { return .abortWalk } for callee in callees { var matched = false for effect in callee.effects.escapeEffects.arguments { switch effect.kind { case .escapingToReturn(let toPath, let exclusive): if exclusive && path.projectionPath.matches(pattern: toPath) { guard let argOp = apply.operand(forCalleeArgumentIndex: effect.argumentIndex) else { return .abortWalk } let arg = argOp.value let p = Path(projectionPath: effect.pathPattern, followStores: path.followStores, addressIsStored: path.addressIsStored, knownType: nil) if walkUp(addressOrValue: arg, path: p) == .abortWalk { return .abortWalk } matched = true } case .notEscaping, .escapingToArgument: break } } if !matched { return isEscaping } } return .continueWalk } //===--------------------------------------------------------------------===// // private state //===--------------------------------------------------------------------===// var visitor: V // The caches are not only useful for performance, but are need to avoid infinite // recursions of walkUp-walkDown cycles. 
var walkDownCache = WalkerCache<Path>() var walkUpCache = WalkerCache<Path>() // Only this number of up/and down walks are done until the walk aborts. // Used to avoid quadratic complexity in some scenarios. var complexityBudget: Int private let calleeAnalysis: CalleeAnalysis //===--------------------------------------------------------------------===// // private utility functions //===--------------------------------------------------------------------===// /// Tries to pop the given projection from path, if the projected `value` has a relevant type. private func pop(_ kind: Path.FieldKind, index: Int? = nil, from path: Path, yielding value: Value) -> Path? { if let newPath = path.popIfMatches(kind, index: index), hasRelevantType(value, at: newPath.projectionPath) { return newPath } return nil } private func hasRelevantType(_ value: Value, at path: SmallProjectionPath) -> Bool { if visitor.followTrivialTypes && // When part of a class field only need to follow non-trivial types !path.hasClassProjection { return true } if !value.hasTrivialNonPointerType { return true } return false } private func followLoads(at path: Path) -> Bool { return visitor.followLoads || // When part of a class field we have to follow loads. path.projectionPath.mayHaveClassProjection || path.addressIsStored } private func pathForArgumentEscapeChecking(_ path: SmallProjectionPath) -> SmallProjectionPath { if visitor.followLoads { return path } return path.popLastClassAndValuesFromTail() } private mutating func complexityBudgetExceeded(_ v: Value) -> Bool { if complexityBudget <= 0 { return true } complexityBudget = complexityBudget &- 1 return false } // Set a breakpoint here to debug when a value is escaping. 
private var isEscaping: WalkResult { .abortWalk } } private extension SmallProjectionPath { var escapePath: EscapeUtilityTypes.EscapePath { EscapeUtilityTypes.EscapePath(projectionPath: self, followStores: false, addressIsStored: false, knownType: nil) } } private extension PartialApplyInst { func capturesAddress(of operand: Operand) -> Bool { assert(operand.value.type.isAddress) guard let conv = convention(of: operand) else { fatalError("callee operand of partial_apply cannot have address type") } switch conv { case .indirectIn, .indirectInGuaranteed: // A partial_apply copies the values from indirect-in arguments, but does not capture the address. return false case .indirectInout, .indirectInoutAliasable, .packInout: return true case .directOwned, .directUnowned, .directGuaranteed, .packOwned, .packGuaranteed: fatalError("invalid convention for address operand") case .indirectOut, .packOut, .indirectInCXX: fatalError("invalid convention for partial_apply") } } } //===--------------------------------------------------------------------===// // Tests //===--------------------------------------------------------------------===// let escapeInfoTest = FunctionTest("escape_info") { function, arguments, context in print("Escape information for \(function.name):") struct Visitor : EscapeVisitorWithResult { var result: Set<String> = Set() mutating func visitUse(operand: Operand, path: EscapePath) -> UseResult { if operand.instruction is ReturnInstruction { result.insert("return[\(path.projectionPath)]") return .ignore } return .continueWalk } mutating func visitDef(def: Value, path: EscapePath) -> DefResult { guard let arg = def as? FunctionArgument else { return .continueWalkUp } result.insert("arg\(arg.index)[\(path.projectionPath)]") return .walkDown } } // Dump the EscapeInfo query results for all `alloc_ref` instructions in the function. // for inst in function.instructions { if let allocRef = inst as? 
AllocRefInst { let resultStr: String if let result = allocRef.visit(using: Visitor(), context) { if result.isEmpty { resultStr = " - " } else { resultStr = Array(result).sorted().joined(separator: ",") } } else { resultStr = "global" } print("\(resultStr): \(allocRef)") } } print("End function \(function.name)\n") } let addressEscapeInfoTest = FunctionTest("address_escape_info") { function, arguments, context in // Dumps the EscapeInfo query results for addresses escaping to function calls. // The `fix_lifetime` instruction is used as marker for addresses and values to query. print("Address escape information for \(function.name):") var valuesToCheck = [Value]() var applies = [Instruction]() for inst in function.instructions { switch inst { case let fli as FixLifetimeInst: valuesToCheck.append(fli.operand.value) case is FullApplySite: applies.append(inst) default: break } } struct Visitor : EscapeVisitor { let apply: Instruction mutating func visitUse(operand: Operand, path: EscapePath) -> UseResult { let user = operand.instruction if user == apply { return .abort } if user is ReturnInstruction { // Anything which is returned cannot escape to an instruction inside the function. return .ignore } return .continueWalk } var followTrivialTypes: Bool { true } var followLoads: Bool { false } } // test `isEscaping(addressesOf:)` for value in valuesToCheck { print("value:\(value)") for apply in applies { if value.allContainedAddresses.isEscaping(using: Visitor(apply: apply), context) { print(" ==> \(apply)") } else { print(" - \(apply)") } } } // test `canReferenceSameField` for each pair of `fix_lifetime`. 
if !valuesToCheck.isEmpty { for lhsIdx in 0..<(valuesToCheck.count - 1) { for rhsIdx in (lhsIdx + 1) ..< valuesToCheck.count { print("pair \(lhsIdx) - \(rhsIdx)") let lhs = valuesToCheck[lhsIdx] let rhs = valuesToCheck[rhsIdx] print(lhs) print(rhs) let projLhs = lhs.allContainedAddresses let projRhs = rhs.allContainedAddresses let mayAlias = projLhs.canAddressAlias(with: projRhs, context) if mayAlias != projRhs.canAddressAlias(with: projLhs, context) { fatalError("canAddressAlias(with:) must be symmetric") } let addrReachable: Bool if lhs.type.isAddress && !rhs.type.isAddress { let anythingReachableFromRhs = rhs.at(SmallProjectionPath(.anything)) addrReachable = projLhs.canAddressAlias(with: anythingReachableFromRhs, context) if mayAlias && !addrReachable { fatalError("mayAlias implies addrReachable") } } else { addrReachable = false } if mayAlias { print("may alias") } else if addrReachable { print("address reachable but no alias") } else { print("no alias") } } } } print("End function \(function.name)\n") }
swift
github
https://github.com/apple/swift
SwiftCompilerSources/Sources/Optimizer/Utilities/EscapeUtils.swift
# PAPAS Access Point Administration System # Copyright (c) 2010 Revolution Linux inc. <info@revolutionlinux.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ This module is intended to provide a simple interface for child processes, without having to know what the implementation is. """ import subprocess import fcntl import os def set_blocking(fileobj, block=True): fd = fileobj.fileno() flags = fcntl.fcntl(fd, fcntl.F_GETFL) if block: fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) else: fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) class Child(object): def __init__(self, command): #Open a subprocess self._child = subprocess.Popen(command, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, universal_newlines=True, ) set_blocking(self._child.stdout, False) def send(self, data): self._child.stdin.write(data) def send_line(self, line): self.send(line + "\n") def read(self, max_bytes=-1): try: return self._child.stdout.read(max_bytes) except IOError as e: if e.errno == 11: #Resouce temporarily unavailable -> no data to read return "" else: raise def get_echo(self): try: return bool(termios.tcgetattr(self._child.stdin.fileno())[3] & termios.ECHO) except Exception: return None
unknown
codeparrot/codeparrot-clean
/*
 * Copyright (C) 2008 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.common.collect.testing.testers;

import static com.google.common.collect.testing.features.CollectionSize.SEVERAL;
import static com.google.common.collect.testing.features.CollectionSize.ZERO;
import static com.google.common.collect.testing.features.MapFeature.ALLOWS_NULL_KEYS;
import static com.google.common.collect.testing.features.MapFeature.ALLOWS_NULL_KEY_QUERIES;
import static com.google.common.collect.testing.features.MapFeature.FAILS_FAST_ON_CONCURRENT_MODIFICATION;
import static com.google.common.collect.testing.features.MapFeature.SUPPORTS_REMOVE;
import static com.google.common.collect.testing.testers.ReflectionFreeAssertThrows.assertThrows;

import com.google.common.annotations.GwtCompatible;
import com.google.common.collect.testing.AbstractMapTester;
import com.google.common.collect.testing.WrongType;
import com.google.common.collect.testing.features.CollectionSize;
import com.google.common.collect.testing.features.MapFeature;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Map.Entry;
import org.junit.Ignore;

/**
 * A generic JUnit test which tests {@code remove} operations on a map. Can't be invoked directly;
 * please see {@link com.google.common.collect.testing.MapTestSuiteBuilder}.
 *
 * <p>Each test is gated by {@code MapFeature}/{@code CollectionSize} annotations so the suite
 * builder only runs the cases the implementation under test claims to support.
 *
 * @author George van den Driessche
 * @author Chris Povirk
 */
@GwtCompatible
@Ignore("test runners must not instantiate and run this directly, only via suites we build")
// @Ignore affects the Android test runner, which respects JUnit 4 annotations on JUnit 3 tests.
@SuppressWarnings("JUnit4ClassUsedInJUnit3")
public class MapRemoveTester<K, V> extends AbstractMapTester<K, V> {
  // remove(presentKey) must return the mapped value, shrink the map by one,
  // and leave the key absent afterwards.
  @MapFeature.Require(SUPPORTS_REMOVE)
  @CollectionSize.Require(absent = ZERO)
  public void testRemove_present() {
    int initialSize = getMap().size();
    assertEquals(
        "remove(present) should return the associated value", v0(), getMap().remove(k0()));
    assertEquals(
        "remove(present) should decrease a map's size by one.", initialSize - 1, getMap().size());
    expectMissing(e0());
  }

  // Removing through the Map while an entrySet iterator is live must make the
  // iterator fail fast with ConcurrentModificationException.
  @MapFeature.Require({FAILS_FAST_ON_CONCURRENT_MODIFICATION, SUPPORTS_REMOVE})
  @CollectionSize.Require(SEVERAL)
  public void testRemovePresentConcurrentWithEntrySetIteration() {
    assertThrows(
        ConcurrentModificationException.class,
        () -> {
          Iterator<Entry<K, V>> iterator = getMap().entrySet().iterator();
          getMap().remove(k0());
          iterator.next();
        });
  }

  // Same fail-fast contract, observed through the keySet view's iterator.
  @MapFeature.Require({FAILS_FAST_ON_CONCURRENT_MODIFICATION, SUPPORTS_REMOVE})
  @CollectionSize.Require(SEVERAL)
  public void testRemovePresentConcurrentWithKeySetIteration() {
    assertThrows(
        ConcurrentModificationException.class,
        () -> {
          Iterator<K> iterator = getMap().keySet().iterator();
          getMap().remove(k0());
          iterator.next();
        });
  }

  // Same fail-fast contract, observed through the values view's iterator.
  @MapFeature.Require({FAILS_FAST_ON_CONCURRENT_MODIFICATION, SUPPORTS_REMOVE})
  @CollectionSize.Require(SEVERAL)
  public void testRemovePresentConcurrentWithValuesIteration() {
    assertThrows(
        ConcurrentModificationException.class,
        () -> {
          Iterator<V> iterator = getMap().values().iterator();
          getMap().remove(k0());
          iterator.next();
        });
  }

  // remove(absentKey) returns null and leaves the map untouched.
  @MapFeature.Require(SUPPORTS_REMOVE)
  public void testRemove_notPresent() {
    assertNull("remove(notPresent) should return null", getMap().remove(k3()));
    expectUnchanged();
  }

  // Maps that allow null keys must support removing the entry keyed by null.
  @MapFeature.Require({SUPPORTS_REMOVE, ALLOWS_NULL_KEYS})
  @CollectionSize.Require(absent = ZERO)
  public void testRemove_nullPresent() {
    initMapWithNullKey();

    int initialSize = getMap().size();
    assertEquals(
        "remove(null) should return the associated value",
        getValueForNullKey(),
        getMap().remove(null));
    assertEquals(
        "remove(present) should decrease a map's size by one.", initialSize - 1, getMap().size());
    expectMissing(entry(null, getValueForNullKey()));
  }

  // Immutable maps: remove(present) must throw and must not alter the map.
  @MapFeature.Require(absent = SUPPORTS_REMOVE)
  @CollectionSize.Require(absent = ZERO)
  public void testRemove_unsupported() {
    assertThrows(UnsupportedOperationException.class, () -> getMap().remove(k0()));
    expectUnchanged();
    assertEquals("remove(present) should not remove the element", v0(), get(k0()));
  }

  // Immutable maps may either return null or throw for an absent key; both are
  // tolerated, but the map must stay unchanged.
  @MapFeature.Require(absent = SUPPORTS_REMOVE)
  public void testRemove_unsupportedNotPresent() {
    try {
      assertNull(
          "remove(notPresent) should return null or throw UnsupportedOperationException",
          getMap().remove(k3()));
    } catch (UnsupportedOperationException tolerated) {
    }
    expectUnchanged();
    expectMissing(e3());
  }

  // When null-key *queries* are disallowed, remove(null) may return null or
  // throw NPE; either way the map must stay unchanged.
  @MapFeature.Require(value = SUPPORTS_REMOVE, absent = ALLOWS_NULL_KEY_QUERIES)
  public void testRemove_nullQueriesNotSupported() {
    try {
      assertNull(
          "remove(null) should return null or throw NullPointerException", getMap().remove(null));
    } catch (NullPointerException tolerated) {
    }
    expectUnchanged();
  }

  // Null-key queries allowed but no null entry present: plain null return.
  @MapFeature.Require({SUPPORTS_REMOVE, ALLOWS_NULL_KEY_QUERIES})
  public void testRemove_nullSupportedMissing() {
    assertNull("remove(null) should return null", getMap().remove(null));
    expectUnchanged();
  }

  // Querying with an unrelated type may return null or throw CCE per the Map
  // contract; the map must stay unchanged.
  @MapFeature.Require(SUPPORTS_REMOVE)
  public void testRemove_wrongType() {
    try {
      assertNull(getMap().remove(WrongType.VALUE));
    } catch (ClassCastException tolerated) {
    }
    expectUnchanged();
  }
}
java
github
https://github.com/google/guava
android/guava-testlib/src/com/google/common/collect/testing/testers/MapRemoveTester.java
{ "ZREVRANGEBYSCORE": { "summary": "Returns members in a sorted set within a range of scores in reverse order.", "complexity": "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N)).", "group": "sorted_set", "since": "2.2.0", "arity": -4, "function": "zrevrangebyscoreCommand", "history": [ [ "2.1.6", "`min` and `max` can be exclusive." ] ], "deprecated_since": "6.2.0", "replaced_by": "`ZRANGE` with the `REV` and `BYSCORE` arguments", "doc_flags": [ "DEPRECATED" ], "command_flags": [ "READONLY" ], "acl_categories": [ "SORTEDSET" ], "key_specs": [ { "flags": [ "RO", "ACCESS" ], "begin_search": { "index": { "pos": 1 } }, "find_keys": { "range": { "lastkey": 0, "step": 1, "limit": 0 } } } ], "reply_schema": { "anyOf": [ { "type": "array", "description": "List of the elements in the specified score range, as not WITHSCORES", "uniqueItems": true, "items": { "type": "string", "description": "Element" } }, { "type": "array", "description": "List of the elements and their scores in the specified score range, as WITHSCORES used", "uniqueItems": true, "items": { "type": "array", "description": "Tuple of element and its score", "minItems": 2, "maxItems": 2, "items": [ { "type": "string", "description": "element" }, { "type": "number", "description": "score" } ] } } ] }, "arguments": [ { "name": "key", "type": "key", "key_spec_index": 0 }, { "name": "max", "type": "double" }, { "name": "min", "type": "double" }, { "name": "withscores", "token": "WITHSCORES", "type": "pure-token", "optional": true }, { "token": "LIMIT", "name": "limit", "type": "block", "optional": true, "arguments": [ { "name": "offset", "type": "integer" }, { "name": "count", "type": "integer" } ] } ] } }
json
github
https://github.com/redis/redis
src/commands/zrevrangebyscore.json
//===-- ExpandDeducedTypeTests.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "TweakTesting.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::StartsWith;

namespace clang {
namespace clangd {
namespace {

// Instantiates the TweakTesting fixture for the ExpandDeducedType tweak; the
// `^` markers in the snippets below denote the cursor position(s) handed to
// the tweak, and `[[...]]` denotes a selection range.
TWEAK_TEST(ExpandDeducedType);

TEST_F(ExpandDeducedTypeTest, Test) {
  // Declarations visible to every snippet tested below.
  Header = R"cpp( namespace ns { struct Class { struct Nested {}; }; void Func(); } inline namespace inl_ns { namespace { struct Visible {}; } } )cpp";
  // The tweak is offered anywhere on the `auto` token itself...
  EXPECT_AVAILABLE("^a^u^t^o^ i = 0;");
  // ...but nowhere else in the declaration.
  EXPECT_UNAVAILABLE("auto ^i^ ^=^ ^0^;^");

  // check primitive type
  EXPECT_EQ(apply("[[auto]] i = 0;"), "int i = 0;");
  EXPECT_EQ(apply("au^to i = 0;"), "int i = 0;");
  // check classes and namespaces
  EXPECT_EQ(apply("^auto C = ns::Class::Nested();"),
            "ns::Class::Nested C = ns::Class::Nested();");
  // check that namespaces are shortened
  EXPECT_EQ(apply("namespace ns { void f() { ^auto C = Class(); } }"),
            "namespace ns { void f() { Class C = Class(); } }");
  // undefined functions should not be replaced
  EXPECT_THAT(apply("au^to x = doesnt_exist(); // error-ok"),
              StartsWith("fail: Could not expand a dependent type"));
  // function pointers should not be replaced
  EXPECT_THAT(apply("au^to x = &ns::Func;"),
              StartsWith("fail: Could not expand type"));
  // function references should not be replaced
  EXPECT_THAT(apply("au^to &x = ns::Func;"),
              StartsWith("fail: Could not expand type"));
  // lambda types are not replaced
  EXPECT_UNAVAILABLE("au^to x = []{};");
  // inline namespaces
  EXPECT_EQ(apply("au^to x = inl_ns::Visible();"),
            "inl_ns::Visible x = inl_ns::Visible();");
  // local class
  EXPECT_EQ(apply("namespace x { void y() { struct S{}; ^auto z = S(); } }"),
            "namespace x { void y() { struct S{}; S z = S(); } }");
  // replace pointers
  EXPECT_EQ(apply(R"cpp(au^to x = "test";)cpp"),
            R"cpp(const char * x = "test";)cpp");
  // pointers to an array are not replaced
  EXPECT_THAT(apply(R"cpp(au^to s = &"foobar";)cpp"),
              StartsWith("fail: Could not expand type"));

  EXPECT_EQ(apply("ns::Class * foo() { au^to c = foo(); return nullptr; }"),
            "ns::Class * foo() { ns::Class * c = foo(); return nullptr; }");
  EXPECT_EQ(
      apply("void ns::Func() { au^to x = new ns::Class::Nested{}; }"),
      "void ns::Func() { ns::Class::Nested * x = new ns::Class::Nested{}; }");

  // decltype(auto) placeholders expand the same way as plain auto.
  EXPECT_EQ(apply("dec^ltype(auto) x = 10;"), "int x = 10;");
  EXPECT_EQ(apply("decltype(au^to) x = 10;"), "int x = 10;");
  // references to array types are not replaced
  EXPECT_THAT(apply(R"cpp(decl^type(auto) s = "foobar"; // error-ok)cpp"),
              StartsWith("fail: Could not expand type"));
  // array types are not replaced
  EXPECT_THAT(apply("int arr[10]; decl^type(auto) foobar = arr; // error-ok"),
              StartsWith("fail: Could not expand type"));
  // pointers to an array are not replaced
  EXPECT_THAT(apply(R"cpp(decl^type(auto) s = &"foobar";)cpp"),
              StartsWith("fail: Could not expand type"));
  // expanding types in structured bindings is syntactically invalid.
  EXPECT_UNAVAILABLE("const ^auto &[x,y] = (int[]){1,2};");
  // unknown types in a template should not be replaced
  EXPECT_THAT(apply("template <typename T> void x() { ^auto y = T::z(); }"),
              StartsWith("fail: Could not expand a dependent type"));

  // decltype(<expr>) with a concrete operand expands in every syntactic
  // position a type can appear in:
  // check primitive type
  EXPECT_EQ(apply("decl^type(0) i;"), "int i;");
  // function should not be replaced
  EXPECT_THAT(apply("void f(); decl^type(f) g;"),
              StartsWith("fail: Could not expand type"));
  // check return type in function proto
  EXPECT_EQ(apply("decl^type(0) f();"), "int f();");
  // check trailing return type
  EXPECT_EQ(apply("auto f() -> decl^type(0) { return 0; }"),
            "auto f() -> int { return 0; }");
  // check function parameter type
  EXPECT_EQ(apply("void f(decl^type(0));"), "void f(int);");
  // check template parameter type
  EXPECT_EQ(apply("template <decl^type(0)> struct Foobar {};"),
            "template <int> struct Foobar {};");
  // check default template argument
  EXPECT_EQ(apply("template <class = decl^type(0)> class Foo {};"),
            "template <class = int> class Foo {};");
  // check template argument
  EXPECT_EQ(apply("template <class> class Bar {}; Bar<decl^type(0)> b;"),
            "template <class> class Bar {}; Bar<int> b;");
  // dependent types are not replaced
  EXPECT_THAT(apply("template <class T> struct Foobar { decl^type(T{}) t; };"),
              StartsWith("fail: Could not expand a dependent type"));
  // references to array types are not replaced
  EXPECT_THAT(apply(R"cpp(decl^type("foobar") s; // error-ok)cpp"),
              StartsWith("fail: Could not expand type"));
  // array types are not replaced
  EXPECT_THAT(apply("int arr[10]; decl^type(arr) foobar;"),
              StartsWith("fail: Could not expand type"));
  // pointers to an array are not replaced
  EXPECT_THAT(apply(R"cpp(decl^type(&"foobar") s;)cpp"),
              StartsWith("fail: Could not expand type"));

  // The remaining cases need C++20 (NTTP auto, abbreviated lambdas, concepts).
  ExtraArgs.push_back("-std=c++20");
  EXPECT_UNAVAILABLE("template <au^to X> class Y;");
  // A generic-lambda parameter can only be expanded when the lambda is called
  // with exactly one argument type; otherwise deduction is ambiguous or absent.
  EXPECT_THAT(apply("auto X = [](^auto){};"),
              StartsWith("fail: Could not deduce"));
  EXPECT_EQ(apply("auto X = [](^auto){return 0;}; int Y = X(42);"),
            "auto X = [](int){return 0;}; int Y = X(42);");
  EXPECT_THAT(apply("auto X = [](^auto){return 0;}; int Y = X(42) + X('c');"),
              StartsWith("fail: Could not deduce"));
  // FIXME: should work on constrained auto params, once SourceRange is fixed.
  EXPECT_UNAVAILABLE("template<class> concept C = true;"
                     "auto X = [](C ^auto *){return 0;};");

  // lambda should not be replaced
  EXPECT_UNAVAILABLE("auto f = [](){}; decl^type(f) g;");
  EXPECT_UNAVAILABLE("decl^type([]{}) f;");
}

} // namespace
} // namespace clangd
} // namespace clang
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clangd/unittests/tweaks/ExpandDeducedTypeTests.cpp
/*
 * Copyright (c) 2007 Mockito contributors
 * This program is made available under the terms of the MIT License.
 */
package org.mockito.internal.verification.api;

import java.util.List;

import org.mockito.invocation.Invocation;
import org.mockito.invocation.MatchableInvocation;

/**
 * Data needed to perform verification of interactions.
 * This interface is considered public even though it lives in private package.
 * In the next major version of Mockito, this class will be moved to public space.
 */
public interface VerificationData {

    /**
     * All invocations recorded on the mock object that is being verified.
     * Does not include invocations recorded on other mock objects.
     */
    List<Invocation> getAllInvocations();

    /**
     * The target or wanted invocation, i.e. the invocation a verification mode
     * checks {@link #getAllInvocations()} against.
     * Below example illustrates what is the 'target' invocation:
     * <pre class="code"><code class="java">
     *   mock.foo();   // &lt;- invocation 1
     *   mock.bar();   // &lt;- invocation 2
     *
     *   verify(mock).bar();  // &lt;- target invocation
     * </code></pre>
     *
     * Target invocation can contain argument matchers therefore the returned type is {@link MatchableInvocation}
     * and not {@link Invocation}.
     *
     * @since 2.2.12
     */
    MatchableInvocation getTarget();
}
java
github
https://github.com/mockito/mockito
mockito-core/src/main/java/org/mockito/internal/verification/api/VerificationData.java
/* __next_internal_action_entry_do_not_use__ {"ffab21efdafbe611287bc25c0462b1e0510d13e48b":{"name":"foo"},"ffac840dcaf5e8197cb02b7f3a43c119b7a770b272":{"name":"bar"}} */ import { registerServerReference } from "private-next-rsc-server-reference"; import { cache as $$cache__ } from "private-next-rsc-cache-wrapper"; import { cache as $$reactCache__ } from "react"; // @ts-ignore import { foo, bar } from './foo'; type Foo = { }; type Bar = { }; export { Foo }; export { type Bar }; let $$RSC_SERVER_CACHE_foo = foo; if (typeof foo === "function") { $$RSC_SERVER_CACHE_foo = $$reactCache__(function() { return $$cache__("default", "ffab21efdafbe611287bc25c0462b1e0510d13e48b", 0, foo, Array.prototype.slice.call(arguments)); }); registerServerReference($$RSC_SERVER_CACHE_foo, "ffab21efdafbe611287bc25c0462b1e0510d13e48b", null); Object["defineProperty"]($$RSC_SERVER_CACHE_foo, "name", { value: "foo" }); } export { $$RSC_SERVER_CACHE_foo as foo }; let $$RSC_SERVER_CACHE_bar = bar; if (typeof bar === "function") { $$RSC_SERVER_CACHE_bar = $$reactCache__(function() { return $$cache__("default", "ffac840dcaf5e8197cb02b7f3a43c119b7a770b272", 0, bar, Array.prototype.slice.call(arguments)); }); registerServerReference($$RSC_SERVER_CACHE_bar, "ffac840dcaf5e8197cb02b7f3a43c119b7a770b272", null); Object["defineProperty"]($$RSC_SERVER_CACHE_bar, "name", { value: "bar" }); } export { $$RSC_SERVER_CACHE_bar as bar };
typescript
github
https://github.com/vercel/next.js
crates/next-custom-transforms/tests/fixture/server-actions/server-graph/68/output.tsx
"""Interface for a rate limiter and an in-memory rate limiter.""" from __future__ import annotations import abc import asyncio import threading import time class BaseRateLimiter(abc.ABC): """Base class for rate limiters. Usage of the base limiter is through the acquire and aacquire methods depending on whether running in a sync or async context. Implementations are free to add a timeout parameter to their initialize method to allow users to specify a timeout for acquiring the necessary tokens when using a blocking call. Current limitations: - Rate limiting information is not surfaced in tracing or callbacks. This means that the total time it takes to invoke a chat model will encompass both the time spent waiting for tokens and the time spent making the request. """ @abc.abstractmethod def acquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire the necessary tokens for the rate limiter. This method blocks until the required tokens are available if `blocking` is set to `True`. If `blocking` is set to `False`, the method will immediately return the result of the attempt to acquire the tokens. Args: blocking: If `True`, the method will block until the tokens are available. If `False`, the method will return immediately with the result of the attempt. Returns: `True` if the tokens were successfully acquired, `False` otherwise. """ @abc.abstractmethod async def aacquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire the necessary tokens for the rate limiter. This method blocks until the required tokens are available if `blocking` is set to `True`. If `blocking` is set to `False`, the method will immediately return the result of the attempt to acquire the tokens. Args: blocking: If `True`, the method will block until the tokens are available. If `False`, the method will return immediately with the result of the attempt. Returns: `True` if the tokens were successfully acquired, `False` otherwise. 
""" class InMemoryRateLimiter(BaseRateLimiter): """An in memory rate limiter based on a token bucket algorithm. This is an in memory rate limiter, so it cannot rate limit across different processes. The rate limiter only allows time-based rate limiting and does not take into account any information about the input or the output, so it cannot be used to rate limit based on the size of the request. It is thread safe and can be used in either a sync or async context. The in memory rate limiter is based on a token bucket. The bucket is filled with tokens at a given rate. Each request consumes a token. If there are not enough tokens in the bucket, the request is blocked until there are enough tokens. These tokens have nothing to do with LLM tokens. They are just a way to keep track of how many requests can be made at a given time. Current limitations: - The rate limiter is not designed to work across different processes. It is an in-memory rate limiter, but it is thread safe. - The rate limiter only supports time-based rate limiting. It does not take into account the size of the request or any other factors. Example: ```python import time from langchain_core.rate_limiters import InMemoryRateLimiter rate_limiter = InMemoryRateLimiter( requests_per_second=0.1, # <-- Can only make a request once every 10 seconds!! check_every_n_seconds=0.1, # Wake up every 100 ms to check whether allowed to make a request, max_bucket_size=10, # Controls the maximum burst size. ) from langchain_anthropic import ChatAnthropic model = ChatAnthropic( model_name="claude-sonnet-4-5-20250929", rate_limiter=rate_limiter ) for _ in range(5): tic = time.time() model.invoke("hello") toc = time.time() print(toc - tic) ``` """ # noqa: E501 def __init__( self, *, requests_per_second: float = 1, check_every_n_seconds: float = 0.1, max_bucket_size: float = 1, ) -> None: """A rate limiter based on a token bucket. These tokens have nothing to do with LLM tokens. 
They are just a way to keep track of how many requests can be made at a given time. This rate limiter is designed to work in a threaded environment. It works by filling up a bucket with tokens at a given rate. Each request consumes a given number of tokens. If there are not enough tokens in the bucket, the request is blocked until there are enough tokens. Args: requests_per_second: The number of tokens to add per second to the bucket. The tokens represent "credit" that can be used to make requests. check_every_n_seconds: Check whether the tokens are available every this many seconds. Can be a float to represent fractions of a second. max_bucket_size: The maximum number of tokens that can be in the bucket. Must be at least `1`. Used to prevent bursts of requests. """ # Number of requests that we can make per second. self.requests_per_second = requests_per_second # Number of tokens in the bucket. self.available_tokens = 0.0 self.max_bucket_size = max_bucket_size # A lock to ensure that tokens can only be consumed by one thread # at a given time. self._consume_lock = threading.Lock() # The last time we tried to consume tokens. self.last: float | None = None self.check_every_n_seconds = check_every_n_seconds def _consume(self) -> bool: """Try to consume a token. Returns: True means that the tokens were consumed, and the caller can proceed to make the request. A False means that the tokens were not consumed, and the caller should try again later. """ with self._consume_lock: now = time.monotonic() # initialize on first call to avoid a burst if self.last is None: self.last = now elapsed = now - self.last if elapsed * self.requests_per_second >= 1: self.available_tokens += elapsed * self.requests_per_second self.last = now # Make sure that we don't exceed the bucket size. # This is used to prevent bursts of requests. self.available_tokens = min(self.available_tokens, self.max_bucket_size) # As long as we have at least one token, we can proceed. 
if self.available_tokens >= 1: self.available_tokens -= 1 return True return False def acquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire a token from the rate limiter. This method blocks until the required tokens are available if `blocking` is set to `True`. If `blocking` is set to `False`, the method will immediately return the result of the attempt to acquire the tokens. Args: blocking: If `True`, the method will block until the tokens are available. If `False`, the method will return immediately with the result of the attempt. Returns: `True` if the tokens were successfully acquired, `False` otherwise. """ if not blocking: return self._consume() while not self._consume(): time.sleep(self.check_every_n_seconds) return True async def aacquire(self, *, blocking: bool = True) -> bool: """Attempt to acquire a token from the rate limiter. Async version. This method blocks until the required tokens are available if `blocking` is set to `True`. If `blocking` is set to `False`, the method will immediately return the result of the attempt to acquire the tokens. Args: blocking: If `True`, the method will block until the tokens are available. If `False`, the method will return immediately with the result of the attempt. Returns: `True` if the tokens were successfully acquired, `False` otherwise. """ if not blocking: return self._consume() while not self._consume(): # noqa: ASYNC110 # This code ignores the ASYNC110 warning which is a false positive in this # case. # There is no external actor that can mark that the Event is done # since the tokens are managed by the rate limiter itself. # It needs to wake up to re-fill the tokens. # https://docs.astral.sh/ruff/rules/async-busy-wait/ await asyncio.sleep(self.check_every_n_seconds) return True __all__ = [ "BaseRateLimiter", "InMemoryRateLimiter", ]
python
github
https://github.com/langchain-ai/langchain
libs/core/langchain_core/rate_limiters.py
//! Simple file-locking apis for each OS. //! //! This is not meant to be in the standard library, it does nothing with //! green/native threading. This is just a bare-bones enough solution for //! librustdoc, it is not production quality at all. cfg_select! { target_os = "linux" => { mod linux; use linux as imp; } target_os = "redox" => { mod linux; use linux as imp; } unix => { mod unix; use unix as imp; } windows => { mod windows; use self::windows as imp; } _ => { mod unsupported; use unsupported as imp; } } pub use imp::Lock;
rust
github
https://github.com/rust-lang/rust
compiler/rustc_data_structures/src/flock.rs
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include <ATen/ATen.h>
#include <ATen/core/Vitals.h>
#include <c10/util/env.h>
#include <c10/util/irange.h>
#include <cstdlib>

using namespace at::vitals;
using ::testing::HasSubstr;

// Each test redirects std::cout into a stringstream for the duration of an
// inner scope, because the vitals report is written to stdout once that scope
// ends (NOTE(review): presumably by the vital objects' destructors — confirm
// in ATen/core/Vitals.h).

// Streaming ints, chars, floats, doubles, and tensors into a vital should all
// render as "Name.Attribute\t\t value" in the report.
TEST(Vitals, Basic) {
  std::stringstream buffer;
  std::streambuf* sbuf = std::cout.rdbuf();
  std::cout.rdbuf(buffer.rdbuf());
  {
    // TORCH_VITAL output is gated on the TORCH_VITAL env var.
    c10::utils::set_env("TORCH_VITAL", "1");
    TORCH_VITAL_DEFINE(Testing);
    TORCH_VITAL(Testing, Attribute0) << 1;
    TORCH_VITAL(Testing, Attribute1) << '1';
    TORCH_VITAL(Testing, Attribute2) << 1.0f;
    TORCH_VITAL(Testing, Attribute3) << 1.0;
    auto t = at::ones({1, 1});
    TORCH_VITAL(Testing, Attribute4) << t;
  }
  std::cout.rdbuf(sbuf);
  auto s = buffer.str();
  ASSERT_THAT(s, HasSubstr("Testing.Attribute0\t\t 1"));
  ASSERT_THAT(s, HasSubstr("Testing.Attribute1\t\t 1"));
  ASSERT_THAT(s, HasSubstr("Testing.Attribute2\t\t 1"));
  ASSERT_THAT(s, HasSubstr("Testing.Attribute3\t\t 1"));
  ASSERT_THAT(s, HasSubstr("Testing.Attribute4\t\t 1"));
}

// Multiple << on one statement and repeated TORCH_VITAL calls on the same
// attribute should concatenate into a single reported value.
TEST(Vitals, MultiString) {
  std::stringstream buffer;
  std::streambuf* sbuf = std::cout.rdbuf();
  std::cout.rdbuf(buffer.rdbuf());
  {
    c10::utils::set_env("TORCH_VITAL", "1");
    TORCH_VITAL_DEFINE(Testing);
    TORCH_VITAL(Testing, Attribute0) << 1 << " of " << 2;
    TORCH_VITAL(Testing, Attribute1) << 1;
    TORCH_VITAL(Testing, Attribute1) << " of ";
    TORCH_VITAL(Testing, Attribute1) << 2;
  }
  std::cout.rdbuf(sbuf);
  auto s = buffer.str();
  ASSERT_THAT(s, HasSubstr("Testing.Attribute0\t\t 1 of 2"));
  ASSERT_THAT(s, HasSubstr("Testing.Attribute1\t\t 1 of 2"));
}

// TORCH_VITAL=0 must suppress the report; TORCH_VITAL=1 must produce it.
// Iteration i==0 tests the disabled case, i==1 the enabled case.
TEST(Vitals, OnAndOff) {
  for (const auto i : c10::irange(2)) {
    std::stringstream buffer;
    std::streambuf* sbuf = std::cout.rdbuf();
    std::cout.rdbuf(buffer.rdbuf());
    {
      c10::utils::set_env("TORCH_VITAL", i ? "1" : "0");
      TORCH_VITAL_DEFINE(Testing);
      TORCH_VITAL(Testing, Attribute0) << 1;
    }
    std::cout.rdbuf(sbuf);
    auto s = buffer.str();
    auto f = s.find("Testing.Attribute0\t\t 1");
    if (i) {
      ASSERT_TRUE(f != std::string::npos);
    } else {
      ASSERT_TRUE(f == std::string::npos);
    }
  }
}

// The programmatic setVital API should succeed and the value should appear in
// the report, same as the macro-based path.
TEST(Vitals, APIVitals) {
  std::stringstream buffer;
  bool rvalue = false;
  std::streambuf* sbuf = std::cout.rdbuf();
  std::cout.rdbuf(buffer.rdbuf());
  {
    c10::utils::set_env("TORCH_VITAL", "1");
    APIVitals api_vitals;
    rvalue = api_vitals.setVital("TestingSetVital", "TestAttr", "TestValue");
  }
  std::cout.rdbuf(sbuf);
  auto s = buffer.str();
  ASSERT_TRUE(rvalue);
  ASSERT_THAT(s, HasSubstr("TestingSetVital.TestAttr\t\t TestValue"));
}
cpp
github
https://github.com/pytorch/pytorch
aten/src/ATen/test/vitals.cpp
// Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 package command import ( "fmt" "strings" "github.com/hashicorp/cli" "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/backend/backendrun" "github.com/hashicorp/terraform/internal/command/arguments" "github.com/hashicorp/terraform/internal/command/clistate" "github.com/hashicorp/terraform/internal/command/views" "github.com/hashicorp/terraform/internal/states" "github.com/hashicorp/terraform/internal/terraform" "github.com/hashicorp/terraform/internal/tfdiags" ) // StateMvCommand is a Command implementation that changes bindings // in Terraform state so that existing remote objects bind to new resource instances. type StateMvCommand struct { StateMeta } func (c *StateMvCommand) Run(args []string) int { args = c.Meta.process(args) // We create two metas to track the two states var backupPathOut, statePathOut string var dryRun bool cmdFlags := c.Meta.ignoreRemoteVersionFlagSet("state mv") cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock states") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") cmdFlags.StringVar(&c.statePath, "state", "", "path") cmdFlags.StringVar(&statePathOut, "state-out", "", "path") if err := cmdFlags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error())) return 1 } args = cmdFlags.Args() if len(args) != 2 { c.Ui.Error("Exactly two arguments expected.\n") return cli.RunResultHelp } if diags := c.Meta.checkRequiredVersion(); diags != nil { c.showDiagnostics(diags) return 1 } // If backup or backup-out options are set // and the state option is not set, make sure // the backend is local backupOptionSetWithoutStateOption := c.backupPath != "-" && c.statePath == "" 
backupOutOptionSetWithoutStateOption := backupPathOut != "-" && c.statePath == "" var setLegacyLocalBackendOptions []string if backupOptionSetWithoutStateOption { setLegacyLocalBackendOptions = append(setLegacyLocalBackendOptions, "-backup") } if backupOutOptionSetWithoutStateOption { setLegacyLocalBackendOptions = append(setLegacyLocalBackendOptions, "-backup-out") } if len(setLegacyLocalBackendOptions) > 0 { currentBackend, diags := c.backendFromConfig(&BackendOpts{}) if diags.HasErrors() { c.showDiagnostics(diags) return 1 } // If currentBackend is nil and diags didn't have errors, // this means we have an implicit local backend _, isLocalBackend := currentBackend.(backendrun.Local) if currentBackend != nil && !isLocalBackend { diags = diags.Append( tfdiags.Sourceless( tfdiags.Error, fmt.Sprintf("Invalid command line options: %s", strings.Join(setLegacyLocalBackendOptions[:], ", ")), "Command line options -backup and -backup-out are legacy options that operate on a local state file only. 
You must specify a local state file with the -state option or switch to the local backend.", ), ) c.showDiagnostics(diags) return 1 } } // Read the from state view := arguments.ViewHuman stateFromMgr, err := c.State(view) if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } if c.stateLock { stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) if diags := stateLocker.Lock(stateFromMgr, "state-mv"); diags.HasErrors() { c.showDiagnostics(diags) return 1 } defer func() { if diags := stateLocker.Unlock(); diags.HasErrors() { c.showDiagnostics(diags) } }() } if err := stateFromMgr.RefreshState(); err != nil { c.Ui.Error(fmt.Sprintf("Failed to refresh source state: %s", err)) return 1 } stateFrom := stateFromMgr.State() if stateFrom == nil { c.Ui.Error(errStateNotFound) return 1 } // Read the destination state stateToMgr := stateFromMgr stateTo := stateFrom if statePathOut != "" { c.statePath = statePathOut c.backupPath = backupPathOut stateToMgr, err = c.State(view) if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } if c.stateLock { stateLocker := clistate.NewLocker(c.stateLockTimeout, views.NewStateLocker(arguments.ViewHuman, c.View)) if diags := stateLocker.Lock(stateToMgr, "state-mv"); diags.HasErrors() { c.showDiagnostics(diags) return 1 } defer func() { if diags := stateLocker.Unlock(); diags.HasErrors() { c.showDiagnostics(diags) } }() } if err := stateToMgr.RefreshState(); err != nil { c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) return 1 } stateTo = stateToMgr.State() if stateTo == nil { stateTo = states.NewState() } } var diags tfdiags.Diagnostics sourceAddr, moreDiags := c.lookupSingleStateObjectAddr(stateFrom, args[0]) diags = diags.Append(moreDiags) destAddr, moreDiags := c.lookupSingleStateObjectAddr(stateFrom, args[1]) diags = diags.Append(moreDiags) if diags.HasErrors() { c.showDiagnostics(diags) return 1 } prefix := "Move" if 
dryRun { prefix = "Would move" } const msgInvalidSource = "Invalid source address" const msgInvalidTarget = "Invalid target address" var moved int ssFrom := stateFrom.SyncWrapper() sourceAddrs := c.sourceObjectAddrs(stateFrom, sourceAddr) if len(sourceAddrs) == 0 { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidSource, fmt.Sprintf("Cannot move %s: does not match anything in the current state.", sourceAddr), )) c.showDiagnostics(diags) return 1 } for _, rawAddrFrom := range sourceAddrs { switch addrFrom := rawAddrFrom.(type) { case addrs.ModuleInstance: search := sourceAddr.(addrs.ModuleInstance) addrTo, ok := destAddr.(addrs.ModuleInstance) if !ok { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, fmt.Sprintf("Cannot move %s to %s: the target must also be a module.", addrFrom, destAddr), )) c.showDiagnostics(diags) return 1 } if len(search) < len(addrFrom) { n := make(addrs.ModuleInstance, 0, len(addrTo)+len(addrFrom)-len(search)) n = append(n, addrTo...) n = append(n, addrFrom[len(search):]...) addrTo = n } if stateTo.Module(addrTo) != nil { c.Ui.Error(fmt.Sprintf(errStateMv, "destination module already exists")) return 1 } ms := ssFrom.Module(addrFrom) if ms == nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidSource, fmt.Sprintf("The current state does not contain %s.", addrFrom), )) c.showDiagnostics(diags) return 1 } moved++ c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) if !dryRun { ssFrom.RemoveModule(addrFrom) // Update the address before adding it to the state. 
ms.Addr = addrTo stateTo.Modules[addrTo.String()] = ms } case addrs.AbsResource: addrTo, ok := destAddr.(addrs.AbsResource) if !ok { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, fmt.Sprintf("Cannot move %s to %s: the source is a whole resource (not a resource instance) so the target must also be a whole resource.", addrFrom, destAddr), )) c.showDiagnostics(diags) return 1 } diags = diags.Append(c.validateResourceMove(addrFrom, addrTo)) if stateTo.Resource(addrTo) != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, fmt.Sprintf("Cannot move to %s: there is already a resource at that address in the current state.", addrTo), )) } rs := ssFrom.Resource(addrFrom) if rs == nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidSource, fmt.Sprintf("The current state does not contain %s.", addrFrom), )) } if diags.HasErrors() { c.showDiagnostics(diags) return 1 } moved++ c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) if !dryRun { ssFrom.RemoveResource(addrFrom) // Update the address before adding it to the state. 
rs.Addr = addrTo stateTo.EnsureModule(addrTo.Module).Resources[addrTo.Resource.String()] = rs } case addrs.AbsResourceInstance: addrTo, ok := destAddr.(addrs.AbsResourceInstance) if !ok { ra, ok := destAddr.(addrs.AbsResource) if !ok { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, fmt.Sprintf("Cannot move %s to %s: the target must also be a resource instance.", addrFrom, destAddr), )) c.showDiagnostics(diags) return 1 } addrTo = ra.Instance(addrs.NoKey) } diags = diags.Append(c.validateResourceMove(addrFrom.ContainingResource(), addrTo.ContainingResource())) if stateTo.Module(addrTo.Module) == nil { // moving something to a mew module, so we need to ensure it exists stateTo.EnsureModule(addrTo.Module) } if stateTo.ResourceInstance(addrTo) != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidTarget, fmt.Sprintf("Cannot move to %s: there is already a resource instance at that address in the current state.", addrTo), )) } is := ssFrom.ResourceInstance(addrFrom) if is == nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidSource, fmt.Sprintf("The current state does not contain %s.", addrFrom), )) } if diags.HasErrors() { c.showDiagnostics(diags) return 1 } moved++ c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), args[1])) if !dryRun { fromResourceAddr := addrFrom.ContainingResource() fromResource := ssFrom.Resource(fromResourceAddr) fromProviderAddr := fromResource.ProviderConfig ssFrom.ForgetResourceInstanceAll(addrFrom) ssFrom.RemoveResourceIfEmpty(fromResourceAddr) rs := stateTo.Resource(addrTo.ContainingResource()) if rs == nil { // If we're moving to an address without an index then that // suggests the user's intent is to establish both the // resource and the instance at the same time (since the // address covers both). If there's an index in the // target then allow creating the new instance here. 
resourceAddr := addrTo.ContainingResource() stateTo.SyncWrapper().SetResourceProvider( resourceAddr, fromProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource ) rs = stateTo.Resource(resourceAddr) } rs.Instances[addrTo.Resource.Key] = is } default: diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidSource, fmt.Sprintf("Cannot move %s: Terraform doesn't know how to move this object.", rawAddrFrom), )) } // Look for any dependencies that may be effected and // remove them to ensure they are recreated in full. for _, mod := range stateTo.Modules { for _, res := range mod.Resources { for _, ins := range res.Instances { if ins.Current == nil { continue } for _, dep := range ins.Current.Dependencies { // check both directions here, since we may be moving // an instance which is in a resource, or a module // which can contain a resource. if dep.TargetContains(rawAddrFrom) || rawAddrFrom.TargetContains(dep) { ins.Current.Dependencies = nil break } } } } } } if dryRun { if moved == 0 { c.Ui.Output("Would have moved nothing.") } return 0 // This is as far as we go in dry-run mode } // Load the backend b, backendDiags := c.backend(".", view) diags = diags.Append(backendDiags) if backendDiags.HasErrors() { c.showDiagnostics(diags) return 1 } // Get schemas, if possible, before writing state var schemas *terraform.Schemas if isCloudMode(b) { var schemaDiags tfdiags.Diagnostics schemas, schemaDiags = c.MaybeGetSchemas(stateTo, nil) diags = diags.Append(schemaDiags) } // Write the new state if err := stateToMgr.WriteState(stateTo); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } if err := stateToMgr.PersistState(schemas); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } // Write the old state if it is different if stateTo != stateFrom { if err := stateFromMgr.WriteState(stateFrom); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } if err := 
stateFromMgr.PersistState(schemas); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } } c.showDiagnostics(diags) if moved == 0 { c.Ui.Output("No matching objects found.") } else { c.Ui.Output(fmt.Sprintf("Successfully moved %d object(s).", moved)) } return 0 } // sourceObjectAddrs takes a single source object address and expands it to // potentially multiple objects that need to be handled within it. // // In particular, this handles the case where a module is requested directly: // if it has any child modules, then they must also be moved. It also resolves // the ambiguity that an index-less resource address could either be a resource // address or a resource instance address, by making a decision about which // is intended based on the current state of the resource in question. func (c *StateMvCommand) sourceObjectAddrs(state *states.State, matched addrs.Targetable) []addrs.Targetable { var ret []addrs.Targetable switch addr := matched.(type) { case addrs.ModuleInstance: for _, mod := range state.Modules { if len(mod.Addr) < len(addr) { continue // can't possibly be our selection or a child of it } if !mod.Addr[:len(addr)].Equal(addr) { continue } ret = append(ret, mod.Addr) } case addrs.AbsResource: // If this refers to a resource without "count" or "for_each" set then // we'll assume the user intended it to be a resource instance // address instead, to allow for requests like this: // terraform state mv aws_instance.foo aws_instance.bar[1] // That wouldn't be allowed if aws_instance.foo had multiple instances // since we can't move multiple instances into one. 
if rs := state.Resource(addr); rs != nil { if _, ok := rs.Instances[addrs.NoKey]; ok { ret = append(ret, addr.Instance(addrs.NoKey)) } else { ret = append(ret, addr) } } default: ret = append(ret, matched) } return ret } func (c *StateMvCommand) validateResourceMove(addrFrom, addrTo addrs.AbsResource) tfdiags.Diagnostics { const msgInvalidRequest = "Invalid state move request" var diags tfdiags.Diagnostics if addrFrom.Resource.Mode != addrTo.Resource.Mode { switch addrFrom.Resource.Mode { case addrs.ManagedResourceMode: diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidRequest, fmt.Sprintf("Cannot move %s to %s: a managed resource can be moved only to another managed resource address.", addrFrom, addrTo), )) case addrs.DataResourceMode: diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidRequest, fmt.Sprintf("Cannot move %s to %s: a data resource can be moved only to another data resource address.", addrFrom, addrTo), )) default: // In case a new mode is added in future, this unhelpful error is better than nothing. diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidRequest, fmt.Sprintf("Cannot move %s to %s: cannot change resource mode.", addrFrom, addrTo), )) } } if addrFrom.Resource.Type != addrTo.Resource.Type { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, msgInvalidRequest, fmt.Sprintf("Cannot move %s to %s: resource types don't match.", addrFrom, addrTo), )) } return diags } func (c *StateMvCommand) Help() string { helpText := ` Usage: terraform [global options] state mv [options] SOURCE DESTINATION This command will move an item matched by the address given to the destination address. This command can also move to a destination address in a completely different state file. This can be used for simple resource renaming, moving items to and from a module, moving entire modules, and more. 
And because this command can also move data to a completely new state, it can also be used for refactoring one configuration into multiple separately managed Terraform configurations. This command will output a backup copy of the state prior to saving any changes. The backup cannot be disabled. Due to the destructive nature of this command, backups are required. If you're moving an item to a different state file, a backup will be created for each state file. Options: -dry-run If set, prints out what would've been moved but doesn't actually move anything. -lock=false Don't hold a state lock during the operation. This is dangerous if others might concurrently run commands against the same workspace. -lock-timeout=0s Duration to retry a state lock. -ignore-remote-version A rare option used for the remote backend only. See the remote backend documentation for more information. -state, state-out, and -backup are legacy options supported for the local backend only. For more information, see the local backend's documentation. ` return strings.TrimSpace(helpText) } func (c *StateMvCommand) Synopsis() string { return "Move an item in the state" } const errStateMv = `Error moving state: %s Please ensure your addresses and state paths are valid. No state was persisted. Your existing states are untouched.`
go
github
https://github.com/hashicorp/terraform
internal/command/state_mv.go
//go:generate go-winres make --arch=386,amd64,arm,arm64 --in=./winresources/winres.json --out=./resource package main import _ "github.com/moby/moby/v2/cmd/dockerd/winresources"
go
github
https://github.com/moby/moby
cmd/dockerd/main_windows.go
# -*- test-case-name: twisted.test.test_woven -*-
#
# WORK IN PROGRESS: HARD HAT REQUIRED
#
# NOTE(review): this module is Python 2 only (`dict.has_key`, `unicode`,
# `except X, e:` syntax) and belongs to the long-deprecated Twisted "woven"
# templating system.  Comments below document what the visible code does;
# behavior of woven helpers (lmx, domhelpers, MethodModel) is taken at
# face value from their call sites.
from __future__ import nested_scopes

# Twisted Imports
from twisted.python import formmethod, failure
from twisted.python.components import registerAdapter, getAdapter
from twisted.web import domhelpers, resource, util
from twisted.internet import defer

# Sibling Imports
from twisted.web.woven import model, view, controller, widgets, input, interfaces
from twisted.web.microdom import parseString, lmx, Element

#other imports
import math

# map formmethod.Argument to functions that render them:
_renderers = {}

def registerRenderer(argumentClass, renderer):
    """Register a renderer for a given argument class.

    The renderer function should act in the same way as the 'input_XXX'
    methods of C{FormFillerWidget}.
    """
    assert callable(renderer)
    global _renderers
    _renderers[argumentClass] = renderer


class FormFillerWidget(widgets.Widget):
    """Widget that renders a C{formmethod} form into an HTML template.

    Each C{input_XXX} method turns one formmethod argument type into the
    matching HTML form control; C{setUp} walks the template DOM, merges
    pre-existing template inputs with the form model, and appends any
    arguments the template did not already provide.
    """

    # argument types rendered full-width (no label/description columns)
    SPANNING_TYPES = ["hidden", "submit"]

    def getValue(self, request, argument):
        """Return value for form input."""
        # Prefer the submitted request value (first of possibly several),
        # unless the model forces defaults.  A coercion failure falls back
        # to the raw submitted string so the user sees what they typed.
        if not self.model.alwaysDefault:
            values = request.args.get(argument.name, None)
            if values:
                try:
                    return argument.coerce(values[0])
                except formmethod.InputError:
                    return values[0]
        return argument.default

    def getValues(self, request, argument):
        """Return values for form input."""
        # Same as getValue but coerces the whole submitted list (for
        # multi-valued inputs such as checkgroups).
        if not self.model.alwaysDefault:
            values = request.args.get(argument.name, None)
            if values:
                try:
                    return argument.coerce(values)
                except formmethod.InputError:
                    return values
        return argument.default

    def createShell(self, request, node, data):
        """Create a `shell' node that will hold the additional form
        elements, if one is required.
        """
        return lmx(node).table(border="0")

    def input_single(self, request, content, model, templateAttributes={}):
        """
        Returns a text input node built based upon the node model.
        Optionally takes an already-coded DOM node merges that
        information with the model's information.  Returns a new (??)
        lmx node.
        """
        #in a text field, only the following options are allowed (well, more
        #are, but they're not supported yet - can add them in later)
        attribs = ['type', 'name', 'value', 'size', 'maxlength', 'readonly']
        #only MSIE recognizes readonly and disabled
        arguments = {}
        for attrib in attribs:
            #model hints and values override anything in the template
            val = model.getHint(attrib, templateAttributes.get(attrib, None))
            if val:
                arguments[attrib] = str(val)

        value = self.getValue(request, model)
        if value:
            arguments["value"] = str(value)

        arguments["type"] = "text"  #these are default
        arguments["name"] = model.name

        return content.input(**arguments)

    def input_string(self, request, content, model, templateAttributes={}):
        # Like input_single, but defaults the field width to 60 characters.
        # NOTE(review): mutates the (shared, mutable-default) template dict.
        if not templateAttributes.has_key("size"):
            templateAttributes["size"] = '60'
        return self.input_single(request, content, model, templateAttributes)

    # These argument types render identically to a plain text field.
    input_integer = input_single
    input_integerrange = input_single
    input_float = input_single

    def input_text(self, request, content, model, templateAttributes={}):
        # Multi-line text renders as a <textarea>; cols/rows/wrap come from
        # model hints, falling back to template attributes, then defaults.
        r = content.textarea(
            cols=str(model.getHint('cols', templateAttributes.get('cols', '60'))),
            rows=str(model.getHint('rows', templateAttributes.get('rows', '10'))),
            name=model.name,
            wrap=str(model.getHint('wrap', templateAttributes.get('wrap', "virtual"))))
        r.text(str(self.getValue(request, model)))
        return r

    def input_hidden(self, request, content, model, templateAttributes={}):
        # Hidden field carrying the current value, no visible widget.
        return content.input(type="hidden",
                             name=model.name,
                             value=str(self.getValue(request, model)))

    def input_submit(self, request, content, model, templateAttributes={}):
        # One submit button per (tag, value, desc) choice, optionally with a
        # reset button and an onClick hint.
        arguments = {}
        val = model.getHint("onClick", templateAttributes.get("onClick", None))
        if val:
            arguments["onClick"] = val
        arguments["type"] = "submit"
        arguments["name"] = model.name
        div = content.div()
        for tag, value, desc in model.choices:
            args = arguments.copy()
            args["value"] = tag
            div.input(**args)
            div.text(" ")
        if model.reset:
            div.input(type="reset")
        return div

    def input_choice(self, request, content, model, templateAttributes={}):
        # am I not evil? allow onChange js events
        arguments = {}
        val = model.getHint("onChange", templateAttributes.get("onChange", None))
        if val:
            arguments["onChange"] = val
        arguments["name"] = model.name
        s = content.select(**arguments)
        default = self.getValues(request, model)
        for tag, value, desc in model.choices:
            kw = {}
            if value in default:
                kw = {'selected' : '1'}
            s.option(value=tag, **kw).text(desc)
        return s

    def input_group(self, request, content, model, groupValues, inputType,
                    templateAttributes={}):
        """
        Base code for a group of objects.  Checkgroup will use this, as
        well as radiogroup.  In the attributes, rows means how many rows
        the group should be arranged into, cols means how many cols the
        group should be arranged into.  Columns take precedence over
        rows: if both are specified, the output will always generate the
        correct number of columns.  However, if the number of elements
        in the group exceed (or is smaller than) rows*cols, then the
        number of rows will be off.  A cols attribute of 1 will mean
        that all the elements will be listed one underneath another.
        The default is a rows attribute of 1:  everything listed next to
        each other.
        """
        rows = model.getHint('rows', templateAttributes.get('rows', None))
        cols = model.getHint('cols', templateAttributes.get('cols', None))
        if rows:
            rows = int(rows)
        if cols:
            cols = int(cols)
        defaults = self.getValues(request, model)
        if (rows and rows>1) or (cols and cols>1):
            #build a table
            s = content.table(border="0")
            if cols:
                breakat = cols
            else:
                # NOTE(review): math.ceil returns a float in Python 2; used
                # directly as a range() step below.
                breakat = math.ceil(float(len(groupValues))/rows)
            for i in range(0, len(groupValues), breakat):
                tr = s.tr()
                for j in range(0, breakat):
                    if i+j >= len(groupValues):
                        break
                    tag, value, desc = groupValues[i+j]
                    kw = {}
                    if value in defaults:
                        kw = {'checked' : '1'}
                    tr.td().input(type=inputType, name=model.name,
                                  value=tag, **kw).text(desc)
        else:
            # Flat layout: everything in one <div>, optional <br> per item.
            s = content.div()
            for tag, value, desc in groupValues:
                kw = {}
                if value in defaults:
                    kw = {'checked' : '1'}
                s.input(type=inputType, name=model.name, value=tag, **kw).text(desc)
                if cols:
                    s.br()
        return s

    def input_checkgroup(self, request, content, model, templateAttributes={}):
        return self.input_group(request, content, model, model.flags,
                                "checkbox", templateAttributes)

    def input_radiogroup(self, request, content, model, templateAttributes={}):
        return self.input_group(request, content, model, model.choices,
                                "radio", templateAttributes)

    #I don't know why they're the same, but they were.  So I removed the
    #excess code.  Maybe someone should look into removing it entirely.
    input_flags = input_checkgroup

    def input_boolean(self, request, content, model, templateAttributes={}):
        # A single checkbox, checked when the current value is truthy.
        kw = {}
        if self.getValue(request, model):
            kw = {'checked' : '1'}
        return content.input(type="checkbox", name=model.name, **kw)

    def input_file(self, request, content, model, templateAttributes={}):
        kw = {}
        for attrib in ['size', 'accept']:
            val = model.getHint(attrib, templateAttributes.get(attrib, None))
            if val:
                kw[attrib] = str(val)
        return content.input(type="file", name=model.name, **kw)

    def input_date(self, request, content, model, templateAttributes={}):
        # Three text boxes (year/month/day) sharing one argument name.
        breakLines = model.getHint('breaklines', 1)
        date = self.getValues(request, model)
        if date == None:
            year, month, day = "", "", ""
        else:
            year, month, day = date
        div = content.div()
        div.text("Year: ")
        div.input(type="text", size="4", maxlength="4", name=model.name, value=str(year))
        if breakLines:
            div.br()
        div.text("Month: ")
        div.input(type="text", size="2", maxlength="2", name=model.name, value=str(month))
        if breakLines:
            div.br()
        div.text("Day: ")
        div.input(type="text", size="2", maxlength="2", name=model.name, value=str(day))
        return div

    def input_password(self, request, content, model, templateAttributes={}):
        # Passwords never echo the submitted value back into the field.
        return content.input(
            type="password",
            size=str(templateAttributes.get('size', "60")),
            name=model.name)

    def input_verifiedpassword(self, request, content, model, templateAttributes={}):
        # Password + verification field pair sharing one argument name.
        breakLines = model.getHint('breaklines', 1)
        values = self.getValues(request, model)
        if isinstance(values, (str, unicode)):
            values = (values, values)
        if not values:
            p1, p2 = "", ""
        elif len(values) == 1:
            # NOTE(review): this assigns the 1-element sequence itself to p1
            # rather than its first item — looks like a latent bug; confirm
            # against the callers before changing.
            p1, p2 = values, ""
        elif len(values) == 2:
            p1, p2 = values
        else:
            p1, p2 = "", ""
        div = content.div()
        div.text("Password: ")
        div.input(type="password", size="20", name=model.name, value=str(p1))
        if breakLines:
            div.br()
        div.text("Verify: ")
        div.input(type="password", size="20", name=model.name, value=str(p2))
        return div

    def convergeInput(self, request, content, model, templateNode):
        # Pick a renderer (registered override first, then input_<type>)
        # and merge the template node's attributes into the rendered input.
        name = model.__class__.__name__.lower()
        if _renderers.has_key(model.__class__):
            imeth = _renderers[model.__class__]
        else:
            imeth = getattr(self,"input_"+name)
        return imeth(request, content, model, templateNode.attributes).node

    def createInput(self, request, shell, model, templateAttributes={}):
        # Render an argument that was NOT present in the template into the
        # shell table; returns (input node, error/description node).
        name = model.__class__.__name__.lower()
        if _renderers.has_key(model.__class__):
            imeth = _renderers[model.__class__]
        else:
            imeth = getattr(self,"input_"+name)
        if name in self.SPANNING_TYPES:
            # hidden/submit span both columns and get a bare error row
            td = shell.tr().td(valign="top", colspan="2")
            return (imeth(request, td, model).node,
                    shell.tr().td(colspan="2").node)
        else:
            # label column (with " *" marking required fields) + input column
            if model.allowNone:
                required = ""
            else:
                required = " *"
            tr = shell.tr()
            tr.td(align="right", valign="top").text(
                model.getShortDescription()+":"+required)
            content = tr.td(valign="top")
            return (imeth(request, content, model).node,
                    content.div(_class="formDescription"). # because class is a keyword
                    text(model.getLongDescription()).node)

    def setUp(self, request, node, data):
        """Prepare the <form> node: set action/method/enctype defaults,
        wire template-declared inputs to form arguments, and append any
        arguments missing from the template."""
        # node = widgets.Widget.generateDOM(self,request,node)
        lmn = lmx(node)
        if not node.hasAttribute('action'):
            lmn['action'] = (request.prepath+request.postpath)[-1]
        if not node.hasAttribute("method"):
            lmn['method'] = 'post'
        lmn['enctype'] = 'multipart/form-data'
        self.errorNodes = errorNodes = {}           # name: nodes which trap errors
        self.inputNodes = inputNodes = {}
        for errorNode in domhelpers.findElementsWithAttribute(node, 'errorFor'):
            errorNodes[errorNode.getAttribute('errorFor')] = errorNode
        argz={}
        # list to figure out which nodes are in the template already and which aren't
        hasSubmit = 0
        argList = self.model.fmethod.getArgs()
        for arg in argList:
            if isinstance(arg, formmethod.Submit):
                hasSubmit = 1
            argz[arg.name] = arg
        inNodes = domhelpers.findElements(
            node,
            lambda n: n.tagName.lower() in ('textarea', 'select', 'input', 'div'))
        for inNode in inNodes:
            t = inNode.getAttribute("type")
            if t and t.lower() == "submit":
                hasSubmit = 1
            if not inNode.hasAttribute("name"):
                continue
            nName = inNode.getAttribute("name")
            if argz.has_key(nName):
                #send an empty content shell - we just want the node
                inputNodes[nName] = self.convergeInput(request, lmx(), argz[nName], inNode)
                inNode.parentNode.replaceChild(inputNodes[nName], inNode)
                del argz[nName]
            # TODO:
            # * some arg types should only have a single node (text, string, etc)
            # * some should have multiple nodes (choice, checkgroup)
            # * some have a bunch of ancillary nodes that are possible values (menu, radiogroup)
            # these should all be taken into account when walking through the template
        if argz:
            # leftover arguments get freshly generated rows in a shell table
            shell = self.createShell(request, node, data)
            # create inputs, in the same order they were passed to us:
            for remArg in [arg for arg in argList if argz.has_key(arg.name)]:
                inputNode, errorNode = self.createInput(request, shell, remArg)
                errorNodes[remArg.name] = errorNode
                inputNodes[remArg.name] = inputNode

        if not hasSubmit:
            lmn.input(type="submit")


class FormErrorWidget(FormFillerWidget):
    """FormFillerWidget variant that re-renders the form with per-field
    error messages taken from the model's C{err} mapping."""

    def setUp(self, request, node, data):
        FormFillerWidget.setUp(self, request, node, data)
        for k, f in self.model.err.items():
            en = self.errorNodes[k]
            tn = self.inputNodes[k]
            en.setAttribute('class', 'formError')
            tn.setAttribute('class', 'formInputError')
            en.childNodes[:]=[] # gurfle, CLEAR IT NOW!@#
            if isinstance(f, failure.Failure):
                f = f.getErrorMessage()
            lmx(en).text(str(f))


class FormDisplayModel(model.MethodModel):
    """Model wrapping a formmethod for initial (non-error) display."""

    def initialize(self, fmethod, alwaysDefault=False):
        self.fmethod = fmethod
        self.alwaysDefault = alwaysDefault


class FormErrorModel(FormDisplayModel):
    """Model for re-displaying a form after validation failed."""

    def initialize(self, fmethod, args, err):
        FormDisplayModel.initialize(self, fmethod)
        self.args = args
        # unwrap a Failure to the underlying exception first
        if isinstance(err, failure.Failure):
            err = err.value
        if isinstance(err, Exception):
            # per-field messages come from an optional `descriptions` attr
            self.err = getattr(err, "descriptions", {})
            self.desc = err
        else:
            self.err = err
            self.desc = "Please try again"

    def wmfactory_description(self, request):
        return str(self.desc)


class _RequestHack(model.MethodModel):
    # Debug model exposing the processed form arguments stashed on the
    # request by FormProcessor.render (request._outDict).
    def wmfactory_hack(self, request):
        rv = [[str(a), repr(b)] for (a, b) in request._outDict.items()]
        #print 'hack', rv
        return rv


class FormProcessor(resource.Resource):
    """Resource that validates a submitted form, invokes the formmethod,
    and renders either the success view or an error re-display."""

    def __init__(self, formMethod, callback=None, errback=None):
        resource.Resource.__init__(self)
        self.formMethod = formMethod
        if callback is None:
            callback = self.viewFactory
        self.callback = callback
        if errback is None:
            errback = self.errorViewFactory
        self.errback = errback

    def getArgs(self, request):
        """Return the formmethod.Arguments.

        Overridable hook to allow pre-processing, e.g. if we want to
        enable on them depending on one of the inputs.
        """
        return self.formMethod.getArgs()

    def render(self, request):
        outDict = {}
        errDict = {}
        for methodArg in self.getArgs(request):
            valmethod = getattr(self,"mangle_"+
                                (methodArg.__class__.__name__.lower()), None)
            tmpval = request.args.get(methodArg.name)
            if valmethod:
                # mangle the argument to a basic datatype that coerce will like
                tmpval = valmethod(tmpval)
            # coerce it
            try:
                cv = methodArg.coerce(tmpval)
                outDict[methodArg.name] = cv
            except:
                # NOTE(review): bare except — every coercion error (not just
                # InputError) is captured as a Failure for this field.
                errDict[methodArg.name] = failure.Failure()
        if errDict:
            # there were problems processing the form
            return self.errback(self.errorModelFactory(
                request.args, outDict, errDict)).render(request)
        else:
            try:
                if self.formMethod.takesRequest:
                    outObj = self.formMethod.call(request=request, **outDict)
                else:
                    outObj = self.formMethod.call(**outDict)
            except formmethod.FormException, e:
                err = request.errorInfo = self.errorModelFactory(
                    request.args, outDict, e)
                return self.errback(err).render(request)
            else:
                request._outDict = outDict # CHOMP CHOMP!
                # I wanted better default behavior for debugging, so I could
                # see the arguments passed, but there is no channel for this in
                # the existing callback structure.  So, here it goes.
                if isinstance(outObj, defer.Deferred):
                    # async result: success view or error re-display once the
                    # Deferred fires
                    def _ebModel(err):
                        if err.trap(formmethod.FormException):
                            mf = self.errorModelFactory(request.args, outDict,
                                                        err.value)
                            return self.errback(mf)
                        raise err
                    (outObj
                     .addCallback(self.modelFactory)
                     .addCallback(self.callback)
                     .addErrback(_ebModel))
                    return util.DeferredResource(outObj).render(request)
                else:
                    return self.callback(self.modelFactory(outObj)).render(
                        request)

    def errorModelFactory(self, args, out, err):
        return FormErrorModel(self.formMethod, args, err)

    def errorViewFactory(self, m):
        # Minimal built-in error page; the <form model="."> re-renders the
        # submitted form via FormErrorWidget.
        v = view.View(m)
        v.template = '''
        <html>
        <head>
        <title> Form Error View </title>
        <style>
        .formDescription {color: green}
        .formError {color: red; font-weight: bold}
        .formInputError {color: #900}
        </style>
        </head>
        <body>
        Error: <span model="description" />
        <form model=".">
        </form>
        </body>
        </html>
        '''
        return v

    def modelFactory(self, outObj):
        # adapt the formmethod's return value to IModel for rendering
        adapt = getAdapter(outObj, interfaces.IModel, outObj)
        # print 'factorizing', adapt
        return adapt

    def viewFactory(self, model):
        # return getAdapter(model, interfaces.IView)
        # Default "thank you" page; with no model it dumps the processed
        # arguments via _RequestHack for debugging.
        if model is None:
            bodyStr = '''
            <table model="hack" style="background-color: #99f">
            <tr pattern="listItem" view="Widget">
            <td model="0" style="font-weight: bold">
            </td>
            <td model="1">
            </td>
            </tr>
            </table>
            '''
            model = _RequestHack()
        else:
            bodyStr = '<div model="." />'
        v = view.View(model)
        v.template = '''
        <html>
        <head>
        <title> Thank You </title>
        </head>
        <body>
        <h1>Thank You for Using Woven</h1>
        %s
        </body>
        </html>
        ''' % bodyStr
        return v

    # manglizers
    # Collapse request.args lists (lists of submitted strings) into the
    # shape each argument type's coerce() expects.

    def mangle_single(self, args):
        # single-valued types: take the first submitted value, or ''
        if args:
            return args[0]
        else:
            return ''

    mangle_string = mangle_single
    mangle_text = mangle_single
    mangle_integer = mangle_single
    mangle_password = mangle_single
    mangle_integerrange = mangle_single
    mangle_float = mangle_single
    mangle_choice = mangle_single
    mangle_boolean = mangle_single
    mangle_hidden = mangle_single
    mangle_submit = mangle_single
    mangle_file = mangle_single
    mangle_radiogroup = mangle_single

    def mangle_multi(self, args):
        # multi-valued types: absent means empty list
        if args is None:
            return []
        return args

    mangle_checkgroup = mangle_multi
    mangle_flags = mangle_multi


from twisted.python.formmethod import FormMethod

# Register the woven adapters so FormMethod objects render via these widgets.
view.registerViewForModel(FormFillerWidget, FormDisplayModel)
view.registerViewForModel(FormErrorWidget, FormErrorModel)
registerAdapter(FormDisplayModel, FormMethod, interfaces.IModel)
unknown
codeparrot/codeparrot-clean
""" ================ Confusion matrix ================ Example of confusion matrix usage to evaluate the quality of the output of a classifier on the iris data set. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions. The figures show the confusion matrix with and without normalization by class support size (number of elements in each class). This kind of normalization can be interesting in case of class imbalance to have a more visual interpretation of which class is being misclassified. Here the results are not as good as they could be as our choice for the regularization parameter C was not the best. In real life applications this parameter is usually chosen using :ref:`grid_search`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.cross_validation import train_test_split from sklearn.metrics import confusion_matrix # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel='linear', C=0.01) y_pred = classifier.fit(X_train, y_train).predict(X_test) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, iris.target_names, rotation=45) plt.yticks(tick_marks, iris.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Compute confusion matrix cm = confusion_matrix(y_test, 
y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) # Normalize the confusion matrix by row (i.e by the number of samples # in each class) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print('Normalized confusion matrix') print(cm_normalized) plt.figure() plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix') plt.show()
unknown
codeparrot/codeparrot-clean
{ "name": "data-router", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "data-router", "dependencies": { "react": "18.2.0", "react-dom": "18.2.0", "react-router-dom": "^6.15.0" }, "devDependencies": { "@rollup/plugin-replace": "5.0.2", "@types/node": "18.11.18", "@types/react": "18.0.27", "@types/react-dom": "18.0.10", "@vitejs/plugin-react": "3.0.1", "typescript": "4.9.5", "vite": "4.0.4" } }, "node_modules/@ampproject/remapping": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", "dev": true, "dependencies": { "@jridgewell/gen-mapping": "^0.3.0", "@jridgewell/trace-mapping": "^0.3.9" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@babel/code-frame": { "version": "7.21.4", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz", "integrity": "sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g==", "dev": true, "dependencies": { "@babel/highlight": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { "version": "7.22.3", "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.3.tgz", "integrity": "sha512-aNtko9OPOwVESUFp3MZfD8Uzxl7JzSeJpd7npIoxCasU37PFbAQRpKglkaKwlHOyeJdrREpo8TW8ldrkYWwvIQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { "version": "7.22.1", "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.1.tgz", "integrity": "sha512-Hkqu7J4ynysSXxmAahpN1jjRwVJ+NdpraFLIWflgjpVob3KNyK3/tIUc7Q7szed8WMp0JNa7Qtd1E9Oo22F9gA==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.21.4", "@babel/generator": "^7.22.0", "@babel/helper-compilation-targets": "^7.22.1", "@babel/helper-module-transforms": "^7.22.1", "@babel/helpers": "^7.22.0", "@babel/parser": "^7.22.0", 
"@babel/template": "^7.21.9", "@babel/traverse": "^7.22.1", "@babel/types": "^7.22.0", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.2", "semver": "^6.3.0" }, "engines": { "node": ">=6.9.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/babel" } }, "node_modules/@babel/generator": { "version": "7.22.3", "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.3.tgz", "integrity": "sha512-C17MW4wlk//ES/CJDL51kPNwl+qiBQyN7b9SKyVp11BLGFeSPoVaHrv+MNt8jwQFhQWowW88z1eeBx3pFz9v8A==", "dev": true, "dependencies": { "@babel/types": "^7.22.3", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { "version": "7.22.1", "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.1.tgz", "integrity": "sha512-Rqx13UM3yVB5q0D/KwQ8+SPfX/+Rnsy1Lw1k/UwOC4KC6qrzIQoY3lYnBu5EHKBlEHHcj0M0W8ltPSkD8rqfsQ==", "dev": true, "dependencies": { "@babel/compat-data": "^7.22.0", "@babel/helper-validator-option": "^7.21.0", "browserslist": "^4.21.3", "lru-cache": "^5.1.1", "semver": "^6.3.0" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-environment-visitor": { "version": "7.22.1", "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz", "integrity": "sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-function-name": { "version": "7.21.0", "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz", "integrity": "sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg==", "dev": true, "dependencies": { 
"@babel/template": "^7.20.7", "@babel/types": "^7.21.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", "dev": true, "dependencies": { "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-imports": { "version": "7.21.4", "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz", "integrity": "sha512-orajc5T2PsRYUN3ZryCEFeMDYwyw09c/pZeaQEZPH0MpKzSvn3e0uXsDBu3k03VI+9DBiRo+l22BfKTpKwa/Wg==", "dev": true, "dependencies": { "@babel/types": "^7.21.4" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { "version": "7.22.1", "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.1.tgz", "integrity": "sha512-dxAe9E7ySDGbQdCVOY/4+UcD8M9ZFqZcZhSPsPacvCG4M+9lwtDDQfI2EoaSvmf7W/8yCBkGU0m7Pvt1ru3UZw==", "dev": true, "dependencies": { "@babel/helper-environment-visitor": "^7.22.1", "@babel/helper-module-imports": "^7.21.4", "@babel/helper-simple-access": "^7.21.5", "@babel/helper-split-export-declaration": "^7.18.6", "@babel/helper-validator-identifier": "^7.19.1", "@babel/template": "^7.21.9", "@babel/traverse": "^7.22.1", "@babel/types": "^7.22.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-plugin-utils": { "version": "7.21.5", "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.21.5.tgz", "integrity": "sha512-0WDaIlXKOX/3KfBK/dwP1oQGiPh6rjMkT7HIRv7i5RR2VUMwrx5ZL0dwBkKx7+SW1zwNdgjHd34IMk5ZjTeHVg==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-simple-access": { "version": "7.21.5", "resolved": 
"https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.21.5.tgz", "integrity": "sha512-ENPDAMC1wAjR0uaCUwliBdiSl1KBJAVnMTzXqi64c2MG8MPR6ii4qf7bSXDqSFbr4W6W028/rf5ivoHop5/mkg==", "dev": true, "dependencies": { "@babel/types": "^7.21.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-split-export-declaration": { "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", "dev": true, "dependencies": { "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { "version": "7.21.5", "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz", "integrity": "sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { "version": "7.19.1", "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { "version": "7.21.0", "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz", "integrity": "sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { "version": "7.22.3", "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.3.tgz", "integrity": "sha512-jBJ7jWblbgr7r6wYZHMdIqKc73ycaTcCaWRq4/2LpuPHcx7xMlZvpGQkOYc9HeSjn6rcx15CPlgVcBtZ4WZJ2w==", "dev": true, 
"dependencies": { "@babel/template": "^7.21.9", "@babel/traverse": "^7.22.1", "@babel/types": "^7.22.3" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", "dev": true, "dependencies": { "@babel/helper-validator-identifier": "^7.18.6", "chalk": "^2.0.0", "js-tokens": "^4.0.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/parser": { "version": "7.22.4", "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.4.tgz", "integrity": "sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA==", "dev": true, "bin": { "parser": "bin/babel-parser.js" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@babel/plugin-transform-react-jsx-self": { "version": "7.21.0", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.21.0.tgz", "integrity": "sha512-f/Eq+79JEu+KUANFks9UZCcvydOOGMgF7jBrcwjHa5jTZD8JivnhCJYvmlhR/WTXBWonDExPoW0eO/CR4QJirA==", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.20.2" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "node_modules/@babel/plugin-transform-react-jsx-source": { "version": "7.19.6", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.19.6.tgz", "integrity": "sha512-RpAi004QyMNisst/pvSanoRdJ4q+jMCWyk9zdw/CyLB9j8RXEahodR6l2GyttDRyEVWZtbN+TpLiHJ3t34LbsQ==", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.19.0" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "node_modules/@babel/template": { "version": "7.21.9", "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.21.9.tgz", "integrity": 
"sha512-MK0X5k8NKOuWRamiEfc3KEJiHMTkGZNUjzMipqCGDDc6ijRl/B7RGSKVGncu4Ro/HdyzzY6cmoXuKI2Gffk7vQ==", "dev": true, "dependencies": { "@babel/code-frame": "^7.21.4", "@babel/parser": "^7.21.9", "@babel/types": "^7.21.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { "version": "7.22.4", "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.4.tgz", "integrity": "sha512-Tn1pDsjIcI+JcLKq1AVlZEr4226gpuAQTsLMorsYg9tuS/kG7nuwwJ4AB8jfQuEgb/COBwR/DqJxmoiYFu5/rQ==", "dev": true, "dependencies": { "@babel/code-frame": "^7.21.4", "@babel/generator": "^7.22.3", "@babel/helper-environment-visitor": "^7.22.1", "@babel/helper-function-name": "^7.21.0", "@babel/helper-hoist-variables": "^7.18.6", "@babel/helper-split-export-declaration": "^7.18.6", "@babel/parser": "^7.22.4", "@babel/types": "^7.22.4", "debug": "^4.1.0", "globals": "^11.1.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/types": { "version": "7.22.4", "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.4.tgz", "integrity": "sha512-Tx9x3UBHTTsMSW85WB2kphxYQVvrZ/t1FxD88IpSgIjiUJlCm9z+xWIDwyo1vffTwSqteqyznB8ZE9vYYk16zA==", "dev": true, "dependencies": { "@babel/helper-string-parser": "^7.21.5", "@babel/helper-validator-identifier": "^7.19.1", "to-fast-properties": "^2.0.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@esbuild/android-arm": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.16.17.tgz", "integrity": "sha512-N9x1CMXVhtWEAMS7pNNONyA14f71VPQN9Cnavj1XQh6T7bskqiLLrSca4O0Vr8Wdcga943eThxnVp3JLnBMYtw==", "cpu": [ "arm" ], "dev": true, "optional": true, "os": [ "android" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/android-arm64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.16.17.tgz", "integrity": "sha512-MIGl6p5sc3RDTLLkYL1MyL8BMRN4tLMRCn+yRJJmEDvYZ2M7tmAf80hx1kbNEUX2KJ50RRtxZ4JHLvCfuB6kBg==", "cpu": [ "arm64" ], 
"dev": true, "optional": true, "os": [ "android" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/android-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.16.17.tgz", "integrity": "sha512-a3kTv3m0Ghh4z1DaFEuEDfz3OLONKuFvI4Xqczqx4BqLyuFaFkuaG4j2MtA6fuWEFeC5x9IvqnX7drmRq/fyAQ==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "android" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/darwin-arm64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.16.17.tgz", "integrity": "sha512-/2agbUEfmxWHi9ARTX6OQ/KgXnOWfsNlTeLcoV7HSuSTv63E4DqtAc+2XqGw1KHxKMHGZgbVCZge7HXWX9Vn+w==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "darwin" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/darwin-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.16.17.tgz", "integrity": "sha512-2By45OBHulkd9Svy5IOCZt376Aa2oOkiE9QWUK9fe6Tb+WDr8hXL3dpqi+DeLiMed8tVXspzsTAvd0jUl96wmg==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "darwin" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/freebsd-arm64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.16.17.tgz", "integrity": "sha512-mt+cxZe1tVx489VTb4mBAOo2aKSnJ33L9fr25JXpqQqzbUIw/yzIzi+NHwAXK2qYV1lEFp4OoVeThGjUbmWmdw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "freebsd" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/freebsd-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.16.17.tgz", "integrity": "sha512-8ScTdNJl5idAKjH8zGAsN7RuWcyHG3BAvMNpKOBaqqR7EbUhhVHOqXRdL7oZvz8WNHL2pr5+eIT5c65kA6NHug==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "freebsd" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-arm": { "version": "0.16.17", "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.16.17.tgz", "integrity": "sha512-iihzrWbD4gIT7j3caMzKb/RsFFHCwqqbrbH9SqUSRrdXkXaygSZCZg1FybsZz57Ju7N/SHEgPyaR0LZ8Zbe9gQ==", "cpu": [ "arm" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-arm64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.16.17.tgz", "integrity": "sha512-7S8gJnSlqKGVJunnMCrXHU9Q8Q/tQIxk/xL8BqAP64wchPCTzuM6W3Ra8cIa1HIflAvDnNOt2jaL17vaW+1V0g==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-ia32": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.16.17.tgz", "integrity": "sha512-kiX69+wcPAdgl3Lonh1VI7MBr16nktEvOfViszBSxygRQqSpzv7BffMKRPMFwzeJGPxcio0pdD3kYQGpqQ2SSg==", "cpu": [ "ia32" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-loong64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.16.17.tgz", "integrity": "sha512-dTzNnQwembNDhd654cA4QhbS9uDdXC3TKqMJjgOWsC0yNCbpzfWoXdZvp0mY7HU6nzk5E0zpRGGx3qoQg8T2DQ==", "cpu": [ "loong64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-mips64el": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.16.17.tgz", "integrity": "sha512-ezbDkp2nDl0PfIUn0CsQ30kxfcLTlcx4Foz2kYv8qdC6ia2oX5Q3E/8m6lq84Dj/6b0FrkgD582fJMIfHhJfSw==", "cpu": [ "mips64el" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-ppc64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.16.17.tgz", "integrity": 
"sha512-dzS678gYD1lJsW73zrFhDApLVdM3cUF2MvAa1D8K8KtcSKdLBPP4zZSLy6LFZ0jYqQdQ29bjAHJDgz0rVbLB3g==", "cpu": [ "ppc64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-riscv64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.16.17.tgz", "integrity": "sha512-ylNlVsxuFjZK8DQtNUwiMskh6nT0vI7kYl/4fZgV1llP5d6+HIeL/vmmm3jpuoo8+NuXjQVZxmKuhDApK0/cKw==", "cpu": [ "riscv64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-s390x": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.16.17.tgz", "integrity": "sha512-gzy7nUTO4UA4oZ2wAMXPNBGTzZFP7mss3aKR2hH+/4UUkCOyqmjXiKpzGrY2TlEUhbbejzXVKKGazYcQTZWA/w==", "cpu": [ "s390x" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/linux-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.16.17.tgz", "integrity": "sha512-mdPjPxfnmoqhgpiEArqi4egmBAMYvaObgn4poorpUaqmvzzbvqbowRllQ+ZgzGVMGKaPkqUmPDOOFQRUFDmeUw==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "linux" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/netbsd-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.16.17.tgz", "integrity": "sha512-/PzmzD/zyAeTUsduZa32bn0ORug+Jd1EGGAUJvqfeixoEISYpGnAezN6lnJoskauoai0Jrs+XSyvDhppCPoKOA==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "netbsd" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/openbsd-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.16.17.tgz", "integrity": "sha512-2yaWJhvxGEz2RiftSk0UObqJa/b+rIAjnODJgv2GbGGpRwAfpgzyrg1WLK8rqA24mfZa9GvpjLcBBg8JHkoodg==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "openbsd" ], 
"engines": { "node": ">=12" } }, "node_modules/@esbuild/sunos-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.16.17.tgz", "integrity": "sha512-xtVUiev38tN0R3g8VhRfN7Zl42YCJvyBhRKw1RJjwE1d2emWTVToPLNEQj/5Qxc6lVFATDiy6LjVHYhIPrLxzw==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "sunos" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/win32-arm64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.16.17.tgz", "integrity": "sha512-ga8+JqBDHY4b6fQAmOgtJJue36scANy4l/rL97W+0wYmijhxKetzZdKOJI7olaBaMhWt8Pac2McJdZLxXWUEQw==", "cpu": [ "arm64" ], "dev": true, "optional": true, "os": [ "win32" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/win32-ia32": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.16.17.tgz", "integrity": "sha512-WnsKaf46uSSF/sZhwnqE4L/F89AYNMiD4YtEcYekBt9Q7nj0DiId2XH2Ng2PHM54qi5oPrQ8luuzGszqi/veig==", "cpu": [ "ia32" ], "dev": true, "optional": true, "os": [ "win32" ], "engines": { "node": ">=12" } }, "node_modules/@esbuild/win32-x64": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.16.17.tgz", "integrity": "sha512-y+EHuSchhL7FjHgvQL/0fnnFmO4T1bhvWANX6gcnqTjtnKWbTvUMCpGnv2+t+31d7RzyEAYAd4u2fnIhHL6N/Q==", "cpu": [ "x64" ], "dev": true, "optional": true, "os": [ "win32" ], "engines": { "node": ">=12" } }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", "dev": true, "dependencies": { "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", "@jridgewell/trace-mapping": "^0.3.9" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { "version": "3.1.0", "resolved": 
"https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", "dev": true, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", "dev": true, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.4.15", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", "dev": true }, "node_modules/@jridgewell/trace-mapping": { "version": "0.3.18", "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz", "integrity": "sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==", "dev": true, "dependencies": { "@jridgewell/resolve-uri": "3.1.0", "@jridgewell/sourcemap-codec": "1.4.14" } }, "node_modules/@jridgewell/trace-mapping/node_modules/@jridgewell/sourcemap-codec": { "version": "1.4.14", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", "dev": true }, "node_modules/@remix-run/router": { "version": "1.8.0", "resolved": "https://registry.npmjs.org/@remix-run/router/-/router-1.8.0.tgz", "integrity": "sha512-mrfKqIHnSZRyIzBcanNJmVQELTnX+qagEDlcKO90RgRBVOZGSGvZKeDihTRfWcqoDn5N/NkUcwWTccnpN18Tfg==", "engines": { "node": ">=14.0.0" } }, "node_modules/@rollup/plugin-replace": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/@rollup/plugin-replace/-/plugin-replace-5.0.2.tgz", 
"integrity": "sha512-M9YXNekv/C/iHHK+cvORzfRYfPbq0RDD8r0G+bMiTXjNGKulPnCT9O3Ss46WfhI6ZOCgApOP7xAdmCQJ+U2LAA==", "dev": true, "dependencies": { "@rollup/pluginutils": "^5.0.1", "magic-string": "^0.27.0" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0" }, "peerDependenciesMeta": { "rollup": { "optional": true } } }, "node_modules/@rollup/pluginutils": { "version": "5.0.2", "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.0.2.tgz", "integrity": "sha512-pTd9rIsP92h+B6wWwFbW8RkZv4hiR/xKsqre4SIuAOaOEQRxi0lqLke9k2/7WegC85GgUs9pjmOjCUi3In4vwA==", "dev": true, "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^2.3.1" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0" }, "peerDependenciesMeta": { "rollup": { "optional": true } } }, "node_modules/@types/estree": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==", "dev": true }, "node_modules/@types/node": { "version": "18.11.18", "resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.18.tgz", "integrity": "sha512-DHQpWGjyQKSHj3ebjFI/wRKcqQcdR+MoFBygntYOZytCqNfkd2ZC4ARDJ2DQqhjH5p85Nnd3jhUJIXrszFX/JA==", "dev": true }, "node_modules/@types/prop-types": { "version": "15.7.5", "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", "dev": true }, "node_modules/@types/react": { "version": "18.0.27", "resolved": "https://registry.npmjs.org/@types/react/-/react-18.0.27.tgz", "integrity": "sha512-3vtRKHgVxu3Jp9t718R9BuzoD4NcQ8YJ5XRzsSKxNDiDonD2MXIT1TmSkenxuCycZJoQT5d2vE8LwWJxBC1gmA==", "dev": true, "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", "csstype": 
"^3.0.2" } }, "node_modules/@types/react-dom": { "version": "18.0.10", "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.10.tgz", "integrity": "sha512-E42GW/JA4Qv15wQdqJq8DL4JhNpB3prJgjgapN3qJT9K2zO5IIAQh4VXvCEDupoqAwnz0cY4RlXeC/ajX5SFHg==", "dev": true, "dependencies": { "@types/react": "*" } }, "node_modules/@types/scheduler": { "version": "0.16.3", "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", "dev": true }, "node_modules/@vitejs/plugin-react": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-3.0.1.tgz", "integrity": "sha512-mx+QvYwIbbpOIJw+hypjnW1lAbKDHtWK5ibkF/V1/oMBu8HU/chb+SnqJDAsLq1+7rGqjktCEomMTM5KShzUKQ==", "dev": true, "dependencies": { "@babel/core": "^7.20.7", "@babel/plugin-transform-react-jsx-self": "^7.18.6", "@babel/plugin-transform-react-jsx-source": "^7.19.6", "magic-string": "^0.27.0", "react-refresh": "^0.14.0" }, "engines": { "node": "^14.18.0 || >=16.0.0" }, "peerDependencies": { "vite": "^4.0.0" } }, "node_modules/ansi-styles": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", "dev": true, "dependencies": { "color-convert": "^1.9.0" }, "engines": { "node": ">=4" } }, "node_modules/browserslist": { "version": "4.21.7", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.7.tgz", "integrity": "sha512-BauCXrQ7I2ftSqd2mvKHGo85XR0u7Ru3C/Hxsy/0TkfCtjrmAbPdzLGasmoiBxplpDXlPvdjX9u7srIMfgasNA==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/browserslist" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" }, { "type": "github", "url": "https://github.com/sponsors/ai" } ], 
"dependencies": { "caniuse-lite": "^1.0.30001489", "electron-to-chromium": "^1.4.411", "node-releases": "^2.0.12", "update-browserslist-db": "^1.0.11" }, "bin": { "browserslist": "cli.js" }, "engines": { "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, "node_modules/caniuse-lite": { "version": "1.0.30001494", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001494.tgz", "integrity": "sha512-sY2B5Qyl46ZzfYDegrl8GBCzdawSLT4ThM9b9F+aDYUrAG2zCOyMbd2Tq34mS1g4ZKBfjRlzOohQMxx28x6wJg==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/browserslist" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/caniuse-lite" }, { "type": "github", "url": "https://github.com/sponsors/ai" } ] }, "node_modules/chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", "dev": true, "dependencies": { "ansi-styles": "^3.2.1", "escape-string-regexp": "^1.0.5", "supports-color": "^5.3.0" }, "engines": { "node": ">=4" } }, "node_modules/color-convert": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, "dependencies": { "color-name": "1.1.3" } }, "node_modules/color-name": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", "dev": true }, "node_modules/convert-source-map": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", "dev": true }, 
"node_modules/csstype": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", "dev": true }, "node_modules/debug": { "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dev": true, "dependencies": { "ms": "2.1.2" }, "engines": { "node": ">=6.0" }, "peerDependenciesMeta": { "supports-color": { "optional": true } } }, "node_modules/electron-to-chromium": { "version": "1.4.419", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.419.tgz", "integrity": "sha512-jdie3RiEgygvDTyS2sgjq71B36q2cDSBfPlwzUyuOrfYTNoYWyBxxjGJV/HAu3A2hB0Y+HesvCVkVAFoCKwCSw==", "dev": true }, "node_modules/esbuild": { "version": "0.16.17", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.16.17.tgz", "integrity": "sha512-G8LEkV0XzDMNwXKgM0Jwu3nY3lSTwSGY6XbxM9cr9+s0T/qSV1q1JVPBGzm3dcjhCic9+emZDmMffkwgPeOeLg==", "dev": true, "hasInstallScript": true, "bin": { "esbuild": "bin/esbuild" }, "engines": { "node": ">=12" }, "optionalDependencies": { "@esbuild/android-arm": "0.16.17", "@esbuild/android-arm64": "0.16.17", "@esbuild/android-x64": "0.16.17", "@esbuild/darwin-arm64": "0.16.17", "@esbuild/darwin-x64": "0.16.17", "@esbuild/freebsd-arm64": "0.16.17", "@esbuild/freebsd-x64": "0.16.17", "@esbuild/linux-arm": "0.16.17", "@esbuild/linux-arm64": "0.16.17", "@esbuild/linux-ia32": "0.16.17", "@esbuild/linux-loong64": "0.16.17", "@esbuild/linux-mips64el": "0.16.17", "@esbuild/linux-ppc64": "0.16.17", "@esbuild/linux-riscv64": "0.16.17", "@esbuild/linux-s390x": "0.16.17", "@esbuild/linux-x64": "0.16.17", "@esbuild/netbsd-x64": "0.16.17", "@esbuild/openbsd-x64": "0.16.17", "@esbuild/sunos-x64": "0.16.17", "@esbuild/win32-arm64": "0.16.17", "@esbuild/win32-ia32": 
"0.16.17", "@esbuild/win32-x64": "0.16.17" } }, "node_modules/escalade": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", "dev": true, "engines": { "node": ">=6" } }, "node_modules/escape-string-regexp": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", "dev": true, "engines": { "node": ">=0.8.0" } }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", "dev": true }, "node_modules/fsevents": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", "dev": true, "hasInstallScript": true, "optional": true, "os": [ "darwin" ], "engines": { "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, "node_modules/function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", "dev": true }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/globals": { "version": "11.12.0", "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", "integrity": 
"sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", "dev": true, "engines": { "node": ">=4" } }, "node_modules/has": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", "dev": true, "dependencies": { "function-bind": "^1.1.1" }, "engines": { "node": ">= 0.4.0" } }, "node_modules/has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", "dev": true, "engines": { "node": ">=4" } }, "node_modules/is-core-module": { "version": "2.12.1", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz", "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==", "dev": true, "dependencies": { "has": "^1.0.3" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" }, "node_modules/jsesc": { "version": "2.5.2", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", "dev": true, "bin": { "jsesc": "bin/jsesc" }, "engines": { "node": ">=4" } }, "node_modules/json5": { "version": "2.2.3", "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", "dev": true, "bin": { "json5": "lib/cli.js" }, "engines": { "node": ">=6" } }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": 
"https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "node_modules/lru-cache": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, "dependencies": { "yallist": "^3.0.2" } }, "node_modules/magic-string": { "version": "0.27.0", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz", "integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==", "dev": true, "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.13" }, "engines": { "node": ">=12" } }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, "node_modules/nanoid": { "version": "3.3.6", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", "dev": true, "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], "bin": { "nanoid": "bin/nanoid.cjs" }, "engines": { "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, "node_modules/node-releases": { "version": "2.0.12", "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.12.tgz", "integrity": "sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==", "dev": true }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "integrity": 
"sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true }, "node_modules/picocolors": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", "dev": true }, "node_modules/picomatch": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", "dev": true, "engines": { "node": ">=8.6" }, "funding": { "url": "https://github.com/sponsors/jonschlinkert" } }, "node_modules/postcss": { "version": "8.4.24", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.24.tgz", "integrity": "sha512-M0RzbcI0sO/XJNucsGjvWU9ERWxb/ytp1w6dKtxTKgixdtQDq4rmx/g8W1hnaheq9jgwL/oyEdH5Bc4WwJKMqg==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/postcss/" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" }, { "type": "github", "url": "https://github.com/sponsors/ai" } ], "dependencies": { "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, "engines": { "node": "^10 || ^12 || >=14" } }, "node_modules/react": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", "dependencies": { "loose-envify": "^1.1.0" }, "engines": { "node": ">=0.10.0" } }, "node_modules/react-dom": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.0" }, "peerDependencies": { "react": "^18.2.0" } }, 
"node_modules/react-refresh": { "version": "0.14.0", "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/react-router": { "version": "6.15.0", "resolved": "https://registry.npmjs.org/react-router/-/react-router-6.15.0.tgz", "integrity": "sha512-NIytlzvzLwJkCQj2HLefmeakxxWHWAP+02EGqWEZy+DgfHHKQMUoBBjUQLOtFInBMhWtb3hiUy6MfFgwLjXhqg==", "dependencies": { "@remix-run/router": "1.8.0" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { "react": ">=16.8" } }, "node_modules/react-router-dom": { "version": "6.15.0", "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-6.15.0.tgz", "integrity": "sha512-aR42t0fs7brintwBGAv2+mGlCtgtFQeOzK0BM1/OiqEzRejOZtpMZepvgkscpMUnKb8YO84G7s3LsHnnDNonbQ==", "dependencies": { "@remix-run/router": "1.8.0", "react-router": "6.15.0" }, "engines": { "node": ">=14.0.0" }, "peerDependencies": { "react": ">=16.8", "react-dom": ">=16.8" } }, "node_modules/resolve": { "version": "1.22.2", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz", "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==", "dev": true, "dependencies": { "is-core-module": "^2.11.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/rollup": { "version": "3.23.1", "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.23.1.tgz", "integrity": "sha512-ybRdFVHOoljGEFILHLd2g/qateqUdjE6YS41WXq4p3C/WwD3xtWxV4FYWETA1u9TeXQc5K8L8zHE5d/scOvrOQ==", "dev": true, "bin": { "rollup": "dist/bin/rollup" }, "engines": { "node": ">=14.18.0", "npm": ">=8.0.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "node_modules/scheduler": { "version": 
"0.23.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", "dependencies": { "loose-envify": "^1.1.0" } }, "node_modules/semver": { "version": "6.3.0", "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", "dev": true, "bin": { "semver": "bin/semver.js" } }, "node_modules/source-map-js": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/supports-color": { "version": "5.5.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, "dependencies": { "has-flag": "^3.0.0" }, "engines": { "node": ">=4" } }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", "dev": true, "engines": { "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/to-fast-properties": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", "dev": true, "engines": { "node": ">=4" } }, "node_modules/typescript": { "version": "4.9.5", "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", 
"integrity": "sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g==", "dev": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" }, "engines": { "node": ">=4.2.0" } }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", "integrity": "sha512-dCwEFf0/oT85M1fHBg4F0jtLwJrutGoHSQXCh7u4o2t1drG+c0a9Flnqww6XUKSfQMPpJBRjU8d4RXB09qtvaA==", "dev": true, "funding": [ { "type": "opencollective", "url": "https://opencollective.com/browserslist" }, { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" }, { "type": "github", "url": "https://github.com/sponsors/ai" } ], "dependencies": { "escalade": "^3.1.1", "picocolors": "^1.0.0" }, "bin": { "update-browserslist-db": "cli.js" }, "peerDependencies": { "browserslist": ">= 4.21.0" } }, "node_modules/vite": { "version": "4.0.4", "resolved": "https://registry.npmjs.org/vite/-/vite-4.0.4.tgz", "integrity": "sha512-xevPU7M8FU0i/80DMR+YhgrzR5KS2ORy1B4xcX/cXLsvnUWvfHuqMmVU6N0YiJ4JWGRJJsLCgjEzKjG9/GKoSw==", "dev": true, "dependencies": { "esbuild": "^0.16.3", "postcss": "^8.4.20", "resolve": "^1.22.1", "rollup": "^3.7.0" }, "bin": { "vite": "bin/vite.js" }, "engines": { "node": "^14.18.0 || >=16.0.0" }, "optionalDependencies": { "fsevents": "~2.3.2" }, "peerDependencies": { "@types/node": ">= 14", "less": "*", "sass": "*", "stylus": "*", "sugarss": "*", "terser": "^5.4.0" }, "peerDependenciesMeta": { "@types/node": { "optional": true }, "less": { "optional": true }, "sass": { "optional": true }, "stylus": { "optional": true }, "sugarss": { "optional": true }, "terser": { "optional": true } } }, "node_modules/yallist": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", "dev": true } } }
json
github
https://github.com/remix-run/react-router
examples/data-router/package-lock.json
# Legacy Python 2 client for the (now retired) reCAPTCHA v1 HTTP API.
# NOTE(review): this module uses urllib2 and the `unicode` builtin, so it
# only runs under Python 2; a Python 3 port would need urllib.request and
# an updated string-encoding check.
import urllib2, urllib

# Endpoints that serve the challenge widget (HTTPS and plain-HTTP variants).
API_SSL_SERVER="https://api-secure.recaptcha.net"
API_SERVER="http://api.recaptcha.net"
# Host used to verify a user's answer.
# NOTE(review): submit() talks to this host over plain HTTP, so the private
# key travels unencrypted — confirm this is acceptable before reusing.
VERIFY_SERVER="api-verify.recaptcha.net"


class RecaptchaResponse(object):
    """Result of a verification attempt made via submit()."""

    def __init__(self, is_valid, error_code=None):
        # is_valid: True when the server accepted the CAPTCHA answer.
        # error_code: server-supplied error string when is_valid is False.
        self.is_valid = is_valid
        self.error_code = error_code


def displayhtml(public_key, use_ssl = False, error = None):
    """Gets the HTML to display for reCAPTCHA

    public_key -- The public api key
    use_ssl -- Should the request be sent over ssl?
    error -- An error message to display (from RecaptchaResponse.error_code)
    """

    error_param = ''
    if error:
        # Pass the previous failure back so the widget can display it.
        error_param = '&error=%s' % error

    if use_ssl:
        server = API_SSL_SERVER
    else:
        server = API_SERVER

    # The <noscript> block is the fallback for clients without JavaScript:
    # an iframe challenge plus manually-submitted form fields.
    return """<script type="text/javascript" src="%(ApiServer)s/challenge?k=%(PublicKey)s%(ErrorParam)s"></script>

<noscript>
  <iframe src="%(ApiServer)s/noscript?k=%(PublicKey)s%(ErrorParam)s" height="300" width="500" frameborder="0"></iframe><br />
  <textarea name="recaptcha_challenge_field" rows="3" cols="40"></textarea>
  <input type='hidden' name='recaptcha_response_field' value='manual_challenge' />
</noscript>
""" % {
        'ApiServer': server,
        'PublicKey': public_key,
        'ErrorParam': error_param,
        }


def submit(recaptcha_challenge_field,
           recaptcha_response_field,
           private_key,
           remoteip):
    """
    Submits a reCAPTCHA request for verification.
    Returns RecaptchaResponse for the request

    recaptcha_challenge_field -- The value of recaptcha_challenge_field from the form
    recaptcha_response_field -- The value of recaptcha_response_field from the form
    private_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    """

    # Reject obviously empty submissions locally, without a network round-trip.
    if not (recaptcha_response_field and recaptcha_challenge_field and
            len(recaptcha_response_field) and len(recaptcha_challenge_field)):
        return RecaptchaResponse(is_valid=False, error_code='incorrect-captcha-sol')

    def encode_if_necessary(s):
        # Python 2's urllib.urlencode chokes on unicode objects containing
        # non-ASCII characters, so encode them to UTF-8 bytes first.
        if isinstance(s, unicode):
            return s.encode('utf-8')
        return s

    params = urllib.urlencode({
        'privatekey': encode_if_necessary(private_key),
        'remoteip': encode_if_necessary(remoteip),
        'challenge': encode_if_necessary(recaptcha_challenge_field),
        'response': encode_if_necessary(recaptcha_response_field),
        })

    request = urllib2.Request(
        url = "http://%s/verify" % VERIFY_SERVER,
        data = params,
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "User-agent": "reCAPTCHA Python"
            }
        )

    httpresp = urllib2.urlopen(request)

    # Response body format: first line is "true"/"false"; on failure the
    # second line carries the error code.
    return_values = httpresp.read().splitlines();
    httpresp.close();

    return_code = return_values [0]

    if return_code == "true":
        return RecaptchaResponse(is_valid=True)
    else:
        return RecaptchaResponse(is_valid=False, error_code=return_values[1])
unknown
codeparrot/codeparrot-clean
"""Base class for interactive tests spawning a basic window.""" from inspect import cleandoc from pyglet import gl from pyglet.window import Window from tests.base.interactive import InteractiveTestCase class WindowedTestCase(InteractiveTestCase): """ Base class for tests that show a window, render something in that window and then ask a question to the user whether the contents are correct. Also takes a screenshot when the test is passed, so it can run without interaction afterwards. """ # Defaults window_size = 200, 200 window_options = None window = None question = None take_screenshot = True # Methods to override in implementations def on_expose(self): pass def render(self): pass def draw(self): pass # Implementation of the base test class @classmethod def create_test_case(cls, name, description=None, decorators=None, **kwargs): def run_test(self): for name, value in kwargs.items(): setattr(self, name, value) self._test_main() run_test.__name__ = name if description: run_test.__doc__ = cleandoc(description) if decorators: for decorator in decorators: run_test = decorator(run_test) setattr(cls, name, run_test) def _test_main(self): assert self.question self.window = w = Window(**self._get_window_options()) try: w.push_handlers(self) self.render() w.set_visible() w.dispatch_events() self.user_verify(cleandoc(self.question), self.take_screenshot) finally: w.close() def _get_window_options(self): if self.window_options: options = self.window_options else: options = {} if not 'width' in options: options['width'] = self.window_size[0] if not 'height' in options: options['height'] = self.window_size[1] if not 'visible' in options: options['visible'] = False if not 'resizable' in options: options['resizable'] = True return options
unknown
codeparrot/codeparrot-clean
import unittest from faker import Faker from veritranspay.payment_types import CreditCard, Indomaret, VirtualAccountPermata, VirtualAccountBca, \ VirtualAccountBni, VirtualAccountMandiri, BriEpay, MandiriClickpay, CimbClicks, BCAKlikPay, KlikBCA, GoPay fake = Faker() class CreditCardTests(unittest.TestCase): def test_init(self): ''' Makes sure the expected attributes are required, and persisted as instance attributes. ''' bank = fake.word() token_id = fake.word() bins = fake.sentences() cc = CreditCard(bank=bank, token_id=token_id, bins=bins) self.assertEqual(cc.bank, bank) self.assertEqual(cc.token_id, token_id) self.assertEqual(cc.bins, bins) def test_serialization(self): bank = fake.word() token_id = fake.word() bins = fake.sentences() cc = CreditCard(bank=bank, token_id=token_id, bins=bins) serialized = cc.serialize() expected = { 'payment_type': CreditCard.PAYMENT_TYPE_KEY, CreditCard.PAYMENT_TYPE_KEY: { 'bank': bank, 'token_id': token_id, 'bins': bins } } self.assertEqual(serialized, expected) class CStoreIndomaretTests(unittest.TestCase): def test_init(self): message = fake.word() indomrt = Indomaret(message=message) self.assertEqual(indomrt.store, 'Indomaret') self.assertEqual(indomrt.message, message) def test_serialization(self): message = fake.word() indomrt = Indomaret(message=message) expected = { 'payment_type': 'cstore', 'cstore': { 'store': 'Indomaret', 'message': message } } self.assertDictEqual(expected, indomrt.serialize()) class VirtualAccountPermataTests(unittest.TestCase): def test_init(self): permata = VirtualAccountPermata() self.assertEqual(permata.bank, 'permata') def test_serialization(self): permata = VirtualAccountPermata() expected = { 'payment_type': 'bank_transfer', 'bank_transfer': { 'bank': 'permata' } } self.assertDictEqual(expected, permata.serialize()) class VirtualAccountBcaTests(unittest.TestCase): def test_init(self): bca = VirtualAccountBca() self.assertEqual(bca.bank, 'bca') def test_serialization(self): bca = 
VirtualAccountBca() expected = { 'payment_type': 'bank_transfer', 'bank_transfer': { 'bank': 'bca' } } self.assertDictEqual(expected, bca.serialize()) class VirtualAccountBniTests(unittest.TestCase): def test_init(self): bni = VirtualAccountBni() self.assertEqual(bni.bank, 'bni') def test_serialization(self): bni = VirtualAccountBni() expected = { 'payment_type': 'bank_transfer', 'bank_transfer': { 'bank': 'bni' } } self.assertDictEqual(expected, bni.serialize()) class VirtualAccountMandiriTests(unittest.TestCase): def test_init(self): mandiri = VirtualAccountMandiri(bill_info1='info1', bill_info2='info2') self.assertEqual(mandiri.bill_info1, 'info1') self.assertEqual(mandiri.bill_info2, 'info2') def test_serialization(self): mandiri = VirtualAccountMandiri(bill_info1='info1', bill_info2='info2') expected = { 'payment_type': 'echannel', 'echannel': { 'bill_info1': "info1", 'bill_info2': "info2", } } self.assertDictEqual(expected, mandiri.serialize()) class BriEpayTests(unittest.TestCase): def test_serialization(self): bri = BriEpay() expected = { 'payment_type': 'bri_epay', 'bri_epay': { } } self.assertDictEqual(expected, bri.serialize()) class BCAKliPayTest(unittest.TestCase): def test_serialization(self): bca_klikpay = BCAKlikPay(type_id=1, description="Pembelian Barang") expected = { 'payment_type': 'bca_klikpay', 'bca_klikpay': { 'type': 1, 'description': 'Pembelian Barang' } } self.assertDictEqual(expected, bca_klikpay.serialize()) class KlikBCATest(unittest.TestCase): def test_serialization(self): klik_bca = KlikBCA(user_id="midtrans1012", description="testing transaction") expected = { 'payment_type': 'bca_klikbca', 'bca_klikbca': { 'description': 'testing transaction', 'user_id': 'midtrans1012' } } self.assertDictEqual(expected, klik_bca.serialize()) class MandiriClickpayTest(unittest.TestCase): def test_serialization(self): data_init = { 'card_number': '4111111111111111', 'input1': '1111111111', 'input2': '145000', 'input3': '54321', 'token': '000000' } 
mandiri_clickpay = MandiriClickpay(**data_init) expected = { 'payment_type': 'mandiri_clickpay', 'mandiri_clickpay': { 'card_number': '4111111111111111', 'input1': '1111111111', 'input2': '145000', 'input3': '54321', 'token': '000000' } } self.assertDictEqual(expected, mandiri_clickpay.serialize()) class CimbClicksTest(unittest.TestCase): def test_serialization(self): cimb_clicks = CimbClicks(description='Purchase of a special event item') expected = { 'payment_type': 'cimb_clicks', 'cimb_clicks': { 'description': 'Purchase of a special event item' } } self.assertDictEqual(expected, cimb_clicks.serialize()) class GoPayTests(unittest.TestCase): def test_serialization(self): gopay = GoPay() expected = { 'payment_type': 'gopay', 'gopay': { } } self.assertDictEqual(expected, gopay.serialize())
unknown
codeparrot/codeparrot-clean
#include <gtest/gtest.h> #include <cstddef> #include <iterator> #include <unordered_set> #include <c10/core/DispatchKeySet.h> #include <c10/util/irange.h> using namespace c10; // This test exists not to be comprehensive, but to more clearly show // what the semantics of DispatchKeySet are. TEST(DispatchKeySet, ShowSemantics) { // the "CPU" dispatch key is an instance of a per-backend-functionality key. // It corresponds to "dense" functionality, "CPU" backend. // This means that it gets a dense functionality bit, and a cpu backend bit // set. auto dense_cpu_set = DispatchKeySet(DispatchKey::CPU); ASSERT_TRUE(dense_cpu_set.has(DispatchKey::Dense)); ASSERT_TRUE(dense_cpu_set.has_backend(BackendComponent::CPUBit)); ASSERT_TRUE(dense_cpu_set.has(DispatchKey::CPU)); auto dense_lazy_set = DispatchKeySet(DispatchKey::Lazy); ASSERT_TRUE(dense_lazy_set.has(DispatchKey::Dense)); ASSERT_TRUE(dense_lazy_set.has_backend(BackendComponent::LazyBit)); ASSERT_TRUE(dense_lazy_set.has(DispatchKey::Lazy)); // You can think of "Dense/Sparse", and "CPUBit/CUDABit", as "building block" // dispatch keys. You are allowed to directly create keysets out of them! 
auto dense_cpu_set_from_building_blocks = DispatchKeySet(DispatchKey::Dense) | DispatchKeySet(BackendComponent::CPUBit); ASSERT_TRUE(dense_cpu_set.has(DispatchKey::Dense)); ASSERT_TRUE(dense_cpu_set.has_backend(BackendComponent::CPUBit)); ASSERT_TRUE(dense_cpu_set.has(DispatchKey::CPU)); ASSERT_EQ(dense_cpu_set, dense_cpu_set_from_building_blocks); // Similarly, the AutogradCUDA key gets 2 bits in the keyset: // The "Autograd" functionality bit, and the "CUDA" backend bit auto autograd_cuda = DispatchKeySet(DispatchKey::AutogradCUDA); ASSERT_TRUE(autograd_cuda.has(DispatchKey::AutogradFunctionality)); ASSERT_TRUE(autograd_cuda.has_backend(BackendComponent::CUDABit)); // Because DispatchKeySet uses a condensed internal representation, you cannot // use it to represent the FULL cross product of backends and functionalities // for example: auto autograd_dense_cpu_cuda = DispatchKeySet( {DispatchKey::AutogradFunctionality, DispatchKey::Dense, DispatchKey::CUDA, DispatchKey::CPU}); // this keyset has all of the building block keys: ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::AutogradFunctionality)); ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::Dense)); ASSERT_TRUE(autograd_dense_cpu_cuda.has_backend(BackendComponent::CUDABit)); ASSERT_TRUE(autograd_dense_cpu_cuda.has_backend(BackendComponent::CPUBit)); // and it also has the "runtime" keys that correspond to the full // cross-product of functionality ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::AutogradCPU)); ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::AutogradCPU)); ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::CPU)); ASSERT_TRUE(autograd_dense_cpu_cuda.has(DispatchKey::CUDA)); // This means that there's no way to represent a keyset with, say, only // Autograd CUDA + Dense CPU. Instead, you should think of a keyset as // inheriting the full set of functionalities + backends of its keys. This // means that the below keysets are all indistinguishable from each other. 
ASSERT_EQ( autograd_dense_cpu_cuda, DispatchKeySet( {DispatchKey::AutogradCUDA, DispatchKey::AutogradCPU, DispatchKey::CUDA, DispatchKey::CPU})); ASSERT_EQ( autograd_dense_cpu_cuda, DispatchKeySet({DispatchKey::AutogradCUDA, DispatchKey::CPU})); ASSERT_EQ( autograd_dense_cpu_cuda, DispatchKeySet({DispatchKey::CUDA, DispatchKey::AutogradCPU})); // ~~~~~~~~~~ DispatchKeySet iterators ~~~~~~~~~~~ // Iterators allow you to iterate individually through the DispatchKey's in a // DispatchKeySet auto empty_set = DispatchKeySet(); ASSERT_EQ(*empty_set.begin(), *empty_set.end()); // However, only keys that correspond to actual runtime indices of kernels in // the operator table show up when you iterate through a keyset. i.e. // DispatchKey::Dense, and BackendComponent::CPUBit won't show up in an // iterator. auto dense_cpu_iter = dense_cpu_set.begin(); ASSERT_EQ(*dense_cpu_iter++, DispatchKey::CPU); ASSERT_EQ(*dense_cpu_iter, *dense_cpu_set.end()); auto autograd_dense_cpu_cuda_iter = autograd_dense_cpu_cuda.begin(); ASSERT_EQ(*autograd_dense_cpu_cuda_iter++, DispatchKey::CPU); ASSERT_EQ(*autograd_dense_cpu_cuda_iter++, DispatchKey::CUDA); ASSERT_EQ(*autograd_dense_cpu_cuda_iter++, DispatchKey::AutogradCPU); ASSERT_EQ(*autograd_dense_cpu_cuda_iter++, DispatchKey::AutogradCUDA); ASSERT_EQ(*autograd_dense_cpu_cuda_iter, *autograd_dense_cpu_cuda.end()); // But other "functionality bits" that are not defined per-backend DO get // their own slots in the operator table. 
auto mixed_keyset = DispatchKeySet(BackendComponent::CPUBit) | DispatchKeySet( {DispatchKey::FPGA, // runtime key DispatchKey::Functionalize, // runtime key DispatchKey::Dense}); // NOT a runtime key auto mixed_iter = mixed_keyset.begin(); ASSERT_EQ(*mixed_iter++, DispatchKey::CPU); ASSERT_EQ(*mixed_iter++, DispatchKey::FPGA); ASSERT_EQ(*mixed_iter++, DispatchKey::Functionalize); ASSERT_EQ(*mixed_iter, *mixed_keyset.end()); } TEST(DispatchKeySet, Empty) { DispatchKeySet empty_set; for (uint8_t i = 0; i <= static_cast<uint8_t>(DispatchKey::EndOfRuntimeBackendKeys); i++) { auto tid = static_cast<DispatchKey>(i); if (tid == DispatchKey::Undefined) continue; ASSERT_FALSE(empty_set.has(tid)); } ASSERT_TRUE(empty_set.empty()); DispatchKeySet empty_set2; ASSERT_TRUE(empty_set == empty_set2); } // This covers all keys that correspond to a single backend bit, e.g. // BackendComponent::CPUBit. Even though these are NOT runtime keys, we still // allow adding them directly to a keyset TEST(DispatchKeySet, SingletonBackendComponent) { for (const auto i : c10::irange(1, num_backends)) { auto tid = static_cast<DispatchKey>(i); DispatchKeySet sing(tid); ASSERT_EQ(sing, sing); ASSERT_EQ(sing, DispatchKeySet().add(tid)); ASSERT_EQ(sing, sing.add(tid)); ASSERT_EQ(sing, sing | sing); ASSERT_FALSE(sing.empty()); ASSERT_TRUE(sing.has(tid)); } } // This covers all keys that correspond to a single functionality bit: // - runtime, not-per-backend functionality keys, e.g. // DispatchKey::FuncTorchBatched // - runtime, "fake backend" keys, e.g. DispatchKey::FPGA // - NOT-runtime, per-backend functionality keys, e.g. DispatchKey::Dense // Even though it's not a runtime key, we still allow adding it directly to a // keyset. 
// DispatchKey:: TEST(DispatchKeySet, SingletonFunctionalityKeys) { for (const auto i : c10::irange(1, num_functionality_keys)) { auto tid = static_cast<DispatchKey>(i); DispatchKeySet sing(tid); ASSERT_EQ(sing, sing); ASSERT_EQ(sing, DispatchKeySet().add(tid)); ASSERT_EQ(sing, sing.add(tid)); ASSERT_EQ(sing, sing | sing); ASSERT_FALSE(sing.empty()); ASSERT_TRUE(sing.has(tid)); ASSERT_EQ(sing.remove(tid), DispatchKeySet()); } } // This covers runtime keys that are per-backend, // and take up more than one bit in a DispatchKeySet. They take up one // functionality bit + one backend bit. e.g. CPU, CUDA, SparseCPU, SparseCUDA, // AutogradCPU, AutogradCUDA TEST(DispatchKeySet, SingletonPerBackendFunctionalityKeys) { for (uint8_t i = static_cast<uint8_t>(DispatchKey::StartOfDenseBackends); i <= static_cast<uint8_t>(DispatchKey::EndOfRuntimeBackendKeys); i++) { auto tid = static_cast<DispatchKey>(i); // Skip these because they aren't real keys. if (tid == DispatchKey::StartOfDenseBackends || tid == DispatchKey::StartOfSparseBackends || tid == DispatchKey::StartOfQuantizedBackends || tid == DispatchKey::StartOfAutogradFunctionalityBackends) { continue; } DispatchKeySet sing(tid); ASSERT_EQ(sing, sing); ASSERT_EQ(sing, DispatchKeySet().add(tid)); ASSERT_EQ(sing, sing.add(tid)); ASSERT_EQ(sing, sing | sing); ASSERT_FALSE(sing.empty()); ASSERT_TRUE(sing.has(tid)); auto functionality_key = toFunctionalityKey(tid); auto backend_key = toBackendComponent(tid); // These two sets should be equivalent: // DispatchKeySet(DispatchKey::CPU) // DispatchKeySet({DispatchKey::Dense, BackendComponent::CPUBit}) auto expected_ks = DispatchKeySet(functionality_key) | DispatchKeySet(backend_key); ASSERT_EQ(sing, expected_ks); // These two sets should be equivalent: // DispatchKeySet(DispatchKey::CPU).remove(DispatchKey::Dense) // DispatchKeySet(BackendComponent::CPUBit) expected_ks = DispatchKeySet(toBackendComponent(tid)); ASSERT_EQ(sing.remove(tid), expected_ks); } } TEST(DispatchKeySet, 
DoubletonPerBackend) { for (uint8_t i = static_cast<uint8_t>(DispatchKey::StartOfDenseBackends); i <= static_cast<uint8_t>(DispatchKey::EndOfRuntimeBackendKeys); i++) { for (uint8_t j = i + 1; j <= static_cast<uint8_t>(DispatchKey::EndOfRuntimeBackendKeys); j++) { ASSERT_LT(i, j); auto tid1 = static_cast<DispatchKey>(i); auto tid2 = static_cast<DispatchKey>(j); // Skip these because they aren't real keys. if (tid1 == DispatchKey::StartOfDenseBackends || tid1 == DispatchKey::StartOfSparseBackends || tid1 == DispatchKey::StartOfSparseCsrBackends || tid1 == DispatchKey::StartOfQuantizedBackends || tid1 == DispatchKey::StartOfNestedTensorBackends || tid1 == DispatchKey::StartOfAutogradFunctionalityBackends) continue; if (tid2 == DispatchKey::StartOfDenseBackends || tid2 == DispatchKey::StartOfSparseBackends || tid2 == DispatchKey::StartOfSparseCsrBackends || tid2 == DispatchKey::StartOfQuantizedBackends || tid2 == DispatchKey::StartOfNestedTensorBackends || tid2 == DispatchKey::StartOfAutogradFunctionalityBackends) continue; auto backend1 = toBackendComponent(tid1); auto backend2 = toBackendComponent(tid2); auto functionality1 = toFunctionalityKey(tid1); auto functionality2 = toFunctionalityKey(tid2); auto combined = DispatchKeySet({tid1, tid2}); // The combined set has the backend bits ASSERT_TRUE(combined.has_backend(backend1)); ASSERT_TRUE(combined.has_backend(backend2)); // and it has the backend bits ASSERT_TRUE(combined.has(functionality1)); ASSERT_TRUE(combined.has(functionality2)); // and it has the original two runtime keys ASSERT_TRUE(combined.has(tid1)); ASSERT_TRUE(combined.has(tid2)); // Add all of the keys in the keyset to a real set std::unordered_set<DispatchKey> visited_keys; auto iter = combined.begin(); while (*iter != *combined.end()) { visited_keys.insert(*iter); ++iter; } std::unordered_set<DispatchKey> expected_keys; expected_keys.insert( toRuntimePerBackendFunctionalityKey(functionality1, backend1)); expected_keys.insert( 
toRuntimePerBackendFunctionalityKey(functionality1, backend2)); expected_keys.insert( toRuntimePerBackendFunctionalityKey(functionality2, backend1)); expected_keys.insert( toRuntimePerBackendFunctionalityKey(functionality2, backend2)); ASSERT_EQ(expected_keys, visited_keys); if (backend1 == backend2 || functionality1 == functionality2) { // We have two runtime keys, with either the same backend or the same // per-backend functionalities. E.g. {AutogradCUDA, CUDA} or // {AutogradCPU, AutogradCUDA} There should be 2 total runtime keys in // this set. ASSERT_EQ(2, visited_keys.size()); } else { // since i and j are different keys, they should not have the same // functionality and backend ASSERT_TRUE(backend1 != backend2 && functionality1 != functionality2); // We have two runtime keys, that have different backends + per-backend // functionalities. So we should expect the full cross product of // runtime keys to be in the set. e.g. if i = AutogradCUDA, and j = CPU, // then combined = {AutogradCUDA, AutogradCPU, CUDA, CPU} ASSERT_EQ(4, visited_keys.size()); } } } } TEST(DispatchKeySet, Full) { DispatchKeySet full(DispatchKeySet::FULL); for (const auto i : c10::irange(1, num_functionality_keys)) { auto tid = static_cast<DispatchKey>(i); ASSERT_TRUE(full.has(tid)); } ASSERT_FALSE(full.has(DispatchKey::EndOfFunctionalityKeys)); } TEST(DispatchKeySet, IteratorBasicOps) { DispatchKeySet empty_set; DispatchKeySet full_set(DispatchKeySet::FULL); DispatchKeySet mutated_set = empty_set.add(DispatchKey::CPU); // Constructor + Comparison ASSERT_EQ(*empty_set.begin(), DispatchKey::EndOfFunctionalityKeys); ASSERT_EQ(*empty_set.end(), DispatchKey::EndOfFunctionalityKeys); ASSERT_EQ(*mutated_set.begin(), DispatchKey::CPU); ASSERT_TRUE(empty_set.begin() == empty_set.end()); ASSERT_TRUE(full_set.begin() != full_set.end()); // Increment Ops // NOLINTNEXTLINE(bugprone-inc-dec-in-conditions) ASSERT_TRUE(full_set.begin() == full_set.begin()++); // 
NOLINTNEXTLINE(bugprone-inc-dec-in-conditions) ASSERT_TRUE(full_set.begin() != ++full_set.begin()); } TEST(DispatchKeySet, getHighestPriorityBackendTypeId) { // AutogradCPU isn't a backend key so it is ignored DispatchKeySet dense_cpu({DispatchKey::AutogradCPU, DispatchKey::CPU}); ASSERT_EQ(DispatchKey::CPU, c10::highestPriorityBackendTypeId(dense_cpu)); // Functionalize isn't a backend key so it is ignored DispatchKeySet sparse_cuda( {DispatchKey::Functionalize, DispatchKey::SparseCUDA}); ASSERT_EQ( DispatchKey::SparseCUDA, c10::highestPriorityBackendTypeId(sparse_cuda)); DispatchKeySet sparse_compressed_cuda( {DispatchKey::Functionalize, DispatchKey::SparseCsrCUDA}); ASSERT_EQ( DispatchKey::SparseCsrCUDA, c10::highestPriorityBackendTypeId(sparse_compressed_cuda)); // quantizedCUDA has higher priority than CUDA DispatchKeySet quantized_cuda( {DispatchKey::CUDA, DispatchKey::QuantizedCUDA}); ASSERT_EQ( DispatchKey::QuantizedCUDA, c10::highestPriorityBackendTypeId(quantized_cuda)); } TEST(DispatchKeySet, IteratorEmpty) { DispatchKeySet empty_set; uint8_t i = 0; for (auto it = empty_set.begin(); it != empty_set.end(); ++it) { i++; } ASSERT_EQ(i, 0); } TEST(DispatchKeySet, IteratorCrossProduct) { // The iterator should return all runtime keys in the set, // including the cross product of {backends} x {functionalities} auto ks = DispatchKeySet({BackendComponent::CPUBit, BackendComponent::CUDABit}) | DispatchKeySet( {DispatchKey::Dense, DispatchKey::FPGA, DispatchKey::AutogradFunctionality}); auto iter = ks.begin(); // iterate through dense backends first. ASSERT_EQ(DispatchKey::CPU, *(iter++)); ASSERT_EQ(DispatchKey::CUDA, *(iter++)); // FPGA doesn't have a backend bit, so it isn't included in the cross product. ASSERT_EQ(DispatchKey::FPGA, *(iter++)); // iterate through the autograd keys laster. 
ASSERT_EQ(DispatchKey::AutogradCPU, *(iter++)); ASSERT_EQ(DispatchKey::AutogradCUDA, *(iter++)); } TEST(DispatchKeySet, IteratorFull) { DispatchKeySet full_set(DispatchKeySet::FULL); std::ptrdiff_t count = std::distance(full_set.begin(), full_set.end()); // Total # of runtime entries includes an entry for DispatchKey::Undefined, // which is not included when iterating through the DispatchKeySet. ASSERT_EQ(count, std::ptrdiff_t{num_runtime_entries} - 1); } TEST(DispatchKeySet, FailAtEndIterator) { DispatchKeySet full_set(DispatchKeySet::FULL); uint64_t raw_repr = full_set.raw_repr(); // doesn't throw DispatchKeySet::iterator(&raw_repr, num_backends + num_functionality_keys); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) EXPECT_THROW( DispatchKeySet::iterator( &raw_repr, num_backends + num_functionality_keys + 1), c10::Error); } TEST(DispatchKeySet, TestBackendComponentToString) { std::unordered_set<std::string> seen_strings; for (int64_t i = 0; i <= static_cast<int64_t>(BackendComponent::EndOfBackendKeys); i++) { auto k = static_cast<BackendComponent>(i); auto res = std::string(toString(k)); ASSERT_FALSE(res == "UNKNOWN_BACKEND_BIT"); ASSERT_FALSE(seen_strings.count(res) > 0); seen_strings.insert(res); } } TEST(DispatchKeySet, TestEndOfRuntimeBackendKeysAccurate) { DispatchKey k = DispatchKey::Undefined; #define SETTER(fullname, prefix) k = DispatchKey::EndOf##fullname##Backends; C10_FORALL_FUNCTIONALITY_KEYS(SETTER) #undef SETTER ASSERT_TRUE(k == DispatchKey::EndOfRuntimeBackendKeys); } TEST(DispatchKeySet, TestFunctionalityDispatchKeyToString) { std::unordered_set<std::string> seen_strings; for (int i = 0; i <= static_cast<int>(DispatchKey::EndOfAliasKeys); i++) { auto k = static_cast<DispatchKey>(i); // These synthetic keys never actually get used and don't need // to be printed if (k == DispatchKey::EndOfFunctionalityKeys || k == DispatchKey::StartOfDenseBackends || k == DispatchKey::StartOfQuantizedBackends || k == 
DispatchKey::StartOfSparseBackends || k == DispatchKey::StartOfSparseCsrBackends || k == DispatchKey::StartOfNestedTensorBackends || k == DispatchKey::StartOfAutogradFunctionalityBackends) continue; auto res = std::string(toString(k)); if (i > 0) { ASSERT_TRUE(res.find("Unknown") == std::string::npos) << i << " (before is " << toString(static_cast<DispatchKey>(i - 1)) << ')'; } else { ASSERT_TRUE(res.find("Unknown") == std::string::npos) << i; } ASSERT_TRUE(seen_strings.count(res) == 0); seen_strings.insert(res); } } TEST(DispatchKeySet, TestGetRuntimeDispatchKeySet) { // Check if getRuntimeDispatchKeySet and runtimeDispatchKeySetHas agree. for (auto dk1 : DispatchKeySet(DispatchKeySet::FULL)) { auto dks = getRuntimeDispatchKeySet(dk1); for (auto dk2 : DispatchKeySet(DispatchKeySet::FULL)) { ASSERT_EQ(dks.has(dk2), runtimeDispatchKeySetHas(dk1, dk2)); } } }
cpp
github
https://github.com/pytorch/pytorch
c10/test/core/DispatchKeySet_test.cpp
/* * Copyright 2002-present the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.context.annotation /** * Extension for [AnnotationConfigApplicationContext] allowing * `AnnotationConfigApplicationContext { ... }` style initialization. * * @author Sebastien Deleuze * @since 5.0 */ @Deprecated("Use regular apply method instead.", replaceWith = ReplaceWith("AnnotationConfigApplicationContext().apply(configure)")) fun AnnotationConfigApplicationContext(configure: AnnotationConfigApplicationContext.() -> Unit) = AnnotationConfigApplicationContext().apply(configure)
kotlin
github
https://github.com/spring-projects/spring-framework
spring-context/src/main/kotlin/org/springframework/context/annotation/AnnotationConfigApplicationContextExtensions.kt
import pytest from api.base.settings.defaults import API_BASE from osf_tests.factories import ( AuthUserFactory, PreprintProviderFactory, ) from osf.utils import permissions @pytest.mark.enable_quickfiles_creation class ProviderModeratorDetailTestClass: @pytest.fixture() def admin(self, provider): user = AuthUserFactory() provider.get_group(permissions.ADMIN).user_set.add(user) return user @pytest.fixture() def moderator(self, provider): user = AuthUserFactory() provider.get_group('moderator').user_set.add(user) return user @pytest.fixture() def nonmoderator(self): return AuthUserFactory() def update_payload(self, user_id, permission_group, full_name=None): data = { 'data': { 'attributes': { 'permission_group': permission_group, }, 'type': 'moderators', 'id': user_id } } if full_name: data['data']['attributes']['full_name'] = full_name return data def test_detail_not_authorized(self, app, url, nonmoderator, moderator, admin, provider): # Must be logged in res = app.get(url.format(admin._id), expect_errors=True) assert res.status_code == 401 # Must be mod to get res = app.get(url.format(admin._id), auth=nonmoderator.auth, expect_errors=True) assert res.status_code == 403 # Must be admin to edit res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN), auth=nonmoderator.auth, expect_errors=True) assert res.status_code == 403 # Must be logged in res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN), expect_errors=True) assert res.status_code == 401 # Must be admin to edit res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN), auth=moderator.auth, expect_errors=True) assert res.status_code == 403 def test_detail_successful_gets(self, app, url, moderator, admin, provider): res = app.get(url.format(moderator._id), auth=moderator.auth) assert 
res.status_code == 200 assert res.json['data']['id'] == moderator._id assert res.json['data']['attributes']['permission_group'] == 'moderator' res = app.get(url.format(admin._id), auth=moderator.auth) assert res.status_code == 200 assert res.json['data']['id'] == admin._id assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN res = app.get(url.format(moderator._id), auth=admin.auth) assert res.status_code == 200 assert res.json['data']['id'] == moderator._id assert res.json['data']['attributes']['permission_group'] == 'moderator' res = app.get(url.format(admin._id), auth=admin.auth) assert res.status_code == 200 assert res.json['data']['id'] == admin._id assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN def test_detail_updates(self, app, url, nonmoderator, moderator, admin, provider): # Admin makes moderator a new admin res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN), auth=admin.auth) assert res.status_code == 200 assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN # Admin makes new admin a moderator again res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group='moderator'), auth=admin.auth) assert res.status_code == 200 assert res.json['data']['attributes']['permission_group'] == 'moderator' # Admin makes mod a mod -- No changes res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group='moderator'), auth=admin.auth) assert res.status_code == 200 assert res.json['data']['attributes']['permission_group'] == 'moderator' # Mod has no perm, even though request would make no changes res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group='moderator'), auth=moderator.auth, expect_errors=True) assert res.status_code == 403 # Admin can't patch 
non-mod res = app.patch_json_api(url.format(nonmoderator._id), self.update_payload(user_id=nonmoderator._id, permission_group='moderator'), auth=admin.auth, expect_errors=True) assert res.status_code == 404 def test_detail_cannot_remove_last_admin(self, app, url, admin, provider): res = app.patch_json_api(url.format(admin._id), self.update_payload(user_id=admin._id, permission_group='moderator'), auth=admin.auth, expect_errors=True) assert res.status_code == 400 assert 'last admin' in res.json['errors'][0]['detail'] res = app.delete_json_api(url.format(admin._id), auth=admin.auth, expect_errors=True) assert res.status_code == 400 assert 'last admin' in res.json['errors'][0]['detail'] def test_moderator_deletes(self, app, url, moderator, admin, provider): res = app.delete_json_api(url.format(admin._id), auth=moderator.auth, expect_errors=True) assert res.status_code == 403 res = app.delete_json_api(url.format(moderator._id), auth=moderator.auth) assert res.status_code in [200, 204] if res.status_code == 200: assert 'meta' in res.json else: assert not res.body def test_admin_delete_moderator(self, app, url, moderator, admin, provider): res = app.delete_json_api(url.format(moderator._id), auth=admin.auth) assert res.status_code in [200, 204] if res.status_code == 200: assert 'meta' in res.json else: assert not res.body def test_admin_delete_admin(self, app, url, moderator, admin, provider): # Make mod an admin res = app.patch_json_api(url.format(moderator._id), self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN), auth=admin.auth) assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN # Sanity check # Admin delete admin res = app.delete_json_api(url.format(moderator._id), auth=admin.auth) assert res.status_code in [200, 204] if res.status_code == 200: assert 'meta' in res.json else: assert not res.body @pytest.mark.django_db class TestPreprintProviderModeratorDetail(ProviderModeratorDetailTestClass): @pytest.fixture() 
def provider(self): pp = PreprintProviderFactory(name='ModArxiv') pp.update_group_permissions() return pp @pytest.fixture(params=['/{}preprint_providers/{}/moderators/{{}}/', '/{}providers/preprints/{}/moderators/{{}}/']) def url(self, provider, request): url = request.param return url.format(API_BASE, provider._id)
unknown
codeparrot/codeparrot-clean
# Copyright 2012,2014 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Class for PXE bare-metal nodes. """ import datetime import os import jinja2 from oslo.config import cfg from oslo.db import exception as db_exc from nova.compute import flavors from nova import exception from nova.i18n import _ from nova import objects from nova.openstack.common import fileutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import timeutils from nova import utils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base from nova.virt.baremetal import db from nova.virt.baremetal import utils as bm_utils pxe_opts = [ cfg.StrOpt('deploy_kernel', help='Default kernel image ID used in deployment phase'), cfg.StrOpt('deploy_ramdisk', help='Default ramdisk image ID used in deployment phase'), cfg.StrOpt('net_config_template', default='$pybasedir/nova/virt/baremetal/' 'net-dhcp.ubuntu.template', help='Template file for injected network config'), cfg.StrOpt('pxe_append_params', default='nofb nomodeset vga=normal', help='Additional append parameters for baremetal PXE boot'), cfg.StrOpt('pxe_config_template', default='$pybasedir/nova/virt/baremetal/pxe_config.template', help='Template file for PXE configuration'), cfg.BoolOpt('use_file_injection', help='If True, enable file injection for network 
info, ' 'files and admin password', default=False), cfg.IntOpt('pxe_deploy_timeout', help='Timeout for PXE deployments. Default: 0 (unlimited)', default=0), cfg.BoolOpt('pxe_network_config', help='If set, pass the network configuration details to the ' 'initramfs via cmdline.', default=False), cfg.StrOpt('pxe_bootfile_name', help='This gets passed to Neutron as the bootfile dhcp ' 'parameter.', default='pxelinux.0'), ] LOG = logging.getLogger(__name__) baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(pxe_opts, baremetal_group) CONF.import_opt('use_ipv6', 'nova.netconf') def build_pxe_network_config(network_info): interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) template = None if not CONF.use_ipv6: template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off" else: template = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:" "[%(netmask_v6)s]::%(name)s:off") net_config = [template % iface for iface in interfaces] return ' '.join(net_config) def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn, deployment_aki_path, deployment_ari_path, aki_path, ari_path, network_info): """Build the PXE config file for a node This method builds the PXE boot configuration file for a node, given all the required parameters. The resulting file has both a "deploy" and "boot" label, which correspond to the two phases of booting. This may be extended later. 
""" LOG.debug("Building PXE config for deployment %s.", deployment_id) network_config = None if network_info and CONF.baremetal.pxe_network_config: network_config = build_pxe_network_config(network_info) pxe_options = { 'deployment_id': deployment_id, 'deployment_key': deployment_key, 'deployment_iscsi_iqn': deployment_iscsi_iqn, 'deployment_aki_path': deployment_aki_path, 'deployment_ari_path': deployment_ari_path, 'aki_path': aki_path, 'ari_path': ari_path, 'pxe_append_params': CONF.baremetal.pxe_append_params, 'pxe_network_config': network_config, } tmpl_path, tmpl_file = os.path.split(CONF.baremetal.pxe_config_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) return template.render({'pxe_options': pxe_options, 'ROOT': '${ROOT}'}) def build_network_config(network_info): interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) tmpl_path, tmpl_file = os.path.split(CONF.baremetal.net_config_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) return template.render({'interfaces': interfaces, 'use_ipv6': CONF.use_ipv6}) def get_deploy_aki_id(flavor): return flavor.get('extra_specs', {}).\ get('baremetal:deploy_kernel_id', CONF.baremetal.deploy_kernel) def get_deploy_ari_id(flavor): return flavor.get('extra_specs', {}).\ get('baremetal:deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk) def get_image_dir_path(instance): """Generate the dir for an instances disk.""" return os.path.join(CONF.instances_path, instance['name']) def get_image_file_path(instance): """Generate the full path for an instances disk.""" return os.path.join(CONF.instances_path, instance['name'], 'disk') def get_pxe_config_file_path(instance): """Generate the path for an instances PXE config file.""" return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config') def get_partition_sizes(instance): flavor = flavors.extract_flavor(instance) 
root_mb = flavor['root_gb'] * 1024 swap_mb = flavor['swap'] ephemeral_mb = flavor['ephemeral_gb'] * 1024 # NOTE(deva): For simpler code paths on the deployment side, # we always create a swap partition. If the flavor # does not specify any swap, we default to 1MB if swap_mb < 1: swap_mb = 1 return (root_mb, swap_mb, ephemeral_mb) def get_pxe_mac_path(mac): """Convert a MAC address into a PXE config file name.""" return os.path.join( CONF.baremetal.tftp_root, 'pxelinux.cfg', "01-" + mac.replace(":", "-").lower() ) def get_tftp_image_info(instance, flavor): """Generate the paths for tftp files for this instance Raises NovaException if - instance does not contain kernel_id or ramdisk_id - deploy_kernel_id or deploy_ramdisk_id can not be read from flavor['extra_specs'] and defaults are not set """ image_info = { 'kernel': [None, None], 'ramdisk': [None, None], 'deploy_kernel': [None, None], 'deploy_ramdisk': [None, None], } try: image_info['kernel'][0] = str(instance['kernel_id']) image_info['ramdisk'][0] = str(instance['ramdisk_id']) image_info['deploy_kernel'][0] = get_deploy_aki_id(flavor) image_info['deploy_ramdisk'][0] = get_deploy_ari_id(flavor) except KeyError: pass missing_labels = [] for label in image_info.keys(): (uuid, path) = image_info[label] if not uuid: missing_labels.append(label) else: image_info[label][1] = os.path.join(CONF.baremetal.tftp_root, instance['uuid'], label) if missing_labels: raise exception.NovaException(_( "Can not activate PXE bootloader. 
The following boot parameters " "were not passed to baremetal driver: %s") % missing_labels) return image_info class PXE(base.NodeDriver): """PXE bare metal driver.""" def __init__(self, virtapi): super(PXE, self).__init__(virtapi) def _collect_mac_addresses(self, context, node): macs = set() for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']): if nic['address']: macs.add(nic['address']) return sorted(macs) def _cache_tftp_images(self, context, instance, image_info): """Fetch the necessary kernels and ramdisks for the instance.""" fileutils.ensure_tree( os.path.join(CONF.baremetal.tftp_root, instance['uuid'])) LOG.debug("Fetching kernel and ramdisk for instance %s", instance['name']) for label in image_info.keys(): (uuid, path) = image_info[label] bm_utils.cache_image( context=context, target=path, image_id=uuid, user_id=instance['user_id'], project_id=instance['project_id'], ) def _cache_image(self, context, instance, image_meta): """Fetch the instance's image from Glance This method pulls the relevant AMI and associated kernel and ramdisk, and the deploy kernel and ramdisk from Glance, and writes them to the appropriate places on local disk. Both sets of kernel and ramdisk are needed for PXE booting, so these are stored under CONF.baremetal.tftp_root. At present, the AMI is cached and certain files are injected. Debian/ubuntu-specific assumptions are made regarding the injected files. In a future revision, this functionality will be replaced by a more scalable and os-agnostic approach: the deployment ramdisk will fetch from Glance directly, and write its own last-mile configuration. 
""" fileutils.ensure_tree(get_image_dir_path(instance)) image_path = get_image_file_path(instance) LOG.debug("Fetching image %(ami)s for instance %(name)s", {'ami': image_meta['id'], 'name': instance['name']}) bm_utils.cache_image(context=context, target=image_path, image_id=image_meta['id'], user_id=instance['user_id'], project_id=instance['project_id'], clean=True, ) return [image_meta['id'], image_path] def _inject_into_image(self, context, node, instance, network_info, injected_files=None, admin_password=None): """Inject last-mile configuration into instances image Much of this method is a hack around DHCP and cloud-init not working together with baremetal provisioning yet. """ # NOTE(deva): We assume that if we're not using a kernel, # then the target partition is the first partition partition = None if not instance['kernel_id']: partition = "1" ssh_key = None if 'key_data' in instance and instance['key_data']: ssh_key = str(instance['key_data']) if injected_files is None: injected_files = [] else: # NOTE(deva): copy so we don't modify the original injected_files = list(injected_files) net_config = build_network_config(network_info) if instance['hostname']: injected_files.append(('/etc/hostname', instance['hostname'])) LOG.debug("Injecting files into image for instance %(name)s", {'name': instance['name']}) bm_utils.inject_into_image( image=get_image_file_path(instance), key=ssh_key, net=net_config, metadata=utils.instance_meta(instance), admin_password=admin_password, files=injected_files, partition=partition, ) def cache_images(self, context, node, instance, admin_password, image_meta, injected_files, network_info): """Prepare all the images for this instance.""" flavor = objects.Flavor.get_by_id(context, instance['instance_type_id']) tftp_image_info = get_tftp_image_info(instance, flavor) self._cache_tftp_images(context, instance, tftp_image_info) self._cache_image(context, instance, image_meta) if CONF.baremetal.use_file_injection: 
self._inject_into_image(context, node, instance, network_info, injected_files, admin_password) def destroy_images(self, context, node, instance): """Delete instance's image file.""" bm_utils.unlink_without_raise(get_image_file_path(instance)) bm_utils.rmtree_without_raise(get_image_dir_path(instance)) def dhcp_options_for_instance(self, instance): return [{'opt_name': 'bootfile-name', 'opt_value': CONF.baremetal.pxe_bootfile_name}, {'opt_name': 'server-ip-address', 'opt_value': CONF.my_ip}, {'opt_name': 'tftp-server', 'opt_value': CONF.my_ip} ] def activate_bootloader(self, context, node, instance, network_info): """Configure PXE boot loader for an instance Kernel and ramdisk images are downloaded by cache_tftp_images, and stored in /tftpboot/{uuid}/ This method writes the instances config file, and then creates symlinks for each MAC address in the instance. By default, the complete layout looks like this: /tftpboot/ ./{uuid}/ kernel ramdisk deploy_kernel deploy_ramdisk config ./pxelinux.cfg/ {mac} -> ../{uuid}/config """ flavor = objects.Flavor.get_by_id(context, instance['instance_type_id']) image_info = get_tftp_image_info(instance, flavor) (root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance) pxe_config_file_path = get_pxe_config_file_path(instance) image_file_path = get_image_file_path(instance) deployment_key = bm_utils.random_alnum(32) deployment_iscsi_iqn = "iqn-%s" % instance['uuid'] db.bm_node_update(context, node['id'], {'deploy_key': deployment_key, 'image_path': image_file_path, 'pxe_config_path': pxe_config_file_path, 'root_mb': root_mb, 'swap_mb': swap_mb, 'ephemeral_mb': ephemeral_mb}) pxe_config = build_pxe_config( node['id'], deployment_key, deployment_iscsi_iqn, image_info['deploy_kernel'][1], image_info['deploy_ramdisk'][1], image_info['kernel'][1], image_info['ramdisk'][1], network_info, ) bm_utils.write_to_file(pxe_config_file_path, pxe_config) macs = self._collect_mac_addresses(context, node) for mac in macs: mac_path = 
get_pxe_mac_path(mac) bm_utils.unlink_without_raise(mac_path) bm_utils.create_link_without_raise(pxe_config_file_path, mac_path) def deactivate_bootloader(self, context, node, instance): """Delete PXE bootloader images and config.""" try: db.bm_node_update(context, node['id'], {'deploy_key': None, 'image_path': None, 'pxe_config_path': None, 'root_mb': 0, 'swap_mb': 0}) except exception.NodeNotFound: pass # NOTE(danms): the flavor extra_specs do not need to be # present/correct at deactivate time, so pass something empty # to avoid an extra lookup flavor = dict(extra_specs={ 'baremetal:deploy_ramdisk_id': 'ignore', 'baremetal:deploy_kernel_id': 'ignore'}) try: image_info = get_tftp_image_info(instance, flavor) except exception.NovaException: pass else: for label in image_info.keys(): (uuid, path) = image_info[label] bm_utils.unlink_without_raise(path) bm_utils.unlink_without_raise(get_pxe_config_file_path(instance)) try: macs = self._collect_mac_addresses(context, node) except db_exc.DBError: pass else: for mac in macs: bm_utils.unlink_without_raise(get_pxe_mac_path(mac)) bm_utils.rmtree_without_raise( os.path.join(CONF.baremetal.tftp_root, instance['uuid'])) def activate_node(self, context, node, instance): """Wait for PXE deployment to complete.""" locals = {'error': '', 'started': False} def _wait_for_deploy(): """Called at an interval until the deployment completes.""" try: row = db.bm_node_get(context, node['id']) if instance['uuid'] != row.get('instance_uuid'): locals['error'] = _("Node associated with another instance" " while waiting for deploy of %s") raise loopingcall.LoopingCallDone() status = row.get('task_state') if (status == baremetal_states.DEPLOYING and locals['started'] is False): LOG.info(_("PXE deploy started for instance %s") % instance['uuid']) locals['started'] = True elif status in (baremetal_states.DEPLOYDONE, baremetal_states.ACTIVE): LOG.info(_("PXE deploy completed for instance %s") % instance['uuid']) raise loopingcall.LoopingCallDone() 
elif status == baremetal_states.DEPLOYFAIL: locals['error'] = _("PXE deploy failed for instance %s") except exception.NodeNotFound: locals['error'] = _("Baremetal node deleted while waiting " "for deployment of instance %s") if (CONF.baremetal.pxe_deploy_timeout and timeutils.utcnow() > expiration): locals['error'] = _("Timeout reached while waiting for " "PXE deploy of instance %s") if locals['error']: raise loopingcall.LoopingCallDone() expiration = timeutils.utcnow() + datetime.timedelta( seconds=CONF.baremetal.pxe_deploy_timeout) timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy) timer.start(interval=1).wait() if locals['error']: raise exception.InstanceDeployFailure( locals['error'] % instance['uuid']) def deactivate_node(self, context, node, instance): pass
unknown
codeparrot/codeparrot-clean
from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models from django.db.models.deletion import ProtectedError __all__ = ('Link', 'Place', 'Restaurant', 'Person', 'Address', 'CharLink', 'TextLink', 'OddRelation1', 'OddRelation2', 'Contact', 'Organization', 'Note', 'Company') class Link(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() def __str__(self): return "Link to %s id=%s" % (self.content_type, self.object_id) class Place(models.Model): name = models.CharField(max_length=100) links = GenericRelation(Link) def __str__(self): return "Place: %s" % self.name class Restaurant(Place): def __str__(self): return "Restaurant: %s" % self.name class Address(models.Model): street = models.CharField(max_length=80) city = models.CharField(max_length=50) state = models.CharField(max_length=2) zipcode = models.CharField(max_length=5) content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() def __str__(self): return '%s %s, %s %s' % (self.street, self.city, self.state, self.zipcode) class Person(models.Model): account = models.IntegerField(primary_key=True) name = models.CharField(max_length=128) addresses = GenericRelation(Address) def __str__(self): return self.name class CharLink(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.CharField(max_length=100) content_object = GenericForeignKey() class TextLink(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.TextField() content_object = GenericForeignKey() class OddRelation1(models.Model): name = models.CharField(max_length=100) clinks = GenericRelation(CharLink) class OddRelation2(models.Model): name = models.CharField(max_length=100) 
tlinks = GenericRelation(TextLink) # models for test_q_object_or: class Note(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey() note = models.TextField() class Contact(models.Model): notes = GenericRelation(Note) class Organization(models.Model): name = models.CharField(max_length=255) contacts = models.ManyToManyField(Contact, related_name='organizations') class Company(models.Model): name = models.CharField(max_length=100) links = GenericRelation(Link) def __str__(self): return "Company: %s" % self.name # For testing #13085 fix, we also use Note model defined above class Developer(models.Model): name = models.CharField(max_length=15) class Team(models.Model): name = models.CharField(max_length=15) members = models.ManyToManyField(Developer) def __str__(self): return "%s team" % self.name def __len__(self): return self.members.count() class Guild(models.Model): name = models.CharField(max_length=15) members = models.ManyToManyField(Developer) def __bool__(self): return False class Tag(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='g_r_r_tags') object_id = models.CharField(max_length=15) content_object = GenericForeignKey() label = models.CharField(max_length=15) class Board(models.Model): name = models.CharField(primary_key=True, max_length=15) class SpecialGenericRelation(GenericRelation): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.editable = True self.save_form_data_calls = 0 def save_form_data(self, *args, **kwargs): self.save_form_data_calls += 1 class HasLinks(models.Model): links = SpecialGenericRelation(Link) class Meta: abstract = True class HasLinkThing(HasLinks): pass class A(models.Model): flag = models.NullBooleanField() content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey('content_type', 
'object_id') class B(models.Model): a = GenericRelation(A) class Meta: ordering = ('id',) class C(models.Model): b = models.ForeignKey(B, models.CASCADE) class Meta: ordering = ('id',) class D(models.Model): b = models.ForeignKey(B, models.SET_NULL, null=True) class Meta: ordering = ('id',) # Ticket #22998 class Node(models.Model): content_type = models.ForeignKey(ContentType, models.CASCADE) object_id = models.PositiveIntegerField() content = GenericForeignKey('content_type', 'object_id') class Content(models.Model): nodes = GenericRelation(Node) related_obj = models.ForeignKey('Related', models.CASCADE) class Related(models.Model): pass def prevent_deletes(sender, instance, **kwargs): raise ProtectedError("Not allowed to delete.", [instance]) models.signals.pre_delete.connect(prevent_deletes, sender=Node)
unknown
codeparrot/codeparrot-clean
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ a simple multilayer perceptron """ import mxnet as mx def get_symbol(num_classes=10, **kwargs): data = mx.symbol.Variable('data') data = mx.sym.Flatten(data=data) fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128) act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu") fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64) act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu") fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes) mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax') return mlp
unknown
codeparrot/codeparrot-clean
""" Support for GPS tracking MQTT enabled devices. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.mqtt_json/ """ import asyncio import json import logging import voluptuous as vol import homeassistant.components.mqtt as mqtt from homeassistant.core import callback from homeassistant.components.mqtt import CONF_QOS from homeassistant.components.device_tracker import PLATFORM_SCHEMA import homeassistant.helpers.config_validation as cv from homeassistant.const import ( CONF_DEVICES, ATTR_GPS_ACCURACY, ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_BATTERY_LEVEL) DEPENDENCIES = ['mqtt'] _LOGGER = logging.getLogger(__name__) GPS_JSON_PAYLOAD_SCHEMA = vol.Schema({ vol.Required(ATTR_LATITUDE): vol.Coerce(float), vol.Required(ATTR_LONGITUDE): vol.Coerce(float), vol.Optional(ATTR_GPS_ACCURACY, default=None): vol.Coerce(int), vol.Optional(ATTR_BATTERY_LEVEL, default=None): vol.Coerce(str), }, extra=vol.ALLOW_EXTRA) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(mqtt.SCHEMA_BASE).extend({ vol.Required(CONF_DEVICES): {cv.string: mqtt.valid_subscribe_topic}, }) @asyncio.coroutine def async_setup_scanner(hass, config, async_see, discovery_info=None): """Set up the MQTT JSON tracker.""" devices = config[CONF_DEVICES] qos = config[CONF_QOS] dev_id_lookup = {} @callback def async_tracker_message_received(topic, payload, qos): """Handle received MQTT message.""" dev_id = dev_id_lookup[topic] try: data = GPS_JSON_PAYLOAD_SCHEMA(json.loads(payload)) except vol.MultipleInvalid: _LOGGER.error("Skipping update for following data " "because of missing or malformatted data: %s", payload) return except ValueError: _LOGGER.error("Error parsing JSON payload: %s", payload) return kwargs = _parse_see_args(dev_id, data) hass.async_add_job( async_see(**kwargs)) for dev_id, topic in devices.items(): dev_id_lookup[topic] = dev_id yield from mqtt.async_subscribe( hass, topic, async_tracker_message_received, qos) return True def 
_parse_see_args(dev_id, data): """Parse the payload location parameters, into the format see expects.""" kwargs = { 'gps': (data[ATTR_LATITUDE], data[ATTR_LONGITUDE]), 'dev_id': dev_id } if ATTR_GPS_ACCURACY in data: kwargs[ATTR_GPS_ACCURACY] = data[ATTR_GPS_ACCURACY] if ATTR_BATTERY_LEVEL in data: kwargs['battery'] = data[ATTR_BATTERY_LEVEL] return kwargs
unknown
codeparrot/codeparrot-clean
{ "compilerOptions": { "target": "es5", "lib": ["dom", "dom.iterable", "esnext"], "allowJs": true, "skipLibCheck": true, "strict": false, "forceConsistentCasingInFileNames": true, "noEmit": true, "esModuleInterop": true, "module": "esnext", "moduleResolution": "node", "resolveJsonModule": true, "isolatedModules": true, "jsx": "react-jsx", "paths": { "@/*": ["./*"] } }, "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], "exclude": ["node_modules"] }
json
github
https://github.com/vercel/next.js
examples/with-typescript/tsconfig.json
/* * Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors. * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. */ package org.jetbrains.kotlin.analysis.api.fir.test.cases.generated.cases.references; import com.intellij.testFramework.TestDataPath; import org.jetbrains.kotlin.test.util.KtTestUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.kotlin.analysis.api.fir.test.configurators.AnalysisApiFirTestConfiguratorFactory; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode; import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode; import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.references.AbstractReferenceImportAliasTest; import org.jetbrains.kotlin.test.TestMetadata; import org.junit.jupiter.api.Test; import java.io.File; import java.util.regex.Pattern; /** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. 
DO NOT MODIFY MANUALLY */ @SuppressWarnings("all") @TestMetadata("analysis/analysis-api/testData/imports/importAliases") @TestDataPath("$PROJECT_ROOT") public class FirIdeNormalAnalysisSourceModuleReferenceImportAliasTestGenerated extends AbstractReferenceImportAliasTest { @NotNull @Override public AnalysisApiTestConfigurator getConfigurator() { return AnalysisApiFirTestConfiguratorFactory.INSTANCE.createConfigurator( new AnalysisApiTestConfiguratorFactoryData( FrontendKind.Fir, TestModuleKind.Source, AnalysisSessionMode.Normal, AnalysisApiMode.Ide ) ); } @Test public void testAllFilesPresentInImportAliases() { KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/imports/importAliases"), Pattern.compile("^(.+)\\.kt$"), null, true); } @Test @TestMetadata("classAlias.kt") public void testClassAlias() { runTest("analysis/analysis-api/testData/imports/importAliases/classAlias.kt"); } @Test @TestMetadata("classAliasWithCompanionObject.kt") public void testClassAliasWithCompanionObject() { runTest("analysis/analysis-api/testData/imports/importAliases/classAliasWithCompanionObject.kt"); } @Test @TestMetadata("constructorAlias.kt") public void testConstructorAlias() { runTest("analysis/analysis-api/testData/imports/importAliases/constructorAlias.kt"); } @Test @TestMetadata("functionAlias.kt") public void testFunctionAlias() { runTest("analysis/analysis-api/testData/imports/importAliases/functionAlias.kt"); } @Test @TestMetadata("kdocReferenceAlias.kt") public void testKdocReferenceAlias() { runTest("analysis/analysis-api/testData/imports/importAliases/kdocReferenceAlias.kt"); } @Test @TestMetadata("propertyAlias.kt") public void testPropertyAlias() { runTest("analysis/analysis-api/testData/imports/importAliases/propertyAlias.kt"); } }
java
github
https://github.com/JetBrains/kotlin
analysis/analysis-api-fir/tests-gen/org/jetbrains/kotlin/analysis/api/fir/test/cases/generated/cases/references/FirIdeNormalAnalysisSourceModuleReferenceImportAliasTestGenerated.java
/* * Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license. */ import io.ktor.client.engine.mock.* import io.ktor.client.plugins.resources.* import io.ktor.client.plugins.resources.Resources import io.ktor.client.request.* import io.ktor.client.statement.* import io.ktor.client.test.base.* import io.ktor.http.* import io.ktor.resources.* import kotlin.test.Test import kotlin.test.assertEquals class ResourcesTest { @Resource("path/{id}/{method}") class Path(val id: Long, val method: String) { @Resource("child/{path?}") data class Child(val parent: Path, val path: String, val query: List<Int>) } @Test fun testBuilders() = testWithEngine(MockEngine) { config { engine { addHandler { request -> val uri = request.url.fullPath val method = request.method.value assertEquals(method, uri.split('/')[3]) assertEquals("/path/123/$method/child/value?query=1&query=2&query=3&query=4", uri) respondOk(uri) } } install(Resources) } test { client -> val response1 = client.get(Path.Child(Path(123, "GET"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response2 = client.post(Path.Child(Path(123, "POST"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response3 = client.put(Path.Child(Path(123, "PUT"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response4 = client.delete(Path.Child(Path(123, "DELETE"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response5 = client.options(Path.Child(Path(123, "OPTIONS"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response6 = client.head(Path.Child(Path(123, "HEAD"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response7 = client.patch(Path.Child(Path(123, "PATCH"), "value", listOf(1, 2, 3, 4))).bodyAsText() val response8 = client.request(Path.Child(Path(123, "METHOD"), "value", listOf(1, 2, 3, 4))) { method = HttpMethod("METHOD") }.bodyAsText() assertEquals("/path/123/GET/child/value?query=1&query=2&query=3&query=4", response1) 
assertEquals("/path/123/POST/child/value?query=1&query=2&query=3&query=4", response2) assertEquals("/path/123/PUT/child/value?query=1&query=2&query=3&query=4", response3) assertEquals("/path/123/DELETE/child/value?query=1&query=2&query=3&query=4", response4) assertEquals("/path/123/OPTIONS/child/value?query=1&query=2&query=3&query=4", response5) assertEquals("/path/123/HEAD/child/value?query=1&query=2&query=3&query=4", response6) assertEquals("/path/123/PATCH/child/value?query=1&query=2&query=3&query=4", response7) assertEquals("/path/123/METHOD/child/value?query=1&query=2&query=3&query=4", response8) client.prepareGet(Path.Child(Path(123, "GET"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/GET/child/value?query=1&query=2&query=3&query=4", body) } client.preparePost(Path.Child(Path(123, "POST"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/POST/child/value?query=1&query=2&query=3&query=4", body) } client.preparePut(Path.Child(Path(123, "PUT"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/PUT/child/value?query=1&query=2&query=3&query=4", body) } client.prepareDelete(Path.Child(Path(123, "DELETE"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/DELETE/child/value?query=1&query=2&query=3&query=4", body) } client.prepareOptions(Path.Child(Path(123, "OPTIONS"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/OPTIONS/child/value?query=1&query=2&query=3&query=4", body) } client.prepareHead(Path.Child(Path(123, "HEAD"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/HEAD/child/value?query=1&query=2&query=3&query=4", body) } client.preparePatch(Path.Child(Path(123, "PATCH"), "value", listOf(1, 2, 3, 4))).body { body: String -> assertEquals("/path/123/PATCH/child/value?query=1&query=2&query=3&query=4", body) } client.prepareRequest(Path.Child(Path(123, "METHOD"), "value", listOf(1, 2, 3, 4))) 
{ method = HttpMethod("METHOD") }.body { body: String -> assertEquals("/path/123/METHOD/child/value?query=1&query=2&query=3&query=4", body) } } } @Test fun testBuildersWithUrl() = testWithEngine(MockEngine) { config { engine { addHandler { request -> val uri = request.url.fullPath val method = request.method.value assertEquals(method, uri.split('/')[3]) assertEquals("/path/123/$method/child/value?query=1&query=2&query=3&query=4", uri) respondOk(uri) } } install(Resources) } test { client -> val response1 = client.get("/path/123/GET/child/value?query=1&query=2&query=3&query=4").bodyAsText() assertEquals("/path/123/GET/child/value?query=1&query=2&query=3&query=4", response1) } } @Resource("path/{id}/{value?}") class PathWithDefault(val id: Boolean = true, val value: String? = null, val query1: Int?, val query2: Int? = 5) @Test fun testRequestWithDefaults() = testWithEngine(MockEngine) { config { engine { addHandler { request -> val uri = request.url.fullPath assertEquals("/path/true?query2=5", uri) respondOk(uri) } } install(Resources) } test { client -> val response = client.get(PathWithDefault(query1 = null)) assertEquals(HttpStatusCode.OK, response.status) } } @Resource("{path}") class ParametersEncoded(val path: String, val query: String, val queryList: List<String>) @Test fun testEncodesParameters() = testWithEngine(MockEngine) { config { engine { addHandler { request -> val uri = request.url.fullPath assertEquals("/p.:+!ath%2F?query=qu%3Fe%2Fry&queryList=it%3Dem1&queryList=it%26em2", uri) respondOk(uri) } } install(Resources) } test { client -> val response = client.get(ParametersEncoded("p.:+!ath/", "qu?e/ry", listOf("it=em1", "it&em2"))) assertEquals(HttpStatusCode.OK, response.status) } } }
kotlin
github
https://github.com/ktorio/ktor
ktor-client/ktor-client-plugins/ktor-client-resources/common/test/ResourcesTest.kt