Dataset schema:

    content            string    lengths 1 to 1.05M characters
    input_ids          list      lengths 1 to 883k tokens
    ratio_char_token   float64   values 1 to 22.9
    token_count        int64     values 1 to 883k
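Each row below pairs a source file (content) with its token ids (input_ids), the number of tokens (token_count), and the characters-per-token ratio (ratio_char_token). As a rough illustration of how these columns relate, here is a minimal sketch assuming a GPT-2-style BPE tokenizer from the Hugging Face transformers library; the tokenizer choice and the make_row helper are illustrative assumptions, not something the dump itself documents.

# Minimal sketch, assuming a GPT-2-style BPE tokenizer (an assumption, not confirmed by the dump).
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    """Build one row with the same columns as the schema above (hypothetical helper)."""
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "content": content,
        "input_ids": input_ids,
        "token_count": token_count,
        # characters per token; higher values mean the tokenizer compresses the text better
        "ratio_char_token": len(content) / token_count,
    }

row = make_row("import numpy as np\n")
print(row["token_count"], round(row["ratio_char_token"], 2))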
# Created by Andrew Silva on 8/28/19
import gym
import numpy as np
import torch
from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward
import torch.multiprocessing as mp
import argparse
import copy
import random

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
    parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
    parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL", type=int, default=8)
    parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP", type=int, default=0)
    parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
    parser.add_argument("-gpu", help="run on GPU?", action='store_true')

    args = parser.parse_args()

    AGENT_TYPE = args.agent_type  # 'ddt' or 'mlp'
    NUM_EPS = args.episodes  # number of episodes, default 2000
    ENV_TYPE = args.env_type  # 'cart' or 'lunar', default 'cart'
    USE_GPU = args.gpu  # applies to 'prolo' only; use GPU? default False

    if ENV_TYPE == 'lunar':
        init_env = gym.make('LunarLander-v2')
        dim_in = init_env.observation_space.shape[0]
        dim_out = init_env.action_space.n
    elif ENV_TYPE == 'cart':
        init_env = gym.make('CartPole-v1')
        dim_in = init_env.observation_space.shape[0]
        dim_out = init_env.action_space.n
    else:
        raise Exception('No valid environment selected')

    print(f"Agent {AGENT_TYPE} on {ENV_TYPE}")
    # mp.set_start_method('spawn')
    mp.set_sharing_strategy('file_system')

    for i in range(5):
        bot_name = AGENT_TYPE + ENV_TYPE
        if USE_GPU:
            bot_name += 'GPU'
        if AGENT_TYPE == 'ddt':
            policy_agent = DDTAgent(bot_name=bot_name,
                                    input_dim=dim_in,
                                    output_dim=dim_out,
                                    rule_list=False,
                                    num_rules=args.num_leaves)
        elif AGENT_TYPE == 'mlp':
            policy_agent = MLPAgent(input_dim=dim_in,
                                    bot_name=bot_name,
                                    output_dim=dim_out,
                                    num_hidden=args.num_hidden)
        else:
            raise Exception('No valid network selected')
        # NOTE: main() is not defined in this excerpt; it presumably appears
        # earlier in the full training script.
        reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
[ 2, 15622, 416, 6858, 23720, 319, 807, 14, 2078, 14, 1129, 198, 11748, 11550, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 6179, 540, 62, 1860, 912, 13, 49638, 13, 1860, 83, 62, 25781, 1330, 20084, 5603, 6783, 198,...
2.155483
1,222
"""
TW10: Words by Prefix

Team: Tam Tamura, Andrew Nalundasan
For: OMSBA 2061, Seattle University
Date: 11/3/2020
"""

question_2 = ['able', 'ability', 'apple', 'tryst', 'trial', 'tremendous', 'tree']

my_list = []
for elem in question_2:
    prefix = elem[:2]
    my_list.append(prefix)

print(my_list)
[ 37811, 17306, 940, 25, 23087, 416, 3771, 13049, 201, 198, 15592, 25, 11552, 11552, 5330, 11, 6858, 399, 282, 917, 292, 272, 201, 198, 1890, 25, 440, 5653, 4339, 1160, 5333, 11, 7312, 2059, 201, 198, 10430, 25, 1367, 14, 18, 14, 4233...
2.246575
146
import hashlib from typing import TypeVar, Union import redis from openff.toolkit.topology import Molecule from openff.bespokefit.executor.services.qcgenerator import worker from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask from openff.bespokefit.utilities.molecule import canonical_order_atoms _T = TypeVar("_T", HessianTask, OptimizationTask, Torsion1DTask) def cached_compute_task( task: Union[HessianTask, OptimizationTask, Torsion1DTask], redis_connection: redis.Redis, ) -> str: """Checks to see if a QC task has already been executed and if not send it to a worker. """ if isinstance(task, Torsion1DTask): compute = worker.compute_torsion_drive elif isinstance(task, OptimizationTask): compute = worker.compute_optimization elif isinstance(task, HessianTask): compute = worker.compute_hessian else: raise NotImplementedError() # Canonicalize the task to improve the cache hit rate. task = _canonicalize_task(task) task_hash = hashlib.sha512(task.json().encode()).hexdigest() task_id = redis_connection.hget("qcgenerator:task-ids", task_hash) if task_id is not None: return task_id.decode() task_id = compute.delay(task_json=task.json()).id redis_connection.hset("qcgenerator:types", task_id, task.type) # Make sure to only set the hash after the type is set in case the connection # goes down before this information is entered and subsequently discarded. redis_connection.hset("qcgenerator:task-ids", task_hash, task_id) return task_id
[ 11748, 12234, 8019, 198, 6738, 19720, 1330, 5994, 19852, 11, 4479, 198, 198, 11748, 2266, 271, 198, 6738, 1280, 487, 13, 25981, 15813, 13, 4852, 1435, 1330, 25726, 23172, 198, 198, 6738, 1280, 487, 13, 65, 9774, 2088, 11147, 13, 18558, ...
2.777969
581
'''Copyright Gigaspaces, 2017, All Rights Reserved''' from cloudify.plugins import lifecycle OP_START = 'hacker.interfaces.lifecycle.start' OP_STOP = 'hacker.interfaces.lifecycle.stop' OP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots' OP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots' REQUIRED_OPS = set([OP_START, OP_SS_C, OP_SS_D, OP_STOP]) def build_instance_sequence(instance, operation, state_start=None, state_end=None): ''' Builds sequenced subgraph tasks for an instance .. note:: The sequence will not be built if the instance provided does not have a node with an operation defined in the operation parameter. :param `CloudifyWorkflowNodeInstance` instance: Node instance to execute tasks against :param str operation: Node (lifecycle) operation to execute :param str state_start: Verb to describe operation start :param str state_stop: Verb to describe operation finish ''' tasks = list() # Only build the sequence if the node operation exists if operation not in instance.node.operations: return tasks # Add task starting state if state_start: tasks.append(instance.send_event('%s host' % state_start)) tasks.append(instance.set_state(state_start.lower())) # Add task operation tasks.append(instance.execute_operation(operation)) # Add task ended state if state_end: tasks.append(instance.send_event('%s host' % state_end)) tasks.append(instance.set_state(state_end.lower())) return tasks def build_instance_subgraph(instance, graph): ''' Builds a subgraph for an instance :param `CloudifyWorkflowNodeInstance` instance: Node instance to execute tasks against :param `TaskDependencyGraph` graph: Task graph to create sequences from ''' # Init a "stop instance" subgraph sg_stop = graph.subgraph('stop_subgraph') seq_stop = sg_stop.sequence() seq_stop.add(*build_instance_sequence( instance, OP_STOP, 'Stopping', 'Stopped')) # Init a "recreate snapshots" subgraph sg_snap = graph.subgraph('snapshot_subgraph') seq_snap = sg_snap.sequence() if OP_SS_D in instance.node.operations: seq_snap.add(*build_instance_sequence(instance, OP_SS_D)) if OP_SS_C in instance.node.operations: seq_snap.add(*build_instance_sequence(instance, OP_SS_C)) # Init a "start instance" subgraph sg_start = graph.subgraph('stop_subgraph') seq_start = sg_start.sequence() seq_start.add(*build_instance_sequence( instance, OP_START, 'Starting', 'Started')) # Create subgraph dependencies graph.add_dependency(sg_snap, sg_stop) graph.add_dependency(sg_start, sg_snap) def refresh_snapshots(ctx, **_): ''' Executes a complex, graph-based set of lifecycle events to stop all host (compute) instances, delete all existing instance snapshots, take new snapshots of all attached volumes, and start the instances back up when complete. ''' graph = ctx.graph_mode() # Find all compute hosts and build a sequence graph for node in ctx.nodes: if not REQUIRED_OPS.issubset(node.operations): ctx.logger.warn( 'Skipping refresh_snapshots workflow for node "%s" because ' 'it does not have all required operations defined' % node.id) continue # Iterate over each node instance for instance in node.instances: if not lifecycle.is_host_node(instance): ctx.logger.warn( 'Skipping refresh_snapshots workflow for node instance ' '"%s" because it is not a compute host' % instance.id) continue build_instance_subgraph(instance, graph) # Execute the sequences return graph.execute()
[ 7061, 6, 15269, 19525, 5126, 2114, 11, 2177, 11, 1439, 6923, 33876, 7061, 6, 198, 6738, 6279, 1958, 13, 37390, 1330, 3868, 47510, 198, 198, 3185, 62, 2257, 7227, 796, 705, 71, 10735, 13, 3849, 32186, 13, 36195, 47510, 13, 9688, 6, 1...
2.567251
1,539
""" Flyter Tool for transferring files on the same network using raw sockets. Doesn't use encryption. """ __version__ = (0, 0, 0) __author__ = "CryptoNyxz" __license__ = """ MIT License Copyright (c) 2021 Jaymund Cyrus F. Floranza Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from argparse import ArgumentParser from base64 import b64encode from datetime import timedelta from math import log from os import altsep, sep, \ mkdir, stat, unlink from os.path import dirname, exists, join from random import randint from secrets import token_bytes from shutil import get_terminal_size from socket import \ socket, error, timeout, \ ntohs, ntohl, htons, htonl, \ gethostname, \ AF_INET, SOCK_STREAM from threading import Thread from time import time from warnings import warn from sys import argv, exit, version_info if version_info < (3, 6): warn('[!] Some features are not be compatible with the version of your ' 'python interpreter') FROMTERMINAL = False # Utility Functions def random_port(host): """Return a random available TCP port.""" while True: port = randint(10_000, 65536) with socket(AF_INET, SOCK_STREAM) as sock: try: sock.bind((host, port)) except error: continue else: return port def printerror(errormsg): """Print an error message.""" global FROMTERMINAL if FROMTERMINAL: print(f'\n[x] {errormsg}') exit(-1) exit(-1) exit(-1) exit(-1) else: warn(errormsg) def printalert(alert): """Print an alert message.""" global FROMTERMINAL print(f'[!] {alert}') def int_to_bytes_s(integer): """Convert 16 - bit integer to bytes for packing.""" res = ntohs(integer) res = hex(res)[2:] res = '0'*(len(res) % 2) + res return bytes.fromhex(res) def bytes_to_int_s(byteseq): """Convert byte sequence to 16 - but integer for unpacking.""" res = bytes.hex(byteseq) res = int(res, 16) return htons(res) def int_to_bytes_l(integer): """Convert 32 - but integer to bytes for packing.""" res = ntohl(integer) res = hex(res)[2:] res = '0'*(len(res) % 2) + res return bytes.fromhex(res) def bytes_to_int_l(byteseq): """Convert byte sequence to 32 - but integer for unpacking.""" res = bytes.hex(byteseq) res = int(res, 16) return htonl(res) def pack_str(string): """Pack a string into a byte sequence.""" return string.encode() def unpack_str(byteseq): """Unpack a byte sequence into a string.""" return byteseq.decode() # Utility Classes # Flyter Classes # Simplified Functions def send(ip_address, port, filepath): """ Send file to receiver on the same network. Parameters ---------- ip_address : str The target receiver's IP address. 
port : int The target receiver's main TCP port. filepath : str The path to the file to be sent. """ sender = FlyterSender(ip_address, port) sender.recv_param_set() return sender.send_file(filepath) def receive(host_ip_address, port, workers=1): """ Receive a file from sender on the same network. Parameters ---------- host_ip_address : str The receiver's host IP address. port : int The receiver's host port to listen on. workers : :obj:`int`, optional The number of workers to use. """ receiver = FlyterReciever(host_ip_address, port, workers) receiver.send_param_set() receiver.recv_file() if __name__ == '__main__': parser = ArgumentParser( prog="Flyter", epilog="See '<command> --help' to read about a specific sub-command." ) subparsers = parser.add_subparsers( dest="action", help="The action to be performed" ) send_parser = subparsers.add_parser("send") recv_parser = subparsers.add_parser("recv") send_parser.add_argument('-i', '--ip', required=True, help="Target receiver's IP address") send_parser.add_argument('-p', '--port', type=int, required=True, help="Target receiver's TCP port number") send_parser.add_argument('-f', '--file', required=True, help="Path to the file to be sent") recv_parser.add_argument('-i', '--ip', required=True, help="Host IP address") recv_parser.add_argument('-p', '--port', type=int, required=True, help="TCP port to listen on") recv_parser.add_argument('-w', '--workers', type=int, default=1, help="TCP port to listen on") if len(argv) > 1: FROMTERMINAL = True args = parser.parse_args() if args.action == "send": send(args.ip, args.port, args.file) elif args.action == "recv": receive(args.ip, args.port, args.workers) else: parser.print_help()
[ 37811, 198, 33771, 353, 198, 198, 25391, 329, 26140, 3696, 319, 262, 976, 3127, 1262, 8246, 37037, 13, 198, 13921, 77, 470, 779, 15835, 13, 198, 37811, 628, 198, 834, 9641, 834, 796, 357, 15, 11, 657, 11, 657, 8, 198, 834, 9800, 8...
2.401758
2,616
# coding=utf-8 # Copyright Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if tf.debugging.assert_near(a, b, atol=atol): return True raise except Exception: msg = "{} != {}".format(a, b) if prefix: msg = prefix + ": " + msg raise AssertionError(msg) TOLERANCE = 1e-4
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 28493, 16734, 46671, 11, 9308, 412, 13, 15722, 11, 943, 805, 327, 22436, 290, 383, 12905, 2667, 32388, 3457, 13, 1074, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, ...
2.938104
517
import numpy as np
import sklearn
import logging

from wann_genetic.individual.network_base import BaseFFNN


def softmax(x, axis=-1):
    """Compute softmax values for each set of scores in x.

    Returns:
        softmax - softmax normalized in dim axis
    """
    e_x = np.exp(x - np.expand_dims(np.max(x, axis=axis), axis=axis))
    s = (e_x / np.expand_dims(e_x.sum(axis=-1), axis=axis))
    return s


def apply_act_function(available_funcs, selected_funcs, x=None):
    """Apply the activation function of the selected nodes to their sums.

    This fulfills the same function as the
    :class:`wann_genetic.individual.torch.ffn.MultiActivationModule`.
    """
    if x is not None:
        result = np.empty(x.shape)
        for i, func in enumerate(selected_funcs):
            assert func < len(available_funcs)
            result[..., i] = available_funcs[func][1](x[..., i])
        return result
    else:
        return np.array([  # return function names
            available_funcs[func][0] for func in selected_funcs
        ])
[ 11748, 299, 32152, 355, 45941, 198, 11748, 1341, 35720, 198, 198, 11748, 18931, 198, 198, 6738, 266, 1236, 62, 5235, 5139, 13, 43129, 13, 27349, 62, 8692, 1330, 7308, 5777, 6144, 628, 198, 4299, 2705, 9806, 7, 87, 11, 16488, 10779, 16...
2.471698
424
import contextlib from datetime import date from datetime import datetime from datetime import timezone from functools import wraps from io import BytesIO from itertools import count from typing import Any from typing import Dict from typing import Sequence import pytest from dateutil.parser import parse as parse_date from dateutil.relativedelta import relativedelta from django import forms from django.core.exceptions import ValidationError from django.template.loader import render_to_string from django.urls import reverse from freezegun import freeze_time from lxml import etree from common.models.records import TrackedModel from common.renderers import counter_generator from common.serializers import validate_taric_xml_record_order from common.util import TaricDateRange from common.util import get_accessor from common.util import get_field_tuple INTERDEPENDENT_IMPORT_IMPLEMENTED = True UPDATE_IMPORTER_IMPLEMENTED = True EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED = False COMMODITIES_IMPLEMENTED = True MEURSING_TABLES_IMPLEMENTED = False PARTIAL_TEMPORARY_STOP_IMPLEMENTED = False UTC = timezone.utc requires_commodities = pytest.mark.skipif( not COMMODITIES_IMPLEMENTED, reason="Commodities not implemented", ) requires_export_refund_nomenclature = pytest.mark.skipif( not EXPORT_REFUND_NOMENCLATURE_IMPLEMENTED, reason="Export refund nomenclature not implemented", ) requires_meursing_tables = pytest.mark.skipif( not MEURSING_TABLES_IMPLEMENTED, reason="Meursing tables not implemented", ) requires_partial_temporary_stop = pytest.mark.skipif( not PARTIAL_TEMPORARY_STOP_IMPLEMENTED, reason="Partial temporary stop not implemented", ) requires_interdependent_import = pytest.mark.skipif( not INTERDEPENDENT_IMPORT_IMPLEMENTED, reason="Interdependent imports not implemented", ) requires_update_importer = pytest.mark.skipif( not UPDATE_IMPORTER_IMPLEMENTED, reason="Requires Updating importers to be implemented", ) def make_duplicate_record(factory, identifying_fields=None): """Creates two records using the passed factory that are duplicates of each other and returns the record created last.""" existing = factory.create() # allow overriding identifying_fields if identifying_fields is None: identifying_fields = list(factory._meta.model.identifying_fields) return factory.create( **dict(get_field_tuple(existing, field) for field in identifying_fields) ) def make_non_duplicate_record(factory, identifying_fields=None): """Creates two records using the passed factory that are not duplicates of each other and returns the record created last.""" existing = factory.create() not_duplicate = factory.create() if identifying_fields is None: identifying_fields = list(factory._meta.model.identifying_fields) assert any( get_field_tuple(existing, f) != get_field_tuple(not_duplicate, f) for f in identifying_fields ) return not_duplicate def get_checkable_data(model: TrackedModel, ignore=frozenset()): """ Returns a dict representing the model's data ignoring any automatically set fields and fields with names passed to `ignore`. The returned data will contain the identifying fields for any linked models rather than internal PKs. 
For example: get_checkable_data(FootnoteDescriptionFactory(), ignore={"sid"}) # { # "description": "My sample footnote text", # "described_footnote": { # "footnote_type__footnote_type_id": "FN" # "footnote_id": "123", # }, # } """ checked_field_names = {f.name for f in model.copyable_fields} - ignore data = { name: getattr(model, get_accessor(model._meta.get_field(name))) for name in checked_field_names } identifying_fields = { name: data[name].get_identifying_fields() for name in checked_field_names if hasattr(data[name], "get_identifying_fields") } data.update(identifying_fields) return data def assert_records_match( expected: TrackedModel, imported: TrackedModel, ignore=frozenset(), ): """ Asserts that every value for every field in the imported model is the same as the data in the expected model. System fields that will change from model to model are not checked. Any field names given to `ignore` will also not be checked. """ expected_data = get_checkable_data(expected, ignore=ignore) imported_data = get_checkable_data(imported, ignore=ignore) assert expected_data == imported_data def assert_many_records_match( expected: Sequence[TrackedModel], imported: Sequence[TrackedModel], ignore=frozenset(), ): """ Asserts that every value for every field in the imported models is the same as the data in the expected models, and that the count of both is equal. System fields that will change from model to model are not checked. Any field names given to `ignore` will also not be checked. """ expected_data = [get_checkable_data(e, ignore=ignore) for e in expected] imported_data = [get_checkable_data(i, ignore=ignore) for i in imported] assert expected_data == imported_data _transaction_counter = count(start=1) def taric_xml_record_codes(xml): """Yields tuples of (record_code, subrecord_code)""" records = xml.xpath(".//*[local-name() = 'record']") codes = etree.XPath( ".//*[local-name()='record.code' or local-name()='subrecord.code']/text()", ) return [tuple(codes(record)) for record in records] def only_applicable_after(cutoff): """ Decorator which asserts that a test fails after a specified cutoff date. :param cutoff: A date string, or datetime object before which the test should fail. """ cutoff = parse_date(cutoff) return decorator def validity_period_post_data(start: date, end: date) -> Dict[str, int]: """ Construct a POST data fragment for the validity period start and end dates of a ValidityPeriodForm from the given date objects, eg: >>> validity_period_post_data( >>> datetime.date(2021, 1, 2), >>> datetime.date(2022, 3, 4), >>> ) { "start_date_0": 1, "start_date_1": 2, "start_date_2": 2021, "end_date_0": 4, "end_date_1": 3, "end_date_2": 2022, } """ return { f"{name}_{i}": part for name, date in (("start_date", start), ("end_date", end)) for i, part in enumerate([date.day, date.month, date.year]) } def get_form_data(form: forms.ModelForm) -> Dict[str, Any]: """Returns a dictionary of the fields that the form will put onto a page and their current values, taking account of any fields that have sub-fields and hence result in multiple HTML <input> objects.""" data = {**form.initial} for field in form.rendered_fields: value = data[field] if field in data else form.fields[field].initial if hasattr(form.fields[field].widget, "decompress"): # If the widget can be decompressed, then it is not just a simple # value and has some internal structure. So we need to generate one # form item per decompressed value and append the name with _0, _1, # etc. 
            # This mirrors the MultiValueWidget in django/forms/widgets.py.
            if field in data:
                del data[field]
            value = form.fields[field].widget.decompress(value)
            data.update(
                **{f"{field}_{i}": v for i, v in enumerate(value) if v is not None}
            )
        elif value is not None:
            data.setdefault(field, value)

    return data
[ 11748, 4732, 8019, 198, 6738, 4818, 8079, 1330, 3128, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 4818, 8079, 1330, 640, 11340, 198, 6738, 1257, 310, 10141, 1330, 27521, 198, 6738, 33245, 1330, 2750, 4879, 9399, 198, 6738, 340, ...
2.787607
2,792
'''
Created on 2016-8-10

@author: Administrator
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase  # canonical location of MIMEBase
from email.utils import parseaddr, formataddr
import smtplib


def _format_addr(s):
    # Helper referenced below but missing from the excerpt; this is the
    # conventional implementation for encoding "Name <addr>" headers.
    name, addr = parseaddr(s)
    return formataddr((Header(name, 'utf-8').encode(), addr))


from_addr = 'leeo1124@163.com'   # input('From: ')
password = input('Password: ')
to_addr = '450475851@qq.com'     # input('To: ')
smtp_server = 'smtp.163.com'     # input('SMTP server: ')

# Plain-text message:
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# HTML message:
# msg = MIMEText('<html><body><h1>Hello</h1>' +
#                '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
#                '</body></html>', 'html', 'utf-8')

# Message with an attachment:
msg = MIMEMultipart()
msg['From'] = _format_addr('Python <%s>' % from_addr)
msg['To'] = _format_addr(' <%s>' % to_addr)
msg['Subject'] = Header('SMTP', 'utf-8').encode()

# Attach the message body as MIMEText:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))

# Attach a file as MIMEBase:
with open('D:/pythonWorkspace/pthonDemo/src/com/python/email/test.jpg', 'rb') as f:
    # Build a MIME object for the image:
    mime = MIMEBase('image', 'png', filename='test.png')
    # Add the headers required for the attachment:
    mime.add_header('Content-Disposition', 'attachment', filename='test.png')
    mime.add_header('Content-ID', '<0>')
    mime.add_header('X-Attachment-Id', '0')
    # Read in the file contents:
    mime.set_payload(f.read())
    # Encode as Base64:
    encoders.encode_base64(mime)
    # Add to the MIMEMultipart message:
    msg.attach(mime)

msg['From'] = _format_addr('Python <%s>' % from_addr)
msg['To'] = _format_addr(' <%s>' % to_addr)
msg['Subject'] = Header('SMTP', 'utf-8').encode()

server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
[ 7061, 6, 198, 41972, 319, 1584, 40215, 198, 198, 31, 9800, 25, 22998, 198, 7061, 6, 198, 6738, 3053, 1330, 2207, 375, 364, 198, 6738, 3053, 13, 25677, 1330, 48900, 198, 6738, 3053, 13, 76, 524, 13, 5239, 1330, 337, 3955, 2767, 2302,...
2.365359
739
# Generated by Django 3.0.5 on 2020-04-24 12:52

from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 20, 319, 12131, 12, 3023, 12, 1731, 1105, 25, 4309, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.988889
90
from evaluator import unit
from evaluator.backends import QueueWorkerResources
from evaluator.backends.dask import DaskLSFBackend
from evaluator.client import ConnectionOptions, EvaluatorClient
from evaluator.datasets import PhysicalPropertyDataSet
from evaluator.forcefield import SmirnoffForceFieldSource
from evaluator.server import EvaluatorServer
from evaluator.utils import setup_timestamp_logging


if __name__ == "__main__":
    # NOTE: main() is not defined in this excerpt; it presumably appears in the full script.
    main()
[ 6738, 5418, 84, 1352, 1330, 4326, 198, 6738, 5418, 84, 1352, 13, 1891, 2412, 1330, 4670, 518, 12468, 263, 33236, 198, 6738, 5418, 84, 1352, 13, 1891, 2412, 13, 67, 2093, 1330, 360, 2093, 6561, 37, 7282, 437, 198, 6738, 5418, 84, 135...
3.423077
130
import logging
import unittest

from pyinstrument import Profiler

from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.idm_agents import IDMAgents
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


if __name__ == "__main__":
    unittest.main()
[ 11748, 18931, 198, 11748, 555, 715, 395, 198, 198, 6738, 12972, 259, 43872, 1330, 4415, 5329, 198, 198, 6738, 14364, 11578, 13, 11578, 768, 13, 1416, 39055, 62, 38272, 13, 28803, 11578, 62, 9945, 13, 9288, 13, 28803, 11578, 62, 1416, ...
3.185792
183
import os
import os.path as osp
import time
from collections import deque
import pickle

from baselines.ddpg.ddpg_learner import DDPG
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from baselines.common import set_global_seeds
from baselines import logger
import tensorflow as tf
import numpy as np

try:
    from mpi4py import MPI
except ImportError:
    MPI = None
[ 11748, 28686, 198, 11748, 28686, 13, 6978, 355, 267, 2777, 198, 11748, 640, 198, 6738, 17268, 1330, 390, 4188, 198, 11748, 2298, 293, 198, 198, 6738, 1615, 20655, 13, 1860, 6024, 13, 1860, 6024, 62, 3238, 1008, 1330, 360, 6322, 38, 19...
3.21875
160
import os
from datetime import datetime

from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtPrintSupport import QPrinter, QPrintDialog
from jinja2 import TemplateNotFound

from .ui.ui_transaction_details import Ui_TransactionDetails
from .ui import images_rc
from . import jinja_env
from .exceptions import PrinterError
[ 11748, 28686, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 9485, 24819, 17, 13, 48, 83, 8205, 72, 1330, 1635, 198, 6738, 9485, 24819, 17, 13, 48, 83, 14055, 1330, 1635, 198, 6738, 9485, 24819, 17, 13, 48, 83, 54, 312, ...
3.142857
119
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def preserve_units(unit1, unit2=None): return unit1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if 
isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. 
if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) else: # numpy version equal to or newer than 1.13 def __array_finalize__(self, obj): if obj is None and hasattr(self, 'units'): return self.units = getattr(obj, 'units', NULL_UNIT) def __pos__(self): """ Posify the data. """ # this needs to be defined for all numpy versions, see # numpy issue #9081 return type(self)(super(YTArray, self).__pos__(), self.units) def __reduce__(self): """Pickle reduction method See the documentation for the standard library pickle module: http://docs.python.org/2/library/pickle.html Unit metadata is encoded in the zeroth element of third element of the returned tuple, itself a tuple used to restore the state of the ndarray. This is always defined for numpy arrays. """ np_ret = super(YTArray, self).__reduce__() obj_state = np_ret[2] unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],) new_ret = np_ret[:2] + unit_state + np_ret[3:] return new_ret def __setstate__(self, state): """Pickle setstate method This is called inside pickle.read() and restores the unit data from the metadata extracted in __reduce__ and then serialized by pickle. """ super(YTArray, self).__setstate__(state[1:]) try: unit, lut = state[0] except TypeError: # this case happens when we try to load an old pickle file # created before we serialized the unit symbol lookup table # into the pickle file unit, lut = str(state[0]), default_unit_symbol_lut.copy() # need to fix up the lut if the pickle was saved prior to PR #1728 # when the pickle format changed if len(lut['m']) == 2: lut.update(default_unit_symbol_lut) for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]: lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}') registry = UnitRegistry(lut=lut, add_default_symbols=False) self.units = Unit(unit, registry=registry) def __deepcopy__(self, memodict=None): """copy.deepcopy implementation This is necessary for stdlib deepcopy of arrays and quantities. """ if memodict is None: memodict = {} ret = super(YTArray, self).__deepcopy__(memodict) return type(self)(ret, copy.deepcopy(self.units)) class YTQuantity(YTArray): """ A scalar associated with a unit. Parameters ---------- input_scalar : an integer or floating point scalar The scalar to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the quantity. Powers must be specified using python syntax (cm**3, not cm^3). registry : A UnitRegistry object The registry to create units from. 
If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Examples -------- >>> from yt import YTQuantity >>> a = YTQuantity(1, 'cm') >>> b = YTQuantity(2, 'm') >>> a + b 201.0 cm >>> b + a 2.01 m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTQuantity(12, 'g/cm**3') >>> np.abs(a) 12 g/cm**3 and strip them when it would be annoying to deal with them. >>> print(np.log10(a)) 1.07918124605 YTQuantity is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.quan(5, 'code_length') >>> a.in_cgs() 1.543e+25 cm This is equivalent to: >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ def validate_numpy_wrapper_units(v, arrs): if not any(isinstance(a, YTArray) for a in arrs): return v if not all(isinstance(a, YTArray) for a in arrs): raise RuntimeError("Not all of your arrays are YTArrays.") a1 = arrs[0] if not all(a.units == a1.units for a in arrs[1:]): raise RuntimeError("Your arrays must have identical units.") v.units = a1.units return v def uconcatenate(arrs, axis=0): """Concatenate a sequence of arrays. This wrapper around numpy.concatenate preserves units. All input arrays must have the same units. See the documentation of numpy.concatenate for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uconcatenate((A, B)) YTArray([ 1., 2., 3., 2., 3., 4.]) cm """ v = np.concatenate(arrs, axis=axis) v = validate_numpy_wrapper_units(v, arrs) return v def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None): """Applies the cross product to two YT arrays. This wrapper around numpy.cross preserves units. See the documentation of numpy.cross for full details. """ v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis) units = arr1.units * arr2.units arr = YTArray(v, units, registry=registry) return arr def uintersect1d(arr1, arr2, assume_unique=False): """Find the sorted unique elements of the two input arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uintersect1d(A, B) YTArray([ 2., 3.]) cm """ v = np.intersect1d(arr1, arr2, assume_unique=assume_unique) v = validate_numpy_wrapper_units(v, [arr1, arr2]) return v def uunion1d(arr1, arr2): """Find the union of two arrays. A wrapper around numpy.intersect1d that preserves units. All input arrays must have the same units. See the documentation of numpy.intersect1d for full details. Examples -------- >>> A = yt.YTArray([1, 2, 3], 'cm') >>> B = yt.YTArray([2, 3, 4], 'cm') >>> uunion1d(A, B) YTArray([ 1., 2., 3., 4.]) cm """ v = np.union1d(arr1, arr2) v = validate_numpy_wrapper_units(v, [arr1, arr2]) return v def unorm(data, ord=None, axis=None, keepdims=False): """Matrix or vector norm that preserves units This is a wrapper around np.linalg.norm that preserves units. See the documentation for that function for descriptions of the keyword arguments. The keepdims argument is ignored if the version of numpy installed is older than numpy 1.10.0. 
""" if LooseVersion(np.__version__) < LooseVersion('1.10.0'): norm = np.linalg.norm(data, ord=ord, axis=axis) else: norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims) if norm.shape == (): return YTQuantity(norm, data.units) return YTArray(norm, data.units) def udot(op1, op2): """Matrix or vector dot product that preserves units This is a wrapper around np.dot that preserves units. """ dot = np.dot(op1.d, op2.d) units = op1.units*op2.units if dot.shape == (): return YTQuantity(dot, units) return YTArray(dot, units) def uvstack(arrs): """Stack arrays in sequence vertically (row wise) while preserving units This is a wrapper around np.vstack that preserves units. """ v = np.vstack(arrs) v = validate_numpy_wrapper_units(v, arrs) return v def uhstack(arrs): """Stack arrays in sequence horizontally (column wise) while preserving units This is a wrapper around np.hstack that preserves units. """ v = np.hstack(arrs) v = validate_numpy_wrapper_units(v, arrs) return v def ustack(arrs, axis=0): """Join a sequence of arrays along a new axis while preserving units The axis parameter specifies the index of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. This is a wrapper around np.stack that preserves units. """ v = np.stack(arrs) v = validate_numpy_wrapper_units(v, arrs) return v def array_like_field(data, x, field): field = data._determine_fields(field)[0] if isinstance(field, tuple): finfo = data.ds._get_field_info(field[0],field[1]) else: finfo = data.ds._get_field_info(field) if finfo.sampling_type == 'particle': units = finfo.output_units else: units = finfo.units if isinstance(x, YTArray): arr = copy.deepcopy(x) arr.convert_to_units(units) return arr if isinstance(x, np.ndarray): return data.ds.arr(x, units) else: return data.ds.quan(x, units) def get_binary_op_return_class(cls1, cls2): if cls1 is cls2: return cls1 if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)): return cls2 if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)): return cls1 if issubclass(cls1, YTQuantity): return cls2 if issubclass(cls2, YTQuantity): return cls1 if issubclass(cls1, cls2): return cls1 if issubclass(cls2, cls1): return cls2 else: raise RuntimeError("Undefined operation for a YTArray subclass. " "Received operand types (%s) and (%s)" % (cls1, cls2)) def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'): r""" Load YTArrays with unit information from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : str Filename to read. dtype : data-type, optional Data-type of the resulting array; default: float. delimiter : str, optional The string used to separate values. By default, this is any whitespace. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. comments : str, optional The character used to indicate the start of a comment; default: '#'. 
Examples -------- >>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t") """ f = open(fname, 'r') next_one = False units = [] num_cols = -1 for line in f.readlines(): words = line.strip().split() if len(words) == 0: continue if line[0] == comments: if next_one: units = words[1:] if len(words) == 2 and words[1] == "Units": next_one = True else: # Here we catch the first line of numbers try: col_words = line.strip().split(delimiter) for word in col_words: float(word) num_cols = len(col_words) break except ValueError: mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0]) f.close() if len(units) != num_cols: mylog.warning("Malformed or incomplete units header. Arrays will be " "dimensionless!") units = ["dimensionless"]*num_cols arrays = np.loadtxt(fname, dtype=dtype, comments=comments, delimiter=delimiter, converters=None, unpack=True, usecols=usecols, ndmin=0) if usecols is not None: units = [units[col] for col in usecols] mylog.info("Array units: %s" % ", ".join(units)) return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)]) def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='', footer='', comments='#'): r""" Write YTArrays with unit information to a text file. Parameters ---------- fname : str The file to write the YTArrays to. arrays : list of YTArrays or single YTArray The array(s) to write to the file. fmt : str or sequence of strs, optional A single format (%10.5f), or a sequence of formats. delimiter : str, optional String or character separating columns. header : str, optional String that will be written at the beginning of the file, before the unit header. footer : str, optional String that will be written at the end of the file. comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``yt.loadtxt``. Examples -------- >>> sp = ds.sphere("c", (100,"kpc")) >>> a = sp["density"] >>> b = sp["temperature"] >>> c = sp["velocity_x"] >>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t") """ if not isinstance(arrays, list): arrays = [arrays] units = [] for array in arrays: if hasattr(array, "units"): units.append(str(array.units)) else: units.append("dimensionless") if header != '': header += '\n' header += " Units\n " + '\t'.join(units) np.savetxt(fname, np.transpose(arrays), header=header, fmt=fmt, delimiter=delimiter, footer=footer, newline='\n', comments=comments)
[ 37811, 198, 56, 51, 19182, 1398, 13, 628, 198, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 2, 10097, 32501, 198, 2, 15069, 357, 66, 8, 2211, 11, 331, 83, 7712, 4816, 13, 198, 2, 198, 2, 4307, 6169, 739, ...
2.363842
9,735
from rest_framework import serializers

from posts.models import Post

post_detail_url = serializers.HyperlinkedIdentityField(
    view_name='posts-api:detail',
    lookup_field='slug',
)
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 198, 6738, 6851, 13, 27530, 1330, 2947, 198, 198, 7353, 62, 49170, 62, 6371, 796, 11389, 11341, 13, 38197, 25614, 7390, 26858, 15878, 7, 198, 220, 220, 220, 1570, 62, 3672, 11639, 24875, ...
3.063492
63
from tir import Webapp
import unittest

if __name__ == '__main__':
    unittest.main()
[ 6738, 48965, 1330, 5313, 1324, 198, 11748, 555, 715, 395, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 197, 403, 715, 395, 13, 12417, 3419, 198 ]
2.709677
31
# @Time   : {time}
# @Author : code_generator

from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import ListAPIView
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.response import Response
from rest_framework.decorators import api_view, authentication_classes, permission_classes, action
from common.custom import CommonPagination, RbacPermission
from django_filters.rest_framework import DjangoFilterBackend
from django.http import HttpResponse, FileResponse, JsonResponse
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_xops.basic import XopsResponse
from rest_xops.code import *
from django.db.models import Q
from django.apps import apps
from ..models import {model_camel_case_name}
from django.contrib.contenttypes.models import ContentType
from ..serializers.{model_name}_serializers import *


class {model_camel_case_name}View(ModelViewSet):
    queryset = {model_camel_case_name}.objects.all()
    serializer_class = {model_camel_case_name}Serializer
    filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
    pagination_class = CommonPagination
    ordering_fields = ('id',)
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    filter_fields = ({filter_fields})
    search_fields = ({search_fields})
[ 2, 2488, 7575, 220, 220, 220, 1058, 1391, 2435, 92, 198, 2, 2488, 13838, 220, 1058, 2438, 62, 8612, 1352, 198, 198, 6738, 1334, 62, 30604, 13, 1177, 28709, 1330, 9104, 7680, 7248, 198, 6738, 1334, 62, 30604, 13, 8612, 873, 1330, 734...
3.35514
428
# coding=utf-8

"""
Send a value of 1 as a heartbeat every time this collector is invoked.

#### Dependencies
None

#### Usage
Add the collector config as:

    enabled = True
    path = netuitive

Metrics are collected as:

    - metrics.heartbeat

Netuitive Change History
========================
DVG 2016/11/14 Initial version.

"""

import diamond.collector
from diamond.utils.config import load_config as load_server_config

try:
    import netuitive
except ImportError:
    netuitive = None
[ 2, 19617, 28, 40477, 12, 23, 198, 198, 37811, 198, 25206, 257, 1988, 286, 352, 355, 257, 36051, 790, 640, 428, 22967, 318, 24399, 13, 198, 198, 4242, 37947, 3976, 198, 14202, 198, 198, 4242, 29566, 198, 4550, 262, 22967, 4566, 355, ...
3.287582
153
import numpy as np import pandas as pd import sys markets = ["hangseng", "dax", "ftse", "sp", "nikkei"] market = markets[int(sys.argv[1])-1] # read GD data file dat = pd.read_csv("./num_res/{}.GD.csv".format(market)) # split into two experiments exp1_GD = dat[dat.columns[:5]] exp2_GD = dat[dat.columns[5:]] # calculate statistics stat1_GD = pd.DataFrame([exp1_GD.min(), exp1_GD.median(), exp1_GD.std()]) stat1_GD.index = ["Best", "Median", "Std."] stat2_GD = pd.DataFrame([exp2_GD.min(), exp2_GD.median(), exp2_GD.std()]) stat2_GD.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_GD = stat1_GD.loc["Median"].sort_values() best1_GD = list(meds1_GD.index[:2]) meds2_GD = stat2_GD.loc["Median"].sort_values() best2_GD = list(meds2_GD.index[:2]) print("{}.GD:".format(market), best1_GD[0], best1_GD[1]) # print("{}.GD:".format(market), best2_GD[0], best2_GD[1]) # TODO: check error # read Spacing data file dat = pd.read_csv("./num_res/{}.Spacing.csv".format(market)) # split into two experiments exp1_Spacing = dat[dat.columns[:5]] exp2_Spacing = dat[dat.columns[5:]] # calculate statistics stat1_Spacing = pd.DataFrame( [exp1_Spacing.min(), exp1_Spacing.median(), exp1_Spacing.std()]) stat1_Spacing.index = ["Best", "Median", "Std."] stat2_Spacing = pd.DataFrame( [exp2_Spacing.min(), exp2_Spacing.median(), exp2_Spacing.std()]) stat2_Spacing.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Spacing = stat1_Spacing.loc["Median"].sort_values() best1_Spacing = list(meds1_Spacing.index[:2]) meds2_Spacing = stat2_Spacing.loc["Median"].sort_values() best2_Spacing = list(meds2_Spacing.index[:2]) print("{}.Spacing:".format(market), best1_Spacing[0], best1_Spacing[1]) # print("{}.Spacing:".format(market), best2_Spacing[0], best2_Spacing[1]) # TODO: check error # read MaxSpread data file dat = pd.read_csv("./num_res/{}.MaxSpread.csv".format(market)) # split into two experiments exp1_MaxSpread = dat[dat.columns[:5]] exp2_MaxSpread = dat[dat.columns[5:]] # calculate statistics stat1_MaxSpread = pd.DataFrame( [exp1_MaxSpread.max(), exp1_MaxSpread.median(), exp1_MaxSpread.std()]) stat1_MaxSpread.index = ["Best", "Median", "Std."] stat2_MaxSpread = pd.DataFrame( [exp2_MaxSpread.max(), exp2_MaxSpread.median(), exp2_MaxSpread.std()]) stat2_MaxSpread.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_MaxSpread = stat1_MaxSpread.loc["Median"].sort_values(ascending=False) best1_MaxSpread = list(meds1_MaxSpread.index[:2]) meds2_MaxSpread = stat2_MaxSpread.loc["Median"].sort_values(ascending=False) best2_MaxSpread = list(meds2_MaxSpread.index[:2]) print("{}.MaxSpread:".format(market), best1_MaxSpread[0], best1_MaxSpread[1]) # print("{}.MaxSpread:".format(market), best2_MaxSpread[0], best2_MaxSpread[1]) # TODO: check error # read Delta data file dat = pd.read_csv("./num_res/{}.Delta.csv".format(market)) # split into two experiments exp1_Delta = dat[dat.columns[:5]] exp2_Delta = dat[dat.columns[5:]] # calculate statistics stat1_Delta = pd.DataFrame( [exp1_Delta.min(), exp1_Delta.median(), exp1_Delta.std()]) stat1_Delta.index = ["Best", "Median", "Std."] stat2_Delta = pd.DataFrame( [exp2_Delta.min(), exp2_Delta.median(), exp2_Delta.std()]) stat2_Delta.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Delta = stat1_Delta.loc["Median"].sort_values() best1_Delta = list(meds1_Delta.index[:2]) meds2_Delta = stat2_Delta.loc["Median"].sort_values() best2_Delta = list(meds2_Delta.index[:2]) print("{}.Delta:".format(market), 
best1_Delta[0], best1_Delta[1]) # print("{}.Delta:".format(market), best2_Delta[0], best2_Delta[1]) # TODO: check error # read IGD data file dat = pd.read_csv("./num_res/{}.IGD.csv".format(market)) # split into two experiments exp1_IGD = dat[dat.columns[:5]] exp2_IGD = dat[dat.columns[5:]] # calculate statistics stat1_IGD = pd.DataFrame([exp1_IGD.min(), exp1_IGD.median(), exp1_IGD.std()]) stat1_IGD.index = ["Best", "Median", "Std."] stat2_IGD = pd.DataFrame([exp2_IGD.min(), exp2_IGD.median(), exp2_IGD.std()]) stat2_IGD.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_IGD = stat1_IGD.loc["Median"].sort_values() best1_IGD = list(meds1_IGD.index[:2]) meds2_IGD = stat2_IGD.loc["Median"].sort_values() best2_IGD = list(meds2_IGD.index[:2]) print("{}.IGD:".format(market), best1_IGD[0], best1_IGD[1]) # print("{}.IGD:".format(market), best2_IGD[0], best2_IGD[1]) # TODO: check error # read Hypervolume data file dat = pd.read_csv("./num_res/{}.Hypervolume.csv".format(market)) # split into two experiments exp1_Hypervolume = dat[dat.columns[:5]] exp2_Hypervolume = dat[dat.columns[5:]] # calculate statistics stat1_Hypervolume = pd.DataFrame( [exp1_Hypervolume.max(), exp1_Hypervolume.median(), exp1_Hypervolume.std()]) stat1_Hypervolume.index = ["Best", "Median", "Std."] stat2_Hypervolume = pd.DataFrame( [exp2_Hypervolume.max(), exp2_Hypervolume.median(), exp2_Hypervolume.std()]) stat2_Hypervolume.index = ["Best", "Median", "Std."] # find best and second best algorithm meds1_Hypervolume = stat1_Hypervolume.loc["Median"].sort_values( ascending=False) best1_Hypervolume = list(meds1_Hypervolume.index[:2]) meds2_Hypervolume = stat2_Hypervolume.loc["Median"].sort_values( ascending=False) best2_Hypervolume = list(meds2_Hypervolume.index[:2]) print("{}.Hypervolume:".format(market), best1_Hypervolume[0], best1_Hypervolume[1]) # print("{}.Hypervolume:".format(market), # best2_Hypervolume[0], best2_Hypervolume[1]) # TODO: check error print("{}\n----------------------------------------------".format(market)) pd.options.display.float_format = '{:.2e}'.format stat1_overall = pd.concat( [stat1_GD, stat1_Spacing, stat1_MaxSpread, stat1_Delta, stat1_IGD, stat1_Hypervolume]) stat2_overall = pd.concat( [stat2_GD, stat2_Spacing, stat2_MaxSpread, stat2_Delta, stat2_IGD, stat2_Hypervolume]) arrays = [["GD", "GD", "GD", "Spacing", "Spacing", "Spacing", "MaxSpread", "MaxSpread", "MaxSpread", "Delta", "Delta", "Delta", "IGD", "IGD", "IGD", "Hypervolume", "Hypervolume", "Hypervolume"], stat1_overall.index ] index = pd.MultiIndex.from_arrays(arrays, names=["Metric", ""]) stat1_overall.index = index stat2_overall.index = index print(stat1_overall) print("----------------------------------------------") print(stat2_overall)
[ 11748, 299, 32152, 355, 45941, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 11748, 25064, 201, 198, 201, 198, 34162, 796, 14631, 71, 27725, 1516, 1600, 366, 67, 897, 1600, 366, 701, 325, 1600, 366, 2777, 1600, 366, 17187, 365,...
2.388346
2,763
""" Functions are useful untilities for SITperturb experiments Notes ----- Author : Zachary Labe Date : 13 August 2017 Usage ----- [1] calcDecJan(varx,vary,lat,lon,level,levsq) [2] calcDecJanFeb(varx,vary,lat,lon,level,levsq) [3] calc_indttest(varx,vary) [4] calc_weightedAve(var,lats) [5] calc_spatialCorr(varx,vary,lats,lons,weight) [6] calc_RMSE(varx,vary,lats,lons,weight) [7] calc_spatialCorrHeight(varx,vary,lats,lons,weight) [8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq) """ def calcDecJan(varx,vary,lat,lon,level,levsq): """ Function calculates average for December-January Parameters ---------- varx : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] vary : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] lat : 1d numpy array latitudes lon : 1d numpy array longitudes level : string Height of variable (surface or profile) levsq : integer number of levels Returns ------- varx_dj : 3d array or 4d array [year,lat,lon] or [year,lev,lat,lon] vary_dj : 3d array [year,lat,lon] or [year,lev,lat,lon] Usage ----- varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq) """ print('\n>>> Using calcDecJan function!') ### Import modules import numpy as np ### Reshape for 3d variables if level == 'surface': varxravel = np.reshape(varx.copy(), (int(varx.shape[0]*12), int(lat.shape[0]),int(lon.shape[0]))) varyravel = np.reshape(vary.copy(), (int(vary.shape[0]*12), int(lat.shape[0]),int(lon.shape[0]))) varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0])) vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) ) for i in range(0,varxravel.shape[0]-12,12): counter = 0 if i >= 12: counter = i//12 djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:]) djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:]) varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh, (2,int(lat.shape[0]),int(lon.shape[0]))), axis=0) vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf, (2,int(lat.shape[0]),int(lon.shape[0]))), axis=0) ### Reshape for 4d variables elif level == 'profile': varxravel = np.reshape(varx.copy(), (int(varx.shape[0]*12.),levsq, int(lat.shape[0]),int(lon.shape[0]))) varyravel = np.reshape(vary.copy(), (int(vary.shape[0]*12.),levsq, int(lat.shape[0]),int(lon.shape[0]))) varx_dj = np.empty((int(varx.shape[0]-1),levsq, int(lat.shape[0]),int(lon.shape[0]))) vary_dj = np.empty((int(vary.shape[0]-1),levsq, int(lat.shape[0]),int(lon.shape[0])) ) for i in range(0,varxravel.shape[0]-12,12): counter = 0 if i >= 12: counter = i//12 djappendh = np.append(varxravel[11+i,:,:,:], varxravel[12+i,:,:,:]) djappendf = np.append(varyravel[11+i,:,:,:], varyravel[12+i,:,:,:]) varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh, (2,levsq,int(lat.shape[0]), int(lon.shape[0]))),axis=0) vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf, (2,levsq,int(lat.shape[0]), int(lon.shape[0]))),axis=0) else: print(ValueError('Selected wrong height - (surface or profile!)!')) print('Completed: Organized data by months (ON,DJ,FM)!') print('*Completed: Finished calcDecJan function!') return varx_dj,vary_dj ############################################################################### ############################################################################### ############################################################################### def calcDecJanFeb(varx,vary,lat,lon,level,levsq): """ Function calculates average for December-January-February Parameters ---------- varx : 4d array or 5d array [year,month,lat,lon] or 
[year,month,lev,lat,lon] vary : 4d array or 5d array [year,month,lat,lon] or [year,month,lev,lat,lon] lat : 1d numpy array latitudes lon : 1d numpy array longitudes level : string Height of variable (surface or profile) levsq : integer number of levels Returns ------- varx_djf : 3d array or 4d array [year,lat,lon] or [year,lev,lat,lon] vary_djf : 3d array [year,lat,lon] or [year,lev,lat,lon] Usage ----- varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq) """ print('\n>>> Using calcDecJan function!') ### Import modules import numpy as np ### Reshape for 3d variables if level == 'surface': varxravel = np.reshape(varx.copy(), (int(varx.shape[0]*12), int(lat.shape[0]),int(lon.shape[0]))) varyravel = np.reshape(vary.copy(), (int(vary.shape[0]*12), int(lat.shape[0]),int(lon.shape[0]))) varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0])) vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) ) for i in range(0,varxravel.shape[0]-12,12): counter = 0 if i >= 12: counter = i//12 djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:]) djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:]) djfappendh = np.append(djfappendh1,varxravel[13+i,:,:]) djfappendf = np.append(djfappendf1,varyravel[13+i,:,:]) varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh, (3,int(lat.shape[0]),int(lon.shape[0]))), axis=0) vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf, (3,int(lat.shape[0]),int(lon.shape[0]))), axis=0) ### Reshape for 4d variables elif level == 'profile': varxravel = np.reshape(varx.copy(), (int(varx.shape[0]*12.),levsq, int(lat.shape[0]),int(lon.shape[0]))) varyravel = np.reshape(vary.copy(), (int(vary.shape[0]*12.),levsq, int(lat.shape[0]),int(lon.shape[0]))) varx_djf = np.empty((int(varx.shape[0]-1),levsq, int(lat.shape[0]),int(lon.shape[0]))) vary_djf = np.empty((int(vary.shape[0]-1),levsq, int(lat.shape[0]),int(lon.shape[0])) ) for i in range(0,varxravel.shape[0]-12,12): counter = 0 if i >= 12: counter = i//12 djfappendh1 = np.append(varxravel[11+i,:,:,:], varxravel[12+i,:,:,:]) djfappendf1 = np.append(varyravel[11+i,:,:,:], varyravel[12+i,:,:,:]) djfappendh = np.append(djfappendh1, varxravel[13+i,:,:,:]) djfappendf = np.append(djfappendf1, varyravel[13+i,:,:,:]) varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh, (3,levsq,int(lat.shape[0]), int(lon.shape[0]))),axis=0) vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf, (3,levsq,int(lat.shape[0]), int(lon.shape[0]))),axis=0) else: print(ValueError('Selected wrong height - (surface or profile!)!')) print('Completed: Organized data by months (DJF)!') print('*Completed: Finished calcDecJanFeb function!') return varx_djf,vary_djf ############################################################################### ############################################################################### ############################################################################### def calc_indttest(varx,vary): """ Function calculates statistical difference for 2 independent sample t-test Parameters ---------- varx : 3d array vary : 3d array Returns ------- stat = calculated t-statistic pvalue = two-tailed p-value Usage ----- stat,pvalue = calc_ttest(varx,vary) """ print('\n>>> Using calc_ttest function!') ### Import modules import numpy as np import scipy.stats as sts ### 2-independent sample t-test stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit') ### Significant at 95% confidence level pvalue[np.where(pvalue >= 0.05)] = np.nan pvalue[np.where(pvalue < 0.05)] = 1. 
print('*Completed: Finished calc_ttest function!') return stat,pvalue ############################################################################### ############################################################################### ############################################################################### def calc_weightedAve(var,lats): """ Area weights sit array 5d [ens,year,month,lat,lon] into [ens,year,month] Parameters ---------- var : 5d,4d,3d array of a gridded variable lats : 2d array of latitudes Returns ------- meanvar : weighted average for 3d,2d,1d array Usage ----- meanvar = calc_weightedAve(var,lats) """ print('\n>>> Using calc_weightedAve function!') ### Import modules import numpy as np ### Calculate weighted average for various dimensional arrays if var.ndim == 5: meanvar = np.empty((var.shape[0],var.shape[1],var.shape[2])) for ens in range(var.shape[0]): for i in range(var.shape[1]): for j in range(var.shape[2]): varq = var[ens,i,j,:,:] mask = np.isfinite(varq) & np.isfinite(lats) varmask = varq[mask] areamask = np.cos(np.deg2rad(lats[mask])) meanvar[ens,i,j] = np.nansum(varmask*areamask) \ /np.sum(areamask) elif var.ndim == 4: meanvar = np.empty((var.shape[0],var.shape[1])) for i in range(var.shape[0]): for j in range(var.shape[1]): varq = var[i,j,:,:] mask = np.isfinite(varq) & np.isfinite(lats) varmask = varq[mask] areamask = np.cos(np.deg2rad(lats[mask])) meanvar[i,j] = np.nansum(varmask*areamask)/np.sum(areamask) elif var.ndim == 3: meanvar = np.empty((var.shape[0])) for i in range(var.shape[0]): varq = var[i,:,:] mask = np.isfinite(varq) & np.isfinite(lats) varmask = varq[mask] areamask = np.cos(np.deg2rad(lats[mask])) meanvar[i] = np.nansum(varmask*areamask)/np.sum(areamask) elif var.ndim == 2: meanvar = np.empty((var.shape[0])) varq = var[:,:] mask = np.isfinite(varq) & np.isfinite(lats) varmask = varq[mask] areamask = np.cos(np.deg2rad(lats[mask])) meanvar = np.nansum(varmask*areamask)/np.sum(areamask) else: print(ValueError('Variable has the wrong dimensions!')) print('Completed: Weighted variable average!') print('*Completed: Finished calc_weightedAve function!') return meanvar ############################################################################### ############################################################################### ############################################################################### def calc_spatialCorr(varx,vary,lats,lons,weight): """ Calculates spatial correlation from pearson correlation coefficient Parameters ---------- varx : 2d array vary : 2d array lats : 1d array lons : 1d array of latitude weight : string (yes or no) Returns ------- corrcoef : 1d array of correlation coefficient (pearson r) Usage ----- corrcoef = calc_spatialCorr(varx,vary,lats,lons) """ print('\n>>> Using calc_spatialCorr function!') ### Import modules import numpy as np if weight == 'yes': # Computed weighted correlation coefficient ### mask mask = 'yes' if mask == 'yes': latq = np.where(lats > 40)[0] lats = lats[latq] varx = varx[latq,:] vary = vary[latq,:] print('MASKING LATITUDES!') ### Create 2d meshgrid for weights lon2,lat2 = np.meshgrid(lons,lats) ### Create 2d array of weights based on latitude gw = np.cos(np.deg2rad(lat2)) def m(x, w): """Weighted Mean""" wave = np.sum(x * w) / np.sum(w) print('Completed: Computed weighted average!') return wave def cov(x, y, w): """Weighted Covariance""" wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w) print('Completed: Computed weighted covariance!') return wcov def corr(x, y, w): """Weighted 
Correlation""" wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w)) print('Completed: Computed weighted correlation!') return wcor corrcoef = corr(varx,vary,gw) elif weight == 'no': ### Correlation coefficient from numpy function (not weighted) corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1] print('Completed: Computed NON-weighted correlation!') else: ValueError('Wrong weighted arguement in function!') print('*Completed: Finished calc_SpatialCorr function!') return corrcoef ############################################################################### ############################################################################### ############################################################################### def calc_RMSE(varx,vary,lats,lons,weight): """ Calculates root mean square weighted average Parameters ---------- varx : 2d array vary : 2d array lons : 1d array of latitude weight : string (yes or no) Returns ------- rmse : 1d array Usage ----- rmse = calc_RMSE(varx,vary,lats,lons) """ print('\n>>> Using calc_RMSE function!') ### Import modules import numpy as np from sklearn.metrics import mean_squared_error if weight == 'yes': # Computed weighted correlation coefficient ### mask mask = 'yes' if mask == 'yes': latq = np.where(lats > 40)[0] lats = lats[latq] varx = varx[latq,:] vary = vary[latq,:] print('MASKING LATITUDES!') ### Create 2d meshgrid for weights lon2,lat2 = np.meshgrid(lons,lats) ### Create 2d array of weights based on latitude gw = np.cos(np.deg2rad(lat2)) ### Calculate rmse sq_err = (varx - vary)**2 rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw)) elif weight == 'no': ### Root mean square error from sklearn (not weighted) rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel())) print('Completed: Computed NON-weighted correlation!') else: ValueError('Wrong weighted arguement in function!') print('*Completed: Finished calc_RMSE function!') return rmse ############################################################################### ############################################################################### ############################################################################### def calc_spatialCorrHeight(varx,vary,levs,lons,weight): """ Calculates spatial correlation from pearson correlation coefficient for grids over vertical height (17 pressure coordinate levels) Parameters ---------- varx : 2d array vary : 2d array levs : 1d array of levels lons : 1d array of latitude weight : string (yes or no) Returns ------- corrcoef : 1d array of correlation coefficient (pearson r) Usage ----- corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons) """ print('\n>>> Using calc_spatialCorrHeight function!') ### Import modules import numpy as np if weight == 'yes': # Computed weighted correlation coefficient ### Create 2d meshgrid for weights lon2,lev2 = np.meshgrid(lons,levs) ### Create 2d array of weights based on latitude gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5, 0.5,0.5,0.5,0.7,0.7,0.7,1.]) gw,gw2 = np.meshgrid(lons,gwq) def m(x, w): """Weighted Mean""" wave = np.sum(x * w) / np.sum(w) print('Completed: Computed weighted average (17 P Levels)!') return wave def cov(x, y, w): """Weighted Covariance""" wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w) print('Completed: Computed weighted covariance (17 P Levels)!') return wcov def corr(x, y, w): """Weighted Correlation""" wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w)) print('Completed: Computed weighted correlation (17 P Levels)!') return wcor corrcoef = 
corr(varx,vary,gw) elif weight == 'no': ### Correlation coefficient from numpy function (not weighted) corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1] print('Completed: Computed NON-weighted correlation!') else: ValueError('Wrong weighted argument in function!') print('*Completed: Finished calc_SpatialCorrHeight function!') return corrcoef ############################################################################### ############################################################################### ############################################################################### def calc_spatialCorrHeightLev(varx,vary,levs,lons,weight,levelq): """ Calculates spatial correlation from pearson correlation coefficient for grids over vertical height (17 pressure coordinate levels). Change the weighting for different level correlations Parameters ---------- varx : 2d array vary : 2d array levs : 1d array of levels lons : 1d array of latitude weight : string (yes or no) levelq : string (all, tropo, strato) Returns ------- corrcoef : 1d array of correlation coefficient (pearson r) Usage ----- corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels) """ print('\n>>> Using calc_spatialCorrHeightLev function!') ### Import modules import numpy as np if weight == 'yes': # Computed weighted correlation coefficient ### Create 2d meshgrid for weights lon2,lev2 = np.meshgrid(lons,levs) if levelq == 'all': ### Create 2d array of weights based on latitude gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5, 0.5,0.5,0.5,0.7,0.7,0.7,1.]) gw,gw2 = np.meshgrid(lons,gwq) elif levelq == 'tropo': gwq = np.array([1.0,1.0,1.0,1.0,0.5,0.5,0.5,0.2,0.2,0.,0.,0., 0.,0.,0.,0.,0.]) gw,gw2 = np.meshgrid(lons,gwq) elif levelq == 'strato': gwq = np.array([0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.5,1.,1.,1.,1. ,1.,1.]) gw,gw2 = np.meshgrid(lons,gwq) def m(x, w): """Weighted Mean""" wave = np.sum(x * w) / np.sum(w) print('Completed: Computed weighted average (17 P Levels)!') return wave def cov(x, y, w): """Weighted Covariance""" wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w) print('Completed: Computed weighted covariance (17 P Levels)!') return wcov def corr(x, y, w): """Weighted Correlation""" wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w)) print('Completed: Computed weighted correlation (17 P Levels)!') return wcor corrcoef = corr(varx,vary,gw) elif weight == 'no': ### Correlation coefficient from numpy function (not weighted) corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1] print('Completed: Computed NON-weighted correlation!') else: ValueError('Wrong weighted argument in function!') print('*Completed: Finished calc_SpatialCorrHeightLev function!') return corrcoef
[ 37811, 198, 24629, 2733, 389, 4465, 1566, 871, 329, 311, 2043, 11766, 5945, 10256, 198, 220, 198, 16130, 198, 30934, 198, 220, 220, 220, 6434, 1058, 18825, 560, 3498, 68, 198, 220, 220, 220, 7536, 220, 220, 1058, 1511, 2932, 2177, 198...
1.961821
11,577
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
from tvm import tir
from tvm.script import ty

# pylint: disable=no-member,invalid-name,unused-variable

# pylint: enable=no-member,invalid-name,unused-variable


def test_compute_inline_elementwise():
    sch = tir.Schedule(elementwise, debug_mode=True)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    sch.compute_inline(block_b)
    tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
    assert sch.get(block_c).name_hint == "C"


def test_compute_inline_under_loop():
    sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    sch.compute_inline(block_b)
    tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
    assert sch.get(block_c).name_hint == "C"


def test_compute_inline_as_dce():
    sch = tir.Schedule(elementwise_standalone, debug_mode=True)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    sch.compute_inline(block_b)
    tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"])
    assert sch.get(block_c).name_hint == "C"


def test_compute_inline_multi_consumer():
    sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    block_d = sch.get_block("D")
    sch.compute_inline(block_b)
    tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"])
    assert sch.get(block_c).name_hint == "C"
    assert sch.get(block_d).name_hint == "D"


def test_compute_inline_fail_multi_writer():
    sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True, error_render_level="detail")
    block_b = sch.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.compute_inline(block_b)


def test_reverse_compute_inline_elementwise():
    sch = tir.Schedule(elementwise, debug_mode=True)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    sch.reverse_compute_inline(block_c)
    tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
    assert sch.get(block_b).name_hint == "B"


def test_reverse_compute_inline_under_loop():
    sch = tir.Schedule(elementwise_under_loop, debug_mode=True)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    sch.reverse_compute_inline(block_c)
    tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"])
    assert sch.get(block_b).name_hint == "B"


def test_reverse_compute_inline_fail_as_dce():
    sch = tir.Schedule(elementwise_standalone, debug_mode=True)
    block_b = sch.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reverse_compute_inline(block_b)


def test_reverse_compute_inline_fail_multi_producer():
    sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mode=True)
    block_d = sch.get_block("D")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reverse_compute_inline(block_d)


def test_reverse_compute_inline_fail_multi_reader():
    sch = tir.Schedule(fail_multi_reader_writer, debug_mode=True)
    block_c = sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reverse_compute_inline(block_c)


def test_reverse_compute_multi_reverse_loads():
    sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mode=True)
    block_c = sch.get_block("C")
    sch.reverse_compute_inline(block_c)
    tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"])


def test_reverse_compute_fail_multi_reverse_loads():
    sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
    block_c = sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reverse_compute_inline(block_c)


def test_opaque_access_load():
    sch = tir.Schedule(opaque_access_load, debug_mode=True)
    block_b = sch.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.compute_inline(block_b)


def test_opaque_access_store():
    sch = tir.Schedule(opaque_access_store, debug_mode=True)
    block_b = sch.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.compute_inline(block_b)


def test_buffer_matched():
    sch = tir.Schedule(buffer_matched, debug_mode=True)
    block_b = sch.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.compute_inline(block_b)


def test_compute_inline_predicate():
    sch = tir.Schedule(elementwise_predicate, debug_mode=True)
    block_b = sch.get_block("B")
    sch.compute_inline(block_b)
    tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"])


def test_compute_inline_multi_loads():
    sch = tir.Schedule(elementwise_multi_loads, debug_mode=True)
    block_b = sch.get_block("B")
    sch.compute_inline(block_b)
    tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"])


if __name__ == "__main__":
    test_compute_inline_elementwise()
    test_compute_inline_under_loop()
    test_compute_inline_as_dce()
    test_compute_inline_multi_consumer()
    test_compute_inline_fail_multi_writer()
    test_reverse_compute_inline_elementwise()
    test_reverse_compute_inline_under_loop()
    test_reverse_compute_inline_fail_as_dce()
    test_reverse_compute_inline_fail_multi_producer()
    test_reverse_compute_inline_fail_multi_reader()
    test_reverse_compute_multi_reverse_loads()
    test_reverse_compute_fail_multi_reverse_loads()
    test_opaque_access_load()
    test_opaque_access_store()
    test_buffer_matched()
    test_compute_inline_predicate()
    test_compute_inline_multi_loads()
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
2.578299
2,516
from flask import Flask, Response, request, redirect
import subprocess
import tempfile
import json
import yaml
import signal
import threading
import time
import copy

app = Flask(__name__)

jobs_lock = threading.Lock()
jobs = []


def logspooler(job):
    with open(job.logname, "r") as f:
        while True:
            r = f.read(4096)
            if r:
                yield r
            else:
                with job.updatelock:
                    if job.status["state"] != "Running":
                        break
                time.sleep(1)


if __name__ == "__main__":
    # app.debug = True
    app.run()
[ 6738, 42903, 1330, 46947, 11, 18261, 11, 2581, 11, 18941, 198, 11748, 850, 14681, 198, 11748, 20218, 7753, 198, 11748, 33918, 198, 11748, 331, 43695, 198, 11748, 6737, 198, 11748, 4704, 278, 198, 11748, 640, 198, 11748, 4866, 198, 198, ...
2.045902
305
__source__ = 'https://leetcode.com/problems/reverse-bits/description/' # https://github.com/kamyu104/LeetCode/blob/master/Python/reverse-bits.py # Time : O(n) # Space: O(1) # Bit Manipulation # # Description: Leetcode # 190. Reverse Bits # # Reverse bits of a given 32 bits unsigned integer. # # For example, given input 43261596 (represented in binary as 00000010100101000001111010011100), # return 964176192 (represented in binary as 00111001011110000010100101000000). # # Follow up: # If this function is called many times, how would you optimize it? # # Companies # Apple Airbnb # Related Topics # Bit Manipulation # Similar Questions # Number of 1 Bits # import unittest if __name__ == '__main__': unittest.main() Java = ''' # Thought: # 1ms 100% class Solution { // you need treat n as an unsigned value public int reverseBits(int n) { int ret = 0; for (int i = 0; i < 32; i++) { if ((n & 1) != 0) { ret |= 1; //same as // res += n & 1 } n >>>= 1; // padding 0 on the left side if (i < 31) { // CATCH: for last digit, don't shift! ret <<= 1; } } return ret; } } We first intitialize result to 0. We then iterate from 0 to 31 (an integer has 32 bits). In each iteration: We first shift result to the left by 1 bit. Then, if the last digit of input n is 1, we add 1 to result. To find the last digit of n, we just do: (n & 1) Example, if n=5 (101), n&1 = 101 & 001 = 001 = 1; however, if n = 2 (10), n&1 = 10 & 01 = 0). Finally, we update n by shifting it to the right by 1 (n >>= 1) At the end of the iteration, we return result. Example, if input n = 13 (represented in binary as 0000_0000_0000_0000_0000_0000_0000_1101, the "_" is for readability), calling reverseBits(13) should return: 1011_0000_0000_0000_0000_0000_0000_0000 Here is how our algorithm would work for input n = 13: Initially, result = 0 = 0000_0000_0000_0000_0000_0000_0000_0000, n = 13 = 0000_0000_0000_0000_0000_0000_0000_1101 Starting for loop: i = 0: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0000. n&1 = 0000_0000_0000_0000_0000_0000_0000_1101 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 therefore result = result + 1 = 0000_0000_0000_0000_0000_0000_0000_0000 + 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 We right shift n by 1 (n >>= 1) to get: n = 0000_0000_0000_0000_0000_0000_0000_0110. We then go to the next iteration. i = 1: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0010; n&1 = 0000_0000_0000_0000_0000_0000_0000_0110 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0000 = 0; therefore we don't increment result. We right shift n by 1 (n >>= 1) to get: n = 0000_0000_0000_0000_0000_0000_0000_0011. We then go to the next iteration. i = 2: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_0100. n&1 = 0000_0000_0000_0000_0000_0000_0000_0011 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 therefore result = result + 1 = 0000_0000_0000_0000_0000_0000_0000_0100 + 0000_0000_0000_0000_0000_0000_0000_0001 = result = 0000_0000_0000_0000_0000_0000_0000_0101 We right shift n by 1 to get: n = 0000_0000_0000_0000_0000_0000_0000_0001. We then go to the next iteration. i = 3: result = result << 1 = 0000_0000_0000_0000_0000_0000_0000_1010. 
n&1 = 0000_0000_0000_0000_0000_0000_0000_0001 & 0000_0000_0000_0000_0000_0000_0000_0001 = 0000_0000_0000_0000_0000_0000_0000_0001 = 1 therefore result = result + 1 = = 0000_0000_0000_0000_0000_0000_0000_1011 We right shift n by 1 to get: n = 0000_0000_0000_0000_0000_0000_0000_0000 = 0. Now, from here to the end of the iteration, n is 0, so (n&1) will always be 0 and n >>=1 will not change n. The only change will be for result <<=1, i.e. shifting result to the left by 1 digit. Since there we have i=4 to i = 31 iterations left, this will result in padding 28 0's to the right of result. i.e at the end, we get result = 1011_0000_0000_0000_0000_0000_0000_0000 This is exactly what we expected to get # 1ms 100% class Solution { // you need treat n as an unsigned value public int reverseBits(int n) { if (n == 0) return 0; int result = 0; for (int i = 0; i < 32; i++) { result <<= 1; if ((n & 1) == 1) result++; n >>= 1; } return result; } } # 1ms 100% class Solution { // you need treat n as an unsigned value public int reverseBits(int n) { n = ((n & 0x55555555) << 1) | ((n & 0xAAAAAAAA) >>> 1); n = ((n & 0x33333333) << 2) | ((n & 0xCCCCCCCC) >>> 2); n = ((n & 0x0F0F0F0F) << 4) | ((n & 0xF0F0F0F0) >>> 4); n = ((n & 0x00FF00FF) << 8) | ((n & 0xFF00FF00) >>> 8); return (n >>> 16) | (n << 16); } } '''
[ 834, 10459, 834, 796, 705, 5450, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 50188, 12, 9895, 14, 11213, 14, 6, 198, 2, 3740, 1378, 12567, 13, 785, 14, 74, 14814, 84, 13464, 14, 3123, 316, 10669, 14, 2436, 672, 14, 9866, ...
2.489394
1,980
"""Version information.""" # The following line *must* be the last in the module, exactly as formatted: __version__ = "0.16.1"
[ 37811, 14815, 1321, 526, 15931, 198, 198, 2, 383, 1708, 1627, 1635, 27238, 9, 307, 262, 938, 287, 262, 8265, 11, 3446, 355, 39559, 25, 198, 834, 9641, 834, 796, 366, 15, 13, 1433, 13, 16, 1, 198 ]
3.368421
38
from sys import argv
from PyPDF2 import PdfFileReader, PdfFileWriter
import re


range_pattern = re.compile(r'(\d+)(\.\.|-)(\d+)')
comma_pattern = re.compile('\d+(,\d+)*')


if __name__ == '__main__':
    assert(len(argv) > 1), "usage examle:\npython3 selective_merge_pdf.py file1.pdf 1-3 file2.pdf 3,4,10 file1.pdf 50"
    assert(len(argv) % 2 == 1), "invalid arguments; supply page numbers after each pdf name"

    files_names = argv[1::2]
    pages_args = argv[2::2]

    pdf_writer = PdfFileWriter()
    for file_name, pages in zip(files_names, pages_args):
        pdf_reader = PdfFileReader(file_name)
        last_page_index = pdf_reader.getNumPages()
        pages = pages_args_to_array(pages)
        pages_to_add = list(filter(lambda i: i >= 0 and i <= last_page_index, pages))
        for page in pages_to_add:
            pdf_writer.addPage(pdf_reader.getPage(page - 1))

    with open("merged.pdf", 'wb') as out:
        pdf_writer.write(out)
[ 6738, 25064, 1330, 1822, 85, 198, 6738, 9485, 20456, 17, 1330, 350, 7568, 8979, 33634, 11, 350, 7568, 8979, 34379, 198, 11748, 302, 628, 198, 9521, 62, 33279, 796, 302, 13, 5589, 576, 7, 81, 6, 38016, 67, 10, 5769, 17405, 17405, 91,...
2.507003
357
import math

from vp import geom_tools


def horizon_error(ground_truth_horizon, detected_horizon, image_dims):
    """Calculates error in a detected horizon.

    This measures the max distance between the detected horizon line and
    the ground truth horizon line, within the image's x-axis, and
    normalized by image height.

    Args:
        ground_truth_horizon: Tuple with (slope, intercept) for the GT horizon line.
        detected_horizon: Tuple with (slope, intercept) for the detected horizon line.
        image_dims: Tuple of integers, (width, height) of the image, in pixels.

    Returns:
        Float, or None if a horizon is missing altogether.
    """
    if ground_truth_horizon is None or detected_horizon is None:
        return None

    width, height = image_dims
    return max(abs(gt(0) - dt(0)),
               abs(gt(width) - dt(width))) / height


def vp_direction_error(ground_truth_vps, detected_vps, image_dims):
    """Measures error in direction from center of detected vanishing points.

    Each detected VP is matched with its closest unclaimed ground truth VP.

    Args:
        ground_truth_vps: List of ground truth VP point tuples.
        detected_vps: List of detected VP point tuples.
        image_dims: Tuple of integers, (width, height) of the image, in pixels.

    Returns:
        List with float degrees of error for each ground truth VP.
        Error is None for missing VPs.
    """
    principal_point = (image_dims[0] // 2, image_dims[1] // 2)
    point_pair_dists = []
    for gt_vp in ground_truth_vps:
        for dt_vp in detected_vps:
            gt_angle = geom_tools.get_line_angle((
                principal_point[0], principal_point[1], gt_vp[0], gt_vp[1]))
            dt_angle = geom_tools.get_line_angle((
                principal_point[0], principal_point[1], dt_vp[0], dt_vp[1]))
            angle_diff = 180 - abs(abs(gt_angle - dt_angle) - 180)
            point_pair_dists.append((angle_diff, gt_vp, dt_vp))

    point_pair_dists = sorted(point_pair_dists, key=lambda k: k[0])

    gt_vp_to_error = {}
    seen_dt_vps = set()
    for distance, gt_vp, dt_vp in point_pair_dists:
        if gt_vp in gt_vp_to_error or dt_vp in seen_dt_vps:
            continue
        gt_vp_to_error[gt_vp] = distance
        seen_dt_vps.add(dt_vp)

    return [gt_vp_to_error.get(gt, None) for gt in ground_truth_vps]


def location_accuracy_error(ground_truth_vps, detected_vps):
    """Measures average error in the location of detected vanishing points.

    "Missed" or "extra" VPs do not count against the score.
    Based on log distance of detected vp from ground truth vp.

    Args:
        ground_truth_vps: List of ground truth VP point tuples.
        detected_vps: List of detected VP point tuples.

    Returns:
        Float, error.
    """
    if len(ground_truth_vps) == 0 or len(detected_vps) == 0:
        return 0

    point_pair_dists = []
    for gt_vp in ground_truth_vps:
        for dt_vp in detected_vps:
            distance = geom_tools.point_to_point_dist(gt_vp, dt_vp)
            point_pair_dists.append((distance, gt_vp, dt_vp))

    sorted(point_pair_dists, key=lambda k: k[0])

    seen_gt_vps = set()
    seen_dt_vps = set()
    total_error = 0
    for distance, gt_vp, dt_vp in point_pair_dists:
        if gt_vp in seen_gt_vps or dt_vp in seen_dt_vps:
            continue
        seen_gt_vps.add(gt_vp)
        seen_dt_vps.add(dt_vp)
        if distance > 0:
            total_error += math.log(distance)

    return total_error / min(len(detected_vps), len(ground_truth_vps))


def num_model_detection_error(ground_truth_vps, detected_vps):
    """Measures error in the number of detected vanishing points.

    Returns:
        Integer, positive when there are too many VPs, negative
        when there are too few.
    """
    return len(detected_vps) - len(ground_truth_vps)
[ 11748, 10688, 198, 198, 6738, 410, 79, 1330, 4903, 296, 62, 31391, 628, 198, 4299, 17810, 62, 18224, 7, 2833, 62, 35310, 62, 17899, 8637, 11, 12326, 62, 17899, 8637, 11, 2939, 62, 67, 12078, 2599, 198, 220, 220, 220, 37227, 9771, 31...
2.375077
1,629
import numpy as np
import argparse
import composition
import os
import json

import torch

from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs


if __name__ == '__main__':
    main()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 1822, 29572, 198, 11748, 11742, 198, 11748, 28686, 198, 11748, 33918, 198, 198, 11748, 28034, 198, 198, 6738, 7906, 929, 13, 14016, 418, 13, 9078, 13165, 354, 13, 16634, 13, 7295, 1330, 10373, ...
2.866667
120
"""igvm - The command line interface Copyright (c) 2017 InnoGames GmbH """ from __future__ import print_function from argparse import ArgumentParser, _SubParsersAction from logging import StreamHandler, root as root_logger import time from fabric.network import disconnect_all from igvm.commands import ( change_address, disk_set, evacuate, host_info, mem_set, vcpu_set, vm_build, vm_delete, vm_migrate, vm_rename, vm_restart, vm_start, vm_stop, vm_sync, vm_define, ) from igvm.libvirt import close_virtconns def parse_args(): top_parser = IGVMArgumentParser('igvm') top_parser.add_argument('--silent', '-s', action='count', default=0) top_parser.add_argument('--verbose', '-v', action='count', default=0) subparsers = top_parser.add_subparsers(help='Actions') subparser = subparsers.add_parser( 'build', description=vm_build.__doc__, ) subparser.set_defaults(func=vm_build) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( '--postboot', metavar='postboot_script', help='Run postboot_script on the guest after first boot', ) subparser.add_argument( '--skip-puppet', action='store_false', dest='run_puppet', help='Skip running puppet in chroot before powering up', ) subparser.add_argument( '--debug-puppet', action='store_true', help='Run puppet in debug mode', ) subparser.add_argument( '--ignore-reserved', dest='allow_reserved_hv', action='store_true', help='Allow building on a Host which has the state online_reserved', ) subparser.add_argument( '--rebuild', dest='rebuild', action='store_true', help='Rebuild already defined VM or build it if not defined', ) subparser.add_argument( '--soft-preferences', dest='soft_preferences', action='store_true', help='Overrules all preferences so that Hypervisors are not excluded. ' 'Use this if igvm fails to find a matching Hypervisor, but you ' 'are in urgent need to do it anyway. Hint: If igvm fails to find ' 'a matching Hypervisor something might be really wrong. Run igvm ' 'with --verbose to check why it fails finding a Hypervisor.', ) subparser = subparsers.add_parser( 'migrate', description=vm_migrate.__doc__, ) subparser.set_defaults(func=vm_migrate) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( 'hypervisor_hostname', nargs='?', default=None, help='Hostname of destination hypervisor', ) subparser.add_argument( '--run-puppet', action='store_true', help='Run puppet in chroot before powering up', ) subparser.add_argument( '--debug-puppet', action='store_true', help='Run puppet in debug mode', ) subparser.add_argument( '--offline', action='store_true', help='Force offline migration', ) subparser.add_argument( '--ignore-reserved', dest='allow_reserved_hv', action='store_true', help='Allow migration to a Host which has the state online_reserved', ) subparser.add_argument( '--offline-transport', default='drbd', choices=('drbd', 'netcat', 'xfs'), help=( 'Specify drbd (default), netcat or xfs transport to migrate ' 'disk image' ), ) subparser.add_argument( '--no-shutdown', action='store_true', help=( 'Don\'t shutdown VM during offline migration, igvm will wait for' ' operator to shut down VM for 24h.' ), ) subparser.add_argument( '--enforce-vm-env', dest='enforce_vm_env', action='store_true', help='Build or migrate VM only to a HV with the same environment of VM' ) subparser.add_argument( '--disk-size', dest='disk_size', type=int, help='Resize disk of migrated VM. Expects new size in GiB. 
' 'Works only with --offline --offline-transport=xfs', ) subparser.add_argument( '--soft-preferences', dest='soft_preferences', action='store_true', help='Overrules all preferences so that Hypervisors are not excluded. ' 'Use this if igvm fails to find a matching Hypervisor, but you ' 'are in urgent need to do it anyway. Hint: If igvm fails to find ' 'a matching Hypervisor something might be really wrong. Run igvm ' 'with --verbose to check why it fails finding a Hypervisor.', ) subparser = subparsers.add_parser( 'change-address', description=disk_set.__doc__, ) subparser.set_defaults(func=change_address) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( 'new_address', help=( 'New IPv4 address of VM' ) ) subparser.add_argument( '--offline', action='store_true', help='Perform IP address change offline', ) subparser.add_argument( '--migrate', action='store_true', help='Migrate VM to new HV while changing IP address', ) subparser.add_argument( '--ignore-reserved', dest='allow_reserved_hv', action='store_true', help='Allow migration to a Host which has the state online_reserved', ) subparser.add_argument( '--offline-transport', default='drbd', help=( 'Specify drbd (default) or netcat transport to migrate disk image' ), ) subparser = subparsers.add_parser( 'disk-set', description=disk_set.__doc__, ) subparser.set_defaults(func=disk_set) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( 'size', help=( 'New disk size with an optional unit (default GiB). ' 'Can be specified relative with "+". Only integers are allowed' ) ) subparser = subparsers.add_parser( 'mem-set', description=mem_set.__doc__, ) subparser.set_defaults(func=mem_set) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( 'size', help=( 'New memory size with optional unit (default is MiB).' 'Only integers are allowed.' 
), ) subparser.add_argument( '--offline', action='store_true', help='Shutdown VM, change memory, and restart VM', ) subparser = subparsers.add_parser( 'vcpu-set', description=vcpu_set.__doc__, ) subparser.set_defaults(func=vcpu_set) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( 'count', type=int, help='New number of CPUs', ) subparser.add_argument( '--offline', action='store_true', help='Shutdown VM, change CPUs, and restart VM', ) subparser = subparsers.add_parser( 'start', description=vm_start.__doc__, ) subparser.set_defaults(func=vm_start) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( '--unretire', nargs='?', const='maintenance', help='Unretire a VM, set it to given state, maintenance by default', ) subparser = subparsers.add_parser( 'stop', description=vm_stop.__doc__, ) subparser.set_defaults(func=vm_stop) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( '--force', action='store_true', help='Do not wait for guest to shutdown gracefully', ) subparser.add_argument( '--retire', action='store_true', help='Retire VM after stopping it', ) subparser = subparsers.add_parser( 'restart', description=vm_restart.__doc__, ) subparser.set_defaults(func=vm_restart) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( '--force', action='store_true', help='Do not wait for guest to shutdown gracefully', ) subparser.add_argument( '--no-redefine', action='store_true', help='Do not redefine the domain to use latest hypervisor settings', ) subparser = subparsers.add_parser( 'delete', description=vm_delete.__doc__, ) subparser.set_defaults(func=vm_delete) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( '--retire', action='store_true', help='Set VM state to "retired" on Serveradmin instead of deleting', ) subparser = subparsers.add_parser( 'info', description=host_info.__doc__, ) subparser.set_defaults(func=host_info) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser = subparsers.add_parser( 'sync', description=vm_sync.__doc__, ) subparser.set_defaults(func=vm_sync) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser = subparsers.add_parser( 'rename', description=vm_rename.__doc__, ) subparser.set_defaults(func=vm_rename) subparser.add_argument( 'vm_hostname', help='Hostname of the guest system', ) subparser.add_argument( 'new_hostname', help='New hostname', ) subparser.add_argument( '--offline', action='store_true', help='Shutdown VM, if running', ) subparser = subparsers.add_parser( 'evacuate', description=evacuate.__doc__, ) subparser.set_defaults(func=evacuate) subparser.add_argument( 'hv_hostname', help='Hostname of the hypervisor', ) subparser.add_argument( 'dst_hv_hostname', nargs='?', default=None, help='Hostname of destination hypervisor', ) subparser.add_argument( '--dry-run', action='store_true', help='Do not migrate but just print what would be done' ) subparser.add_argument( '--offline', nargs='*', help='Migrate VMs matching the given serveradmin function offline', ) subparser.add_argument( '--ignore-reserved', dest='allow_reserved_hv', action='store_true', help='Allow migrating to a host which has the state online_reserved', ) subparser.add_argument( '--soft-preferences', dest='soft_preferences', action='store_true', help='Overrules all preferences so that 
Hypervisors are not excluded. ' 'Use this if igvm fails to find a matching Hypervisor, but you ' 'are in urgent need to do it anyway. Hint: If igvm fails to find ' 'a matching Hypervisor something might be really wrong. Run igvm ' 'with --verbose to check why it fails finding a Hypervisor.', ) subparser = subparsers.add_parser( 'define', description=vm_define.__doc__, ) subparser.set_defaults(func=vm_define) subparser.add_argument('vm_hostname', help='Hostname of the guest system') return vars(top_parser.parse_args()) def main(): args = parse_args() configure_root_logger(args.pop('silent'), args.pop('verbose')) try: args.pop('func')(**args) finally: # Fabric requires the disconnect function to be called after every # use. We are also taking our chance to disconnect from # the hypervisors. disconnect_all() close_virtconns() # The underlying library of Fabric, Paramiko, raises an error, on # destruction right after the disconnect function is called. We are # sleeping for a little while to avoid this. time.sleep(0.1) def configure_root_logger(silent, verbose): root_logger.addHandler(IGVMLogHandler()) # We are summing up the silent and verbose arguments in here. It # is not really meaningful to use them both, but giving an error is not # better. See Python logging library documentation [1] for the levels. # Paramiko is overly verbose. We configure it for one level higher. # # [1] https://docs.python.org/library/logging.html#logging-levels level = 20 + (silent - verbose) * 10 root_logger.setLevel(level) root_logger.getChild('paramiko').setLevel(level + 10)
[ 37811, 328, 14761, 532, 383, 3141, 1627, 7071, 198, 198, 15269, 357, 66, 8, 2177, 554, 3919, 24474, 402, 2022, 39, 198, 37811, 198, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 1822, 29572, 1330, 45751, 46677, 11, 48...
2.286207
5,793
import os, sys
from os.path import dirname as d
from os.path import abspath, join
root_dir = d(d(d(abspath(__file__))))
sys.path.append(root_dir)

from openprompt.data_utils.conditional_generation_dataset import PROCESSORS

base_path = os.path.join(root_dir, "datasets/CondGen")
[ 11748, 28686, 11, 25064, 198, 6738, 28686, 13, 6978, 1330, 26672, 3672, 355, 288, 198, 6738, 28686, 13, 6978, 1330, 2352, 6978, 11, 4654, 198, 15763, 62, 15908, 796, 288, 7, 67, 7, 67, 7, 397, 2777, 776, 7, 834, 7753, 834, 35514, ...
2.752475
101
## @file
# Standardized Error Hanlding infrastructures.
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#

FILE_OPEN_FAILURE = 1
FILE_WRITE_FAILURE = 2
FILE_PARSE_FAILURE = 3
FILE_READ_FAILURE = 4
FILE_CREATE_FAILURE = 5
FILE_CHECKSUM_FAILURE = 6
FILE_COMPRESS_FAILURE = 7
FILE_DECOMPRESS_FAILURE = 8
FILE_MOVE_FAILURE = 9
FILE_DELETE_FAILURE = 10
FILE_COPY_FAILURE = 11
FILE_POSITIONING_FAILURE = 12
FILE_ALREADY_EXIST = 13
FILE_NOT_FOUND = 14
FILE_TYPE_MISMATCH = 15
FILE_CASE_MISMATCH = 16
FILE_DUPLICATED = 17
FILE_UNKNOWN_ERROR = 0x0FFF

OPTION_UNKNOWN = 0x1000
OPTION_MISSING = 0x1001
OPTION_CONFLICT = 0x1002
OPTION_VALUE_INVALID = 0x1003
OPTION_DEPRECATED = 0x1004
OPTION_NOT_SUPPORTED = 0x1005
OPTION_UNKNOWN_ERROR = 0x1FFF

PARAMETER_INVALID = 0x2000
PARAMETER_MISSING = 0x2001
PARAMETER_UNKNOWN_ERROR = 0x2FFF

FORMAT_INVALID = 0x3000
FORMAT_NOT_SUPPORTED = 0x3001
FORMAT_UNKNOWN = 0x3002
FORMAT_UNKNOWN_ERROR = 0x3FFF

RESOURCE_NOT_AVAILABLE = 0x4000
RESOURCE_ALLOCATE_FAILURE = 0x4001
RESOURCE_FULL = 0x4002
RESOURCE_OVERFLOW = 0x4003
RESOURCE_UNDERRUN = 0x4004
RESOURCE_UNKNOWN_ERROR = 0x4FFF

ATTRIBUTE_NOT_AVAILABLE = 0x5000
ATTRIBUTE_GET_FAILURE = 0x5001
ATTRIBUTE_SET_FAILURE = 0x5002
ATTRIBUTE_UPDATE_FAILURE = 0x5003
ATTRIBUTE_ACCESS_DENIED = 0x5004
ATTRIBUTE_UNKNOWN_ERROR = 0x5FFF

IO_NOT_READY = 0x6000
IO_BUSY = 0x6001
IO_TIMEOUT = 0x6002
IO_UNKNOWN_ERROR = 0x6FFF

COMMAND_FAILURE = 0x7000

PERMISSION_FAILURE = 0x8000

CODE_ERROR = 0xC0DE

AUTOGEN_ERROR = 0xF000
PARSER_ERROR = 0xF001
BUILD_ERROR = 0xF002
GENFDS_ERROR = 0xF003
ECC_ERROR = 0xF004
EOT_ERROR = 0xF005
DDC_ERROR = 0xF009
WARNING_AS_ERROR = 0xF006
MIGRATION_ERROR = 0xF010
PCD_VALIDATION_INFO_ERROR = 0xF011
PCD_VARIABLE_ATTRIBUTES_ERROR = 0xF012
PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR = 0xF013
ABORT_ERROR = 0xFFFE
UNKNOWN_ERROR = 0xFFFF

## Error message of each error code
gErrorMessage = {
    FILE_NOT_FOUND          : "File/directory not found in workspace",
    FILE_OPEN_FAILURE       : "File open failure",
    FILE_WRITE_FAILURE      : "File write failure",
    FILE_PARSE_FAILURE      : "File parse failure",
    FILE_READ_FAILURE       : "File read failure",
    FILE_CREATE_FAILURE     : "File create failure",
    FILE_CHECKSUM_FAILURE   : "Invalid checksum of file",
    FILE_COMPRESS_FAILURE   : "File compress failure",
    FILE_DECOMPRESS_FAILURE : "File decompress failure",
    FILE_MOVE_FAILURE       : "File move failure",
    FILE_DELETE_FAILURE     : "File delete failure",
    FILE_COPY_FAILURE       : "File copy failure",
    FILE_POSITIONING_FAILURE: "Failed to seeking position",
    FILE_ALREADY_EXIST      : "File or directory already exists",
    FILE_TYPE_MISMATCH      : "Incorrect file type",
    FILE_CASE_MISMATCH      : "File name case mismatch",
    FILE_DUPLICATED         : "Duplicated file found",
    FILE_UNKNOWN_ERROR      : "Unknown error encountered on file",

    OPTION_UNKNOWN          : "Unknown option",
    OPTION_MISSING          : "Missing option",
    OPTION_CONFLICT         : "Conflict options",
    OPTION_VALUE_INVALID    : "Invalid value of option",
    OPTION_DEPRECATED       : "Deprecated option",
    OPTION_NOT_SUPPORTED    : "Unsupported option",
    OPTION_UNKNOWN_ERROR    : "Unknown error when processing options",

    PARAMETER_INVALID       : "Invalid parameter",
    PARAMETER_MISSING       : "Missing parameter",
    PARAMETER_UNKNOWN_ERROR : "Unknown error in parameters",

    FORMAT_INVALID          : "Invalid syntax/format",
    FORMAT_NOT_SUPPORTED    : "Not supported syntax/format",
    FORMAT_UNKNOWN          : "Unknown format",
    FORMAT_UNKNOWN_ERROR    : "Unknown error in syntax/format ",

    RESOURCE_NOT_AVAILABLE  : "Not available",
    RESOURCE_ALLOCATE_FAILURE : "Allocate failure",
    RESOURCE_FULL           : "Full",
    RESOURCE_OVERFLOW       : "Overflow",
    RESOURCE_UNDERRUN       : "Underrun",
    RESOURCE_UNKNOWN_ERROR  : "Unknown error",

    ATTRIBUTE_NOT_AVAILABLE : "Not available",
    ATTRIBUTE_GET_FAILURE   : "Failed to retrieve",
    ATTRIBUTE_SET_FAILURE   : "Failed to set",
    ATTRIBUTE_UPDATE_FAILURE: "Failed to update",
    ATTRIBUTE_ACCESS_DENIED : "Access denied",
    ATTRIBUTE_UNKNOWN_ERROR : "Unknown error when accessing",

    COMMAND_FAILURE         : "Failed to execute command",

    IO_NOT_READY            : "Not ready",
    IO_BUSY                 : "Busy",
    IO_TIMEOUT              : "Timeout",
    IO_UNKNOWN_ERROR        : "Unknown error in IO operation",

    UNKNOWN_ERROR           : "Unknown error",
}

## Exception indicating a fatal error

if __name__ == "__main__":
    pass
[ 2235, 2488, 7753, 201, 198, 2, 8997, 1143, 13047, 9530, 335, 278, 1167, 5685, 1356, 942, 13, 201, 198, 2, 201, 198, 2, 15069, 357, 66, 8, 4343, 532, 1853, 11, 8180, 10501, 13, 1439, 2489, 10395, 29847, 11473, 29, 201, 198, 2, 770,...
2.198593
2,417
import torch
import torchvision.transforms as transforms
from torch.utils.data import Dataset
import glob
from PIL import Image
import random
[ 11748, 28034, 198, 11748, 28034, 10178, 13, 7645, 23914, 355, 31408, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 16092, 292, 316, 198, 11748, 15095, 198, 6738, 350, 4146, 1330, 7412, 198, 11748, 4738, 628 ]
4.085714
35
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.

from mixbox import entities, fields

import cybox
import cybox.bindings.cybox_common as common_binding
[ 2, 15069, 357, 66, 8, 2177, 11, 383, 17168, 2200, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 4091, 38559, 24290, 13, 14116, 329, 1844, 2846, 13, 198, 198, 6738, 5022, 3524, 1330, 12066, 11, 7032, 198, 198, 11748, 3075, 3524, 198, 117...
3.559322
59
#!/usr/bin/env python

import sys
import numpy as np
import pandas as pd
import pysam
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import seaborn as sns
from functools import partial

tumor = pysam.AlignmentFile(snakemake.input[0], "rb")
normal = pysam.AlignmentFile(snakemake.input[1], "rb")

softclips = []

for i, rec in enumerate(normal):
    if rec.is_supplementary or rec.is_unmapped:
        continue
    is_first_read = rec.pos < rec.mpos
    get_clip = lambda c: c[1] if c[0] == 4 else None
    clip_left = get_clip(rec.cigartuples[0])
    if clip_left is not None:
        softclips.append([clip_left, True, is_first_read])
    clip_right = get_clip(rec.cigartuples[-1])
    if clip_right is not None:
        softclips.append([clip_right, False, is_first_read])
    if i == 10000000:
        break

softclips = pd.DataFrame(softclips, columns=["len", "left", "first_in_pair"])

g = sns.FacetGrid(softclips, col="left", row="first_in_pair")
g = g.map(plot, "len")
plt.savefig(snakemake.output[0])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 279, 893, 321, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, ...
2.420561
428
# Generated by Django 3.1 on 2020-08-13 16:23

from django.db import migrations, models
import django.utils.timezone
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 319, 12131, 12, 2919, 12, 1485, 1467, 25, 1954, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 26791, 13, 2435, 11340, 628 ]
3.025641
39
import tensorflow
from tensorflow import keras

Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization= keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input=keras.layers.Input
l2=keras.regularizers.l2
from tensorflow.keras import backend


def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(
        num_filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='same',
        kernel_initializer='he_normal',
        kernel_regularizer=l2(1e-4))

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            x = BatchNormalization()(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = conv(x)
    return x
[ 11748, 11192, 273, 11125, 201, 198, 6738, 11192, 273, 11125, 1330, 41927, 292, 201, 198, 17633, 796, 41927, 292, 13, 27530, 13, 17633, 201, 198, 35, 1072, 796, 41927, 292, 13, 75, 6962, 13, 35, 1072, 201, 198, 25526, 341, 796, 41927, ...
2.184544
867
#!/usr/bin/env python3

import sys
import json

import rdflib
import rdflib.plugins.sparql as sparql

RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
                 'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
                 'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
                 'isGreatAuntOf', 'hasGreatAunt',
                 'isBrotherOf', 'isSisterOf', 'isSiblingOf',
                 'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER

try:
    workpath = sys.argv[1]
except IndexError:
    sys.exit("No path defined!")

try:
    recursion_limit = int(sys.argv[2])
except IndexError:
    recursion_limit = 0

if recursion_limit > 0:
    sys.setrecursionlimit(recursion_limit)

g = rdflib.Graph()
g.parse(workpath, format="turtle")

fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)

graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}

q = sparql.prepareQuery(
    """PREFIX fhkb:<http://www.example.com/genealogy.owl#>
       SELECT ?person ?pred ?obj
       WHERE {
           ?person a fhkb:Person ;
               ?pred ?obj .
       }
       ORDER BY ?person""")

for rel in RELS_OF_INTEREST:
    pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
    relation_query_results = g.query(q, initBindings={'pred': pred})
    for (subj, pred, obj) in relation_query_results:
        graph['edges'].append(
            {
                'data': {
                    'group': 'edges',
                    'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
                    'source': dump(subj),
                    'target': dump(obj),
                    'type': dump(pred)
                }
            })

q_details = sparql.prepareQuery(
    """PREFIX fhkb:<http://www.example.com/genealogy.owl#>
       SELECT ?person ?pred ?obj
       WHERE {
           ?person a fhkb:Person ;
               ?pred ?obj .
           FILTER NOT EXISTS {
               ?person ?testPred ?obj .
               VALUES ?testPred {
                   fhkb:isWifeOf
                   fhkb:isMotherOf
                   fhkb:isFatherOf
                   fhkb:isHusbandOf
                   fhkb:isSpouseOf
                   fhkb:hasGrandParent
                   fhkb:isGrandParentOf
                   fhkb:hasGreatGrandParent
                   fhkb:isGreatGrandParentOf
                   fhkb:isUncleOf
                   fhkb:hasUncle
                   fhkb:isGreatUncleOf
                   fhkb:hasGreatUncle
                   fhkb:isAuntOf
                   fhkb:hasAunt
                   fhkb:isGreatAuntOf
                   fhkb:hasGreatAunt
                   fhkb:isBrotherOf
                   fhkb:isSisterOf
                   fhkb:isSiblingOf
                   fhkb:isFirstCousinOf
                   fhkb:isSecondCousinOf
                   fhkb:isThirdCousinOf
                   fhkb:hasRelation
                   fhkb:isPartnerIn
                   fhkb:isMalePartnerIn
                   fhkb:isFemalePartnerIn
                   fhkb:isBloodrelationOf
               }
           }
       }
       ORDER BY ?person"""
)

person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
    node = nodes.get(dump(subj), {
        'data': {
            'label': '',
            'degree': 0,
            'size': 10,
            'alternateNames': [],
            'honorificPrefixes': [],
            'honorificSuffixes': [],
            'images': [],
            'id': dump(subj),
        }})

    if pred == FHKB.Sex:
        node['data'][dump(pred)] = dump(obj)
    elif pred.startswith(SCHEMA_ORG):
        if dump(pred) == 'honorificSuffix':
            node['data']['honorificSuffixes'].append(obj)
        elif dump(pred) == 'honorificPrefix':
            node['data']['honorificPrefixes'].append(obj)
        elif dump(pred) == 'alternateName':
            node['data']['alternateNames'].append(obj)
        elif dump(pred) == 'image':
            node['data']['images'].append(obj)
        else:
            node['data'][dump(pred)] = obj
    elif pred == rdflib.RDFS.label:
        node['data']['label'] = obj
    else:
        continue

    nodes[dump(subj)] = node

graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 25064, 198, 11748, 33918, 198, 11748, 374, 67, 2704, 571, 198, 11748, 374, 67, 2704, 571, 13, 37390, 13, 82, 1845, 13976, 355, 37331, 13976, 198, 198, 2200, 6561, 62, ...
1.834827
2,343
# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import time
import urllib.parse
from typing import Any, Dict, List, Optional, Union
from unittest.mock import Mock
from urllib.parse import urlencode

import pymacaroons

from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource

import synapse.rest.admin
from synapse.appservice import ApplicationService
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.types import create_requester
from synapse.util import Clock

from tests import unittest
from tests.handlers.test_oidc import HAS_OIDC
from tests.handlers.test_saml import has_saml2
from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG
from tests.server import FakeChannel
from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless

try:
    import jwt

    HAS_JWT = True
except ImportError:
    HAS_JWT = False


# synapse server name: used to populate public_baseurl in some tests
SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"

# public_baseurl for some tests. It uses an http:// scheme because
# FakeChannel.isSecure() returns False, so synapse will see the requested uri as
# http://..., so using http in the public_baseurl stops Synapse trying to redirect to
# https://....
BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)

# CAS server used in some tests
CAS_SERVER = "https://fake.test"

# just enough to tell pysaml2 where to redirect to
SAML_SERVER = "https://test.saml.server/idp/sso"
TEST_SAML_METADATA = """
<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata">
  <md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol">
    <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/>
  </md:IDPSSODescriptor>
</md:EntityDescriptor>
""" % {
    "SAML_SERVER": SAML_SERVER,
}

LOGIN_URL = b"/_matrix/client/r0/login"
TEST_URL = b"/_matrix/client/r0/account/whoami"

# a (valid) url with some annoying characters in.
# %3D is =, %26 is &, %2B is +
TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="f%26=o"'

# the query params in TEST_CLIENT_REDIRECT_URL
EXPECTED_CLIENT_REDIRECT_URL_PARAMS = [("<ab c>", ""), ('q" =+"', '"f&=o"')]

# (possibly experimental) login flows we expect to appear in the list after the
# normal ones
ADDITIONAL_LOGIN_FLOWS = [
    {"type": "m.login.application_service"},
    {"type": "uk.half-shot.msc2778.login.application_service"},
]


# NOTE: the test functions below take `self` and were evidently lifted out of a
# HomeserverTestCase subclass in this sample.
def test_login_with_overly_long_device_id_fails(self) -> None:
    self.register_user("mickey", "cheese")

    # create a device_id longer than 512 characters
    device_id = "yolo" * 512

    body = {
        "type": "m.login.password",
        "user": "mickey",
        "password": "cheese",
        "device_id": device_id,
    }

    # make a login request with the bad device_id
    channel = self.make_request(
        "POST",
        "/_matrix/client/v3/login",
        json.dumps(body).encode("utf8"),
        custom_headers=None,
    )

    # test that the login fails with the correct error code
    self.assertEqual(channel.code, 400)
    self.assertEqual(channel.json_body["errcode"], "M_INVALID_PARAM")


# The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
# RS256, with a public key configured in synapse as "jwt_secret", and tokens
# signed by the private key.

AS_USER = "as_user_alice"
[ 2, 15069, 13130, 12, 1238, 2481, 383, 24936, 13, 2398, 5693, 327, 13, 40, 13, 34, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 28...
2.738882
1,574
import argparse
import sys
from time import sleep
import uuid

import rclpy

from rmf_task_msgs.msg import Delivery

# NOTE: main() is not defined in this sample; it is assumed to be defined
# elsewhere in the original file.
if __name__ == '__main__':
    main(sys.argv)
[ 198, 11748, 1822, 29572, 198, 11748, 25064, 198, 6738, 640, 1330, 3993, 198, 11748, 334, 27112, 198, 198, 11748, 374, 565, 9078, 198, 198, 6738, 42721, 69, 62, 35943, 62, 907, 14542, 13, 19662, 1330, 28682, 198, 198, 361, 11593, 3672, ...
2.733333
60
"""This file handles the interaction with discords http endpoints.""" import asyncio import logging from typing import Any, Dict, Optional, Union from urllib.parse import quote as _uriquote from weakref import WeakValueDictionary import aiohttp from aiohttp import BaseConnector, ClientSession, ClientWebSocketResponse, FormData from multidict import CIMultiDictProxy from dis_snek.api.http.http_requests import ( BotRequests, ChannelRequests, EmojiRequests, GuildRequests, InteractionRequests, MemberRequests, MessageRequests, ReactionRequests, StickerRequests, ThreadRequests, UserRequests, WebhookRequests, ScheduledEventsRequests, ) from dis_snek.client.const import __py_version__, __repo_url__, __version__, logger_name, MISSING, Absent from dis_snek.client.errors import DiscordError, Forbidden, GatewayNotFound, HTTPException, NotFound, LoginError from dis_snek.client.utils.input_utils import response_decode from dis_snek.client.utils.serializer import dict_filter_missing from dis_snek.models import CooldownSystem from .route import Route __all__ = ["HTTPClient"] log = logging.getLogger(logger_name)
[ 37811, 1212, 2393, 17105, 262, 10375, 351, 1221, 3669, 2638, 886, 13033, 526, 15931, 198, 11748, 30351, 952, 198, 11748, 18931, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 32233, 11, 4479, 198, 6738, 2956, 297, 571, 13, 29572, 1330,...
3.117021
376
from os import environ
import psycopg2
from datetime import timedelta
from dotenv import load_dotenv

load_dotenv()
[ 6738, 28686, 1330, 551, 2268, 198, 11748, 17331, 22163, 70, 17, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 6738, 16605, 24330, 1330, 3440, 62, 26518, 24330, 198, 2220, 62, 26518, 24330, 3419, 198 ]
3.382353
34
ELECTRUM_VERSION = '4.1.5-radc'   # version of the client package
APK_VERSION = '4.1.5.0'           # read by buildozer.spec
PROTOCOL_VERSION = '1.4'          # protocol version requested

# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01'          # Standard wallet
SEED_PREFIX_SW = '100'      # Segwit wallet
SEED_PREFIX_2FA = '101'     # Two-factor authentication
SEED_PREFIX_2FA_SW = '102'  # Two-factor auth, using segwit
[ 36, 16779, 49, 5883, 62, 43717, 796, 705, 19, 13, 16, 13, 20, 12, 6335, 66, 6, 220, 220, 220, 220, 1303, 2196, 286, 262, 5456, 5301, 198, 2969, 42, 62, 43717, 796, 705, 19, 13, 16, 13, 20, 13, 15, 6, 220, 220, 220, 220, 220,...
2.257426
202
from __future__ import division
from math import sqrt as sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter


def vis(func):
    """tensorboard visualization if has writer as input"""
    # NOTE: the wrapper body is missing from this sample; a pass-through is
    # assumed here so the decorator remains syntactically valid.
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper


class PriorBoxSSD(PriorBoxBase):
    # NOTE: the class body is missing from this sample; PriorBoxBase is
    # assumed to be defined elsewhere in the original file.
    pass


# PriorBox = PriorBoxSSD

if __name__ == '__main__':
    import copy
    # from lib.datasets.config import ssd_voc_vgg as cfg
    # from lib.utils.visualize_utils import TBWriter
    # tb_writer = TBWriter(log_dir, {'epoch': 50})
    #
    # test_no_vis(cfg, tb_writer)
    # test_filp(cfg, tb_writer)
    # test_rectangle(cfg, tb_writer)
    print('haha')
    from lib.utils.config import cfg
    print(cfg)
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 10688, 1330, 19862, 17034, 355, 19862, 17034, 198, 6738, 340, 861, 10141, 1330, 1720, 355, 1720, 198, 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 198, 198, 6...
2.688406
276
##########################################################################
#
#  Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import IECore

import Gaffer
[ 29113, 29113, 7804, 2235, 198, 2, 198, 2, 220, 15069, 357, 66, 8, 2321, 12, 6390, 11, 7412, 7117, 8495, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 220, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, ...
3.395564
541
# Generated by Django 3.1.13 on 2021-10-01 18:41

from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 1485, 319, 33448, 12, 940, 12, 486, 1248, 25, 3901, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.8
30
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Tau import Tau

from PhysicsTools.HeppyCore.utils.deltar import deltaR, matchObjectCollection3

import PhysicsTools.HeppyCore.framework.config as cfg

# Find the definitions of the tau ID strings here:
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py

# NOTE: the TauAnalyzer class definition is missing from this sample; it is
# assumed to be defined above this point in the original file.
setattr(TauAnalyzer, "defaultConfig", cfg.Analyzer(
    class_object=TauAnalyzer,
    # inclusive very loose hadronic tau selection
    inclusive_ptMin=18,
    inclusive_etaMax=9999,
    inclusive_dxyMax=1000.,
    inclusive_dzMax=0.4,
    inclusive_vetoLeptons=False,
    inclusive_leptonVetoDR=0.4,
    inclusive_decayModeID="decayModeFindingNewDMs",  # ignored if not set or ""
    inclusive_tauID="decayModeFindingNewDMs",
    inclusive_vetoLeptonsPOG=False,  # If True, the following two IDs are required
    inclusive_tauAntiMuonID="",
    inclusive_tauAntiElectronID="",
    # loose hadronic tau selection
    loose_ptMin=18,
    loose_etaMax=9999,
    loose_dxyMax=1000.,
    loose_dzMax=0.2,
    loose_vetoLeptons=True,
    loose_leptonVetoDR=0.4,
    loose_decayModeID="decayModeFindingNewDMs",  # ignored if not set or ""
    loose_tauID="byLooseCombinedIsolationDeltaBetaCorr3Hits",
    loose_vetoLeptonsPOG=False,  # If True, the following two IDs are required
    loose_tauAntiMuonID="againstMuonLoose3",
    loose_tauAntiElectronID="againstElectronLooseMVA5"
))
[ 6738, 23123, 33637, 13, 1544, 14097, 13, 38200, 47031, 13, 7295, 13, 37702, 9107, 1330, 16213, 9107, 198, 6738, 23123, 33637, 13, 1544, 14097, 13, 38200, 47031, 13, 7295, 13, 27722, 37508, 1330, 11160, 37508, 198, 6738, 23123, 33637, 13, ...
2.680672
595
# smail ALTAY 170401038

import math
import random

r = 3271

n = int(input("Oluşturulmak istenen anahtar çiftlerinin bit uzunluğunu girin: "))

# NOTE: Keygen, encrypt and decrypt are not defined in this sample; they are
# assumed to be defined elsewhere in the original file.
Keygen(n)
encrypt("plaintext.txt", "publickey.txt")
decrypt("ciphertext.txt", "privatekey.txt")
[ 2, 895, 603, 8355, 51, 4792, 1596, 3023, 20943, 2548, 201, 198, 201, 198, 11748, 10688, 201, 198, 11748, 4738, 201, 198, 201, 198, 81, 796, 513, 28977, 201, 198, 201, 198, 77, 796, 493, 7, 15414, 7203, 30098, 315, 333, 377, 76, 46...
2.287037
108
# Built-in modules #

# Internal modules #
from seqenv import module_dir
from seqenv.common.cache import property_cached

# Third party modules #
import sh, networkx
import matplotlib.colors

# A list of envos to help test this module #
test_envos = [
    "ENVO:00000033",
    "ENVO:00000043",
    "ENVO:00000067",
    "ENVO:00000143",
    "ENVO:00000210",
    "ENVO:00000215",
    "ENVO:00000475",
]

################################################################################
# NOTE: the functions below take `self` and appear to be methods stripped out
# of a class in this sample; the print statements are Python 2 syntax.
def add_weights(self, g, weights=None):
    """Input a networkx DiGraph object.
    Outputs a pygraphviz AGraph object."""
    g = networkx.nx_agraph.to_agraph(g)
    if weights is None: return g
    for envo in weights:
        node = g.get_node(envo)
        weight = weights[envo]
        color = matplotlib.colors.rgb2hex((1.0, 1.0 - weight, 0.0))
        node.attr['fillcolor'] = color
    return g

def add_style(self, g):
    """Input a pygraphviz AGraph object.
    Outputs a pygraphviz AGraph object."""
    for node in g.nodes():
        text = node.attr['name']
        node.attr['label'] = text.replace(' ', '\\n')
        node.attr['name'] = ''
        node.attr['shape'] = 'Mrecord'
        node.attr['style'] = 'filled'
        # To add the envo id to each node, uncomment:
        #envo = node.attr['label']
        #node.attr['label'] = "{<f0> %s|<f1> %s}" % (envo, text)
    for edge in g.edges():
        if edge.attr['label'] == 'located_in': edge.attr['color'] = 'turquoise4'
        edge.attr['label'] = ''
    return g

def write_to_dot(self, g, path):
    """Input a pygraphviz AGraph object."""
    with open(path, 'w') as handle: handle.write(g.to_string())

def add_legend(self, path):
    """Input the path to a dot file."""
    legend_txt = """
    digraph {
      rankdir=LR
      node [shape=plaintext,fontname="helvetica"]
      subgraph cluster_01 {
        label = "NB: darker nodes weigh more";
        key [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
          <tr><td align="right" port="i1">Is</td></tr>
          <tr><td align="right" port="i2">Part</td></tr>
          <tr><td align="right" port="i3">Located</td></tr>
          </table>>];
        key2 [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
          <tr><td port="i1">a</td></tr>
          <tr><td port="i2">of</td></tr>
          <tr><td port="i3">in</td></tr>
          </table>>];
        key:i1:e -> key2:i1:w [color=red];
        key:i2:e -> key2:i2:w [color=blue];
        key:i3:e -> key2:i3:w [color=turquoise4];
      }"""
    orig_txt = [line.rstrip('\n') for line in open(path, 'r') if line]
    new_text = [line.lstrip() for line in legend_txt.split('\n') if line]
    new_text = '\n'.join(new_text + orig_txt[2:])
    with open(path, 'w') as handle: handle.write(new_text)

def draw_to_pdf(self, in_path, out_path):
    """Input a path to a dot file."""
    sh.dot(in_path, '-Tpdf', '-o', out_path)

# --------------------------- In this section --------------------------- #
# descends
def descends(self, e, root):
    """Does the envo term `e` descend from the node `root`?
    Returns True or False."""
    # Auto conversion #
    if isinstance(e, int): e = "ENVO:%08d" % e
    if isinstance(root, int): root = "ENVO:%08d" % root
    # Return #
    return e in networkx.ancestors(self.networkx, root)

# --------------------------- In this section --------------------------- #
# print_test
# draw_with_networkx
# draw_with_pygraphviz
def print_test(self, e=None):
    """Just a method to see a bit how the different libraries work."""
    # Test node #
    if e is None: e = test_envos[0]
    # Goa #
    print "Goa: "
    print self.goatools[e]
    # Pygraphviz #
    print "pygraphviz: "
    print self.pygraphviz[e]
    print self.pygraphviz.successors(e)
    print self.pygraphviz.predecessors(e)
    print self.pygraphviz.get_node(e)
    # Networkx #
    import networkx
    print "networkx: "
    print self.networkx[e]
    print self.networkx.successors(e)
    print self.networkx.predecessors(e)
    print networkx.ancestors(self.networkx, e)    # same as predecessors
    print networkx.descendants(self.networkx, e)  # almost as child_to_parents

def draw_with_networkx(self, g, path):
    """Input a networkx DiGraph object."""
    from matplotlib import pyplot
    networkx.draw(g)
    pyplot.savefig(path)
    pyplot.close()

def draw_with_pygraphviz(self, g, path):
    """Input a pygraphviz AGraph object."""
    with open(path, 'w') as handle: handle.write(g.to_string())
[ 2, 28477, 12, 259, 13103, 1303, 198, 198, 2, 18628, 13103, 1303, 198, 6738, 33756, 24330, 1330, 8265, 62, 15908, 198, 6738, 33756, 24330, 13, 11321, 13, 23870, 1330, 3119, 62, 66, 2317, 198, 198, 2, 10467, 2151, 13103, 1303, 198, 1174...
2.144828
2,320
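A hedged usage sketch for the `descends` helper above: integers are auto-converted to "ENVO:%08d" identifiers before the networkx ancestry check. Here `onto` is a hypothetical stand-in for whatever object these stripped methods belonged to (it must expose a `.networkx` DAG); the ids come from `test_envos`.

# Hypothetical: `onto` is the ontology object these methods were stripped from.
root = "ENVO:00000033"
print(descends(onto, 143, root))              # int 143 becomes "ENVO:00000143"
print(descends(onto, "ENVO:00000475", root))  # string ids pass through unchanged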
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.core.exceptions import HttpResponseError
import msrest.serialization
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 16529, 35937, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 19...
4.555556
135
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import OrderedDict

import six
import mock

from st2common.services import triggers as trigger_service

with mock.patch.object(trigger_service, 'create_trigger_type_db', mock.MagicMock()):
    from st2api.controllers.v1.webhooks import HooksHolder

from st2common.persistence.rbac import UserRoleAssignment
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.service_setup import register_service_in_service_registry
from st2common.services import coordination
from st2tests import config as tests_config
from st2tests.fixturesloader import FixturesLoader

from open_rbac.tests import APIControllerWithRBACTestCase
from tests.unit.controllers.v1.test_webhooks import DUMMY_TRIGGER_DICT

http_client = six.moves.http_client

__all__ = [
    'APIControllersRBACTestCase'
]

FIXTURES_PACK = 'generic'
TEST_FIXTURES = OrderedDict([
    ('runners', ['testrunner1.yaml', 'run-local.yaml']),
    ('sensors', ['sensor1.yaml']),
    ('actions', ['action1.yaml', 'local.yaml']),
    ('aliases', ['alias1.yaml']),
    ('triggers', ['trigger1.yaml', 'cron1.yaml']),
    ('rules', ['rule1.yaml']),
    ('triggertypes', ['triggertype1.yaml']),
    ('executions', ['execution1.yaml']),
    ('liveactions', ['liveaction1.yaml', 'parentliveaction.yaml', 'childliveaction.yaml']),
    ('enforcements', ['enforcement1.yaml']),
    ('apikeys', ['apikey1.yaml']),
    ('traces', ['trace_for_test_enforce.yaml'])
])

MOCK_RUNNER_1 = {
    'name': 'test-runner-1',
    'description': 'test',
    'enabled': False
}

MOCK_ACTION_1 = {
    'name': 'ma.dummy.action',
    'pack': 'examples',
    'description': 'test description',
    'enabled': True,
    'entry_point': '/tmp/test/action2.py',
    'runner_type': 'local-shell-script',
    'parameters': {
        'c': {'type': 'string', 'default': 'C1', 'position': 0},
        'd': {'type': 'string', 'default': 'D1', 'immutable': True}
    }
}

MOCK_ACTION_ALIAS_1 = {
    'name': 'alias3',
    'pack': 'aliases',
    'description': 'test description',
    'action_ref': 'core.local',
    'formats': ['a', 'b']
}

MOCK_RULE_1 = {
    'enabled': True,
    'name': 'st2.test.rule2',
    'pack': 'yoyohoneysingh',
    'trigger': {
        'type': 'wolfpack.triggertype-1'
    },
    'criteria': {
        'trigger.k1': {
            'pattern': 't1_p_v',
            'type': 'equals'
        }
    },
    'action': {
        'ref': 'sixpack.st2.test.action',
        'parameters': {
            'ip2': '{{rule.k1}}',
            'ip1': '{{trigger.t1_p}}'
        }
    },
    'description': ''
}


# NOTE: the functions below take `self` and were evidently lifted out of a
# TestCase class in this sample.
def test_icon_png_file_is_whitelisted(self):
    self.use_user(self.users['no_permissions'])

    # Test that access to icon.png file doesn't require any permissions
    response = self.app.get('/v1/packs/views/file/dummy_pack_2/icon.png')
    self.assertEqual(response.status_code, http_client.OK)

    # Other files should return forbidden
    response = self.app.get('/v1/packs/views/file/dummy_pack_2/pack.yaml',
                            expect_errors=True)
    self.assertEqual(response.status_code, http_client.FORBIDDEN)


def _perform_request_for_endpoint(self, endpoint):
    if endpoint['method'] == 'GET':
        response = self.app.get(endpoint['path'], expect_errors=True)
    elif endpoint['method'] == 'POST':
        return self.app.post_json(endpoint['path'], endpoint['payload'], expect_errors=True)
    elif endpoint['method'] == 'PUT':
        return self.app.put_json(endpoint['path'], endpoint['payload'], expect_errors=True)
    elif endpoint['method'] == 'DELETE':
        return self.app.delete(endpoint['path'], expect_errors=True)
    else:
        raise ValueError('Unsupported method: %s' % (endpoint['method']))

    return response
[ 2, 49962, 284, 262, 23881, 32173, 11, 3457, 19203, 25896, 32173, 11537, 739, 530, 393, 517, 198, 2, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 9387, 351, 198, 2, 428, 670, 329, 3224, 1321, 5115, 6634, 9238, 13, 198, 2, 383...
2.519495
1,821
import pytest
import time
import subprocess
from subprocess import run, Popen
from seldon_utils import *
from k8s_utils import *
[ 11748, 12972, 9288, 198, 11748, 640, 198, 11748, 850, 14681, 198, 6738, 850, 14681, 1330, 1057, 11, 47, 9654, 198, 6738, 384, 25900, 62, 26791, 1330, 1635, 198, 6738, 479, 23, 82, 62, 26791, 1330, 1635, 198, 220, 220, 220, 220, 220, ...
2.978261
46
# proxy module
from __future__ import absolute_import
from envisage.safeweakref import *
[ 2, 15741, 8265, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 45633, 496, 13, 49585, 413, 68, 461, 5420, 1330, 1635, 198 ]
3.56
25
from django.db import models

from .query import BookQuerySet
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 764, 22766, 1330, 4897, 20746, 7248, 628, 628, 198 ]
3.611111
18
# -*- coding: utf-8 -*-
# File: dropblock.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re
import six
# from tensorpack.tfutils.compat import tfv1 as tf  # this should be avoided first in model code
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.models import GlobalAvgPooling, FullyConnected
import tensorflow as tf

# NOTE: __all__ below names dropblock3/dropblock4, but only dropblock,
# dropblock2 and CamDrop are present in this sample.
__all__ = ['dropblock', 'dropblock2', 'dropblock3', 'dropblock4']
# 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout


def dropblock(net, keep_prob, dropblock_size, gap_w=None, label=None, G=None, CG=None, data_format='channels_first'):
    """DropBlock: a regularization method for convolutional neural networks.

    DropBlock is a form of structured dropout, where units in a contiguous
    region of a feature map are dropped together. DropBlock works better than
    dropout on convolutional layers due to the fact that activation units in
    convolutional layers are spatially correlated.
    See https://arxiv.org/pdf/1810.12890.pdf for details.

    Args:
        net: `Tensor` input tensor.
        is_training: `bool` for whether the model is training.
        keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock.
            "None" means no DropBlock.
        dropblock_size: `int` size of blocks to be dropped by DropBlock.
        data_format: `str` either "channels_first" for
            `[batch, channels, height, width]` or "channels_last" for
            `[batch, height, width, channels]`.

    Returns:
        A version of input tensor with DropBlock applied.

    Raises:
        if width and height of the input tensor are not equal.
    """
    ctx = get_current_tower_context()
    is_training = bool(ctx.is_training)

    if not is_training or keep_prob is None:
        return net

    tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))

    if data_format == 'channels_last':
        _, width, height, _ = net.get_shape().as_list()
    else:
        _, _, width, height = net.get_shape().as_list()
    if width != height:
        raise ValueError('Input tensor with width!=height is not supported.')

    dropblock_size = min(dropblock_size, width)
    # seed_drop_rate is the gamma parameter of DropBlock.
    seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
        width - dropblock_size + 1)**2

    # Forces the block to be inside the feature map.
    w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
    valid_block_center = tf.logical_and(
        tf.logical_and(w_i >= int(dropblock_size // 2),
                       w_i < width - (dropblock_size - 1) // 2),
        tf.logical_and(h_i >= int(dropblock_size // 2),
                       h_i < width - (dropblock_size - 1) // 2))

    valid_block_center = tf.expand_dims(valid_block_center, 0)
    valid_block_center = tf.expand_dims(
        valid_block_center, -1 if data_format == 'channels_last' else 0)

    randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
    block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
        (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
    block_pattern = tf.cast(block_pattern, dtype=tf.float32)

    if dropblock_size == width:
        block_pattern = tf.reduce_min(
            block_pattern,
            axis=[1, 2] if data_format == 'channels_last' else [2, 3],
            keepdims=True)
    else:
        if data_format == 'channels_last':
            ksize = [1, dropblock_size, dropblock_size, 1]
        else:
            ksize = [1, 1, dropblock_size, dropblock_size]
        block_pattern = -tf.nn.max_pool(
            -block_pattern,
            ksize=ksize,
            strides=[1, 1, 1, 1],
            padding='SAME',
            data_format='NHWC' if data_format == 'channels_last' else 'NCHW')

    percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(
        tf.size(block_pattern), tf.float32)

    net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
        block_pattern, net.dtype)
    return net


def dropblock2(net, keep_prob, dropblock_size, G=None, CG=None, data_format='channels_first'):
    """ mimic GN """
    ctx = get_current_tower_context()
    is_training = bool(ctx.is_training)

    if not is_training or keep_prob is None:
        return net

    tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))

    if data_format == 'channels_last':
        N, height, width, C = net.get_shape().as_list()
    else:
        N, C, height, width = net.get_shape().as_list()
    N = tf.shape(net)[0]
    if width != height:
        raise ValueError('Input tensor with width!=height is not supported.')

    if G == None:
        G = C // CG
    if CG == None:
        CG = C // G
    net = tf.reshape(net, [N, G, CG, height, width])

    dropblock_size = min(dropblock_size, width)
    # seed_drop_rate is the gamma parameter of DropBlock.
    # seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2)
    seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2

    # Forces the block to be inside the feature map.
    w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
    valid_block_center = tf.logical_and(
        tf.logical_and(w_i >= int(dropblock_size // 2),
                       w_i < width - (dropblock_size - 1) // 2),
        tf.logical_and(h_i >= int(dropblock_size // 2),
                       h_i < width - (dropblock_size - 1) // 2))

    valid_block_center = tf.expand_dims(valid_block_center, 0)  # for depth
    valid_block_center = tf.expand_dims(valid_block_center, 0)  # for batch
    valid_block_center = tf.expand_dims(valid_block_center, 0)  # for channel

    randnoise = tf.random_uniform([N, G, 1, width, height], dtype=tf.float32)
    block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
        (1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
    block_pattern = tf.cast(block_pattern, dtype=tf.float32)

    if dropblock_size == width:
        block_pattern = tf.reduce_min(block_pattern, axis=[2, 3, 4], keepdims=True)
    else:
        ksize = [1, 1, dropblock_size, dropblock_size]
        block_pattern = tf.reduce_max(-block_pattern, reduction_indices=[2])
        block_pattern = -tf.nn.max_pool(block_pattern, ksize=ksize, strides=[1, 1, 1, 1],
                                        padding='SAME', data_format='NCHW')
        block_pattern = tf.expand_dims(block_pattern, 2)

    percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
    net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
    net = tf.reshape(net, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height, width])
    return net


def CamDrop(net, keep_prob, dropblock_size, flag=None, label=None, G=None, CG=None, data_format='channels_first'):
    '''CamDrop'''

    def _get_cam(net, label, flag, dropblock_size, data_format='channels_first'):
        '''
        net: [N, C, H, W]
        gap_w : [gap_C, num_of_class]
        '''
        if data_format == 'channels_last':
            N, height, width, C = net.get_shape().as_list()
        else:
            N, C, height, width = net.get_shape().as_list()
        N = tf.shape(net)[0]

        gap_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'linear/W') if flag > 0 else None
        if not gap_w is None:
            gap_w = tf.convert_to_tensor(gap_w, tf.float32)
            gap_C, num = tf.squeeze(gap_w, 0).get_shape().as_list()  # [gap_C, num]
            gap_w = tf.reshape(gap_w, [C, gap_C//C, num])
            gap_w = tf.reduce_mean(gap_w, reduction_indices=[1])  # [C, num]
            label = tf.gather(tf.transpose(gap_w), label)  # [N, C]

            # spatial
            weights = tf.expand_dims(label, 2)  # [N, C, 1]
            net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
            cam = tf.matmul(weights, net, transpose_a=True)  # [N, 1, width*height]
            # spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True))
            # cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width])
            # cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW')
            # left_or_top = (dropblock_size-1) // 2
            # right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1
            # cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]])
            # cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width])
            k = tf.cast(height*width/dropblock_size**2, tf.int32)
            topk, _ = tf.math.top_k(cam, k=k)  # [N, 1, k]
            topk = tf.gather(topk, indices=[k-1], axis=-1)  # [N, 1, 1]
            spt_mask = (cam < topk)
            spt_mask = tf.reshape(spt_mask, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(spt_mask, [N, 1, height, width])

            # channel
            k = tf.cast(C/8, tf.int32)
            topk, _ = tf.math.top_k(label, k=k+1)  # [N, k]
            topk = tf.gather(topk, indices=k, axis=1)  # [N, 1]
            topk = tf.expand_dims(topk, 1)  # [N, C, 1]
            chan_mask = (label < topk)
            chan_mask = tf.expand_dims(chan_mask, 2)  # [N, C, 1]
            chan_mask = tf.expand_dims(chan_mask, 2)  # [N, C, 1, 1]
            cam_mask = tf.logical_or(spt_mask, chan_mask)
            # chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width])
            # chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width])
            # chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width])
        else:
            cam_mask = False
        return cam_mask

    # def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'):
    #     # Conv layer tensor [?,2048,10,10]
    #     def _compute_gradients(tensor, var_list):
    #         grads = tf.gradients(tensor, var_list)
    #         return [grad if grad is not None else tf.zeros_like(var)
    #                 for var, grad in zip(var_list, grads)]
    #     # grads = tf.gradients(cost, net)[0]
    #     if not gap_w is None:
    #         # Normalizing the gradients
    #         if data_format == 'channels_last':
    #             N, height, width, C = net.get_shape().as_list()
    #         else:
    #             N, C, height, width = net.get_shape().as_list()
    #         N = tf.shape(net)[0]
    #         grads = _compute_gradients(cost, [net])[0]
    #         norm_grads = tf.divide(grads, tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5))
    #         weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3])  # [N, C]
    #         weights = tf.expand_dims(weights, 2)  # [N, C, 1]
    #         net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
    #         # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True)  # [N, width*height, 1]
    #         cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0)  # [N, 1, width*height]
    #         cam_chan = tf.maximum(tf.multiply(net, weights), 0)  # [N, C, width*height]
    #         cam = cam_mean*cam_chan
    #         # Passing through ReLU
    #         cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True)
    #         cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width])
    #     else:
    #         cam = 0.
    #     return cam

    # def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20):
    #     if logits == False:
    #         return logits
    #     U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1)
    #     y = logits - tf.log(-tf.log(U + eps) + eps)
    #     cam_mask = tf.nn.softmax(y / tau)
    #     topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32))  # [N, 1]
    #     topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1)
    #     topk = tf.expand_dims(topk, 1)  # [N, C, 1]
    #     cam_mask = (cam_mask < topk)
    #     # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32)
    #     cam_mask = tf.expand_dims(cam_mask, 2)  # [N, C, 1]
    #     cam_mask = tf.expand_dims(cam_mask, 2)  # [N, C, 1, 1]
    #     return cam_mask

    ctx = get_current_tower_context()
    is_training = bool(ctx.is_training)

    if not is_training or keep_prob is None:
        return net

    tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))

    if data_format == 'channels_last':
        _, width, height, C = net.get_shape().as_list()
    else:
        _, C, width, height = net.get_shape().as_list()
    if width != height:
        raise ValueError('Input tensor with width!=height is not supported.')

    N = tf.shape(net)[0]
    dropblock_size = min(dropblock_size, width)
    # seed_drop_rate is the gamma parameter of DropBlock.
    seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2

    cam_mask = _get_cam(net, label, flag, dropblock_size, data_format)

    # Forces the block to be inside the feature map.
    w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
    valid_block_center = tf.logical_and(
        tf.logical_and(w_i >= int(dropblock_size // 2),
                       w_i < width - (dropblock_size - 1) // 2),
        tf.logical_and(h_i >= int(dropblock_size // 2),
                       h_i < width - (dropblock_size - 1) // 2))

    valid_block_center = tf.expand_dims(valid_block_center, 0)
    valid_block_center = tf.expand_dims(valid_block_center, -1 if data_format == 'channels_last' else 0)

    randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
    block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast((1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
    block_pattern = tf.logical_or(block_pattern, cam_mask)
    block_pattern = tf.cast(block_pattern, dtype=tf.float32)

    if dropblock_size == width:
        block_pattern = tf.reduce_min(
            block_pattern,
            axis=[1, 2] if data_format == 'channels_last' else [2, 3],
            keepdims=True)
    else:
        if data_format == 'channels_last':
            ksize = [1, dropblock_size, dropblock_size, 1]
        else:
            ksize = [1, 1, dropblock_size, dropblock_size]
        block_pattern = -tf.nn.max_pool(
            -block_pattern,
            ksize=ksize,
            strides=[1, 1, 1, 1],
            padding='SAME',
            data_format='NHWC' if data_format == 'channels_last' else 'NCHW')

    percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
    net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
    return net
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 9220, 25, 4268, 9967, 13, 9078, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 360...
2.376488
6,218
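A worked example of the DropBlock gamma ("seed_drop_rate") formula used in the record above, with illustrative numbers (keep_prob, width and block size are assumptions, not values from the sample):

# Plain-Python check of the per-position seed probability gamma.
keep_prob, width, dropblock_size = 0.9, 14, 3
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
print(round(seed_drop_rate, 5))  # ~0.01512: each valid center seeds a drop with this probability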
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures import logging
from rlstructures.env_wrappers import GymEnv, GymEnvInf
from rlstructures.tools import weight_init
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_recurrent_policy.agent import RecurrentAgent
from tutorial.tutorial_recurrent_policy.a2c import A2C
import gym
from gym.wrappers import TimeLimit

# We write the 'create_env' and 'create_agent' function in the main file to
# allow these functions to be used with pickle when creating the batcher
# processes.
# NOTE: Experiment, create_env, create_train_env and create_agent are not
# defined in this sample; they are assumed to be defined above this point in
# the original file.
if __name__ == "__main__":
    # We use spawn mode such that most of the environment will run in multiple
    # processes
    import torch.multiprocessing as mp

    mp.set_start_method("spawn")

    config = {
        "env_name": "CartPole-v0",
        "a2c_timesteps": 3,
        "n_envs": 4,
        "max_episode_steps": 100,
        "env_seed": 42,
        "n_threads": 4,
        "n_evaluation_threads": 2,
        "n_evaluation_episodes": 256,
        "time_limit": 3600,
        "lr": 0.001,
        "discount_factor": 0.95,
        "critic_coef": 1.0,
        "entropy_coef": 0.01,
        "a2c_coef": 1.0,
        "logdir": "./results",
    }
    exp = Experiment(config, create_env, create_train_env, create_agent)
    exp.run()
[ 2, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, 287, 262, 6808, 8619, 286, 428, 2723, 5509, 1...
2.579585
578
from matplotlib.pyplot import title
import streamlit as st
import pandas as pd
import altair as alt
import pydeck as pdk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics

path = os.path.dirname(__file__)

streamlit_analytics.start_tracking()

# main
st.title('GND-Dashboard')

# info area at the top
with st.beta_container():
    st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählen Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statistiken. Verwenden Sie einen auf Chromium basierenden Browser.')
    with st.beta_expander("Methodik und Datenherkunft"):
        st.markdown('''
Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normdatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.

Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.

Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).

Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.

Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.

Die Daten werden monatlich aktualisiert.
''')

# sidebar with record-type (Satzart) filter
st.sidebar.header("Satzart wählen")
satzart = st.sidebar.selectbox(
    "Über welche GND-Satzart möchten Sie etwas erfahren?",
    ('alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgets haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie Christian Baumann geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')

# NOTE: the widget functions called below (gesamt_entity_count, entities, ...)
# are missing from this sample and assumed to be defined above this point.
gnd_allgemein = st.beta_container()
with gnd_allgemein:
    st.header('GND Statistik allgemein')

    # general statistics depending on the selected record type
    if satzart == 'alle':
        gesamt_entity_count()
        entities()
        newcomer()
        zeitverlauf()
        relationen()
        systematik()
    else:
        entities()
        newcomer()

    # special widgets for individual record types
    if satzart == "Tp - Personen":
        wirkungsorte()
    elif satzart == "Tg - Geografika":
        wirkungsorte_musik()
        wirkungsorte()
    elif satzart == "Ts - Sachbegriffe":
        sachbegriff_cloud()
        systematik_ts()

dnb = st.beta_container()
with dnb:
    st.header('GND in der Deutschen Nationalbibliothek')
    gnd_top()
    dnb_links()

streamlit_analytics.stop_tracking()
[ 6738, 2603, 29487, 8019, 13, 9078, 29487, 1330, 3670, 198, 11748, 4269, 18250, 355, 336, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 5988, 958, 355, 5988, 198, 11748, 12972, 35875, 355, 279, 34388, 198, 11748, 28686, 198, 11748, 1...
2.511995
1,459
""" Class for all excpetions used in following scripts - geocoder.py - geocoder_input.py """
[ 37811, 198, 9487, 329, 477, 2859, 6449, 507, 973, 287, 1708, 14750, 198, 12, 4903, 420, 12342, 13, 9078, 198, 12, 4903, 420, 12342, 62, 15414, 13, 9078, 198, 37811, 628, 628 ]
3
32
# Erdin Alhas 150401052

import os
import sys
import time
from socket import *
from os import system, name

ip = '127.0.0.1'
port = 42

s_soket = socket(AF_INET, SOCK_DGRAM)
s_soket.bind((ip, port))

print("\nSunucu Hazir\n")

kontrol, istemciAdres = s_soket.recvfrom(4096)
s_soket.sendto(bytes("Sunucu hazir", encoding='utf-8'), istemciAdres)

i, istemciAdres = s_soket.recvfrom(4096)

if(i.decode("utf-8") == "listeleme yap"):
    dosyalar = "\n".join(os.listdir())
    s_soket.sendto(bytes(dosyalar, encoding='utf-8'), istemciAdres)
    sys.exit()
elif(i.decode("utf-8") == "put yap"):
    cevap = s_soket.recvfrom(4096)
    if(cevap[0].decode("utf-8") == "mevcut"):
        dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
        dosyaIcerigi = s_soket.recvfrom(4096)
        if(os.path.exists(dosyaIsmi.decode("utf-8")) == True):
            s_soket.sendto(bytes("aynisi mevcut", encoding='utf-8'), istemciAdres)
            karar = s_soket.recvfrom(4096)
            if(karar[0].decode("utf-8") == "1"):
                yeniAd = dosyaIsmi.decode("utf-8")[:-4] + " (kopya)" + ".txt"
                dosyaYeni = open(yeniAd, "wb")
                dosyaYeni.write(dosyaIcerigi[0])
                dosyaYeni.close()
                print("\nPUT islemi basariyla gerceklesti..")
        else:
            dosyaYeni = open(dosyaIsmi, "wb")
            dosyaYeni.write(dosyaIcerigi[0])
            dosyaYeni.close()
            s_soket.sendto(bytes("tamam", encoding='utf-8'), istemciAdres)
            print("\nPUT islemi basariyla gerceklesti..")
    else:
        print("\nGirilen adda bir dosya istemcide bulunamadi..")
elif(i.decode("utf-8") == "get yap"):
    dosyaIsmi, istemciAdres = s_soket.recvfrom(4096)
    if (os.path.exists(dosyaIsmi.decode("utf-8")) == True):
        dosya = open(dosyaIsmi.decode("utf-8"), "rb")
        s_soket.sendto(bytes("dosya mevcut", encoding='utf-8'), istemciAdres)
        dosyaIcerik = dosya.read()
        dosya.close()
        s_soket.sendto(dosyaIcerik, istemciAdres)
        kontrol = s_soket.recvfrom(4096)
        print("\nGET islemi basariyla gerceklesti..")
        sys.exit()
    else:
        print("\n! Bu isimde bir dosya sunucuda mevcut değil")
        sys.exit()
elif(i.decode("utf-8") == "bitir"):
    s_soket.close()
    print("\nSunucu kapandi")
    sys.exit()
[ 2, 36, 4372, 259, 978, 10134, 6640, 21844, 37841, 201, 198, 201, 198, 11748, 28686, 201, 198, 11748, 25064, 201, 198, 11748, 640, 201, 198, 6738, 17802, 1330, 1635, 201, 198, 6738, 28686, 1330, 1080, 11, 1438, 201, 198, 541, 796, 705,...
1.511617
2,109
""" This module handles compatibility issues between testcase format v2 and v3. httprunner2 3 """ import os import sys from typing import List, Dict, Text, Union, Any from loguru import logger from httprunner import exceptions from httprunner.loader import load_project_meta, convert_relative_project_root_dir from httprunner.parser import parse_data from httprunner.utils import sort_dict_by_custom_order def _convert_extractors(extractors: Union[List, Dict]) -> Dict: """ convert extract list(v2) to dict(v3) Args: extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}] Returns: {"varA": "body.varA", "varB": "body.varB"} """ v3_extractors: Dict = {} if isinstance(extractors, List): # [{"varA": "content.varA"}, {"varB": "json.varB"}] for extractor in extractors: if not isinstance(extractor, Dict): logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in extractor.items(): v3_extractors[k] = v elif isinstance(extractors, Dict): # {"varA": "body.varA", "varB": "body.varB"} v3_extractors = extractors else: logger.error(f"Invalid extractor: {extractors}") sys.exit(1) for k, v in v3_extractors.items(): v3_extractors[k] = _convert_jmespath(v) return v3_extractors def ensure_cli_args(args: List) -> List: """ ensure compatibility with deprecated cli args in v2 """ # remove deprecated --failfast if "--failfast" in args: logger.warning(f"remove deprecated argument: --failfast") args.pop(args.index("--failfast")) # convert --report-file to --html if "--report-file" in args: logger.warning(f"replace deprecated argument --report-file with --html") index = args.index("--report-file") args[index] = "--html" args.append("--self-contained-html") # keep compatibility with --save-tests in v2 if "--save-tests" in args: logger.warning( f"generate conftest.py keep compatibility with --save-tests in v2" ) args.pop(args.index("--save-tests")) _generate_conftest_for_summary(args) return args def ensure_path_sep(path: Text) -> Text: """ ensure compatibility with different path separators of Linux and Windows """ if "/" in path: path = os.sep.join(path.split("/")) if "\\" in path: path = os.sep.join(path.split("\\")) return path
[ 37811, 198, 1212, 8265, 17105, 17764, 2428, 1022, 1332, 7442, 5794, 410, 17, 290, 410, 18, 13, 198, 2804, 1050, 403, 1008, 17, 220, 513, 220, 198, 37811, 198, 11748, 28686, 198, 11748, 25064, 198, 6738, 19720, 1330, 7343, 11, 360, 713...
2.389255
1,061
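A hedged sketch of the v2 -> v3 extractor conversion documented in the record above. It assumes the module's _convert_jmespath helper (not included in this sample) maps "content.*"/"json.*" paths to "body.*":

# Hypothetical call, mirroring the docstring's own example.
v2_extractors = [{"varA": "content.varA"}, {"varB": "json.varB"}]
v3 = _convert_extractors(v2_extractors)
print(v3)  # expected per the docstring: {"varA": "body.varA", "varB": "body.varB"}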
""" Scatter plot with panning and zooming Shows a scatter plot of a set of random points, with basic Chaco panning and zooming. Interacting with the plot: - Left-mouse-drag pans the plot. - Mouse wheel up and down zooms the plot in and out. - Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow and alt-right-arrow moves you forwards and backwards through the "zoom history". """ # Major library imports from numpy import sort from numpy.random import random # Enthought library imports from enable.api import Component, ComponentEditor from traits.api import HasTraits, Instance from traitsui.api import Item, Group, View # Chaco imports from chaco.api import ArrayPlotData, Plot from chaco.tools.api import PanTool, ZoomTool #=============================================================================== # # Create the Chaco plot. #=============================================================================== #=============================================================================== # Attributes to use for the plot view. size = (650, 650) title = "Basic scatter plot" bg_color="lightgray" #=============================================================================== # # Demo class that is used by the demo.py application. #=============================================================================== demo = Demo() if __name__ == "__main__": demo.configure_traits() #--EOF---
[ 37811, 198, 3351, 1436, 7110, 351, 3425, 768, 290, 1976, 30602, 198, 198, 2484, 1666, 257, 41058, 7110, 286, 257, 900, 286, 4738, 2173, 11, 220, 198, 4480, 4096, 609, 10602, 3425, 768, 290, 1976, 30602, 13, 198, 198, 9492, 27362, 351,...
4.18306
366
""" Central configuration module of webstr selenium tests. This module provides configuration options along with default values and function to redefine values. """ # Copyright 2016 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys SELENIUM_LOG_LEVEL = logging.INFO SCHEME = 'https' PORT = 443 BROWSER = 'Firefox' BROWSER_VERSION = '' BROWSER_PLATFORM = 'ANY' SELENIUM_SERVER = None SELENIUM_PORT = 4444 BROWSER_WIDTH = 1280 BROWSER_HEIGHT = 1024 def update_value(key_name, value, force=False): """ Update single value of this config module. """ this_module = sys.modules[__name__] key_name = key_name.upper() # raise AttributeError if we try to define new value (unless force is used) if not force: getattr(this_module, key_name) setattr(this_module, key_name, value)
[ 37811, 198, 30645, 8398, 8265, 286, 3992, 2536, 384, 11925, 1505, 5254, 13, 198, 198, 1212, 8265, 3769, 8398, 3689, 1863, 351, 4277, 3815, 290, 198, 8818, 284, 34087, 500, 3815, 13, 198, 37811, 198, 198, 2, 15069, 1584, 2297, 10983, 1...
3.191943
422
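A short usage sketch for update_value above (values are illustrative): existing keys are matched case-insensitively via .upper(), and unknown keys raise AttributeError unless force=True.

# Hedged example calls against this config module's own API.
update_value('browser', 'Chrome')           # overrides the existing BROWSER option
update_value('NEW_OPTION', 42, force=True)  # defines a brand-new option explicitly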
import random
[ 11748, 4738, 628 ]
5
3
import RandomCharacter  # Defined in Listing 6.9


def main():
    """Main."""
    # Create a list of characters
    chars = createList()

    # Display the list
    print("The lowercase letters are:")
    displayList(chars)

    # Count the occurrences of each letter
    counts = countLetters(chars)

    # Display counts
    print("The occurrences of each letter are:")
    displayCounts(counts)


def createList():
    """Create a list of characters."""
    # Create an empty list
    chars = []

    # Create lowercase letters randomly and add them to the list
    for i in range(100):
        chars.append(RandomCharacter.getRandomLowerCaseLetter())

    # Return the list
    return chars


def displayList(chars):
    """Display the list of characters."""
    # Display the characters in the list 20 on each line
    for i in range(len(chars)):
        if (i + 1) % 20 == 0:
            print(chars[i])
        else:
            print(chars[i], end=' ')


def countLetters(chars):
    """Count the occurrences of each letter."""
    # Create a list of 26 integers with initial value 0
    counts = 26 * [0]

    # For each lowercase letter in the list, count it
    for i in range(len(chars)):
        counts[ord(chars[i]) - ord('a')] += 1

    return counts


def displayCounts(counts):
    """Display counts."""
    for i in range(len(counts)):
        if (i + 1) % 10 == 0:
            print(counts[i], chr(i + ord('a')))
        else:
            print(counts[i], chr(i + ord('a')), end=' ')
    print()


main()  # Call the main function
[ 11748, 14534, 27275, 220, 1303, 2896, 1389, 287, 7343, 278, 718, 13, 24, 628, 198, 4299, 1388, 33529, 198, 220, 220, 220, 37227, 13383, 526, 15931, 198, 220, 220, 220, 1303, 13610, 257, 1351, 286, 3435, 198, 220, 220, 220, 34534, 796,...
2.572139
603
""" The Vertica integration will trace queries made using the vertica-python library. Vertica will be automatically instrumented with ``patch_all``, or when using the ``ls-trace-run`` command. Vertica is instrumented on import. To instrument Vertica manually use the ``patch`` function. Note the ordering of the following statements:: from ddtrace import patch patch(vertica=True) import vertica_python # use vertica_python like usual To configure the Vertica integration globally you can use the ``Config`` API:: from ddtrace import config, patch patch(vertica=True) config.vertica['service_name'] = 'my-vertica-database' To configure the Vertica integration on an instance-per-instance basis use the ``Pin`` API:: from ddtrace import Pin, patch, Tracer patch(vertica=True) import vertica_python custom_tracer = Tracer() conn = vertica_python.connect(**YOUR_VERTICA_CONFIG) # override the service and tracer to be used Pin.override(conn, service='myverticaservice', tracer=custom_tracer) """ from ...utils.importlib import require_modules required_modules = ['vertica_python'] with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import patch, unpatch __all__ = [patch, unpatch]
[ 37811, 198, 464, 24417, 3970, 11812, 481, 12854, 20743, 925, 1262, 262, 9421, 3970, 12, 29412, 198, 32016, 13, 198, 198, 42369, 3970, 481, 307, 6338, 8875, 276, 351, 7559, 17147, 62, 439, 15506, 11, 393, 618, 1262, 198, 1169, 7559, 72...
3.262376
404
from openid.consumer.discover import OpenIDServiceEndpoint

import datadriven
[ 6738, 1280, 312, 13, 49827, 13, 67, 29392, 1330, 4946, 2389, 16177, 12915, 4122, 198, 11748, 4818, 324, 380, 574, 198 ]
3.666667
21
from .base import *


SECRET_KEY = get_env_var('SECRET_KEY')

CSRF_COOKIE_SECURE = True

SESSION_COOKIE_SECURE = True

TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)

EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''

USERENA_USE_HTTPS = True
[ 6738, 764, 8692, 1330, 1635, 628, 198, 23683, 26087, 62, 20373, 796, 651, 62, 24330, 62, 7785, 10786, 23683, 26087, 62, 20373, 11537, 198, 198, 7902, 32754, 62, 34, 15308, 10008, 62, 23683, 11335, 796, 6407, 198, 198, 50, 47621, 62, 3...
2.221402
271
import pandas as pd
import argparse
import json

try:
    from graphviz import Digraph
except:
    print("Note: Optional graphviz not installed")


# NOTE: main() is not defined in this sample; it is assumed to be defined
# elsewhere in the original file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate Graph Viz')
    parser.add_argument('-f', '--output_file', type=str,
                        help='The output file to generate a graph of', required=True)
    args = parser.parse_args()
    main(args)
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 1822, 29572, 198, 11748, 33918, 198, 28311, 25, 198, 220, 220, 220, 422, 4823, 85, 528, 1330, 7367, 1470, 198, 16341, 25, 198, 220, 220, 220, 3601, 7203, 6425, 25, 32233, 4823, 85, 528, 4...
2.582857
175
import discord
import os
import json

import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from pprint import pprint

import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
[ 11748, 36446, 198, 11748, 28686, 198, 11748, 33918, 198, 198, 11748, 4818, 8079, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 3128, 22602, 13, 2411, 265, 1572, 12514, 1330, 48993, 1572, 12514, 198, 6738, 279, 4798, 1330, 279, 4798, ...
3.507463
67
from abc import ABCMeta, abstractmethod
import os

from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats

__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"

import re
import numpy as np
import ast

from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader


# NOTE: the FeatureExtractor base class is missing from this sample; it is
# assumed to be defined above this point in the original file.
class VmafFeatureExtractor(FeatureExtractor):

    TYPE = "VMAF_feature"

    # VERSION = '0.1'    # vmaf_study; Anush's VIF fix
    # VERSION = '0.2'    # expose vif_num, vif_den, adm_num, adm_den, anpsnr
    # VERSION = '0.2.1'  # expose vif num/den of each scale
    # VERSION = '0.2.2'  # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
    # VERSION = '0.2.2b' # expose adm_den/num_scalex
    # VERSION = '0.2.3'  # AVX for VMAF convolution; update adm features by folding noise floor into per coef
    # VERSION = '0.2.4'  # Fix a bug in adm feature passing scale into dwt_quant_step
    # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
    VERSION = '0.2.4c'   # Modify by moving motion2 to c code

    ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
                     'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
                     'vif_num_scale0', 'vif_den_scale0',
                     'vif_num_scale1', 'vif_den_scale1',
                     'vif_num_scale2', 'vif_den_scale2',
                     'vif_num_scale3', 'vif_den_scale3',
                     'adm_num_scale0', 'adm_den_scale0',
                     'adm_num_scale1', 'adm_den_scale1',
                     'adm_num_scale2', 'adm_den_scale2',
                     'adm_num_scale3', 'adm_den_scale3',
                     ]

    DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
                             'vif2', 'adm2', 'adm3',
                             'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
                             ]

    ADM2_CONSTANT = 0
    ADM_SCALE_CONSTANT = 0
[ 6738, 450, 66, 1330, 9738, 48526, 11, 12531, 24396, 198, 11748, 28686, 198, 6738, 45887, 1878, 13, 31391, 13, 44374, 1330, 787, 62, 48546, 62, 6978, 11, 1057, 62, 14681, 198, 6738, 45887, 1878, 13, 31391, 13, 34242, 1330, 7343, 29668, ...
2.086957
1,081
df['Age'].hist() #bins=30, log=True
[ 7568, 17816, 23396, 6, 4083, 10034, 3419, 1303, 65, 1040, 28, 1270, 11, 2604, 28, 17821 ]
2.1875
16
# -*- coding: utf-8 -*-
"""
Created by susy at 2019/11/8
"""
from dao.dao import DataDao
import pytz
from dao.models import PanAccounts
from cfg import PAN_SERVICE, MASTER_ACCOUNT_ID
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 416, 2341, 88, 379, 13130, 14, 1157, 14, 23, 198, 37811, 198, 6738, 288, 5488, 13, 67, 5488, 1330, 6060, 35, 5488, 198, 11748, 12972, 22877, 198, 67...
2.591549
71
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import torch.nn as nn
from .single import attention
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 764, 29762, 1330, 3241, 198 ]
2.564103
39
import os, time, argparse
from datetime import datetime

from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.log.exporter.xes import factory as xes_exporter
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.petri.importer import pnml as pnml_importer
from pm4py.evaluation.replay_fitness import factory as replay_factory
from pm4py.evaluation.precision import factory as precision_factory

from conf.settings import DATA_PATH

WORK_PATH = os.path.abspath(os.getcwd())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--system', help='Which system (e.g. pb_system_5_3)', required=True)
    parser.add_argument('-sfx', '--suffix', help='Suffix (chosen epoch, e.g. 1981)', required=True)
    parser.add_argument('-j', '--job', help='Job (0/1)', required=True)
    parser.add_argument('-pn', '--pn', help='Petri net file to evaluate', required=True)
    parser.add_argument('-strategy', '--strategy', help='naive/mh', required=True)
    args = parser.parse_args()

    system = args.system
    suffix = int(args.suffix)
    job = args.job
    pn = args.pn
    strategy = args.strategy

    if DATA_PATH is None:
        train_file = os.path.join(WORK_PATH, "data", "variants", system + "_train.txt")
        gen_file = os.path.join(WORK_PATH, "data", "avatar", "variants",
                                system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
        csv_file = os.path.join(WORK_PATH, "data", "avatar", "variants",
                                system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
        xes_file = os.path.join(WORK_PATH, "data", "avatar", "variants",
                                system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
        pn_file = os.path.join(WORK_PATH, "data", "pns", system, pn)
    else:
        train_file = os.path.join(DATA_PATH, "variants", system + "_train.txt")
        gen_file = os.path.join(DATA_PATH, "avatar", "variants",
                                system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + ".txt")
        csv_file = os.path.join(DATA_PATH, "avatar", "variants",
                                system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.csv")
        xes_file = os.path.join(DATA_PATH, "avatar", "variants",
                                system + "_relgan_" + str(suffix) + "_j" + str(job) + "_" + strategy + "_generalization.xes")
        pn_file = os.path.join(DATA_PATH, "pns", system, pn)

    """ READ FILES AND CONVERT TO XES """
    traces = readFile(train_file, gen_file, unique=True)
    convertToCsv(traces=traces, to_path=csv_file)
    time.sleep(1)
    log = csv_importer.import_event_log(csv_file)
    xes_exporter.export_log(log, xes_file)
    time.sleep(1)

    """ PERFORM MEASUREMENT ON PN AND XES"""
    log = xes_importer.import_log(xes_file)
    net, initial_marking, final_marking = pnml_importer.import_net(pn_file)

    fitness = replay_factory.apply(log, net, initial_marking, final_marking)
    print("Fitness=", fitness)
    precision = precision_factory.apply(log, net, initial_marking, final_marking)
    print("Precision=", precision)

    fitness = fitness["log_fitness"]
    generalization = 2 * ((fitness * precision) / (fitness + precision))

    if strategy == "mh":
        print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using MH SAMPLING on suffix ", str(suffix), " ***")
    elif strategy == "naive":
        print("**** ", str(system), " Job ", str(job), " on PN ", str(pn_file), " using NAIVE SAMPLING on suffix ", str(suffix), " ***")
    else:
        raise ValueError("Unknown strategy.")

    print("AVATAR Generalization=", generalization)
[ 11748, 28686, 11, 640, 11, 1822, 29572, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 9114, 19, 9078, 13, 48205, 13, 6404, 13, 320, 26634, 13, 40664, 1330, 8860, 355, 269, 21370, 62, 320, 26634, 198, 6738, 9114, 19, 9078, ...
2.561246
1,445
# $ python embed.py

from ctypes import cdll

lib = cdll.LoadLibrary("../target/release/libembed.dylib") #=> for Mac
#lib = cdll.LoadLibrary("../target/release/libembed.so") #=> for Linux

lib.process()

print("done!")
[ 2, 720, 21015, 11525, 13, 9078, 198, 198, 6738, 269, 19199, 1330, 269, 12736, 198, 198, 8019, 796, 269, 12736, 13, 8912, 23377, 7203, 40720, 16793, 14, 20979, 14, 8019, 20521, 13, 31739, 4943, 1303, 14804, 329, 4100, 198, 2, 8019, 796...
2.881579
76
# coding: utf-8

from __future__ import absolute_import

import datetime
import re
import importlib

import six

from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 11748, 4818, 8079, 198, 11748, 302, 198, 11748, 1330, 8019, 198, 198, 11748, 2237, 198, 198, 6738, 289, 84, 707, 68, 291, 75, 2778, ...
3.026549
113
import numpy as np
from typing import Optional, Any
from pandas import DataFrame
from copy import deepcopy
from abc import abstractmethod

from utils import TimedGenericIterativeAlgorithm

import pycsou.core as pcore
import pycsou.linop as pl
from pycsou.func.penalty import L1Norm
from pycsou.func.loss import SquaredL2Loss
from pycsou.opt.proxalgs import APGD
[ 11748, 299, 32152, 355, 45941, 198, 6738, 19720, 1330, 32233, 11, 4377, 198, 6738, 19798, 292, 1330, 6060, 19778, 198, 6738, 4866, 1330, 2769, 30073, 198, 6738, 450, 66, 1330, 12531, 24396, 198, 198, 6738, 3384, 4487, 1330, 5045, 276, 4...
3.210526
114
#!/usr/bin/env python

import unittest

from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph


if __name__=='__main__':
    unittest.main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 555, 715, 395, 198, 198, 6738, 374, 67, 2704, 571, 13, 34960, 1330, 1482, 29741, 14070, 37065, 198, 6738, 374, 67, 2704, 571, 13, 4354, 1330, 37902, 4663, 891, 11, 25659, ...
2.631579
76
#!/usr/bin/env python
from sinedon import dbupgrade, dbconfig
import updatelib

project_dbupgrade = dbupgrade.DBUpgradeTools('projectdata', drop=True)

if __name__ == "__main__":
    updatelib_inst = updatelib.UpdateLib(project_dbupgrade)
    checkout_version = raw_input('Revert to checkout version, for example, 2.1 -->')
    if checkout_version != 'trunk':
        try:
            map((lambda x:int(x)),checkout_version.split('.')[:2])
        except:
            print "valid versions are 'trunk', '2.1', or '2.1.2' etc"
            raise
    checkout_revision = int(raw_input('Revert to checkout revision, for example, 16500 -->'))
    updatelib_inst.updateDatabaseVersion(checkout_version)
    print "\033[35mVersion Updated in the database %s\033[0m" % checkout_version
    updatelib_inst.updateDatabaseRevision(checkout_revision)
    print "\033[35mRevision Updated in the database as %d\033[0m" % checkout_revision
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 6738, 264, 1389, 261, 1330, 20613, 929, 9526, 11, 20613, 11250, 198, 11748, 4296, 8019, 198, 198, 16302, 62, 9945, 929, 9526, 796, 20613, 929, 9526, 13, 11012, 44948, 33637, 10786, 16302...
2.873333
300
from sklearn.linear_model import LogisticRegression

from fightchurn.listings.chap8.listing_8_2_logistic_regression import prepare_data, save_regression_model
from fightchurn.listings.chap8.listing_8_2_logistic_regression import save_regression_summary, save_dataset_predictions
[ 6738, 1341, 35720, 13, 29127, 62, 19849, 1330, 5972, 2569, 8081, 2234, 198, 6738, 1907, 354, 700, 13, 4868, 654, 13, 354, 499, 23, 13, 4868, 278, 62, 23, 62, 17, 62, 6404, 2569, 62, 2301, 2234, 1330, 8335, 62, 7890, 11, 3613, 62, ...
3.1
90
#%% [markdown]
# # Classic clustering

#%% [markdown]
# ## Standard imports
import os

#%%
%load_ext autoreload
%autoreload 2
os.chdir('/home/jovyan/work')

#%% [markdown]
# ## Import iss

#%%
from iss.tools import Config
from iss.tools import Tools
from iss.models import SimpleConvAutoEncoder
from iss.clustering import ClassicalClustering
from iss.clustering import AdvancedClustering
from dotenv import find_dotenv, load_dotenv
import numpy as np

#%% [markdown]
### Loading the config

#%%
load_dotenv(find_dotenv())
cfg = Config(project_dir = os.getenv("PROJECT_DIR"), mode = os.getenv("MODE"))

#%% [markdown]
### Loading the model

#%%
## load the model
model_type = 'simple_conv'
cfg.get('models')[model_type]['model_name'] = 'model_colab'
model = SimpleConvAutoEncoder(cfg.get('models')[model_type])

#%% [markdown]
## Loading the images

#%%
filenames = Tools.list_directory_filenames('data/processed/models/autoencoder/train/k/')
generator_imgs = Tools.generator_np_picture_from_filenames(filenames, target_size = (27, 48), batch = 496, nb_batch = 10)

#%%
pictures_id, pictures_preds = Tools.encoded_pictures_from_generator(generator_imgs, model)

#%%
intermediate_output = pictures_preds.reshape((pictures_preds.shape[0], 3*6*16))

#%% [markdown]
# ## PCA
# Dimensionality reduction

#%%
clustering = ClassicalClustering(cfg.get('clustering')['classical'], pictures_id, intermediate_output)

#%%
clustering.compute_pca()

#%% [markdown]
# ## Kmeans
# First clusters

#%%
clustering.compute_kmeans()
clustering.compute_kmeans_centers()

#%% [markdown]
# ## CAH
# Second clusters

#%%
clustering.compute_cah()
clustering.compute_cah_labels()

#%% [markdown]
# ## Results

#%% [markdown]
# ### Intermediate clusters

#%%
fig = plt.figure(1, figsize=(12, 7))
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.kmeans_labels)

#%% [markdown]
# ### Final clusters

#%%
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.final_labels)

#%% [markdown]
# ### Saving the models

#%%
clustering.save()

#%%
# clustering = ClassicalClustering(cfg.get('clustering')['classical'])
clustering.load()

#%% [markdown]
# ## Visualising the clusters

#%%

#%%
from IPython.display import Image

#%%
for cl in range(0,19):
    print("Cluster %s" % (cl))
    res_tmp = select_cluster(clustering, cl)
    print(len(res_tmp))
    image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp[:100]]
    # img = Tools.display_mosaic(image_array, nrow = 10)
    # fig = plt.figure(1, figsize=(12, 7))
    # plt.imshow(img, aspect = 'auto')
    # plt.show()

#%% [markdown]
### Zoom on cluster 0

#%%
res_tmp = select_cluster(clustering, 1)

#%%
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp]

#%%
Tools.display_mosaic(image_array, nrow = 18)

#%%
col = [1 if l == 1 else 0 for l in clustering.kmeans_labels]
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = col)

#%%
plt.scatter(clustering.pca_reduction[np.array(col) == 1, 0], clustering.pca_reduction[np.array(col) == 1, 1])
[ 2, 16626, 685, 4102, 2902, 60, 198, 2, 1303, 1012, 436, 1586, 1398, 2350, 198, 198, 2, 16626, 685, 4102, 2902, 60, 198, 2, 22492, 1330, 1398, 2350, 198, 11748, 28686, 198, 198, 2, 16626, 198, 4, 2220, 62, 2302, 1960, 382, 2220, 19...
2.524941
1,263
#!/usr/bin/python3
# -*- coding: utf-8 -*-

# -----------------------------------------------------------
# created 02.02.2021, tkaulke
# Thomas Kaulke, kaulkth@gmail.com
# https://github.com/kaulketh
# -----------------------------------------------------------

__author__ = "Thomas Kaulke"
__email__ = "kaulketh@gmail.com"

import errno
import logging
import os
from logging.config import fileConfig

# runtime location
this_folder = os.path.dirname(os.path.abspath(__file__))
# define log folder related to location
log_folder = os.path.join(this_folder, '../logs')

# define ini and log files
ini_file = 'debug.ini'
info_log_file = log_folder + '/info.log'
error_log_file = log_folder + '/error.log'

# check if exists or create log folder
try:
    os.makedirs(log_folder, exist_ok=True)  # Python>3.2
except TypeError:
    try:
        os.makedirs(log_folder)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(log_folder):
            pass
        else:
            raise

# setup configuration
config_file = os.path.join(this_folder, ini_file)
fileConfig(config_file, disable_existing_loggers=True)

# create handlers
handler_info = logging.FileHandler(os.path.join(this_folder, info_log_file))
handler_error = logging.FileHandler(os.path.join(this_folder, error_log_file))

# set levels
handler_info.setLevel(logging.INFO)
handler_error.setLevel(logging.ERROR)

# create formatters and add to handlers
format_info = \
    logging.Formatter('%(asctime)s %(levelname)s '
                      '[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
                      '%(message).180s', datefmt='%Y-%m-%d %H:%M:%S')
format_error = \
    logging.Formatter(
        '%(asctime)s %(levelname)s '
        '[ %(module)s.%(funcName)s linenr.%(lineno)s ] '
        '[ thread: %(threadName)s ] %(message)s')

handler_info.setFormatter(format_info)
handler_error.setFormatter(format_error)

if __name__ == '__main__':
    pass
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 20368, 22369, 6329, 198, 2, 2727, 7816, 13, 2999, 13, 1238, 2481, 11, 256, 74, 2518, 365, 198, 2, 5658, 509, ...
2.488636
792
import numpy as np
import pytest

from mr_uplift.dataset.data_simulation import get_no_noise_data, get_simple_uplift_data, get_observational_uplift_data_1
from mr_uplift.mr_uplift import MRUplift, get_t_data
from mr_uplift.keras_model_functionality import prepare_data_optimized_loss
import sys
import pandas as pd
[ 11748, 299, 32152, 355, 45941, 198, 11748, 12972, 9288, 198, 198, 6738, 285, 81, 62, 84, 489, 2135, 13, 19608, 292, 316, 13, 7890, 62, 14323, 1741, 1330, 651, 62, 3919, 62, 3919, 786, 62, 7890, 11, 651, 62, 36439, 62, 84, 489, 213...
2.603306
121
""" """ import unittest from example_module import COLORS, increment
[ 37811, 198, 198, 37811, 198, 198, 11748, 555, 715, 395, 198, 198, 6738, 1672, 62, 21412, 1330, 20444, 20673, 11, 18703, 628 ]
3.318182
22
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    import oauth2 as oauth
except:
    oauth = None

import cgi
import logging
import urllib
from datetime import datetime

from axes.decorators import watch_login
import django.contrib.auth.views
from django.core import urlresolvers
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth import login, get_backends, authenticate
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _

from desktop.auth import forms as auth_forms
from desktop.lib.django_util import render
from desktop.lib.django_util import login_notrequired
from desktop.lib.django_util import JsonResponse
from desktop.log.access import access_warn, last_access_map
from desktop.conf import LDAP, OAUTH, DEMO_ENABLED

from hadoop.fs.exceptions import WebHdfsException
from useradmin.models import get_profile
from useradmin.views import ensure_home_directory, require_change_password


LOG = logging.getLogger(__name__)


def get_current_users():
  """Return dictionary of User objects and a dictionary of the user's IP address and last access time"""
  current_users = { }
  for session in Session.objects.all():
    try:
      uid = session.get_decoded().get(django.contrib.auth.SESSION_KEY)
    except SuspiciousOperation:
      # If secret_key changed, this resolution won't work.
      uid = None

    if uid is not None:
      try:
        userobj = User.objects.get(pk=uid)
        current_users[userobj] = last_access_map.get(userobj.username, { })
      except User.DoesNotExist:
        LOG.debug("User with id=%d does not exist" % uid)

  return current_users


def dt_logout(request, next_page=None):
  """Log out the user"""
  username = request.user.get_username()

  request.audit = {
    'username': username,
    'operation': 'USER_LOGOUT',
    'operationText': 'Logged out user: %s' % username
  }

  backends = get_backends()
  if backends:
    for backend in backends:
      if hasattr(backend, 'logout'):
        response = backend.logout(request, next_page)
        if response:
          return response

  return django.contrib.auth.views.logout(request, next_page)


def profile(request):
  """
  Dumps JSON for user-profile information.
  """
  return render(None, request, _profile_dict(request.user))


# OAuth is based on Twitter as example.
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 49962, 284, 1012, 280, 1082, 64, 11, 3457, 13, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198...
3.164032
1,012
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from collections import OrderedDict

from moz_sql_parser import parse as parse_sql
import pyparsing
import re
from six.moves.urllib import parse

FROM_REGEX = re.compile(' from ("http.*?")', re.IGNORECASE)
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 17268, 1330, ...
3.293578
109
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path

import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
# from util.box_ops import masks_to_boxes

from .construction import make_construction_transforms

import logging
[ 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 1439, 6923, 33876, 198, 11748, 33918, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 6738, 350, 4146, 1330, 7412, 198...
3.613636
88
#!python

"""
ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!!
"""


def linear_search(array, item):
    """return the first index of item in array or None if item is not found"""
    return linear_search_iterative(array, item)
    # return linear_search_recursive(array, item)


def linear_search_iterative(array, item):
    """Time complexity: O(n) because you iterate through n amount of items in array
    Space Complexity: O(n) because there are n amount of items"""
    # loop over all array values until item is found
    for index, value in enumerate(array):  # O(n)
        if item == value:  # O(1)
            return index  # found O(1)
    return None  # not found O(1)


def linear_search_recursive(array, item, index=0):
    """Time complexity: O(n) because you are returning the function continuously until index equals to nth-item
    """
    if len(array) <= index:
        return index
    if array[index] == item:
        return index
    else:
        return linear_search_recursive(array, item, index + 1)


def binary_search(array, item):
    """return the index of item in sorted array or None if item is not found"""
    return binary_search_iterative(array, item)
    # return binary_search_recursive(array, item)


def binary_search_iterative(array, item):
    """Time Complexity: O(log*n) because you are constantly dividing the length of array by 2 until array length is 1
    Space Complexity: O(1)
    """
    left, right = 0, len(array) - 1
    if len(array) == 0:
        return None
    while left <= right:
        middle = left + (right - left) // 2
        if item == array[middle]:
            return middle
        elif item > array[middle]:
            left = middle + 1
        else:
            right = middle - 1
    return None


def binary_search_recursive(array, item, left=None, right=None):
    """Time Complexity: O(log*n)
    Space Complexity: 0(log*n) recursion call stack space"""
    # TODO: implement binary search recursively here
    if left is None and right is None:
        left, right = 0, len(array) - 1
    middle = left + (right - left) // 2
    if left > right:
        return None
    if array[middle] == item:
        return middle
    elif item > array[middle]:
        return binary_search_recursive(array, item, middle + 1, right)
    else:
        return binary_search_recursive(array, item, left, middle - 1)
[ 2, 0, 29412, 198, 198, 37811, 198, 1565, 11929, 6158, 29397, 4177, 11053, 13315, 20460, 5357, 37253, 49269, 55, 9050, 50184, 198, 198, 37811, 628, 628, 198, 4299, 14174, 62, 12947, 7, 18747, 11, 2378, 2599, 198, 220, 220, 220, 37227, ...
2.631579
931
#!/usr/bin/python

'''
memory class
stored in sqlite data base
holds raw input and memories in parse taged columns

'''

import sys
import re
import sqlite3
import os
from datetime import date, datetime
from pattern.en import parse
from pattern.en import pprint
from pattern.en import parsetree
from pattern.en import wordnet
from pattern.en import pluralize, singularize
from pattern.en import conjugate, lemma, lexeme

#dir = os.path.dirname(os.path.abspath(__file__))
dir = '/home/erni/catkin_ws/src/max_ros/max_ai/src/max_ai/'
RM = sqlite3.connect(dir +'robbie_memory.sqlite')
#RM = sqlite3.connect(dir + '/data/robbie_memory.db')
cursor = RM.cursor()


# Information about a single concept


# Robbie memory class. Collection of concepts
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 7061, 6, 198, 31673, 1398, 220, 198, 301, 1850, 287, 44161, 578, 1366, 2779, 198, 2946, 82, 8246, 5128, 290, 9846, 287, 21136, 256, 1886, 15180, 198, 198, 7061, 6, 198, 198, 11748, 250...
3
248
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.

    The second subplot illustrates the use of BoundaryNorm to
    get a filled contour effect.
'''

from pylab import *
from numpy import ma
import matplotlib.colors as colors

delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1)  # difference of Gaussians

# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent.  This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)

# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.

subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
            cmap=palette,
            norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
            origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)

subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
            cmap=palette,
            norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                       ncolors=256, clip = False),
            origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
         orientation='horizontal', shrink=0.8)

show()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 7061, 6, 320, 12860, 351, 29229, 7177, 5128, 290, 503, 12, 1659, 12, 9521, 7577, 13, 628, 220, 220, 220, 383, 1218, 850, 29487, 21290, 262, 779, 286, 30149, 560, 35393, 284, 198, 220...
2.529328
699
from pydantic import BaseModel, validator, Field
from typing import List, Dict
from datetime import datetime
[ 198, 6738, 279, 5173, 5109, 1330, 7308, 17633, 11, 4938, 1352, 11, 7663, 198, 6738, 19720, 1330, 7343, 11, 360, 713, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198 ]
3.793103
29
import discord
from discord.ext import commands
import requests
import random
from box import Box
[ 11748, 36446, 198, 6738, 36446, 13, 2302, 1330, 9729, 198, 11748, 7007, 198, 11748, 4738, 198, 6738, 3091, 1330, 8315, 198 ]
4.666667
21
#!/usr/bin/python
################################################################################
# 20bdcef0-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 29113, 29113, 14468, 198, 2, 1160, 17457, 344, 69, 15, 12, 20, 535, 20, 12, 1157, 68, 19, 12, 1878, 2816, 12, 405, 18742, 67, 486, 5036, 2919, 198, 2, 198, 2, 10799, 360, 959, 3364, 198...
4.060241
83
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views

urlpatterns = [
    path('register/', views.register, name='register'),
    path('login/', views.userlogin, name='login'),
    path('logout/', views.userlogout, name='logout'),
    path('password_change/', auth_views.PasswordChangeView.as_view(), name='password_change'),
    path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
    path('password_reset/', auth_views.PasswordResetView.as_view(), name='password_reset'),
    path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    path('applicantdashboard/', views.applicantdashboard, name='applicantdashboard'),
    path('recruiterdashboard/', views.recruiterdashboard, name='recruiterdashboard'),
    path('applicantdashboard/profile-edit/', views.applicantedit, name='editapplicantprofile'),
    path('recruiterdashboard/profile-edit/', views.recruiteredit, name='editrecruiterprofile'),
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 5009, 355, 6284, 62, 33571, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 30238...
2.660784
510
from datetime import datetime
from difflib import unified_diff
from logging import basicConfig, getLogger, INFO
import os
from pathlib import Path
import shutil
import subprocess
import sys
import yaml
from urllib.parse import urlparse
from notebook import notebookapp
from IPython.core.display import HTML

WORKDIR = 'edit'
META_YML = '.vcp-meta.yml'
MOODLE_DIR = '/opt/moodle'
CONF_RELATIVE = '/etc'
ENV_INHERIT = ['VAULT_ADDR', 'VAULT_TOKEN', 'PATH', 'REQUESTS_CA_BUNDLE']

logger = getLogger(__name__)
basicConfig(level=INFO, format='%(message)s')
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 814, 8019, 1330, 22706, 62, 26069, 198, 6738, 18931, 1330, 4096, 16934, 11, 651, 11187, 1362, 11, 24890, 198, 11748, 28686, 198, 6738, 3108, 8019, 1330, 10644, 198, 11748, 4423, 346, 198, 1...
2.818182
209
""" Minimum edit distance computes the cost it takes to get from one string to another string. This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions. Resource: https://en.wikipedia.org/wiki/Edit_distance For example, getting from "intention" to "execution" is a cost of 8. minimum_edit_distance("intention", "execution") # 8 """
[ 37811, 198, 44046, 4370, 5253, 552, 1769, 262, 1575, 340, 2753, 284, 651, 422, 530, 4731, 284, 1194, 4731, 13, 220, 198, 1212, 7822, 3544, 262, 1004, 574, 1477, 22006, 5253, 351, 257, 1575, 286, 352, 329, 7550, 507, 393, 28128, 507, ...
3.743119
109
from operator import attrgetter
import logging
import os
import shutil
import subprocess

import pyfastaq
import pymummer
from cluster_vcf_records import vcf_record
from varifier import utils

# We only want the .snps file from the dnadiff script from MUMmer. From reading
# the docs inspecting that script, we need to run these commands:
#
# nucmer --maxmatch --delta out.delta ref.fasta query.fasta
# delta-filter -1 out.delta > out.1delta
# show-snps -rlTHC out.1delta > out.snps
#
# This is instead of just running show-snps, which runs several other commands
# in addition to making the snps file.


def _snps_file_to_vcf(snps_file, query_fasta, outfile):
    """Loads the .snps file made by dnadiff.
    query_fasta = fasta file of query sequences.
    Writes a new VCF file unmerged records."""
    vcf_records = {}
    variants = pymummer.snp_file.get_all_variants(snps_file)
    query_seqs = utils.file_to_dict_of_seqs(query_fasta)

    for variant in variants:
        # If the variant is reversed, it means that either the ref or query had to be
        # reverse complemented when aligned by mummer. Need to do the appropriate
        # reverse (complement) fixes so the VCF has the correct REF and ALT sequences
        if variant.reverse:
            qry_seq = pyfastaq.sequences.Fasta("x", variant.qry_base)
            qry_seq.revcomp()
            variant.qry_base = "".join(reversed(qry_seq.seq))
            ref_seq = pyfastaq.sequences.Fasta("x", variant.ref_base)
            ref_seq.revcomp()
            variant.ref_base = ref_seq.seq

        if variant.var_type == pymummer.variant.SNP:
            new_record = vcf_record.VcfRecord(
                "\t".join(
                    [
                        variant.qry_name,
                        str(variant.qry_start + 1),
                        ".",
                        variant.qry_base,
                        variant.ref_base,
                        ".",
                        ".",
                        "SVTYPE=DNADIFF_SNP",
                        "GT",
                        "1/1",
                    ]
                )
            )
        elif variant.var_type == pymummer.variant.DEL:
            # The query has sequence missing, compared to the
            # reference. We're making VCF records w.r.t. the
            # query, so this is an insertion. So need to
            # get the nucleotide before the insertion as well.
            new_record = vcf_record.VcfRecord(
                "\t".join(
                    [
                        variant.qry_name,
                        str(variant.qry_start + 1),
                        ".",
                        query_seqs[variant.qry_name][variant.qry_start],
                        query_seqs[variant.qry_name][variant.qry_start]
                        + variant.ref_base,
                        ".",
                        ".",
                        "SVTYPE=DNADIFF_INS",
                        "GT",
                        "1/1",
                    ]
                )
            )
        elif variant.var_type == pymummer.variant.INS:
            # The ref has sequence missing, compared to the
            # query. We're making VCF records w.r.t. the
            # query, so this is a deletion. So need to
            # get the nucleotide before the deletion as well.
            new_record = vcf_record.VcfRecord(
                "\t".join(
                    [
                        variant.qry_name,
                        str(variant.qry_start),
                        ".",
                        query_seqs[variant.qry_name][variant.qry_start - 1]
                        + variant.qry_base,
                        query_seqs[variant.qry_name][variant.qry_start - 1],
                        ".",
                        ".",
                        "SVTYPE=DNADIFF_DEL",
                        "GT",
                        "1/1",
                    ]
                )
            )
        else:
            raise Exception("Unknown variant type: " + str(variant))

        assert (
            new_record.REF
            == query_seqs[new_record.CHROM][
                new_record.POS : new_record.POS + len(new_record.REF)
            ]
        )

        if new_record.CHROM not in vcf_records:
            vcf_records[new_record.CHROM] = []

        vcf_records[new_record.CHROM].append(new_record)

    for vcf_list in vcf_records.values():
        vcf_list.sort(key=attrgetter("POS"))

    with open(outfile, "w") as f:
        print("##fileformat=VCFv4.2", file=f)
        for seq in query_seqs.values():
            print(f"##contig=<ID={seq.id},length={len(seq)}>", file=f)
        print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tsample", file=f)

        for key, vcf_list in sorted(vcf_records.items()):
            for record in vcf_list:
                print(record, file=f)
[ 6738, 10088, 1330, 708, 81, 1136, 353, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 11748, 850, 14681, 198, 198, 11748, 12972, 7217, 30188, 198, 11748, 279, 4948, 31647, 198, 6738, 13946, 62, 85, 12993, 62, 8344, 36...
1.803236
2,719
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Type
[ 6738, 4818, 330, 28958, 1330, 355, 11600, 11, 4818, 330, 31172, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 11, 5994, 628, 628, 628 ]
3.384615
26
from __future__ import absolute_import

import datetime

from dateutil import parser
import pytz

from .base import FieldFilter, DictFilterMixin, DjangoQueryFilterMixin
from .queryfilter import QueryFilter

WHOLE_DAY = datetime.timedelta(days=1)
ONE_SECOND = datetime.timedelta(seconds=1)

min_datetime = datetime.datetime.min.replace(tzinfo=pytz.utc)
max_datetime = datetime.datetime.max.replace(tzinfo=pytz.utc)
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 11748, 4818, 8079, 198, 198, 6738, 3128, 22602, 1330, 30751, 198, 11748, 12972, 22877, 198, 198, 6738, 764, 8692, 1330, 7663, 22417, 11, 360, 713, 22417, 35608, 259, 11, 37770, 2...
2.971831
142
# -*- coding: utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.

"""Import all torch operators."""
import torch.nn.functional as F
import torch.nn as nn
import torch
from vega.search_space.networks.network_factory import NetworkFactory
from vega.search_space.networks.net_utils import NetTypes
from vega.search_space.networks.pytorch.utils.anchor_utils.anchor_target import AnchorTarget
from vega.search_space.networks.pytorch.utils.bbox_utils.anchor_generator import AnchorGenerator
from vega.core.common.config import Config
from functools import partial
import numpy as np
from six.moves import map, zip
from vega.search_space.networks.pytorch.losses.reduce_loss import weighted_loss


def multi_apply(func, *args, **kwargs):
    """Multi apply.

    :param func: function
    :param args: args of function
    :return: result
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Cross entropy losses.

    :param pred: predict result
    :param label: gt label
    :param weight: weight
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    loss = F.cross_entropy(pred, label, reduction='none')
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
    return loss


def _expand_binary_labels(labels, label_weights, label_channels):
    """Expand binary labels.

    :param labels: labels
    :param label_weights: label weights
    :param label_channels: label channels
    :return: binary label and label weights
    """
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    """Binary cross entropy loss.

    :param pred: predict result
    :param label: gt label
    :param weight: weight
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight, reduction='none')
    loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
    return loss


def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
    """Mask cross entropy loss.

    :param pred: predict result
    :param target: target
    :param label: gt label
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Weight reduce loss.

    :param loss: losses
    :param weight: weight
    :param reduction: reduce function
    :param avg_factor: avg factor
    :return: loss
    """
    if weight is not None:
        loss = loss * weight
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss


def reduce_loss(loss, reduction):
    """Reduce loss compute.

    :param loss: losses
    :param reduction: reduce funtion
    :return: loss
    """
    reduction_function = F._Reduction.get_enum(reduction)
    if reduction_function == 0:
        return loss
    elif reduction_function == 1:
        return loss.mean()
    elif reduction_function == 2:
        return loss.sum()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 15069, 357, 34, 8, 12131, 13, 43208, 21852, 1766, 1539, 12052, 13, 1439, 2489, 10395, 13, 198, 2, 770, 1430, 318, 1479, 3788, 26, 345, 460, 17678, 4163, 340, ...
2.790332
1,717
""" This module implements the Request class which is used to represent HTTP requests in Scrapy. See documentation in docs/topics/request-response.rst """ from w3lib.url import safe_url_string from scrapy.http.headers import Headers from scrapy.utils.python import to_bytes from scrapy.utils.trackref import object_ref from scrapy.utils.url import escape_ajax from scrapy.http.common import obsolete_setter from scrapy.utils.curl import curl_to_request_kwargs url = property(_get_url, obsolete_setter(_set_url, 'url')) body = property(_get_body, obsolete_setter(_set_body, 'body')) __repr__ = __str__ def copy(self): """Return a copy of this Request""" return self.replace() def replace(self, *args, **kwargs): """Create a new Request with the same attributes except for those given new values. """ for x in ['url', 'method', 'headers', 'body', 'cookies', 'meta', 'flags', 'encoding', 'priority', 'dont_filter', 'callback', 'errback', 'cb_kwargs']: kwargs.setdefault(x, getattr(self, x)) cls = kwargs.pop('cls', self.__class__) return cls(*args, **kwargs)
[ 37811, 198, 1212, 8265, 23986, 262, 19390, 1398, 543, 318, 973, 284, 2380, 14626, 198, 8897, 3558, 287, 1446, 2416, 88, 13, 198, 198, 6214, 10314, 287, 34165, 14, 4852, 873, 14, 25927, 12, 26209, 13, 81, 301, 198, 37811, 198, 6738, ...
2.659142
443