Columns:
  content           string   lengths 1 to 1.05M
  input_ids         list     lengths 1 to 883k
  ratio_char_token  float64  values 1 to 22.9
  token_count       int64    values 1 to 883k
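Each row below pairs a raw source string (content) with its tokenization (input_ids); token_count appears to equal the length of input_ids, and ratio_char_token appears to be the character length of content divided by token_count. A minimal sketch of how these per-row statistics could be recomputed, assuming a GPT-2 style BPE tokenizer (the visible token ids are consistent with GPT-2, but the dump does not name the tokenizer, so this is an assumption, and the sample string is purely illustrative):

# Minimal sketch; the GPT-2 tokenizer choice and the sample string are assumptions.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

content = "from flask import Flask\n\napp = Flask(__name__)\n"  # stand-in for one row's content
input_ids = tokenizer(content)["input_ids"]                      # list of integer token ids

token_count = len(input_ids)                   # token_count column
ratio_char_token = len(content) / token_count  # ratio_char_token column

print(token_count, round(ratio_char_token, 6))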
#!/usr/bin/env python import unittest import sys import os import string import time import socket import fileinput import platform import re try: import subprocess32 as subprocess except: import subprocess import pg """ Global Values """ MYD = os.path.abspath(os.path.dirname(__file__)) mkpath = lambda *x: os.path.join(MYD, *x) UPD = os.path.abspath(mkpath('..')) if UPD not in sys.path: sys.path.append(UPD) DBNAME = "postgres" USER = os.environ.get( "LOGNAME" ) HOST = socket.gethostname() GPHOME = os.getenv("GPHOME") PGPORT = get_port() PGUSER = os.environ.get("PGUSER") if PGUSER is None: PGUSER = USER PGHOST = os.environ.get("PGHOST") if PGHOST is None: PGHOST = HOST d = mkpath('config') if not os.path.exists(d): os.mkdir(d) def psql_run(ifile = None, ofile = None, cmd = None, flag = '-e',dbname = None, username = None, PGOPTIONS = None, host = None, port = None): ''' Run a command or file against psql. Return True if OK. @param dbname: database name @param ifile: input file @param cmd: command line @param flag: -e Run SQL with no comments (default) -a Run SQL with comments and psql notice @param username: psql user @param host : to connect to a different host @param port : port where gpdb is running @param PGOPTIONS: connects to postgres via utility mode ''' if dbname is None: dbname = DBNAME if username is None: username = PGUSER # Use the default login user if PGOPTIONS is None: PGOPTIONS = "" else: PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS if host is None: host = "-h %s" % PGHOST else: host = "-h %s" % host if port is None: port = "" else: port = "-p %s" % port if cmd: arg = '-c "%s"' % cmd elif ifile: arg = ' < ' + ifile if not (flag == '-q'): # Don't echo commands sent to server arg = '-e < ' + ifile if flag == '-a': arg = '-f ' + ifile else: raise PSQLError('missing cmd and ifile') if ofile == '-': ofile = '2>&1' elif not ofile: ofile = '> /dev/null 2>&1' else: ofile = '> %s 2>&1' % ofile return run('%s psql -d %s %s %s -U %s %s %s %s' % (PGOPTIONS, dbname, host, port, username, flag, arg, ofile)) def run(cmd): """ Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise. @params cmd: The command to run at the shell. oFile: an optional output file. mode: What to do if the output file already exists: 'a' = append; 'w' = write. Defaults to append (so that the function is backwards compatible). Yes, this is passed to the open() function, so you can theoretically pass any value that is valid for the second parameter of open(). """ p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0] ret = [] ret.append(out) rc = False if p.wait() else True return (rc,ret) def read_diff(ifile, outputPath): """ Opens the diff file that is assocated with the given input file and returns its contents as a string. """ dfile = diffFile(ifile, outputPath) with open(dfile, 'r') as diff: return diff.read() hostNameAddrs = get_ip(HOST) masterPort = getPortMasterOnly() if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase) runner = unittest.TextTestRunner(verbosity=2) ret = not runner.run(suite).wasSuccessful() sys.exit(ret)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 555, 715, 395, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 4731, 198, 11748, 640, 198, 11748, 17802, 198, 11748, 2393, 15414, 198, 11748, 3859, 198, 11748, 302, 198, 28...
2.304509
1,619
''' "+100""5e2""-123""3.1416""-1E-16""0123""12e""1a3.14""1.2.3""+-5""12e+5.4" LeetCode https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof '''
[ 7061, 6, 198, 1, 10, 3064, 15931, 20, 68, 17, 15931, 12, 10163, 15931, 18, 13, 1415, 1433, 15931, 12, 16, 36, 12, 1433, 15931, 486, 1954, 15931, 1065, 68, 15931, 16, 64, 18, 13, 1415, 15931, 16, 13, 17, 13, 18, 15931, 10, 12, ...
1.613861
101
""" Copyright 2013 Rackspace, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections from teeth_overlord import config from teeth_overlord.networks import neutron from teeth_overlord import tests from keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0 import client as keystone_client from neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id': None } PORT1_RESPONSE = { u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups': [], u'device_id': u'' } PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE = { u'name': u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), 
('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]), ('network', SERIALIZED_NETWORK1) ])
[ 37811, 198, 15269, 2211, 37927, 13200, 11, 3457, 13, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 5832, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198...
1.989324
2,529
from datetime import date

from django.core.cache import cache
from django.db.models import Q, F
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
#from silk.profiling.profiler import silk_profile

from config.models import SideBar
from .models import Post, Tag, Category
from comment.models import Comment

'''
def post_list(request, category_id=None, tag_id=None):
    tag = None
    category = None
    if tag_id:
        post_list, tag = Post.get_by_tag(tag_id)
    elif category_id:
        post_list, category=Post.get_by_category(category_id)
    else:
        post_list = Post.latest_posts()

    context = {
        'category': category,
        'tag': tag,
        'post_list': post_list,
        'sidebars': SideBar.get_all(),
    }
    context.update(Category.get_navs())
    return render(request, 'blog/list.html', context=context)

def post_detail(request, post_id=None):
    try:
        post = Post.objects.get(id=post_id)
    except Post.DoesNotExist:
        raise Http404('Post does not exist!')

    context={
        'post': post,
        'sidebars': SideBar.get_all(),
    }
    context.update(Category.get_navs())
    return render(request, 'blog/detail.html', context=context)
'''
[ 6738, 4818, 8079, 1330, 3128, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 23870, 1330, 12940, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 1195, 11, 376, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14...
2.784404
436
# -*- coding: utf-8 -*-
"""
Created on Sat May  7 11:38:18 2016

@author: thomasbarillot

VMI control
"""

from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')

import VMIcrtl_ext

test=VMIcrtl_ext.VMIcrtl()

#%%
print test.GetFilename()

#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()

#%%
test.StartAcquisitionPrev()

#%%
test.StopAcquisition()

#%%
img=test.RecallImagePrev()

#%%
import numpy as np
print np.shape(img)
a=np.array(img)
print a

#%%
from matplotlib import pyplot as plt

#%%
b=np.reshape(a,[400,400])
print b
plt.figure()
plt.pcolor(np.reshape(a,[400,400]))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 7031, 1737, 220, 767, 1367, 25, 2548, 25, 1507, 1584, 198, 198, 31, 9800, 25, 294, 16911, 5657, 359, 313, 198, 198, 53, 8895, 1630, 198, 37811,...
2.255474
274
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import llnl.util.tty as tty
from spack import *
import spack.architecture
import os
[ 2, 15069, 2211, 12, 42334, 13914, 45036, 3549, 2351, 4765, 11, 11419, 290, 584, 198, 2, 1338, 441, 4935, 34152, 13, 4091, 262, 1353, 12, 5715, 27975, 38162, 9947, 2393, 329, 3307, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, 33234,...
3.275862
87
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Adapted from PyQtGraph

import sys

from . import ptime
from .. import config
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 1946, 11, 6911, 9078, 7712, 4816, 13, 198, 2, 4307, 6169, 739, 262, 357, 3605, 8, 347, 10305, 13789, 13, 4091, 38559, 24290, 13, 14116, 329, 51...
3.097222
72
from django.contrib.auth import get_user_model
from djangosaml2idp.processors import BaseProcessor

User = get_user_model()
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 648, 418, 43695, 17, 312, 79, 13, 14681, 669, 1330, 7308, 18709, 273, 198, 198, 12982, 796, 651, 62, 7220, 62, 19849, 3419, 628 ]
3.04878
41
if __name__ == '__main__':
    list = SingleLinkedList();
    list.add(5)
    list.add(4)
    list.add(12)
    list.add(13)
    list.add(19)
    list.print_list();
    print("List contains element 4", list.contains(4))
    print("List contains element 6", list.contains(6))
    print("Removing element 13", list.remove(13))
    list.print_list();
    print("List contains element 13", list.contains(13))
[ 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1351, 796, 14206, 11280, 276, 8053, 9783, 198, 220, 220, 220, 1351, 13, 2860, 7, 20, 8, 198, 220, 220, 220, 1351, 13, 2860, 7, 19, 8, 198, 2...
2.466667
165
__all__ = ["data_setup", "chart_params", "base_params"]
[ 834, 439, 834, 796, 14631, 7890, 62, 40406, 1600, 366, 40926, 62, 37266, 1600, 366, 8692, 62, 37266, 8973 ]
2.894737
19
from datetime import datetime
from typing import Any, List, Optional, Union

from pydantic import BaseModel, Field, HttpUrl, validator
from pydantic.dataclasses import dataclass


class Result(BaseModel):
    url_key: str = Field(alias="urlkey")
    timestamp: datetime
    url: str
    mime: str
    mime_detected: str = Field(alias="mime-detected")
    status: int
    digest: str
    length: int
    offset: int
    filename: str
    languages: Optional[str]
    encoding: Optional[str]
    index_id: Optional[str]
    body: Optional[ResultBody]
    meta: Optional[ResultMeta]
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 19720, 1330, 4377, 11, 7343, 11, 32233, 11, 4479, 198, 198, 6738, 279, 5173, 5109, 1330, 7308, 17633, 11, 7663, 11, 367, 29281, 28165, 11, 4938, 1352, 198, 6738, 279, 5173, 5109, 13, 1960...
2.777251
211
"""Tools for managing OS errors. """ from __future__ import print_function from __future__ import unicode_literals import errno from contextlib import contextmanager import sys import platform from . import errors from six import reraise _WINDOWS_PLATFORM = platform.system() == 'Windows' # Stops linter complaining about invalid class name convert_os_errors = _ConvertOSErrors
[ 37811, 33637, 329, 11149, 7294, 8563, 13, 198, 37811, 198, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 11454, 3919, 198, 6738, 4732, 8019, 1330, 4...
3.685714
105
#!/usr/bin/python

# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC

# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:

# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.

# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.

# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""

import time
import json

from pysdn.controller.controller import Controller
from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file


if __name__ == "__main__":
    vr_demo_3()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 15069, 357, 66, 8, 1853, 11, 220, 11177, 4503, 19266, 48811, 2149, 18421, 36230, 50, 11, 19387, 198, 198, 2, 1439, 2489, 10395, 13, 198, 198, 2, 2297, 396, 3890, 290, 779, 287, 27...
3.442652
558
# @Author: Stijn Van Hulle <stijnvanhulle>
# @Date:   2016-11-28T13:51:38+01:00
# @Email:  me@stijnvanhulle.be
# @Last modified by:   stijnvanhulle
# @Last modified time: 2016-12-20T12:51:07+01:00
# @License: stijnvanhulle.be

#!/usr/bin/env python

import time
import datetime
import math
import sys
import json

import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish

import lib.faceDetection as faceDetection
import lib.levelCalculation as levelCalculation

MQTT_BROKER="localhost"

client = mqtt.Client()

#classes

if __name__ == '__main__':
    try:
        if len(sys.argv)>1:
            MQTT_BROKER=sys.argv[1]
        else:
            input_text = input("Ip of MQTT-broker: ")
            if input_text:
                MQTT_BROKER=input_text

        #executor = ProcessPoolExecutor(2)
        #loop = trollius.get_event_loop()
        #_main = trollius.async(loop.run_in_executor(executor, main))

        main()
    except (TypeError) as ex:
        error="Error: " + str(ex)
        #print(error)
    except (KeyboardInterrupt):
        exit()
        print("\nIOT is afgesloten\n")
        sys.exit(0)
    except (SystemExit):
        print("\nIOT is geforceert afgelosten\n")
[ 2, 2488, 13838, 25, 520, 48848, 6656, 367, 377, 293, 1279, 301, 48848, 10438, 71, 377, 293, 29, 198, 2, 2488, 10430, 25, 220, 220, 1584, 12, 1157, 12, 2078, 51, 1485, 25, 4349, 25, 2548, 10, 486, 25, 405, 198, 2, 2488, 15333, 25...
2.319829
469
import re import os from bs4 import BeautifulSoup from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files import File from pages.models import Page, Image PEP_TEMPLATE = 'pages/pep-page.html' pep_url = lambda num: 'dev/peps/pep-{}/'.format(num) def check_paths(): """ Checks to ensure our PEP_REPO_PATH is setup correctly """ if not hasattr(settings, 'PEP_REPO_PATH'): raise ImproperlyConfigured("No PEP_REPO_PATH in settings") if not os.path.exists(settings.PEP_REPO_PATH): raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist") def convert_pep0(): """ Take existing generated pep-0000.html and convert to something suitable for a Python.org Page returns the core body HTML necessary only """ check_paths() pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html') pep0_content = open(pep0_path).read() soup = BeautifulSoup(pep0_content) body_children = list(soup.body.children) # Grab header and PEP body header = body_children[3] pep_content = body_children[7] # Fix PEP links body_links = pep_content.find_all("a") pep_href_re = re.compile(r'pep-(\d+)\.html') for b in body_links: m = pep_href_re.search(b.attrs['href']) # Skip anything not matching 'pep-XXXX.html' if not m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) # Remove Version from header header_rows = header.find_all('th') for t in header_rows: if 'Version:' in t.text and 'N/A' in t.next_sibling.text: t.parent.extract() return ''.join([header.prettify(), pep_content.prettify()]) def get_pep0_page(commit=True): """ Using convert_pep0 above, create a CMS ready pep0 page and return it pep0 is used as the directory index, but it's also an actual pep, so we return both Page objects. """ pep0_content = convert_pep0() pep0_page, _ = Page.objects.get_or_create(path='dev/peps/') pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/') for page in [pep0_page, pep0000_page]: page.content = pep0_content page.content_markup_type = 'html' page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)" page.template_name = PEP_TEMPLATE if commit: page.save() return pep0_page, pep0000_page def fix_headers(soup, data): """ Remove empty or unwanted headers and find our title """ header_rows = soup.find_all('th') for t in header_rows: if 'Version:' in t.text: if t.next_sibling.text == '$Revision$': t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if 'Last-Modified:' in t.text: if '$Date$'in t.next_sibling.text: t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if t.text == 'Title:': data['title'] = t.next_sibling.text if t.text == 'Content-Type:': t.parent.extract() if 'Version:' in t.text and 'N/A' in t.next_sibling.text: t.parent.extract() return soup, data def convert_pep_page(pep_number, content): """ Handle different formats that pep2html.py outputs """ check_paths() data = { 'title': None, } if '<html>' in content: soup = BeautifulSoup(content) data['title'] = soup.title.text if not re.search(r'PEP \d+', data['title']): data['title'] = 'PEP {} -- {}'.format( pep_number, soup.title.text, ) header = soup.body.find('div', class_="header") header, data = fix_headers(header, data) data['header'] = header.prettify() main_content = soup.body.find('div', class_="content") data['main_content'] = main_content.prettify() data['content'] = ''.join([ data['header'], data['main_content'] ]) else: soup = BeautifulSoup(content) soup, data = fix_headers(soup, data) if not data['title']: data['title'] = "PEP {} -- 
".format(pep_number) else: if not re.search(r'PEP \d+', data['title']): data['title'] = "PEP {} -- {}".format( pep_number, data['title'], ) data['content'] = soup.prettify() # Fix PEP links pep_content = BeautifulSoup(data['content']) body_links = pep_content.find_all("a") pep_href_re = re.compile(r'pep-(\d+)\.html') for b in body_links: m = pep_href_re.search(b.attrs['href']) # Skip anything not matching 'pep-XXXX.html' if not m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) data['content'] = pep_content.prettify() hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number) data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link) return data def get_pep_page(pep_number, commit=True): """ Given a pep_number retrieve original PEP source text, rst, or html. Get or create the associated Page and return it """ pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number)) if not os.path.exists(pep_path): print("PEP Path '{}' does not exist, skipping".format(pep_path)) pep_content = convert_pep_page(pep_number, open(pep_path).read()) pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number)) # Remove leading zeros from PEP number for display purposes pep_number_string = str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string) pep_page.title = pep_content['title'] pep_page.content = pep_content['content'] pep_page.content_markup_type = 'html' pep_page.template_name = PEP_TEMPLATE if commit: pep_page.save() return pep_page
[ 11748, 302, 198, 11748, 28686, 198, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 12205, 525, 306, 16934, 1522, 198, 6738, ...
2.201369
2,776
from .core import EqualityHashKey, unzip
from .parallel import fold
[ 6738, 764, 7295, 1330, 31428, 26257, 9218, 11, 555, 13344, 198, 6738, 764, 1845, 29363, 1330, 5591, 198 ]
3.777778
18
from flask import Flask

app = Flask(__name__, static_folder='static')

from app import routes
[ 6738, 42903, 1330, 46947, 198, 198, 1324, 796, 46947, 7, 834, 3672, 834, 11, 9037, 62, 43551, 11639, 12708, 11537, 198, 198, 6738, 598, 1330, 11926, 198 ]
3.518519
27
import os import pysatl from pysatl import CAPDU if __name__ == "__main__": #check __repr__ expected = "pysatl.CAPDU.from_hexstr('00112233015502')" capdu=None exec("capdu="+expected) assert(expected==repr(capdu)) #check well formed inputs check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) #check we tolerate less well formed inputs check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("""0x00 0x11 0x22 0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33)) check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04)) LC_cases = [0,1,2,254,255,256,257,65534,65535] LE_cases = LC_cases + [65536] for LC in LC_cases: for LE in LE_cases: print(LC,LE) check(*gencase(LC=LC, LE=LE))
[ 11748, 28686, 198, 198, 11748, 279, 893, 25864, 198, 6738, 279, 893, 25864, 1330, 20176, 35, 52, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 628, 198, 220, 220, 220, 1303, 9122, 11593, 260, 1050, 834, 198, 220, ...
1.865125
519
#!/usr/bin/python from math import sin, cos, tan, atan, pi, acos, sqrt, exp, log10 import sys, os import copy import random import numpy as np import multiprocessing as mp import ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from EnvGlobals import Globals import mgls_io import mgls_mc from mgls_lib import * #definitions and constants to_radians = pi/180.0 to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): """executes n_runs instances of MGLS for with previous data shuffle """ cpu_periodogram = list() for iter in range(n_runs): """ #shuffle RV's and their errors. Repetition is not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) """ #allowing repetition rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv = rv Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period determination (highest power) return cpu_periodogram def fap(bootstrapping_stats, pwr): """returns FAP for a given pwr. i.e. how many realizations overcome a given power, over unit. """ return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): """determines which power a FAP of 1, 0.1, 0.01 % is reached """ FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in % n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): """ """ n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() """ except ZeroDivisionError: print "Error: Zero division error. Restarted parallel bootstapping" """ #join the output bunches out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): """ """ n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join the output bunches out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): """ """ #n_bootstrapping = 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print "\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%" print "FAP Levels:", fap_levels(bootstrapping_stats) print "Total bootstapping samples: ", len(bootstrapping_stats) return bootstrapping_stats
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 6738, 10688, 1330, 7813, 11, 8615, 11, 25706, 11, 379, 272, 11, 31028, 11, 936, 418, 11, 19862, 17034, 11, 1033, 11, 2604, 940, 198, 11748, 25064, 11, 28686, 198, 11748, 4866, 198, 11748, 4...
2.285633
1,747
#! /usr/bin/env python3
"""
    constants.py - Contains all constants used by the device manager
    Author:
        - Pablo Caruana (pablo dot caruana at gmail dot com)
    Date: 12/3/2016
"""

number_of_rows = 3     # total number rows of Index Servers
number_of_links = 5    # number of links to be sent to Crawler
number_of_chunks = 5   # number of chunks to be sent to Index Builder
number_of_comps = 10   # number of components managed by each watchdog
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 198, 220, 220, 220, 38491, 13, 9078, 532, 49850, 477, 38491, 973, 416, 262, 3335, 4706, 198, 220, 220, 220, 6434, 25, 198, 220, 220, 220, 220, 220, 220, 220, 532, 3318...
2.605263
190
import cv2
import numpy as np

# This config is found by the author
# modify if not the desired output
XDoG_config = dict(
    size=0,
    sigma=0.6,
    eps=-15,
    phi=10e8,
    k=2.5,
    gamma=0.97
)

if __name__ == "__main__":
    gen_xdog_image('sample.jpg', 'dog.jpg')
[ 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, 770, 4566, 318, 1043, 416, 262, 1772, 198, 2, 13096, 611, 407, 262, 10348, 5072, 198, 55, 5211, 38, 62, 11250, 796, 8633, 7, 198, 220, 220, 220, 2546, 28, ...
2.130769
130
# Original work Copyright 2018 The Google AI Language Team Authors. # Modified work Copyright 2019 Rowan Zellers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from lm.modeling import model_fn_builder, GroverConfig import tensorflow as tf from lm.dataloader import input_fn_builder import numpy as np import tempfile import h5py from google.cloud import storage flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "config_file", 'configs/base.json', "The config json file corresponding to the pre-trained news model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string( "validation_name", 'preds.h5', "Name to use") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained model).") flags.DEFINE_integer( "max_seq_length", 1024, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("batch_size", 32, "Batch size used for eval") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") # This is a handy little utility so that we can save the perplexities to TPU def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1): """ :param array: Single dimension array :param target: target to search for :param return_first_match: If true, return the first index that matches, otherwise, return the last one :param default_value: Index to return if there was no match :return: index of the first match, or -1 if nothing """ assert array.ndim == 1 matching_inds = np.where(array == target)[0] if len(matching_inds) > 0: if return_first_match: return int(matching_inds[0]) else: return int(matching_inds[-1]) return default_value if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("output_dir") tf.app.run()
[ 2, 13745, 670, 15069, 2864, 383, 3012, 9552, 15417, 4816, 46665, 13, 198, 2, 40499, 670, 15069, 13130, 11314, 272, 1168, 695, 364, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, ...
3.020701
1,256
import logging import george import numpy as np from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer="random", model="gp_mcmc", n_init=3, output_path=None, rng=None): """ Entropy search for global black box optimization problems. This is a reimplemenation of the entropy search algorithm by Henning and Schuler[1]. [1] Entropy search for information-efficient global optimization. P. Hennig and C. Schuler. JMLR, (1), 2012. Parameters ---------- objective_function: function The objective function that is minimized. This function gets a numpy array (D,) as input and returns the function value (scalar) lower: np.ndarray (D,) The lower bound of the search space upper: np.ndarray (D,) The upper bound of the search space num_iterations: int The number of iterations (initial design + BO) maximizer: {"random", "scipy", "differential_evolution"} Defines how the acquisition function is maximized. model: {"gp", "gp_mcmc"} The model for the objective function. n_init: int Number of points for the initial design. Make sure that it is <= num_iterations. output_path: string Specifies the path where the intermediate output after each iteration will be saved. If None no output will be saved to disk. rng: numpy.random.RandomState Random number generator Returns ------- dict with all results """ assert upper.shape[0] == lower.shape[0], "Dimension miss match" assert np.all(lower < upper), "Lower bound >= upper bound" assert n_init <= num_iterations, "Number of initial design point has to be <= than the number of iterations" if rng is None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims = lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp * exp_kernel prior = DefaultPrior(len(kernel) + 1) n_hypers = 3 * len(kernel) if n_hypers % 2 == 1: n_hypers += 1 if model == "gp": gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model == "gp_mcmc": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else: print("ERROR: %s is not a valid model!" 
% model) return a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model == "gp": acquisition_func = a elif model == "gp_mcmc": acquisition_func = MarginalizationGPMCMC(a) if maximizer == "random": max_func = RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer == "scipy": max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer == "differential_evolution": max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer) return bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations) results = dict() results["x_opt"] = x_best results["f_opt"] = f_min results["incumbents"] = [inc for inc in bo.incumbents] results["incumbent_values"] = [val for val in bo.incumbents_values] results["runtime"] = bo.runtime results["overhead"] = bo.time_overhead results["X"] = [x.tolist() for x in bo.X] results["y"] = [y for y in bo.y] return results
[ 11748, 18931, 198, 11748, 4903, 3643, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 686, 2127, 13, 3448, 669, 13, 12286, 62, 3448, 669, 1330, 15161, 22442, 198, 6738, 686, 2127, 13, 27530, 13, 4908, 31562, 62, 14681, 1330, 12822, ...
2.407143
2,100
# Generated by Django 3.1.13 on 2021-10-29 11:07

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 1485, 319, 33448, 12, 940, 12, 1959, 1367, 25, 2998, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.875
32
from app.api.utils.models_mixins import Base
from app.extensions import db
[ 6738, 598, 13, 15042, 13, 26791, 13, 27530, 62, 19816, 1040, 1330, 7308, 198, 6738, 598, 13, 2302, 5736, 1330, 20613, 628 ]
3.454545
22
from enum import Enum
[ 6738, 33829, 1330, 2039, 388, 628, 628 ]
3.571429
7
import sys
from com.bridgelabz.utility.Utility import Utility

PowerOf2().start()
[ 11748, 25064, 198, 6738, 401, 13, 10236, 25280, 397, 89, 13, 315, 879, 13, 18274, 879, 1330, 34030, 198, 198, 13434, 5189, 17, 22446, 9688, 3419 ]
3.115385
26
# This program scraps data from job postings on the website workinstartups.com and appends it to an excel worksheet. import os from datetime import datetime, timedelta from selenium import webdriver from app import web_scraper from app import excel job_list, last_date = [], None file_path = os.path.abspath("main.py").rstrip('/app/main.py') + '//Workbooks' + "//Job_Openings.xlsx" print("-" * 75, "-" * 75, "\n\t\t\t\t\t\t\t JOB WEB SCRAPER", "-" * 75, "-" * 75, sep="\n") print("\n") # If the Job_Openings workbook already exists then append the jobs not already in the worksheet # by checking the date of the first job in excel, since the last time the site was scraped. if os.path.isfile(file_path): print("Job_Opening excel file already exists. Loading workbook.", "-" * 75, sep="\n") workbook, worksheet = excel.load_xlsx(file_path) last_scrape_date = excel.get_first_job_date(worksheet) last_scrape_date = datetime.strptime(last_scrape_date, "%d-%b-%Y") # If not, create a new workbook and append all of the jobs posted within the month else: print("Creating new Excel workbook.", "-" * 75, sep="\n") current_date = datetime.today() date_month_ago = current_date - timedelta(weeks=4.348) # Average amount of weeks in a month last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight workbook, worksheet = excel.init_xlsx(worksheet_title="Job Openings") # Open webdriver to workinstartups.com and create soup print("Creating soup and opening Chrome webdriver", "-"*75, sep="\n") URL = "https://workinstartups.com/job-board/jobs-in/london" soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0) driver = webdriver.Chrome('./chromedriver') driver.get(URL) driver.find_element_by_link_text('Close').click() # Scrap the jobs from workinstartups.com and update the worksheet with the found jobs print("Scraping jobs from workinstartups.com. Please wait.", "-" * 75, sep="\n") job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver) print("Scraping finished. Updating and saving Excel workbook.", "-" * 75, sep="\n") driver.close() excel.update_xlsx(worksheet, job_list) excel.save_xlsx(workbook, file_path) print("Finished!", sep="\n")
[ 2, 770, 1430, 44496, 1366, 422, 1693, 44656, 319, 262, 3052, 670, 8625, 433, 4739, 13, 785, 290, 598, 2412, 340, 284, 281, 27336, 2499, 25473, 13, 198, 198, 11748, 28686, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198,...
2.848485
792
#!/usr/bin/python2 # # Copyright (c) 2012 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # """ Some common boilerplates and helper functions for source code generation in files dgen_test_output.py and dgen_decode_output.py. """ HEADER_BOILERPLATE ="""/* * Copyright 2013 The Native Client Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can * be found in the LICENSE file. */ // DO NOT EDIT: GENERATED CODE """ NOT_TCB_BOILERPLATE="""#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is not meant for use in the TCB #endif """ NEWLINE_STR=""" """ COMMENTED_NEWLINE_STR=""" //""" """Adds comment '// ' string after newlines.""" def ifdef_name(filename): """ Generates the ifdef name to use for the given filename""" return filename.replace("/", "_").replace(".", "_").upper() + "_" def GetNumberCodeBlocks(separators): """Gets the number of code blocks to break classes into.""" num_blocks = len(separators) + 1 assert num_blocks >= 2 return num_blocks def FindBlockIndex(filename, format, num_blocks): """Returns true if the filename matches the format with an index in the range [1, num_blocks].""" for block in range(1, num_blocks+1): suffix = format % block if filename.endswith(suffix): return block raise Exception("Can't find block index: %s" % filename) def GetDecodersBlock(n, separators, decoders, name_fcn): """Returns the (sorted) list of decoders to include in block n, assuming decoders are split using the list of separators.""" num_blocks = GetNumberCodeBlocks(separators) assert n > 0 and n <= num_blocks return [decoder for decoder in decoders if ((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n == num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): """Returns true if the prefix is less than or equal to the corresponding prefix length of the decoder name.""" decoder_name = name_fcn(decoder) prefix_len = len(prefix) decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len < decoder_len else decoder_name) return prefix <= decoder_prefix
[ 2, 48443, 14629, 14, 8800, 14, 29412, 17, 198, 2, 198, 2, 15069, 357, 66, 8, 2321, 383, 12547, 20985, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460...
2.830035
859
""" Functions for loading input data. Author: Patrick Henriksen <patrick@henriksen.as> """ import os import numpy as np def load_img(path: str, img_nums: list, shape: tuple) -> np.array: """ Loads a image in the human-readable format. Args: path: The path to the to the folder with mnist images. img_nums: A list with the numbers of the images we want to load. shape: The shape of a single image. Returns: The images as a MxCx28x28 numpy array. """ images = np.zeros((len(img_nums), *shape), dtype=float) for idx, i in enumerate(img_nums): file = os.path.join(path, "image" + str(i)) with open(file, "r") as f: data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str, img_nums: list) -> np.array: """ Loads a mnist image from the neurify dataset. Args: path: The path to the to the folder with mnist images. img_nums: A list with the numbers of the images we want to load. Returns: The images as a Mx28x28 numpy array. """ return load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path: str, img_nums: list) -> np.array: """ Loads the Cifar10 images in human readable format. Args: path: The path to the to the folder with mnist images. img_nums: A list with the numbers of the images we want to load. Returns: The images as a Mx3x32x32 numpy array. """ return load_img(path, img_nums, (3, 32, 32)) def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100, image_shape: tuple = (3, 32, 32)) -> tuple: """ Loads the images from the eran csv. Args: The csv path Returns: images, targets """ num_images = 100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with open(img_csv, "r") as file: for j in range(num_images): line_arr = file.readline().split(",") targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel) for pixel in line_arr[1:]] return images_array.reshape((num_images, *image_shape)), targets_array
[ 198, 37811, 198, 24629, 2733, 329, 11046, 5128, 1366, 13, 198, 198, 13838, 25, 9925, 6752, 39370, 268, 1279, 29615, 31, 831, 39370, 268, 13, 292, 29, 198, 37811, 198, 198, 11748, 28686, 198, 198, 11748, 299, 32152, 355, 45941, 628, 19...
2.24843
1,115
# -*- coding: utf-8 -*-

################################################################################
## Form generated from reading UI file 'splash_screen.ui'
##
## Created by: Qt User Interface Compiler version 5.15.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################

from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 29113, 29113, 14468, 198, 2235, 5178, 7560, 422, 3555, 12454, 2393, 705, 22018, 1077, 62, 9612, 13, 9019, 6, 198, 2235, 198, 2235, 15622, 416, 25, 33734, 11787, 264...
4.146552
116
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    # @param {TreeNode} root
    # @param {integer} sum
    # @return {boolean}
    def hasPathSum(self, root, sum):
        if not root:
            return False
        if not root.right and not root.left:
            return sum == root.val
        r = False
        l = False
        if root.right:
            r = self.hasPathSum(root.right,sum-root.val)
        if root.left:
            l = self.hasPathSum(root.left,sum-root.val)
        return r or l
[ 2, 30396, 329, 257, 13934, 5509, 10139, 13, 201, 2, 1398, 12200, 19667, 25, 201, 2, 220, 220, 220, 220, 825, 11593, 15003, 834, 7, 944, 11, 2124, 2599, 201, 2, 220, 220, 220, 220, 220, 220, 220, 220, 2116, 13, 2100, 796, 2124, 2...
2.058442
308
""" Collection of query wrappers / abstractions to both facilitate data retrieval and to reduce dependency on DB-specific API. """ from __future__ import print_function, division from datetime import datetime, date, timedelta import warnings import traceback import itertools import re import numpy as np import pandas.core.common as com from pandas.compat import lzip, map, zip, raise_with_traceback, string_types from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from pandas.tseries.tools import to_datetime #------------------------------------------------------------------------------ # Helper functions def _convert_params(sql, params): """convert sql and params args to DBAPI2.0 compliant format""" args = [sql] if params is not None: if hasattr(params, 'keys'): # test if params is a mapping args += [params] else: args += [list(params)] return args def _handle_date_column(col, format=None): if isinstance(format, dict): return to_datetime(col, **format) else: if format in ['D', 's', 'ms', 'us', 'ns']: return to_datetime(col, coerce=True, unit=format) elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer): # parse dates as timestamp format = 's' if format is None else format return to_datetime(col, coerce=True, unit=format) else: return to_datetime(col, coerce=True, format=format) def _parse_date_columns(data_frame, parse_dates): """ Force non-datetime columns to be read as such. Supports both string formatted and integer timestamp columns """ # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] if not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] for col_name in parse_dates: df_col = data_frame[col_name] try: fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. Parameters ---------- sql : string Query to be executed con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. cur : depreciated, cursor is obtained from connection params : list or tuple, optional List of parameters to pass to execute method. Returns ------- Results Iterable """ if cur is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated tquery and uquery def tquery(sql, con=None, cur=None, retry=True): """ DEPRECATED. Returns list of tuples corresponding to each row in given sql query. If only one column selected, then plain list is returned. To obtain the same result in the future, you can use the following: >>> execute(sql, con, params).fetchall() Parameters ---------- sql: string SQL query to be executed con: DBAPI2 connection cur: depreciated, cursor is obtained from connection Returns ------- Results Iterable """ warnings.warn( "tquery is depreciated, and will be removed in future versions. 
" "You can use ``execute(...).fetchall()`` instead.", FutureWarning) cur = execute(sql, con, cur=cur) result = _safe_fetch(cur) if con is not None: try: cur.close() con.commit() except Exception as e: excName = e.__class__.__name__ if excName == 'OperationalError': # pragma: no cover print('Failed to commit, may need to restart interpreter') else: raise traceback.print_exc() if retry: return tquery(sql, con=con, retry=False) if result and len(result[0]) == 1: # python 3 compat result = list(lzip(*result)[0]) elif result is None: # pragma: no cover result = [] return result def uquery(sql, con=None, cur=None, retry=True, params=None): """ DEPRECATED. Does the same thing as tquery, but instead of returning results, it returns the number of rows affected. Good for update queries. To obtain the same result in the future, you can use the following: >>> execute(sql, con).rowcount Parameters ---------- sql: string SQL query to be executed con: DBAPI2 connection cur: depreciated, cursor is obtained from connection params: list or tuple, optional List of parameters to pass to execute method. Returns ------- Number of affected rows """ warnings.warn( "uquery is depreciated, and will be removed in future versions. " "You can use ``execute(...).rowcount`` instead.", FutureWarning) cur = execute(sql, con, cur=cur, params=params) result = cur.rowcount try: con.commit() except Exception as e: excName = e.__class__.__name__ if excName != 'OperationalError': raise traceback.print_exc() if retry: print('Looks like your connection failed, reconnecting...') return uquery(sql, con, retry=False) return result #------------------------------------------------------------------------------ #--- Read and write to DataFrames def read_sql_table(table_name, con, index_col=None, coerce_float=True, parse_dates=None, columns=None): """Read SQL database table into a DataFrame. Given a table name and an SQLAlchemy engine, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : string Name of SQL table in database con : SQLAlchemy engine Sqlite DBAPI conncection mode not supported index_col : string, optional Column to set as index coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite columns : list List of column names to select from sql table Returns ------- DataFrame See also -------- read_sql_query : Read SQL query into a DataFrame. read_sql """ pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) if table is not None: return table else: raise ValueError("Table %s not found" % table_name, con) def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None): """Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. 
Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- sql : string SQL query to be executed con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite Returns ------- DataFrame See also -------- read_sql_table : Read SQL database table into a DataFrame read_sql """ pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None): """ Read SQL query or database table into a DataFrame. Parameters ---------- sql : string SQL query to be executed or database table name. con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite columns : list List of column names to select from sql table (only used when reading a table). Returns ------- DataFrame Notes ----- This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (and for backward compatibility) and will delegate to the specific function depending on the provided input (database table name or sql query). 
See also -------- read_sql_table : Read SQL database table into a DataFrame read_sql_query : Read SQL query into a DataFrame """ pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, PandasSQLLegacy): return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) else: return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=None): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. """ if if_exists not in ('fail', 'replace', 'append'): raise ValueError("'{0}' is not valid for if_exists".format(if_exists)) pandas_sql = pandasSQL_builder(con, flavor=flavor) if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name, con, flavor='sqlite'): """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table con: SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. flavor: {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. Returns ------- boolean """ pandas_sql = pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) table_exists = has_table _MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated " "and will be removed in future versions. " "MySQL will be further supported with SQLAlchemy engines.") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters """ # When support for DBAPI connections is removed, # is_cursor should not be necessary. 
try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) # ---- SQL without SQLAlchemy --- # Flavour specific sql strings and handler class for access to DBs without # SQLAlchemy installed # SQL type convertions for each DB _SQL_TYPES = { 'text': { 'mysql': 'VARCHAR (63)', 'sqlite': 'TEXT', }, 'float': { 'mysql': 'FLOAT', 'sqlite': 'REAL', }, 'int': { 'mysql': 'BIGINT', 'sqlite': 'INTEGER', }, 'datetime': { 'mysql': 'DATETIME', 'sqlite': 'TIMESTAMP', }, 'date': { 'mysql': 'DATE', 'sqlite': 'TIMESTAMP', }, 'bool': { 'mysql': 'BOOLEAN', 'sqlite': 'INTEGER', } } # SQL enquote and wildcard symbols _SQL_SYMB = { 'mysql': { 'br_l': '`', 'br_r': '`', 'wld': '%s' }, 'sqlite': { 'br_l': '[', 'br_r': ']', 'wld': '?' } } _SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. " "In pandas versions < 0.14, spaces were converted to " "underscores.") def get_schema(frame, name, flavor='sqlite', keys=None, con=None): """ Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : string name of SQL table flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. keys : string or sequence columns to use a primary key con: an open SQL database connection object or an SQLAlchemy engine Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. """ if con is None: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name, flavor, keys) pandas_sql = pandasSQL_builder(con=con, flavor=flavor) return pandas_sql._create_sql_schema(frame, name) def _get_schema_legacy(frame, name, flavor, keys=None): """Old function from 0.13.1. To keep backwards compatibility. When mysql legacy support is dropped, it should be possible to remove this code """ lookup_type = lambda dtype: get_sqltype(dtype, flavor) column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns = ',\n '.join('[%s] %s' % x for x in column_types) else: columns = ',\n '.join('`%s` %s' % x for x in column_types) keystr = '' if keys is not None: if isinstance(keys, string_types): keys = (keys,) keystr = ', PRIMARY KEY (%s)' % ','.join(keys) template = """CREATE TABLE %(name)s ( %(columns)s %(keystr)s );""" create_statement = template % {'name': name, 'columns': columns, 'keystr': keystr} return create_statement # legacy names, with depreciation warnings and copied docs def read_frame(*args, **kwargs): """DEPRECIATED - use read_sql """ warnings.warn("read_frame is depreciated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): """DEPRECIATED - use read_sql """ warnings.warn("frame_query is depreciated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): """DEPRECIATED - use to_sql Write records stored in a DataFrame to a SQL database. 
Parameters ---------- frame : DataFrame name : string con : DBAPI2 connection flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default False Write DataFrame index as a column Notes ----- This function is deprecated in favor of ``to_sql``. There are however two differences: - With ``to_sql`` the index is written to the sql database by default. To keep the behaviour this function you need to specify ``index=False``. - The new ``to_sql`` function supports sqlalchemy engines to work with different sql flavors. See also -------- pandas.DataFrame.to_sql """ warnings.warn("write_frame is depreciated, use to_sql", FutureWarning) # for backwards compatibility, set index=False when not specified index = kwargs.pop('index', False) return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists, index=index, **kwargs) # Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ frame_query.__doc__ += read_sql.__doc__
[ 37811, 198, 36307, 286, 12405, 7917, 11799, 1220, 12531, 507, 284, 1111, 15570, 1366, 198, 1186, 380, 18206, 290, 284, 4646, 20203, 319, 20137, 12, 11423, 7824, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 729...
2.621936
7,996
"""Meta is a module contains objects that will customize the behavior of python.""" from abc import ABC from abc import ABCMeta from abc import abstractmethod from typing import Any from typing import Callable import Systerm # Metaclass # Object class # List class # Dictionary class # Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: """Gets the namespaces of an object.""" return object.__namespaces__ def get_magics(object: Object) -> Dictionary: """Gets the magic methods of an object.""" return object.__magics__ def get_attributes(object: Object) -> Dictionary: """Gets the attributes of an object.""" return object.__attributes__ def get_publics(object: Object) -> Dictionary: """Gets the public namespaces of an object.""" return object.__publics__ def get_privates(object: Object) -> Dictionary: """Gets the private namespaces of an object.""" return object.__privates__ def get_protecteds(object: Object) -> Dictionary: """Gets the protected namespaces of an object.""" return object.__protecteds__ # Initializing Systerm.module from Systerm._setup import init_module module = init_module() # MetaMod class module.modules[__name__].__class__ = MetaMod
[ 37811, 48526, 318, 257, 8265, 4909, 5563, 326, 481, 24184, 262, 4069, 286, 21015, 526, 15931, 198, 6738, 450, 66, 1330, 9738, 198, 6738, 450, 66, 1330, 9738, 48526, 198, 6738, 450, 66, 1330, 12531, 24396, 198, 6738, 19720, 1330, 4377, ...
3.201439
417
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import infra.e2e_args
import infra.ccf
import infra.jsonrpc

import logging
from time import gmtime, strftime
import csv
import random

from loguru import logger as LOG


if __name__ == "__main__":
    args = infra.e2e_args.cli_args(add)
    args.package = args.app_script and "libluageneric" or "liblogging"
    run(args)
[ 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 24843, 362, 13, 15, 13789, 13, 198, 11748, 1167, 430, 13, 68, 17, 68, 62, 22046, 198, 11748, 1167, 430, 13, 535, 69, 198, 11748, 1167, 430, 1...
2.885135
148
import os import re import glob import logging import textwrap import fileinput import numpy as np from finmag.energies import Zeeman from finmag.util.helpers import norm log = logging.getLogger(name="finmag") def hysteresis(sim, H_ext_list, fun=None, **kwargs): """ Set the applied field to the first value in `H_ext_list` (which should be a list of external field vectors) and then call the relax() method. When convergence is reached, the field is changed to the next one in H_ext_list, and so on until all values in H_ext_list are exhausted. Note: The fields in H_ext_list are applied *in addition to* any Zeeman interactions that are already present in the simulation. In particular, if only one external field should be present then do not add any Zeeman interactions before calling this method. If you would like to perform a certain action (e.g. save a VTK snapshot of the magnetisation) at the end of each relaxation stage, use the sim.schedule() command with the directive 'at_end=True' as in the following example: sim.schedule('save_vtk', at_end=True, ...) sim.hysteresis(...) *Arguments* H_ext_list: list of 3-vectors List of external fields, where each field can have any of the forms accepted by Zeeman.__init__() (see its docstring for more details). fun: callable The user can pass a function here (which should accept the Simulation object as its only argument); this function is called after each relaxation and determines the return value (see below). For example, if fun = (lambda sim: sim.m_average[0]) then the return value is a list of values representing the average x-component of the magnetisation at the end of each relaxation. All other keyword arguments are passed on to the relax() method. See its documentation for details. *Return value* If `fun` is not None then the return value is a list containing an accumulation of all the return values of `fun` after each stage. Otherwise the return value is None. """ if H_ext_list == []: return # Add a new Zeeman interaction, initialised to zero. H = Zeeman((0, 0, 0)) sim.add(H) # We keep track of the current stage of the hysteresis loop. cur_stage = 0 num_stages = len(H_ext_list) res = [] try: while True: H_cur = H_ext_list[cur_stage] log.info( "Entering hysteresis stage #{} ({} out of {}). Current field: " "{}".format(cur_stage, cur_stage + 1, num_stages, H_cur)) H.set_value(H_cur) sim.relax(**kwargs) cur_stage += 1 if fun is not None: retval = fun(sim) res.append(retval) log.debug("hysteresis callback function '{}' returned " "value: {}".format(fun.__name__, retval)) except IndexError: log.info("Hysteresis is finished.") log.info("Removing the applied field used for hysteresis.") sim.remove_interaction(H.name) return res or None def hysteresis_loop(sim, H_max, direction, N, **kwargs): """ Compute a hysteresis loop. This is a specialised convenience version of the more general `hysteresis` method. It computes a hysteresis loop where the external field is applied along a single axis and changes magnitude from +H_max to -H_max and back (using N steps in each direction). The return value is a pair (H_vals, m_vals), where H_vals is the list of field strengths at which a relaxation is performed and m_vals is a list of scalar values containing, for each field value, the averaged value of the magnetisation along the axis `direction` (after relaxation has been reached). Thus the command plot(H_vals, m_vals) could be used to plot the hysteresis loop. 
direction -- a vector indicating the direction of the external field (will be normalised automatically) H_max -- maximum field strength N -- number of data points to compute in each direction (thus the total number of data points for the entire loop will be 2*N-1) kwargs -- any keyword argument accepted by the hysteresis() method """ d = np.array(direction) H_dir = d / norm(d) H_norms = list(np.linspace(H_max, -H_max, N)) + \ list(np.linspace(-H_max, H_max, N)) H_vals = [h * H_dir for h in H_norms] m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs) # projected lengths of the averaged magnetisation values along the axis # `H_dir` m_vals = [np.dot(m, H_dir) for m in m_avg] return (H_norms, m_vals)
[ 11748, 28686, 198, 11748, 302, 198, 11748, 15095, 198, 11748, 18931, 198, 11748, 2420, 37150, 198, 11748, 2393, 15414, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 957, 19726, 13, 877, 70, 444, 1330, 9033, 8463, 198, 6738, 957, 19726, ...
2.600317
1,894
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import QtGui, QtCore


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
[ 201, 198, 201, 198, 6738, 9485, 48, 83, 20, 1330, 33734, 14055, 11, 33734, 8205, 72, 11, 33734, 54, 312, 11407, 201, 198, 6738, 9485, 48, 83, 20, 1330, 33734, 8205, 72, 11, 33734, 14055, 201, 198, 201, 198, 201, 198, 361, 11593, 3...
2.126667
150
# Generated by Django 3.0.2 on 2020-03-17 08:44

from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 17, 319, 12131, 12, 3070, 12, 1558, 8487, 25, 2598, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Controllers for miscellaneous services."""

__author__ = 'Tarashish Mishra'

import base64
import json

from core.controllers import base
[ 2, 15069, 2321, 3012, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, ...
3.77551
196
# Copyright (c) Microsoft Corporation # # All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # compat imports from __future__ import ( absolute_import, division, print_function, unicode_literals ) from builtins import ( # noqa bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip) # stdlib imports import base64 import collections import getpass import logging import os try: import pathlib2 as pathlib except ImportError: import pathlib import tempfile import stat import subprocess # local imports from . import settings from . import util # create logger logger = logging.getLogger(__name__) util.setup_logger(logger) # global defines _SSH_KEY_PREFIX = 'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX) # named tuples PfxSettings = collections.namedtuple( 'PfxSettings', ['filename', 'passphrase', 'sha1']) def get_ssh_key_prefix(): # type: (None) -> str """Get SSH key prefix :rtype: str :return: ssh key prefix """ return _SSH_KEY_PREFIX def get_remotefs_ssh_key_prefix(): # type: (None) -> str """Get remote fs SSH key prefix :rtype: str :return: ssh key prefix for remote fs """ return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): # type: (None) -> str """Generate an RDP password :rtype: str :return: rdp password """ return base64.b64encode(os.urandom(8)) def generate_ssh_keypair(export_path, prefix=None): # type: (str, str) -> tuple """Generate an ssh keypair for use with user logins :param str export_path: keypair export path :param str prefix: key prefix :rtype: tuple :return: (private key filename, public key filename) """ if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix) pubkey = pathlib.Path(export_path, prefix + '.pub') if privkey.exists(): old = pathlib.Path(export_path, prefix + '.old') if old.exists(): old.unlink() privkey.rename(old) if pubkey.exists(): old = pathlib.Path(export_path, prefix + '.pub.old') if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key pair to path: {}'.format(export_path)) subprocess.check_call( ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', '''''']) return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) -> bool """Check SSH private key filemode :param pathlib.Path ssh_private_key: SSH private key :rtype: bool :return: private key filemode is ok """ if util.on_windows(): return True fstat = 
ssh_private_key.stat().st_mode modes = frozenset((stat.S_IRWXG, stat.S_IRWXO)) return not any([_mode_check(fstat, x) for x in modes]) def connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key, username, sync=True, shell=False, tty=False, ssh_args=None, command=None): # type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool """Connect to node via SSH or execute SSH command :param str remote_ip: remote ip address :param int remote_port: remote port :param pathlib.Path ssh_private_key: SSH private key :param str username: username :param bool sync: synchronous execution :param bool shell: execute with shell :param bool tty: allocate pseudo-tty :param tuple ssh_args: ssh args :param tuple command: command :rtype: int or subprocess.Process :return: return code or subprocess handle """ if not ssh_private_key.exists(): raise RuntimeError('SSH private key file not found at: {}'.format( ssh_private_key)) # ensure file mode is set properly for the private key if not check_ssh_private_key_filemode(ssh_private_key): logger.warning( 'SSH private key filemode is too permissive: {}'.format( ssh_private_key)) # execute SSH command ssh_cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port), ] if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node {}:{} with key {}'.format( 'connecting to' if util.is_none_or_empty(command) else 'executing command on', remote_ip, remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd, shell=shell) else: return util.subprocess_nowait_pipe_stdout( ssh_cmd, shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) -> str """Derive a private key pem file from a pfx :param str pfxfile: pfx file :param str passphrase: passphrase for pfx :param str pemfile: path of pem file to write to :rtype: str :return: path of pem file """ if pfxfile is None: raise ValueError('pfx file is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ') # convert pfx to pem if pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try: # create pem from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return pemfile def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) -> str """Derive a public key pem file from a pfx :param str pfxfile: pfx file :param str passphrase: passphrase for pfx :param str pemfile: path of pem file to write to :rtype: str :return: path of pem file """ if pfxfile is None: raise ValueError('pfx file is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ') # convert pfx to pem if pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try: # create pem from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] ) # extract public key from private key subprocess.check_call( ['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform', 'PEM', '-out', pemfile] ) except 
Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return pemfile def _parse_sha1_thumbprint_openssl(output): # type: (str) -> str """Get SHA1 thumbprint from buffer :param str buffer: buffer to parse :rtype: str :return: sha1 thumbprint of buffer """ # return just thumbprint (without colons) from the above openssl command # in lowercase. Expected openssl output is in the form: # SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type: (str, str) -> str """Get SHA1 thumbprint of PFX :param str pfxfile: name of the pfx file to export :param str passphrase: passphrase for pfx :rtype: str :return: sha1 thumbprint of pfx """ if pfxfile is None: raise ValueError('pfxfile is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ') # compute sha1 thumbprint of pfx pfxdump = subprocess.check_output( ['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin', 'pass:' + passphrase] ) proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0]) def get_sha1_thumbprint_pem(pemfile): # type: (str) -> str """Get SHA1 thumbprint of PEM :param str pfxfile: name of the pfx file to export :rtype: str :return: sha1 thumbprint of pem """ proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config): # type: (dict) -> str """Generate a pem and a derived pfx file :param dict config: configuration dict :rtype: str :return: sha1 thumbprint of pfx """ # gather input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is None: pemfile = util.get_input('Enter public key PEM filename to create: ') if pfxfile is None: pfxfile = util.get_input('Enter PFX filename to create: ') if passphrase is None: while util.is_none_or_empty(passphrase): passphrase = getpass.getpass('Enter password for PFX: ') if len(passphrase) == 0: print('passphrase cannot be empty') privatekey = pemfile + '.key' # generate pem file with private key and no password f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try: subprocess.check_call( ['openssl', 'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public key from private key subprocess.check_call( ['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform', 'PEM', '-out', pemfile] ) logger.debug('created public key PEM file: {}'.format(pemfile)) # convert pem to pfx for Azure Batch service subprocess.check_call( ['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey', privatekey, '-in', f.name, '-certfile', f.name, '-passin', 'pass:', '-passout', 'pass:' + passphrase] ) logger.debug('created PFX file: {}'.format(pfxfile)) finally: # remove rsa private key file fp = pathlib.Path(privatekey) if fp.exists(): fp.unlink() # remove temp cert pem fp = pathlib.Path(f.name) if fp.exists(): fp.unlink() # get sha1 thumbprint of pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase) def get_encryption_pfx_settings(config): # type: (dict) 
-> tuple """Get PFX encryption settings from configuration :param dict config: configuration settings :rtype: tuple :return: pfxfile, passphrase, sha1 tp """ pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) # manually get thumbprint of pfx if not exists in config if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase is None: pfx_passphrase = getpass.getpass('Enter password for PFX: ') sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase) settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint( config, sha1_cert_tp) return PfxSettings( filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config): # type: (str, dict) -> str """RSA encrypt a string :param str data: clear text data to encrypt :param dict config: configuration dict :rtype: str :return: base64-encoded cipher text """ if util.is_none_or_empty(data): raise ValueError('invalid data to encrypt') inkey = settings.batch_shipyard_encryption_public_key_pem(config) derived = False if inkey is None: # derive pem from pfx derived = True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config) inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None) try: if inkey is None: raise RuntimeError('public encryption key is invalid') proc = subprocess.Popen( ['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE) ciphertext = util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode != 0: raise RuntimeError( 'openssl encryption failed with returncode: {}'.format( proc.returncode)) return ciphertext finally: if derived: fp = pathlib.Path(inkey) if fp.exists(): fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext, config): # type: (str, dict) -> str """RSA decrypt a string :param str ciphertext: cipher text in base64 :param dict config: configuration dict :rtype: str :return: decrypted cipher text """ if util.is_none_or_empty(ciphertext): raise ValueError('invalid ciphertext to decrypt') pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None) if pemfile is None: raise RuntimeError('cannot decrypt without valid private key') cleartext = None try: data = util.base64_decode_string(ciphertext) proc = subprocess.Popen( ['openssl', 'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext = proc.communicate(input=data)[0] finally: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() return cleartext def encrypt_string(enabled, string, config): # type: (bool, str, dict) -> str """Encrypt a string :param bool enabled: if encryption is enabled :param str string: string to encrypt :param dict config: configuration dict :rtype: str :return: encrypted string if enabled """ if enabled: return _rsa_encrypt_string(string, config) else: return string
[ 2, 15069, 357, 66, 8, 5413, 10501, 198, 2, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 17168, 13789, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 198, 2, 4866, 286, 428, ...
2.397504
6,732
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf

'''
gluoncv backbone + multi_gpu
'''

# ------------------------------------------------
VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3'
NET_NAME = 'resnet50_v1d'
ADD_BOX_IN_TENSORBOARD = True

# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 20
SMRY_ITER = 200
SAVE_WEIGHTS_INTE = 80000

SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image'
INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results'

if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise NotImplementedError

PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'

# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
IS_FILTER_OUTSIDE_BOXES = False
FIXED_BLOCKS = 0  # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False]  # for gluoncv backbone
USE_07_METRIC = True
CUDA9 = True
EVAL_THRESHOLD = 0.5

RPN_LOCATION_LOSS_WEIGHT = 1.
RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0
FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0
FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0
RPN_SIGMA = 3.0
FASTRCNN_SIGMA = 1.0

MUTILPY_BIAS_GRADIENT = None  # 2.0  # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None  # 10.0 if None, will not clip

EPSILON = 1e-5
MOMENTUM = 0.9
BATCH_SIZE = 1
WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE)
LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE]  # 50000, 70000
MAX_ITERATION = 20*SAVE_WEIGHTS_INTE

# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'coco'  # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225]  # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 800
IMG_MAX_LENGTH = 1333
CLASS_NUM = 80

# --------------------------------------------- Network_config
INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01)
BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001)
WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001
IS_ASSIGN = True

# ---------------------------------------------Anchor config
USE_CENTER_OFFSET = True
LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]
ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]]
ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0]

# --------------------------------------------FPN config
SHARE_HEADS = True
KERNEL_SIZE = 3
RPN_IOU_POSITIVE_THRESHOLD = 0.7
RPN_IOU_NEGATIVE_THRESHOLD = 0.3
TRAIN_RPN_CLOOBER_POSITIVES = False

RPN_MINIBATCH_SIZE = 256
RPN_POSITIVE_RATE = 0.5
RPN_NMS_IOU_THRESHOLD = 0.7
RPN_TOP_K_NMS_TRAIN = 12000
RPN_MAXIMUM_PROPOSAL_TARIN = 2000
RPN_TOP_K_NMS_TEST = 6000
RPN_MAXIMUM_PROPOSAL_TEST = 1000

# -------------------------------------------Fast-RCNN config
ROI_SIZE = 14
ROI_POOL_KERNEL_SIZE = 2
USE_DROPOUT = False
KEEP_PROB = 1.0
SHOW_SCORE_THRSHOLD = 0.6  # only show in tensorboard

FAST_RCNN_NMS_IOU_THRESHOLD = 0.5  # 0.6
FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100
FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5
FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0  # 0.1 < IOU < 0.5 is negative
FAST_RCNN_MINIBATCH_SIZE = 512  # if is -1, that is train with OHEM
FAST_RCNN_POSITIVE_RATE = 0.25

ADD_GTBOXES_TO_TRAIN = False
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 7297, 11, 3601, 62, 8818, 11, 4112, 62, 11748, 198, 11748, 28686, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 7061, 6, 198, 70, 2290, ...
2.281879
1,788
#
# Copyright (2020) The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import unittest

import delta.exceptions as exceptions

from delta.testing.utils import DeltaTestCase


if __name__ == "__main__":
    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=4)
[ 2, 198, 2, 15069, 357, 42334, 8, 383, 16978, 6233, 4935, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351,...
3.366197
284
print("Enter the 1st matrix") first_matrix = matrix_form() print(first_matrix) print("Enter the 2nd matrix") sec_matrix = matrix_form() print(sec_matrix) check_matrix(first_matrix,sec_matrix)
[ 198, 198, 4798, 7203, 17469, 262, 352, 301, 17593, 4943, 198, 11085, 62, 6759, 8609, 796, 17593, 62, 687, 3419, 198, 4798, 7, 11085, 62, 6759, 8609, 8, 198, 4798, 7203, 17469, 262, 362, 358, 17593, 4943, 198, 2363, 62, 6759, 8609, 7...
2.71831
71
#!/home/ubuntu/miniconda2/bin/python

from __future__ import division
import sys
import glob, os, gc
import uuid
import os.path
import csv
import numpy as np
from time import time
from subprocess import (call, Popen, PIPE)
from itertools import product
import shutil
import re
import pickle
from boto3.session import Session
import boto3
import h5py
import umap
import hdbscan
from keras.models import load_model
from keras.models import Model
from keras import backend as K
from keras.utils import multi_gpu_model

##Path to Data
basepath = "/home/ubuntu/"
subject = sys.argv[1]

with open("config.txt") as f:
    config = [line.rstrip() for line in f]

print config[0]
print config[1]

session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3 = boto3.client ('s3')
s3.download_file('for-ndar',os.path.join("metadata/", subject + ".txt"),os.path.join(basepath,subject + ".txt"))

with open(subject + ".txt") as f:
    Cells = [line.rstrip() for line in f]

session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.download_file('bsmn-data',os.path.join('Inception_Transfer_Model.h5'),os.path.join(basepath,'Inception_Transfer_Model.h5'))

feat_extractor = load_model(os.path.join(basepath,'Inception_Transfer_Model.h5'))
parallel_model = multi_gpu_model(feat_extractor, gpus=2)

count = 0
for cell in Cells:
    print(cell)
    cell_size=0
    cell_ids = []
    s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_IDs.h5'),os.path.join(basepath,cell+'_IDs.h5'))
    f = h5py.File(os.path.join(basepath,cell+'_IDs.h5'), 'r')
    cell_ids = f['ID']
    for cid in cell_ids:
        cid = cid.decode('utf-8')
        s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5'))
        xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r')
        os.remove(os.path.join(basepath,cell+'_'+cid+'.h5'))
        if count == 0:
            X = xyz['X']
            Y = xyz['Y']
            Z = parallel_model.predict(X, batch_size = 128)
            count+=1
            length = len(Y)
            U = [cid] * length
        else:
            X = xyz['X']
            Y = np.append(Y,xyz['Y'], axis=0)
            z = feat_extractor.predict(X, batch_size = 128)
            Z = np.append(Z,z, axis=0)
            length = len(xyz['Y'])
            U = U + ([cid] * length)
    print(Z.shape)

hf = h5py.File(subject+'_ef.h5', 'w')
hf.create_dataset('Y', data=Y)
hf.create_dataset('Z', data=Z)
hf.close()

session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.upload_file(os.path.join(subject+'_ef.h5'),'bsmn-data',os.path.join(subject, subject+'_ef.h5'))

call(['sudo', 'shutdown', '-h', 'now'])
[ 2, 48443, 11195, 14, 32230, 14, 1084, 291, 13533, 17, 14, 8800, 14, 29412, 198, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 11748, 25064, 198, 11748, 15095, 11, 28686, 11, 308, 66, 198, 11748, 334, 27112, 198, 11748, 28686, 13, 697...
2.204893
1,308
import numpy as np from stumpff import C, S from CelestialBody import BODIES from numerical import newton, laguerre from lagrange import calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha, r0, vr0, mu, dt): ''' Kepler's Equation of the universal anomaly, modified for use in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \ (1 - alpha*r0)*chi**3*S(z) + \ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0, mu, dt): ''' Derivative of Kepler's Equation of the universal anomaly, modified for use in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \ (1 - alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt): ''' Second derivative of Kepler's Equation of the universal anomaly, modified for use in numerical solvers. ''' z = alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \ chi*(1 - z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's Equation of the universal anomaly chi using the specified numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering Students, 4 ed, Curtis. :param r_0: `iterable` (km) initial position 3-vector :param v_0: `iterable` (km/s) initial velocity 3-vector :param dt: `float` (s) time after initial state to solve for r, v as 3-vectors :param body: `CelestialBody` (--) the celestial body to use for orbital parameters :param method: `str` (--) which numerical method to use to solve Kepler's Equation :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 745 single precision) :param max_iters: `int` (--) maximum number of iterations in numerical method before breaking :return: (km) final position 3-vector, (km/s) final velocity 3-vector ''' VALID_METHODS = ('laguerre', 'newton') mu = body.mu # (km**3/s**2) gravitational parameter of the specified primary body r0 = np.linalg.norm(r_0) # (km) initial position magnitude v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS: print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.') chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) elif method == 'newton': chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt) else: # method == 'laguerre' chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) f = calc_f(chi, r0, alpha) g = calc_g(dt, mu, chi, alpha) r_1 = f*r_0 + g*v_0 r1 = np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0, alpha, chi) gd = calc_gd(chi, r1, alpha) v_1 = fd*r_0 + gd*v_0 return r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E), eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital Mechanics for Engineering Students, 4 ed, Curtis. 
''' # TODO: have this function make use of one of the numerical methods in numerical.py E = Me + e/2 if Me < np.pi else Me - e/2 ratio = f(E, e, Me)/fp(E, e) iters = 0 while abs(ratio) > tol and iters < max_iters: E -= ratio ratio = f(E, e, Me)/fp(E, e) iters += 1 E -= ratio converged = np.abs(ratio) <= tol return E, iters, converged def test(): ''' Test the functionality of solve_kepler_chi and solve_kepler_laguerre using Problem 3.20 from Orbital Mechanics for Engineering Students, 4 ed, Curtis. ''' # given starting information Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector dt = 2*60*60 # (s) time of interest after initial time # given correct answer from textbook correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector # solve using above methods r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') # check correctness # tolerance based on significant figures of given answers newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if __name__ == '__main__': print(test())
[ 11748, 299, 32152, 355, 45941, 198, 6738, 42497, 487, 1330, 327, 11, 311, 198, 6738, 37231, 25842, 1330, 347, 3727, 11015, 198, 6738, 29052, 1330, 649, 1122, 11, 300, 11433, 263, 260, 198, 6738, 19470, 9521, 1330, 42302, 62, 69, 11, 4...
2.358087
2,195
description = 'PGAA setup with XYZOmega sample table'

group = 'basic'

sysconfig = dict(
    datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink']
)

includes = [
    'system',
    'reactor',
    'nl4b',
    'pressure',
    'sampletable',
    'pilz',
    'detector',
    'collimation',
]

devices = dict(
    mcasink = device('nicos_mlz.pgaa.devices.MCASink',
        settypes = {'point'},
        detectors = ['_60p', 'LEGe'],
    ),
    chnsink = device('nicos_mlz.pgaa.devices.CHNSink',
        settypes = {'point'},
        detectors = ['_60p', 'LEGe'],
    ),
    csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink',
        settypes = {'point'},
    ),
)

startupcode = """
SetDetectors('_60p', 'LEGe')
SetEnvironment(chamber_pressure)
printinfo("============================================================")
printinfo("Welcome to the NICOS PGAI demo setup.")
printinfo("============================================================")
"""
[ 11213, 796, 705, 6968, 3838, 9058, 351, 41420, 57, 46, 13731, 6291, 3084, 6, 198, 198, 8094, 796, 705, 35487, 6, 198, 198, 17597, 11250, 796, 8633, 7, 198, 220, 220, 220, 19395, 2973, 796, 37250, 23209, 292, 676, 3256, 705, 1349, 82...
2.492147
382
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import numpy as np
import tvm
from tvm import autotvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import ctx_list, run_infer_type
from tvm.contrib import util
import topi.testing


if __name__ == "__main__":
    test_pool1d()
    test_pool2d()
    test_pool3d()
    test_avg_pool2d_no_count_pad()
    test_lrn()
    test_l2_normalize()
    test_conv1d_infer_type()
    test_conv2d_infer_type()
    test_conv3d_infer_type()
    test_bitpack_infer_type()
    test_upsampling_infer_type()
    test_upsampling3d_infer_type()
    test_flatten_infer_type()
    test_pad_infer_type()
    test_pad_run()
    test_conv2d_transpose_infer_type()
    test_conv2d_transpose_nchw_run()
    test_conv2d_transpose_nhwc_run()
    test_conv1d_transpose_ncw_run()
    test_conv1d_run()
    test_conv2d_run()
    test_conv2d_winograd()
    test_conv3d_run()
    test_conv3d_ndhwc_run()
    test_bitserial_conv2d_infer_type()
    test_batch_flatten()
    test_upsampling()
    test_upsampling3d()
    test_conv2d_int8_intrinsics()
    test_depthwise_conv2d_int8()
[ 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 5115, 6634, 9238, 13, 220, 383, 7054,...
2.683844
718
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Subtokenizer and string helper methods."""

import collections
import tempfile

import tensorflow as tf

from official.nlp.transformer.utils import tokenizer


if __name__ == "__main__":
    tf.test.main()
[ 2, 15069, 33448, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 201, 198, 2, 201, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 201, 198, 2, 345, 743, 407, 779, 428, 2393, ...
3.390438
251
from .form7_search import Form7Search
from .parse_form7 import Form7Parsing
[ 6738, 764, 687, 22, 62, 12947, 1330, 5178, 22, 18243, 198, 6738, 764, 29572, 62, 687, 22, 1330, 5178, 22, 47, 945, 278, 198 ]
3.166667
24
# Copyright 2020 Soil, Inc.

from soil.openstack.base import DataBase
from soil.openstack.base import SourceBase
[ 2, 15069, 12131, 1406, 346, 11, 3457, 13, 198, 198, 6738, 9260, 13, 9654, 25558, 13, 8692, 1330, 6060, 14881, 198, 6738, 9260, 13, 9654, 25558, 13, 8692, 1330, 8090, 14881, 628, 220, 220, 220, 220, 198 ]
3.216216
37
# -*- coding: utf-8 -*-
# Thanks to @skelsec for his awesome tool Pypykatz
# Checks his project here: https://github.com/skelsec/pypykatz
import codecs
import traceback

from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
from pypykatz.pypykatz import pypykatz
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 201, 198, 2, 6930, 284, 2488, 82, 7750, 2363, 329, 465, 7427, 2891, 350, 4464, 48361, 27906, 201, 198, 2, 47719, 465, 1628, 994, 25, 3740, 1378, 12567, 13, 785, ...
2.773913
115
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Tests for discogs plugin.
"""
from __future__ import division, absolute_import, print_function

import unittest
from test import _common
from test._common import Bag
from test.helper import capture_log

from beetsplug.discogs import DiscogsPlugin


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 770, 2393, 318, 636, 286, 307, 1039, 13, 198, 2, 15069, 1584, 11, 21462, 42168, 1559, 13, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, ...
3.807692
260
"""Queue represented by a pseudo stack (represented by a list with pop and append)"""
[ 37811, 34991, 7997, 416, 257, 24543, 8931, 357, 33469, 416, 257, 1351, 351, 1461, 290, 24443, 8, 37811, 628 ]
4.578947
19
#! /usr/bin/env python
# coding: utf-8

import configparser
import numpy as np
import re,sys,os
from graph import MyGraph
from collections import OrderedDict

def unique_config_sections(config_file):
    """Convert all config sections to have unique names.
    Adds unique suffixes to config sections for compability with configparser.
    """
    from collections import defaultdict
    import io
    section_counters = defaultdict(int)
    output_stream = io.StringIO()
    with open(config_file) as fin:
        for line in fin:
            if line.startswith('['):
                section = line.strip().strip('[]')
                _section = section + '_' + str(section_counters[section])
                section_counters[section] += 1
                line = line.replace(section, _section)
            output_stream.write(line)
    output_stream.seek(0)
    return output_stream

if __name__ == '__main__':
    config_path = sys.argv[1]
    weights_path = sys.argv[2]
    mygraph = buildGraph(config_path, weights_path)
    # outputNodes = ['region_0', 'softmax_0']
    stopNodes = []
    inputNodes = ['darknet_0']
    mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes)
    mygraph.generateDot('YoloV2.dot')
    # mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 4566, 48610, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 302, 11, 17597, 11, 418, 198, 6738, 4823, 1330, 2011, 37065, 198, 6738...
2.472727
550
from django.contrib.auth.models import Permission, User
from django.db import models
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 2448, 3411, 11, 11787, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198 ]
3.541667
24
import datetime
import calendar
import requests
import pandas as pd
import json
import os.path
import time
import MySQLdb as M

from gdax_history import timestamp_to_utcstr


if __name__ == "__main__":
    main()
[ 11748, 4818, 8079, 198, 11748, 11845, 198, 11748, 7007, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 33918, 198, 11748, 28686, 13, 6978, 198, 11748, 640, 198, 11748, 33476, 9945, 355, 337, 198, 198, 6738, 308, 67, 897, 62, 23569, ...
2.972973
74
"""Configures a Kafka Connector for Postgres Station data""" import json import logging import requests from settings import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f"{Settings.URLs.KAFKA_CONNECT_URL}/connectors" CONNECTOR_NAME = "stations" def configure_connector(): """Starts and configures the Kafka Connect connector""" logging.debug("Creating or updating kafka connect connector...") resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}") if resp.status_code == 200: logging.debug("Connector already created skipping recreation") return config = { "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector", "key.converter": "org.apache.kafka.connect.json.JsonConverter", "key.converter.schemas.enable": "false", "value.converter": "org.apache.kafka.connect.json.JsonConverter", "value.converter.schemas.enable": "false", "topic.prefix": "com.connect.transportation.", "connection.url": "jdbc:postgresql://postgres:5432/cta", "connection.user": "cta_admin", "connection.password": "chicago", "batch.max.rows": "500", "table.whitelist": "stations", "poll.interval.ms": "5000", # Poll every 5 seconds "mode": "incrementing", "incrementing.column.name": "stop_id", } # TODO: Complete the Kafka Connect Config below. # Directions: Use the JDBC Source Connector to connect to Postgres. Load the `stations` table # using incrementing mode, with `stop_id` as the incrementing column name. # Make sure to think about what an appropriate topic prefix would be, and how frequently Kafka # Connect should run this connector (hint: not very often!) data = json.dumps({"name": CONNECTOR_NAME, "config": config}) resp = requests.post( KAFKA_CONNECT_URL, headers={"Content-Type": "application/json"}, data=data, ) # Ensure a healthy response was given resp.raise_for_status() logging.info("-------Connector created successfully-------") if __name__ == "__main__": configure_connector()
[ 37811, 16934, 942, 257, 46906, 8113, 273, 329, 2947, 34239, 9327, 1366, 37811, 198, 11748, 33918, 198, 11748, 18931, 198, 198, 11748, 7007, 198, 6738, 6460, 1330, 16163, 198, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, 834, 3...
2.69587
799
import io import logging import json import numpy import torch import numpy as np from tqdm import tqdm from clie.inputters import constant from clie.objects import Sentence from torch.utils.data import Dataset from torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------ def vectorize(ex, model, iseval): """Torchify a single example.""" words = ['!{}_{}'.format(ex.language, w) for w in ex.words] words = [model.word_dict[w] for w in words] knn_word = None if ex.knn_words: knn_word = [[model.word_dict[w] for w in knn] for knn in ex.knn_words] knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert any([x == 0 for x in ex.head]) head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type = [0] * len(ex.words) ttype = model.type_dict[ex.subj_type] start, end = ex.subject type[start: end + 1] = [ttype] * (end - start + 1) atype = model.type_dict[ex.obj_type] start, end = ex.object type[start: end + 1] = [atype] * (end - start + 1) type = torch.LongTensor(type) return { 'id': ex.id, 'language': ex.language, 'word': word, 'pos': pos, 'ner': ner, 'deprel': deprel, 'type': type, 'head': head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch): """Gather a batch of individual examples into one batch.""" # batch is a list of vectorized examples batch_size = len(batch) ids = [ex['id'] for ex in batch] language = [ex['language'] for ex in batch] use_knn = batch[0]['knn_word'] is not None # NOTE. 
batch[0]['knn_word'] is a 2d list knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0 # --------- Prepare Code tensors --------- max_len = max([ex['word'].size(0) for ex in batch]) # Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject = [] object = [] knn_rep = None if use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return { 'ids': ids, 'language': language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep }
[ 11748, 33245, 198, 11748, 18931, 198, 11748, 33918, 198, 11748, 299, 32152, 198, 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 6738, 537, 494, 13, 15414, 1010, 1330, 6937, 198, 6...
2.257171
2,022
from distutils.extension import Extension


cmdclass = {}

try:
    # with Cython
    from Cython.Build import build_ext

    cmdclass["build_ext"] = build_ext
    module_src = "cgranges/python/cgranges.pyx"
except ImportError:  # without Cython
    module_src = "cgranges/python/cgranges.c"


def build(setup_kwargs):
    """
    This function is mandatory in order to build the extensions.
    """
    setup_kwargs.update(
        {
            "ext_modules": [
                Extension(
                    "cgranges",
                    sources=[module_src, "cgranges/cgranges.c"],
                    depends=[
                        "cgranges/cgranges.h",
                        "cgranges/khash.h",
                        "cgranges/python/cgranges.pyx"
                    ],
                    include_dirs=["cgranges"]
                )
            ],
            "cmdclass": cmdclass
        }
    )
[ 6738, 1233, 26791, 13, 2302, 3004, 1330, 27995, 628, 198, 28758, 4871, 796, 23884, 198, 198, 28311, 25, 198, 220, 220, 220, 1303, 351, 327, 7535, 198, 220, 220, 220, 422, 327, 7535, 13, 15580, 1330, 1382, 62, 2302, 198, 220, 220, 22...
1.817097
503
# -*- coding: utf-8 -*-
from .compSpot import *
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 764, 5589, 32565, 1330, 1635, 198 ]
2.285714
21
# pylint: disable=line-too-long,too-many-lines,missing-docstring
"""Kinetics400 action classification dataset."""
import os
import random
import numpy as np
from mxnet import nd
from mxnet.gluon.data import dataset

__all__ = ['Kinetics400']
[ 2, 279, 2645, 600, 25, 15560, 28, 1370, 12, 18820, 12, 6511, 11, 18820, 12, 21834, 12, 6615, 11, 45688, 12, 15390, 8841, 198, 37811, 49681, 14596, 7029, 2223, 17923, 27039, 526, 15931, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, ...
3.102564
78
# Copyright 2016-present CERN  European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime

from qf_lib.common.tickers.tickers import Ticker
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
[ 2, 220, 220, 220, 220, 15069, 1584, 12, 25579, 327, 28778, 220, 3427, 12275, 329, 19229, 4992, 198, 2, 198, 2, 220, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 220, 2...
3.349794
243
# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action Plan <action_plan_definition>` specifies a flow of :ref:`Actions <action_definition>` that should be executed in order to satisfy a given :ref:`Goal <goal_definition>`. It also contains an estimated :ref:`global efficacy <efficacy_definition>` alongside a set of :ref:`efficacy indicators <efficacy_indicator_definition>`. An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an :ref:`Audit <audit_definition>` is successful which implies that the :ref:`Strategy <strategy_definition>` which was used has found a :ref:`Solution <solution_definition>` to achieve the :ref:`Goal <goal_definition>` of this :ref:`Audit <audit_definition>`. In the default implementation of Watcher, an action plan is composed of a list of successive :ref:`Actions <action_definition>` (i.e., a Workflow of :ref:`Actions <action_definition>` belonging to a unique branch). However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) <action_plan_definition>` composed of two types of Action Item(s): - simple :ref:`Actions <action_definition>`: atomic tasks, which means it can not be split into smaller tasks or commands from an OpenStack point of view. - composite Actions: which are composed of several simple :ref:`Actions <action_definition>` ordered in sequential and/or parallel flows. An :ref:`Action Plan <action_plan_definition>` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) <http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML) <http://www.uml.org/>`_. To see the life-cycle and description of :ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan state machine <action_plan_state_machine>`. """ import datetime from http import HTTPStatus from oslo_log import log import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.applier import rpcapi from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher import objects from watcher.objects import action_plan as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. 
These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 2211, 2297, 10983, 11, 3457, 13, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 3415...
3.465649
1,048
from flask import *

albums = Blueprint('albums', __name__, template_folder='templates')
[ 6738, 42903, 1330, 1635, 198, 198, 40916, 82, 796, 39932, 10786, 40916, 82, 3256, 11593, 3672, 834, 11, 11055, 62, 43551, 11639, 11498, 17041, 11537, 198 ]
3.423077
26
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from dfirtrack_main.forms import DivisionForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Division
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 6218, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 19816, 1040, 1330, 23093, 37374, 35608, 259, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 18941, 11, 8543, 198, 6738, 42625, 14208, ...
3.680672
119
# coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse

parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()


if __name__ == '__main__':
    dataset = 'THUCNews'  #

    # :embedding_SougouNews.npz, :embedding_Tencent.npz, :random
    # embedding = 'random'
    model_name = args.model  # TextCNN

    from utils import build_dataset, build_iterator, get_time_dif

    x = import_module('models.' + model_name)
    from config import Config
    config = Config(dataset)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  #

    start_time = time.time()
    print("Loading data...")
    vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    config.n_vocab = len(vocab)
    model = x.Model().to(config.device)
    init_network(model)
    print(model.parameters)
    train(config, model, train_iter, dev_iter, test_iter)
[ 2, 19617, 25, 41002, 12, 23, 198, 11748, 640, 198, 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 4512, 62, 18206, 1330, 4512, 11, 2315, 62, 27349, 198, 6738, 1330, 8019, 1330, 1330, 62, 21412, 198, 11748, 1822, 29572, 1...
2.733677
582
import pytest
from django.conf import settings
from django.core import mail as djmail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django_scopes import scope
from rest_framework.authtoken.models import Token

from pretalx.submission.models import SubmissionStates
[ 11748, 12972, 9288, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 7295, 1330, 6920, 355, 42625, 4529, 198, 6738, 42625, 14208, 13, 7295, 13, 16624, 13, 25850, 276, 7753, 1330, 17427, 41592, 276, 8979, 198, ...
3.49505
101
# MIT License

# Copyright (c) 2021 Vasily Denisenko, Sergey Kuznetsov

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


import mb_bsp


PDU_SIZE_REG = 0
CONFIG_REG = 1
SLAVE_ADDR_REG = 2
CS_REG = 3

MB_MAX_WRITE_REGNUM = 123
MB_MAX_READ_REGNUM = 125
MB_MAX_REG_ADDR = 65535
MB_MAX_REG_VAL = 65535
MB_MAX_SLAVE_ADDR = 247
MB_MIN_SLAVE_ADDR = 1
MB_MAX_PDU_SIZE = 253
MB_MIN_PDU_SIZE = 1

FCODE_0x3 = 0x3
FCODE_0x6 = 0x6
FCODE_0x10 = 0x10


setattr(incr_err_count, 'count', 0)
[ 2, 17168, 13789, 201, 198, 201, 198, 2, 15069, 357, 66, 8, 33448, 23663, 813, 5601, 13254, 7204, 11, 36106, 509, 10277, 3262, 47272, 201, 198, 201, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727...
2.704274
585
import fix_path
import json
import datetime

from google.appengine.ext import ndb

# Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript
dthandler = lambda obj: (
    obj.isoformat()
    if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)
    else None
)
[ 11748, 4259, 62, 6978, 198, 11748, 33918, 198, 11748, 4818, 8079, 198, 198, 6738, 23645, 13, 1324, 18392, 13, 2302, 1330, 299, 9945, 198, 198, 2, 30222, 422, 2638, 1378, 25558, 2502, 11125, 13, 785, 14, 6138, 507, 14, 2231, 2816, 1795...
2.858407
113
#!/usr/bin/python3
# -*- coding: UTF-8 -*-

import time

busyTime = 10
idleTime = busyTime

while True:

    start = time.clock()
    while time.clock() - start < busyTime:
        pass

    time.sleep(busyTime / 1000)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 198, 11748, 640, 198, 198, 10885, 88, 7575, 796, 838, 198, 312, 293, 7575, 796, 8179, 7575, 198, 198, 4514, 6407, 25, 628, ...
2.344086
93
""" Module for working with named and anonymous maps .. module:: carto.maps :platform: Unix, Windows :synopsis: Module for working with named and anonymous maps .. moduleauthor:: Daniel Carrion <daniel@carto.com> .. moduleauthor:: Alberto Romeu <alrocar@carto.com> """ try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin from pyrestcli.resources import Manager, Resource from .exceptions import CartoException, CartoRateLimitException API_VERSION = "v1" NAMED_API_ENDPOINT = "api/{api_version}/map/named/" ANONYMOUS_API_ENDPOINT = "api/{api_version}/map/"
[ 37811, 198, 26796, 329, 1762, 351, 3706, 290, 11614, 8739, 198, 198, 492, 8265, 3712, 6383, 78, 13, 31803, 198, 220, 220, 1058, 24254, 25, 33501, 11, 3964, 198, 220, 220, 1058, 28869, 24608, 25, 19937, 329, 1762, 351, 3706, 290, 11614...
3.02451
204
from kv_client.kv_client import KVClient


if __name__ == "__main__":
    main()
[ 6738, 479, 85, 62, 16366, 13, 74, 85, 62, 16366, 1330, 509, 53, 11792, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419 ]
2.46875
32
import operator_benchmark as op_bench
import torch
import numpy
from . import configs

"""EmbeddingBag Operator Benchmark"""

op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()
[ 11748, 10088, 62, 26968, 4102, 355, 1034, 62, 26968, 198, 11748, 28034, 198, 11748, 299, 32152, 198, 6738, 764, 1330, 4566, 82, 198, 198, 37811, 31567, 6048, 278, 33, 363, 35946, 25187, 4102, 37811, 198, 198, 404, 62, 26968, 13, 8612, ...
2.875969
129
"""Python interfaces to DGL farthest point sampler.""" from dgl._ffi.base import DGLError import numpy as np from .._ffi.function import _init_api from .. import backend as F from .. import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r"""Farthest Point Sampler Parameters ---------- data : tensor A tensor of shape (N, d) where N is the number of points and d is the dimension. batch_size : int The number of batches in the ``data``. N should be divisible by batch_size. sample_points : int The number of points to sample in each batch. dist : tensor Pre-allocated tensor of shape (N, ) for to-sample distance. start_idx : tensor of int Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch. result : tensor of int Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index. Returns ------- No return value. The input variable ``result`` will be overwriten with sampled indices. """ assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): """ Description ----------- The neighbor matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking an unmarked vertex and matching it with one its unmarked neighbors (that maximizes its edge weight) until no match can be done. If no edge weight is given, this procedure will randomly pick neighbor for each vertex. The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes : int The number of nodes in this homogeneous graph. edge_weight : tensor, optional The edge weight tensor holding non-negative scalar weight for each edge. default: :obj:`None` relabel_idx : bool, optional If true, relabel resulting node labels to have consecutive node ids. default: :obj:`True` Returns ------- a 1-D tensor A vector with each element that indicates the cluster ID of a vertex. """ edge_weight_capi = nd.NULL["int64"] if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise DGLError("Find unmatched node") # reorder node id # TODO: actually we can add `return_inverse` option for `unique` # function in backend for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return node_label _init_api('dgl.geometry', __name__)
[ 37811, 37906, 20314, 284, 360, 8763, 15189, 3634, 966, 6072, 20053, 526, 15931, 198, 6738, 288, 4743, 13557, 487, 72, 13, 8692, 1330, 46133, 2538, 81, 1472, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 11485, 62, 487, 72, 13, 8818, ...
2.505994
1,585
import shutil import hashlib from pathlib import Path from typing import TextIO, BinaryIO, IO, Union from datetime import datetime from os.path import getmtime from .low import ObservableDict def rename(self, new_name: str): shutil.move(str(self.path), str(self.__parent__.path / new_name)) self.__data_name__ = new_name def reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]: mode = 'r' mode += 'b' if binary else '' return open(str(self.path), mode=mode, **kwargs) def creator(self, binary: bool = False, confirm: bool = False, feedback: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if confirm and not feedback: return None mode = 'x' mode += 'b' if binary else '' return open(str(self.path), mode=mode, **kwargs) def writer(self, binary: bool = False, append: bool = True, allow_overwrite: bool = False, confirm: bool = True, feedback: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]: if not allow_overwrite and not append: raise PermissionError('Trying to overwrite existed data.') if confirm and not feedback: return mode = 'a' if append else 'w' mode += 'b' if binary else '' return open(str(self.path), mode=mode, **kwargs) def __repr__(self): return f"Data('{self.__data_name__}')" def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False): if self.path.exists() and not allow_overwrite: return if confirm and not feedback: return shutil.copyfile(str(src_path), str(self.path)) def export_file(self, dst_path: [str, Path], allow_overwrite=False): if Path(dst_path).exists() and not allow_overwrite: return shutil.copyfile(str(self.path), str(dst_path)) def __calc_hash__(self, h, buffer_size: int = 131072): if not self.path.exists(): return None with open(str(self.path), 'rb') as file_reader: while True: data = file_reader.read(buffer_size) if not data: break h.update(data) return h.hexdigest() def md5(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]: if not self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if require_update \ or 'md5' not in self.metadata \ or 'md5-timestamp' not in self.metadata \ or self.metadata['md5-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.md5(), buffer_size) self.metadata['md5'] = result self.metadata['md5-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['md5'] def sha1(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]: if not self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if require_update \ or 'sha1' not in self.metadata \ or 'sha1-timestamp' not in self.metadata \ or self.metadata['sha1-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha1(), buffer_size) self.metadata['sha1'] = result self.metadata['sha1-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['sha1'] def sha256(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]: if not self.path.exists(): return None last_modified_time = getmtime(str(self.path)) if require_update \ or 'sha256' not in self.metadata \ or 'sha256-timestamp' not in self.metadata \ or self.metadata['sha256-timestamp'] < last_modified_time: result = self.__calc_hash__(hashlib.sha256(), buffer_size) self.metadata['sha256'] = result self.metadata['sha256-timestamp'] = datetime.now().timestamp() return result else: return self.metadata['sha256']
[ 11748, 4423, 346, 198, 11748, 12234, 8019, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 8255, 9399, 11, 45755, 9399, 11, 24418, 11, 4479, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 28686, 13, 6978, 1330, 651, 76,...
2.137685
2,099
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Version 2 of class Optimizer.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import functools import six from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import revived_types from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export def _deduplicate_indexed_slices(values, indices): """Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index. """ unique_indices, new_index_positions = array_ops.unique(indices) summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0]) return (summed_values, unique_indices) def _filter_grads(grads_and_vars): """Filter out iterable with grad equal to None.""" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: raise ValueError("No gradients provided for any variable: %s." 
% ([v.name for _, v in grads_and_vars],)) if vars_with_empty_grads: logging.warning( ("Gradients does not exist for variables %s when minimizing the loss."), ([v.name for v in vars_with_empty_grads])) return filtered def _var_key(var): """Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable. """ # pylint: disable=protected-access # Get the distributed variable if it exists. if getattr(var, "_distributed_container", None) is not None: var = var._distributed_container() if var._in_graph_mode: return var._shared_name return var._unique_id def _get_slot_key_from_var(var, slot_name): """Get the slot key for the variable: var_name/slot_name.""" name = _var_key(var) return name + "/" + slot_name revived_types.register_revived_type( "optimizer", lambda obj: isinstance(obj, OptimizerV2), versions=[revived_types.VersionedTypeRegistration( object_factory=lambda proto: _RestoredOptimizer(), version=1, min_producer_version=1, min_consumer_version=1, setter=_RestoredOptimizer._set_hyper # pylint: disable=protected-access )])
[ 2, 15069, 2864, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.110682
1,554
import requests

headers = {
    'content-type': 'application/json',
    'Authorization': 'Token 80ca9f249b80e7226cdc7fcaada8d7297352f0f9'
}

url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos'
url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes'

resultado = requests.get(url=url_base_cursos, headers=headers)
assert resultado.status_code == 200
[ 11748, 7007, 198, 198, 50145, 796, 1391, 198, 220, 220, 220, 705, 11299, 12, 4906, 10354, 705, 31438, 14, 17752, 3256, 198, 220, 220, 220, 705, 13838, 1634, 10354, 705, 30642, 4019, 6888, 24, 69, 21626, 65, 1795, 68, 22, 24909, 10210,...
2.291139
158
# Copyright 2021 VMware, Inc.

import argparse
import json
import re
import logging
import os
import sys

from avi.sdk.avi_api import ApiSession

API_VERSION = "18.2.13"
SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI'
logger = logging.getLogger(__name__)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', action="store", help='controller user', default='admin')
    parser.add_argument('-p', '--password', action="store", help='controller user password', default='admin')
    parser.add_argument('-t', '--tenant', action="store", help='tenant name', default='admin')
    parser.add_argument('-a', '--authtoken', help='Authentication token')
    parser.add_argument('-c', '--controller_ip', action="store", help='controller ip')
    args = parser.parse_args()

    if args.password:
        api = ApiSession.get_session(args.controller_ip, args.user, args.password,
                                     tenant=args.tenant, api_version=API_VERSION)
    elif args.authtoken:
        api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant,
                                     token=args.authtoken, api_version=API_VERSION)
    else:
        logging.error("Either password or authtokentoken must be provided.")
        sys.exit(1)

    waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI)
    if not waf_policy_obj:
        create_vdi_waf_policy(api, args)
    else:
        update_waf_policy(api, args, waf_policy_obj)
[ 2, 15069, 33448, 37754, 11, 3457, 13, 198, 198, 11748, 1822, 29572, 198, 11748, 33918, 198, 11748, 302, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 25064, 198, 198, 6738, 1196, 72, 13, 21282, 74, 13, 15820, 62, 15042, 1330, 5949, ...
2.312046
689
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making -(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest

from bkuser_core.bkiam.constants import ResourceType
from bkuser_core.categories.models import Department, ProfileCategory
from bkuser_core.tests.utils import make_simple_department

pytestmark = pytest.mark.django_db


def test_get_resource_nodes_other(self):
    pc = ProfileCategory.objects.get_default()
    nodes = ResourceType.get_instance_resource_nodes(pc)
    assert [(x["type"], x["name"]) for x in nodes] == [("category", "")]
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 24893, 1087, 14573, 15708, 318, 10607, 284, 1104, 262, 1280, 2723, 2055, 416, 1642, 532, 7, 33, 74, 12, 12982, 8, 1695, 13, 198, 15269, 357, 34, 8, 2177, ...
3.454277
339
# from votesim.benchmarks.benchrunner import (
#     run_benchmark,
#     get_benchmarks,
#     post_benchmark,
#     plot_benchmark,
#     )
from votesim.benchmarks import runtools, simple
[ 2, 422, 5690, 320, 13, 26968, 14306, 13, 26968, 16737, 1330, 357, 198, 2, 220, 220, 220, 220, 1057, 62, 26968, 4102, 11, 198, 2, 220, 220, 220, 220, 651, 62, 26968, 14306, 11, 198, 2, 220, 220, 220, 220, 1281, 62, 26968, 4102, 1...
2.5
76
import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv


load_dotenv()
dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]
connection = ps.connect(dbname=dbname,
                        host=host,
                        port=port,
                        user=user,
                        password=password)
[ 11748, 275, 2069, 18, 198, 11748, 12351, 13, 1324, 355, 598, 198, 11748, 269, 21370, 198, 11748, 17331, 22163, 70, 17, 355, 26692, 198, 11748, 28686, 198, 6738, 16605, 24330, 1330, 3440, 62, 26518, 24330, 628, 198, 2220, 62, 26518, 2433...
1.953782
238
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown Copyright 2017-2021 Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Tests of precipitation_type utilities""" import numpy as np import pytest from iris.exceptions import CoordinateNotFoundError from improver.metadata.constants import FLOAT_DTYPE from improver.precipitation_type.utilities import make_shower_condition_cube from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube def set_up_test_cube(n_thresholds=1): """Set up a cube testing shower condition conversion""" thresholds = np.arange(n_thresholds) shape = [2, 2] shape = [n_thresholds, *shape] if n_thresholds > 0 else shape data = np.ones(shape, dtype=FLOAT_DTYPE) cube = set_up_probability_cube( data, thresholds, variable_name="texture_of_cloud_area_fraction", threshold_units=1, spatial_grid="equalarea", ) return cube def test_basic(): """Test that with a valid input the cube is transformed into a shower condition cube.""" cube = set_up_test_cube() result = make_shower_condition_cube(cube) threshold_coord = result.coord(var_name="threshold") assert result.name() == "probability_of_shower_condition_above_threshold" assert result.dtype == FLOAT_DTYPE assert (result.data == cube.data).all() assert threshold_coord.name() == "shower_condition" assert threshold_coord.units == 1 def test_no_threshold_coord(): """Test an exception is raised if the proxy diagnostic cube does not have a threshold coordinate.""" cube = set_up_test_cube() cube.remove_coord("texture_of_cloud_area_fraction") expected = "Input has no threshold coordinate and cannot be used" with pytest.raises(CoordinateNotFoundError, match=expected): make_shower_condition_cube(cube) def test_multi_valued_threshold_coord(): """Test an exception is raised if the proxy diagnostic cube has a multi valued threshold coordinate.""" cube = set_up_test_cube(n_thresholds=2) expected = "Expected a single valued threshold coordinate.*" with pytest.raises(ValueError, match=expected): make_shower_condition_cube(cube)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 16529, 32501, 198, 2, 357, 34, 8, 3517, 12223, 15069, 2177, 12, 1238, 2481, 3395, 4452, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, ...
3.188615
1,177
from .db_conn import ModelSetup
[ 6738, 764, 9945, 62, 37043, 1330, 9104, 40786, 628 ]
3.666667
9
from docutils.parsers.rst.roles import register_canonical_role, set_classes
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.writers.html import HTMLTranslator
from sphinx.errors import ExtensionError
import os
import re
[ 6738, 2205, 26791, 13, 79, 945, 364, 13, 81, 301, 13, 305, 829, 1330, 7881, 62, 49883, 605, 62, 18090, 11, 900, 62, 37724, 198, 6738, 2205, 26791, 13, 79, 945, 364, 13, 81, 301, 1330, 34819, 198, 6738, 2205, 26791, 1330, 13760, 19...
3.434211
76
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

import time

from feaas import storage
[ 2, 15069, 1946, 1401, 77, 680, 15042, 7035, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 198, 2, 5964, 326, 460, 307, 1043, 287, 262, 38559, 24290, 2393, 13, 198, 198, ...
3.722222
54
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os

from .common import PlatformPackageDescriptor
from .http_cache import fetch_and_cache_gzip
[ 2, 15069, 1946, 4946, 8090, 47061, 5693, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, ...
3.870968
186
# PLAYER
[ 2, 28180, 1137, 198, 220, 220, 220, 220, 220, 220, 220, 220, 198 ]
1.384615
13
# Copyright 2022 ConvolutedDog (https://github.com/ConvolutedDog/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/python3 import torch import torch.nn as nn import torch.nn.functional as F from graphviz import Digraph, render from torch.autograd import Variable def add_backward(dLoss_dnextz): print('# next_dz.shape: ', list(dLoss_dnextz.shape)) dLoss_dz = dLoss_dnextz print('# dz.shape: ', list(dLoss_dz.shape)) return dLoss_dz def generate_g(model, x): delete_allpths(pth_dir=None) print('\n=========================== Store network model Results Start =========================') y = model(x) print('=========================== Store network model Results End ===========================\n') if 'GoogLeNet' in str(model).split('\n')[0]: g = make_dot(y[0]) return g else: g = make_dot(y) return g def gradient_backward_v2(model, img, label, num_class=1000, g_view=False): x = Variable(img) g = generate_g(model, x) if g_view: g.view() delete_allpths(pth_dir=None) print('\n=========================== Generate Tensors Start ====================================') result = model(img) print('=========================== Generate Tensors End ======================================\n') Loss = nn.CrossEntropyLoss() if 'GoogLeNet' in str(model).split('\n')[0]: loss_torch = Loss(result[0], label) else: loss_torch = Loss(result, label) _, connections = generate_connections(g) last_connections = merge_connections(connections) return_layers = get_layers(last_connections, model) return_tensors = get_tensors(last_connections) parameters, fc_conv_weights = get_structure_parameters(return_layers) ''' print('================') for i in range(len(last_connections)): print(i, last_connections[i]) print('================') print('================') for i in range(len(return_layers)): print(i, return_layers[i]) print('================') print('================') for i in range(len(parameters)): print(i, parameters[i]) print('================') print('================') for i in range(len(return_tensors)): if not isinstance(return_tensors[i], list) and not isinstance(return_tensors[i], str): print('=========', i, return_tensors[i].shape) print('================') ''' import copy return_dz = copy.deepcopy(last_connections) featuremap = return_tensors featuremap.append(img) y_true = F.one_hot(label, num_classes=num_class).float() loss, dLoss_dz = cross_entropy_loss(featuremap[0], y_true) featuremap.pop(0) return_dz.append(dLoss_dz) #####################tensors ''' for i in range(len(last_connections)): print(last_connections[i]) for i in range(len(featuremap)): if not isinstance(featuremap[i], list): print('=========', i, featuremap[i].shape) else: for j in range(len(featuremap[i])): for k in range(len(featuremap[i][j])): print(' =========', i, j, k, featuremap[i][j][k].shape) ''' ##################### # n for i in range(len(parameters)): layer = parameters[i] if not isinstance(layer, list): print('\n======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward Start 
========================') if layer['layer_name'] == 'Conv2d': z = featuremap[i] weight_z = fc_conv_weights[i] try: padding = layer['padding'] except: padding = (0, 0) stride = layer['stride'] dLoss_dz, dLoss_dW, dLoss_dB = conv_backward(dLoss_dz, weight_z, z, padding, stride) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'ReLU': z = featuremap[i] dLoss_dz = relu_backward(dLoss_dz, z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'MaxPool2d': z = featuremap[i] pooling = layer['kernel_size'] stride = layer['stride'] padding = layer['padding'] dLoss_dz = max_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'AvgPool2d': z = featuremap[i] pooling = layer['kernel_size'] stride = layer['stride'] padding = layer['padding'] dLoss_dz = average_pooling_backward(dLoss_dz, z, pooling, stride, padding) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Linear': weight_z = fc_conv_weights[i] z = featuremap[i] dLoss_dz, dLoss_dW, dLoss_dB = fc_backward(dLoss_dz, z, weight_z) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'View': last_z = featuremap[i+1] if 'Pool' in parameters[i+1]['layer_name']: params = (parameters[i+1]['kernel_size'], parameters[i+1]['stride'], parameters[i+1]['padding']) else: params = None dLoss_dz = view_backward(dLoss_dz, last_z, params) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Add': dLoss_dz = add_backward(dLoss_dz) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'Dropout': if parameters[i-1]['layer_name'] == 'Dropout': return_dz[i] = dLoss_dz print('# Skip this layer because the layer has been calcualted!') print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.\ format(layer['layer_name'])+' Backward End ==========================') continue p = layer['p'] mask = featuremap[i] dLoss_dz = dropback_backward(dLoss_dz, mask, p) return_dz[i] = dLoss_dz elif layer['layer_name'] == 'BatchNorm2d': eps = layer['eps'] z = featuremap[i] gamma = fc_conv_weights[i] dLoss_dz = batchnorm2d_backward(dLoss_dz, z, eps, gamma) return_dz[i] = dLoss_dz print('======================== {0:3} Layer: '.format(str(len(parameters)-1-i))+'{0:11}'.format(layer['layer_name'])+' Backward End ==========================') elif isinstance(layer, list): import copy tmp_dLoss_dz = [] for j in range(len(layer)): tmp_dLoss_dz.append(copy.deepcopy(dLoss_dz)) for k in range(len(layer[j])): tmp_layer = layer[j][k] print('\n=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward Start ====================') if tmp_layer['layer_name'] == 'Conv2d': if k+1 >= len(featuremap[i-1][j]): z = featuremap[i] else: z = featuremap[i-1][j][k+1] weight_z = fc_conv_weights[i][j][k] try: padding = tmp_layer['padding'] except: padding = (0, 0) stride = tmp_layer['stride'] tmp_dLoss_dz[-1], dLoss_dW, dLoss_dB = conv_backward(tmp_dLoss_dz[-1], weight_z, z, padding, stride) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'ReLU': z = featuremap[i-1][j][k+1] tmp_dLoss_dz[-1] = relu_backward(tmp_dLoss_dz[-1], z) return_dz[i][j][k] = tmp_dLoss_dz[-1] elif tmp_layer['layer_name'] == 'BatchNorm2d': eps = tmp_layer['eps'] z = featuremap[i-1][j][k+1] gamma = fc_conv_weights[i][j][k] tmp_dLoss_dz[-1] = batchnorm2d_backward(tmp_dLoss_dz[-1], z, eps, gamma) return_dz[i][j][k] = tmp_dLoss_dz[-1] print('=========================== {0:3} Branch: '.format(str(len(parameters)-1-i))+'{0:11}'.format(tmp_layer['layer_name'])+' Backward 
End ======================') print(tmp_dLoss_dz[0].shape, tmp_dLoss_dz[1].shape) dLoss_dz = tmp_dLoss_dz[0] + tmp_dLoss_dz[1] else: print('Not completed in gradient_backward!') print('# Torch calculated loss: ', loss_torch.detach().numpy()) loss_torch.backward() if 'VGG' in str(model) or 'AlexNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.features[0].weight.grad)) elif 'ResNet' in str(model): print(judge_tensors_equal(dLoss_dW, model.conv1.weight.grad)) delete_allpths(pth_dir=None) return return_dz, dLoss_dW, dLoss_dB
[ 2, 220, 15069, 33160, 1482, 10396, 7241, 32942, 357, 5450, 1378, 12567, 13, 785, 14, 3103, 10396, 7241, 32942, 34729, 201, 198, 2, 201, 198, 2, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, ...
2.241307
3,796
import torch
[ 11748, 28034, 628, 198 ]
3.75
4
import cv2 import torch import yaml import imageio import throttle import numpy as np import matplotlib.pyplot as plt from argparse import ArgumentParser from skimage.transform import resize from scipy.spatial import ConvexHull from modules.generator import OcclusionAwareGenerator from modules.keypoint_detector import KPDetector from sync_batchnorm import DataParallelWithCallback #from animate import normalize_kp # command = [ffmpeg, # '-y', # '-f', 'rawvideo', # '-vcodec','rawvideo', # '-pix_fmt', 'bgr24', # '-s', dimension, # '-i', '-', # '-c:v', 'libx264', # '-pix_fmt', 'yuv420p', # '-preset', 'ultrafast', # '-f', 'flv', # 'rtmp://10.10.10.80/live/mystream'] if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("--config", required=True, help="path to config") parser.add_argument("--source_image", required=True, help="path to source image") parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint") parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates") parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints") parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode") parser.set_defaults(relative=False) parser.set_defaults(adapt_scale=False) opt = parser.parse_args() generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu) source_image = imageio.imread(opt.source_image) source_image = resize(source_image, (256, 256))[..., :3] source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not opt.cpu: source = source.cuda() kp_source = kp_detector(source) #out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256)) kp_driving_initial = None camera = cv2.VideoCapture(0) ret, frame = camera.read() while True: ret, frame = camera.read() resized = resize(frame, (256, 256))[..., :3] if not opt.cpu: resized = resized.cuda() # y = torch.tensor(np.array(resized)) # x = y.cpu().numpy() # image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) # # x = y.permute(1, 2, 0) # plt.imshow(np.array(image)) # plt.show() driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) if not kp_driving_initial: kp_driving_initial = kp_detector(driving_resized) fake_frame = forward( source, driving_resized, kp_source, kp_driving_initial, generator, kp_detector, relative=opt.relative, adapt_scale=opt.adapt_scale, cpu=opt.cpu ) cv2.imshow("frame", fake_frame) #x = np.squeeze(driving_resized, axis=(0,)) #x = driving_resized[0].permute(1, 2, 0) # plt_driving = driving_resized #permute(2, 3, 1) #print(plt_driving.shape) #plt.imshow(x) #plt.show() if cv2.waitKey(1) & 0xFF == ord('q'): break camera.release() cv2.destroyAllWindows()
[ 11748, 269, 85, 17, 198, 11748, 28034, 198, 11748, 331, 43695, 198, 11748, 2939, 952, 198, 11748, 29976, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 198, 6738, 1822, 29572, 1330...
2.451886
1,299
from __future__ import absolute_import, division, print_function

from .controlPDEProblem import ControlPDEProblem
from .controlPDEProblemMultiPDE import ControlPDEProblemMultiPDE
from .costFunctionalConstant import CostFunctionalConstant
from .costFunctionalConstantMultiPDE import CostFunctionalConstantMultiPDE
from .costFunctionalLinear import CostFunctionalLinear
from .costFunctionalLinearMultiPDE import CostFunctionalLinearMultiPDE
from .costFunctionalQuadratic import CostFunctionalQuadratic
from .costFunctionalQuadraticMultiPDE import CostFunctionalQuadraticMultiPDE
# from .chanceConstraintQuadratic import ChanceConstraintQuadratic
# from .chanceConstraintLinear import ChanceConstraintLinear
# from .chanceConstraintConstant import ChanceConstraintConstant

# to do list
# 0. implement zero, Hessian term
# 1. implement linear
# 2. implement quadratic
# 3. impelement SAA

# to do list
# 1. SAA does not run well in ccgo1, multiprocessor does not work,
### not clear bug, simplifing adjoint solver works
# 2. quadratic approximation does not converge well, even without variance, does not converge
### record eigenvector after m_tr[i].zero()
# 3. check gradient for quadratic + correction

# what to show tomorrow
# 1. variance reduction by mean square error
# 2. trace estimation by MC and randomized SVD
# 3. scaling with repsect to mesh (design + uncertainty), trace, variance reduction, #bfgs
# 4. show the design and state, for both disk and submarine
# 5. random sample and state at different design

# April 9, 2018, work on reporting results
# 1. random samples and states at different design
# 2. table for variance reduction
# 3. plot trace estimation
# 4. plot #bfgs iterations
# obtain all results as planned
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 6738, 764, 13716, 5760, 36, 40781, 1330, 6779, 5760, 36, 40781, 198, 6738, 764, 13716, 5760, 36, 40781, 29800, 5760, 36, 1330, 6779, 5760, 36, 407...
3.739224
464
import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate,
                            ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
                            RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma,
                            RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout,
                            CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
                            Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config

config = get_config()

MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
[ 11748, 299, 32152, 355, 45941, 198, 6738, 435, 65, 1713, 602, 1330, 357, 7293, 577, 11, 6075, 38342, 7414, 541, 11, 38937, 7414, 541, 11, 18481, 378, 11, 14534, 24864, 378, 3829, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220,...
2.051852
405
# -*- coding: utf-8 -*- from math import sqrt def __eq__(self, other): """Equal cuboids have same properties.""" if not isinstance(other, self.__class__): return False return (self.width == other.width and self.height == other.height and self.depth == other.depth and self.x == other.x and self.y == other.y and self.z == other.z) def __hash__(self): return hash( (self.x, self.y, self.z, self.width, self.height, self.depth)) def __iter__(self): """Iterate through cuboid corners""" yield self.corner_top_l yield self.corner_top_r yield self.corner_bot_r yield self.corner_bot_l yield self.corner_top_l_out yield self.corner_top_r_out yield self.corner_bot_r_out yield self.corner_bot_l_out def __repr__(self): return "R({}, {}, {}, {}, {}, {})".format( self.x, self.y, self.z, self.width, self.height, self.depth) def volume(self): """Cuboid volume""" return self.width * self.height * self.depth def move(self, x, y, z): """Move Cuboid to x,y,z coordinates Arguments: x (int, float): X coordinate y (int, float): Y coordinate z (int, float): Z coordinate """ self.x = x self.y = y self.z = z def contains(self, cub): """Tests if another cuboid is contained by this one Arguments: cub (Cuboid): The other cuboiud Returns: bool: True if it is inside this one, False otherwise """ return (cub.y >= self.y and cub.x >= self.x and cub.z >= self.z and cub.y + cub.height <= self.y + self.height and cub.x + cub.width <= self.x + self.width and cub.z + cub.depth <= self.z + self.depth) def intersects(self, cub, edges=False): """Detect intersections between this cuboid and cub. Args: cub (Cuboid): Cuboid to test for intersections. edges (bool): Accept edge touching cuboids as intersects or not Returns: bool: True if the cuboids intersect, False otherwise """ # Not even touching if (self.bottom > cub.top or self.top < cub.bottom or self.left > cub.right or self.right < cub.left or self.outeye > cub.ineye or self.ineye < cub.outeye): return False # Discard edge intersects if not edges: if (self.bottom == cub.top or self.top == cub.bottom or self.left == cub.right or self.right == cub.left or self.outeye == cub.ineye or self.ineye == cub.outeye): return False # Discard corner intersects if (self.left == cub.right and self.bottom == cub.top and self.outeye == cub.ineye or self.left == cub.right and cub.bottom == self.top and self.outeye == cub.ineye or self.left == cub.right and self.bottom == cub.top and cub.outeye == self.ineye or self.left == cub.right and cub.bottom == self.top and cub.outeye == self.ineye or cub.left == self.right and self.bottom == cub.top and self.outeye == cub.ineye or cub.left == self.right and cub.bottom == self.top and self.outeye == cub.ineye or cub.left == self.right and self.bottom == cub.top and cub.outeye == self.ineye or cub.left == self.right and cub.bottom == self.top and cub.outeye == self.ineye): return False return True def intersection(self, cub, edges=False): """Returns the cuboid resulting of the intersection of this and cub If the cuboids are only touching by their edges, and the argument 'edges' is True the cuboid returned will have a volume of 0. Returns None if there is no intersection. Arguments: cub (Cuboid): The other cuboid. edges (bool): If true, touching edges are considered an intersection, and a cuboid of 0 height or width or depth will be returned Returns: Cuboid: Intersection. None: There was no intersection. 
""" if not self.intersects(cub, edges=edges): return None bottom = max(self.bottom, cub.bottom) left = max(self.left, cub.left) top = min(self.top, cub.top) right = min(self.right, cub.right) outeye = max(self.outeye, cub.outeye) ineye = min(self.ineye, cub.ineye) return Cuboid( left, bottom, outeye, right - left, top - bottom, ineye - outeye) def join(self, other): """Try to join a cuboid to this one. If the result is also a cuboid and the operation is successful then this cuboid is modified to the union. Arguments: other (Cuboid): Cuboid to join Returns: bool: True when successfully joined, False otherwise """ if self.contains(other): return True if other.contains(self): self.x = other.x self.y = other.y self.z = other.z self.width = other.width self.height = other.height self.depth = other.depth return True if not self.intersects(other, edges=True): return False # Other cuboid is Up/Down from this if self.left == other.left and self.width == other.width and \ self.outeye == other.outeye and self.depth == self.depth: y_min = min(self.bottom, other.bottom) y_max = max(self.top, other.top) self.y = y_min self.height = y_max - y_min return True # Other cuboid is Right/Left from this if self.bottom == other.bottom and self.height == other.height and \ self.outeye == other.outeye and self.depth == self.depth: x_min = min(self.left, other.left) x_max = max(self.right, other.right) self.x = x_min self.width = x_max - x_min return True # Other cuboid is Right/Left from this if self.bottom == other.bottom and self.height == other.height and \ self.left == other.left and self.width == other.width: z_min = min(self.outeye, other.outeye) z_max = max(self.ineye, other.ineye) self.z = z_min self.depth = z_max - z_min return True return False
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 10688, 1330, 19862, 17034, 628, 628, 628, 628, 220, 220, 220, 825, 11593, 27363, 834, 7, 944, 11, 584, 2599, 198, 220, 220, 220, 220, 220, 220, 220, 37227, ...
2.080693
3,346
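The cuboid row above stores the geometry methods but not the constructor or the face properties they rely on. The following is a self-contained sketch of the same axis-aligned overlap test; Box is a stand-in class invented for the illustration, not the stored Cuboid.

from dataclasses import dataclass

# Stand-in box with the face properties the stored methods expect.
@dataclass
class Box:
    x: float
    y: float
    z: float
    width: float
    height: float
    depth: float

    @property
    def left(self): return self.x
    @property
    def right(self): return self.x + self.width
    @property
    def bottom(self): return self.y
    @property
    def top(self): return self.y + self.height
    @property
    def outeye(self): return self.z              # near face, following the naming above
    @property
    def ineye(self): return self.z + self.depth  # far face

def intersection(a, b):
    """Return the overlapping Box of a and b, or None (strict overlap only)."""
    left, right = max(a.left, b.left), min(a.right, b.right)
    bottom, top = max(a.bottom, b.bottom), min(a.top, b.top)
    near, far = max(a.outeye, b.outeye), min(a.ineye, b.ineye)
    if left >= right or bottom >= top or near >= far:
        return None
    return Box(left, bottom, near, right - left, top - bottom, far - near)

print(intersection(Box(0, 0, 0, 4, 4, 4), Box(2, 2, 2, 4, 4, 4)))
# Box(x=2, y=2, z=2, width=2, height=2, depth=2)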
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse

from rest_framework import status
from rest_framework.test import APIClient
# use that for making our API requests

from core.models import Recipe, Tag, Ingredient

from ..serializers import RecipeSerializer, RecipeDetailSerializer

import tempfile
# allows you to call a function which will then create a temp file
# somewhere in the system and then you can remove that file after
# you've used it
import os
# this allows us to perform things like
# creating path names and also checking if files exist on the system

from PIL import Image
# pillow, this will import our image class which will let us then
# create test images which we can then upload to our API


RECIPES_URL = reverse('recipe:recipe-list')
# since we're going to need to access the URL in more
# or less all the tests let's assign that as a variable
# at top of the class in all capitals.
# app : identifier of the URL in the app

# /api/recipe/recipes
# /api/recipe/recipes/1/ (id) --> detail url


def image_upload_url(recipe_id):
    """Return URL for recipe image upload"""
    return reverse('recipe:recipe-upload-image', args=[recipe_id])
    # generate our upload image url
    # you're going to need the existing recipe ID in order to upload an image


def detail_url(recipe_id):
    """Return recipe detail URL"""
    return reverse('recipe:recipe-detail', args=[recipe_id])
    # name of the end point that the default router will create
    # for our viewset because we're going to have a detail action
    # this is how you specify arguments with the reverse function
    # you just pass in args and then you pass in a list of the
    # arguments you want to add
    # here we have single item


def sample_tag(user, name='Main course'):
    """Create and return a sample tag"""
    return Tag.objects.create(user=user, name=name)


def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample ingredient"""
    return Ingredient.objects.create(user=user, name=name)


def sample_recipe(user, **params):
    """Create and return a sample recipe"""
    defaults = {
        'title': 'Sample recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    defaults.update(params)

    return Recipe.objects.create(user=user, **defaults)
    # convert the dictionary into the argument
    # when you use the two asterisks when calling a
    # function it has the reverse effect.
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 198, 6738, 1334, 62, 30604, 1330, 3722, 198, 6...
3.423989
717
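A sketch of how the helper functions above are typically exercised inside a test class. It reuses the module's imports; the test name, credentials, and second recipe title are made up for the illustration and are not taken from the stored file.

# Illustrative only: one test that uses sample_recipe() and the serializer.
class PrivateRecipeApiTests(TestCase):
    """Test authenticated recipe API access"""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            'test@example.com',
            'testpass'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Test retrieving a list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user, title='Another recipe')

        res = self.client.get(RECIPES_URL)

        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)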
# Assignment 1 Day 8

# write a decorator function for taking input for you
# any kind of function you want to build

addition()
subtraction()
multiplication()
division()

# Assignment 2 Day 8
# you need to develop a python program to open a file in read only mode and
# try writing something to it and handle the subsequent error using Exception Handling

try:
    f = open("abc.txt", "r")
    f.write("Heyy, i am prajval")
    f.close()
except:
    print("File is in read only mode...")
[ 2, 50144, 352, 3596, 807, 201, 198, 201, 198, 2, 3551, 257, 11705, 1352, 2163, 329, 2263, 5128, 329, 345, 220, 201, 198, 2, 597, 1611, 286, 2163, 345, 765, 284, 1382, 201, 198, 201, 198, 2860, 653, 3419, 201, 198, 7266, 83, 7861, ...
2.948571
175
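The first assignment in the row above asks for a decorator that gathers input for arithmetic functions. Here is one hedged way to read that requirement; the decorator name, prompts, and float conversion are choices made for the sketch, not the author's solution.

# A sketch of one possible answer: a decorator that reads the two operands
# from the user before calling the wrapped function.
def takes_input(func):
    def wrapper():
        a = float(input("Enter the first number: "))
        b = float(input("Enter the second number: "))
        return func(a, b)
    return wrapper

@takes_input
def addition(a, b):
    return a + b

@takes_input
def subtraction(a, b):
    return a - b

print(addition())     # prompts for two numbers, then prints their sum
print(subtraction())  # prompts again, then prints the difference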
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function

import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six

from . import utils as gwutils


__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
           'CoordinateFrame']


STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]

STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
                               "TOPOCENTER", "LSR", "LSRK", "LSRD",
                               "GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]


# The CoordinateFrame base class referenced below is not included in this
# stored fragment.
def coordinates(self, *args):
    """ Create world coordinates object"""
    raise NotImplementedError("Subclasses may implement this")


class CelestialFrame(CoordinateFrame):
    """
    Celestial Frame Representation

    Parameters
    ----------
    axes_order : tuple of int
        A dimension in the input data that corresponds to this axis.
    reference_frame : astropy.coordinates.builtin_frames
        A reference frame.
    reference_position : str
        Reference position.
    unit : str or units.Unit instance or iterable of those
        Units on axes.
    axes_names : list
        Names of the axes in this frame.
    name : str
        Name of this frame.
    """

    def coordinates(self, *args):
        """ Create a SkyCoord object.

        Parameters
        ----------
        args : float
            inputs to wcs.input_frame
        """
        # Reorder axes if necessary.
        try:
            return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
        except:
            raise


class SpectralFrame(CoordinateFrame):
    """
    Represents Spectral Frame

    Parameters
    ----------
    axes_order : tuple or int
        A dimension in the input data that corresponds to this axis.
    reference_frame : astropy.coordinates.builtin_frames
        Reference frame (usually used with output_frame to convert to
        world coordinate objects).
    unit : str or units.Unit instance
        Spectral unit.
    axes_names : str
        Spectral axis name.
    name : str
        Name for this frame.
    """


class CompositeFrame(CoordinateFrame):
    """
    Represents one or more frames.

    Parameters
    ----------
    frames : list
        List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
    name : str
        Name for this frame.
    """


class Frame2D(CoordinateFrame):
    """
    A 2D coordinate frame.

    Parameters
    ----------
    axes_order : tuple of int
        A dimension in the input data that corresponds to this axis.
    unit : list of astropy.units.Unit
        Unit for each axis.
    axes_names : list
        Names of the axes in this frame.
    name : str
        Name of this frame.
    """
[ 2, 49962, 739, 257, 513, 12, 565, 682, 347, 10305, 3918, 5964, 532, 766, 38559, 24290, 13, 81, 301, 198, 37811, 198, 7469, 1127, 20435, 13431, 290, 8470, 606, 284, 1366, 34197, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 4112,...
2.628128
1,159
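A hedged usage sketch for the frame classes above. The CoordinateFrame base class is not part of the stored fragment, so the constructor keyword arguments shown here (reference_frame, unit, axes_names, name) are assumptions about how it stores the attributes that CelestialFrame.coordinates() reads (self.unit, self._reference_frame).

# Assumed usage: build an ICRS celestial frame and convert two floats to a SkyCoord.
from astropy import units as u
from astropy import coordinates as coord

sky_frame = CelestialFrame(reference_frame=coord.ICRS(),
                           unit=(u.deg, u.deg),
                           axes_names=("ra", "dec"),
                           name="icrs")
sky = sky_frame.coordinates(5.63, -72.05)  # -> SkyCoord in ICRS, degrees
print(sky)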
import lx
import modo

import select
import item
from run import run
[ 198, 198, 11748, 300, 87, 198, 11748, 953, 78, 198, 198, 11748, 2922, 198, 11748, 2378, 198, 6738, 1057, 1330, 1057, 628, 198 ]
3.173913
23
from brownie import FundMe
from scripts.helpful_scripts import get_account


if __name__ == "__main__":
    main()
[ 6738, 7586, 494, 1330, 7557, 5308, 198, 6738, 14750, 13, 16794, 913, 62, 46521, 1330, 651, 62, 23317, 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
3.025641
39
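The stored script calls main() without defining it in this fragment. Below is a hedged sketch of what such a main() commonly looks like in brownie projects; the assumption that FundMe's constructor takes no arguments is mine, not something the stored file states.

# Hypothetical completion of the fragment above: deploy FundMe from the
# account returned by get_account(). Adjust the constructor arguments if the
# contract expects, e.g., a price-feed address.
def main():
    account = get_account()
    fund_me = FundMe.deploy({"from": account})
    print(f"Contract deployed to {fund_me.address}")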
# A teacher wants to randomly pick one of his four students to erase the
# board. Write a program that helps him by reading the students' names and
# printing the chosen student's name on the screen.
from random import choice

nome1 = input('Enter a name: ')
nome2 = input('Enter another name: ')
nome3 = input('Enter one more name: ')
nome4 = input('Enter the last name: ')
nome = [nome1, nome2, nome3, nome4]
print(choice(nome))
[ 2, 21039, 6240, 42517, 3297, 451, 23781, 23430, 384, 385, 627, 47756, 435, 403, 418, 31215, 2471, 32452, 267, 15094, 305, 13, 376, 7252, 23781, 1430, 64, 8358, 257, 73, 2507, 9766, 11, 22096, 78, 267, 299, 462, 23430, 435, 403, 418, ...
2.512346
162
from django.contrib import admin

from .models import Categoria, Contact


admin.site.register(Categoria)
admin.site.register(Contact, ContactAdmin)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 27530, 1330, 327, 2397, 7661, 11, 14039, 628, 198, 198, 28482, 13, 15654, 13, 30238, 7, 34, 2397, 7661, 8, 198, 28482, 13, 15654, 13, 30238, 7, 17829, 11, 14039, 46787, ...
3.386364
44
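The admin module above registers Contact with a ContactAdmin class that is neither defined nor imported in the stored fragment. A hedged sketch of what it might look like follows; the field names are placeholders invented for the illustration and would need to match the actual Contact model.

# Hypothetical ContactAdmin for the registration above.
class ContactAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'email')
    search_fields = ('name', 'email')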